/* Copyright (c) 2015 Google, Inc.
 * Barret Rhoden <brho@cs.berkeley.edu>
 * See LICENSE for details.
 *
 * thread0_sched: a basic scheduler for thread0, used by SCPs without a
 * multithreaded 2LS linked in.
 *
 * This is closely coupled with uthread.c */

#include <ros/arch/membar.h>
#include <parlib/arch/atomic.h>
#include <parlib/parlib.h>
#include <parlib/vcore.h>
#include <parlib/uthread.h>
#include <parlib/event.h>
#include <parlib/arch/trap.h>
#include <parlib/ros_debug.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static void thread0_sched_init(void);
static void thread0_sched_entry(void);
static void thread0_thread_blockon_sysc(struct uthread *uthread, void *sysc);
static void thread0_thread_refl_fault(struct uthread *uth,
                                      struct user_context *ctx);
static void thread0_thread_runnable(struct uthread *uth);
static void thread0_thread_has_blocked(struct uthread *uth, int flags);
static void thread0_thread_exited(struct uthread *uth);
static struct uthread *thread0_thread_create(void *(*func)(void *), void *arg);
static void thread0_sync_init(uth_sync_t *s);
static void thread0_sync_destroy(uth_sync_t *s);
static void thread0_sync_enqueue(struct uthread *uth, uth_sync_t *s);
static struct uthread *thread0_sync_get_next(uth_sync_t *s);
static bool thread0_sync_get_uth(uth_sync_t *s, struct uthread *uth);
static void thread0_sync_swap(uth_sync_t *a, uth_sync_t *b);
static bool thread0_sync_is_empty(uth_sync_t *s);

/* externed into uthread.c */
struct schedule_ops thread0_2ls_ops = {
	.sched_init = thread0_sched_init,
	.sched_entry = thread0_sched_entry,
	.thread_blockon_sysc = thread0_thread_blockon_sysc,
	.thread_refl_fault = thread0_thread_refl_fault,
	.thread_runnable = thread0_thread_runnable,
	.thread_paused = thread0_thread_runnable,
	.thread_has_blocked = thread0_thread_has_blocked,
	.thread_exited = thread0_thread_exited,
	.thread_create = thread0_thread_create,
	.sync_init = thread0_sync_init,
	.sync_destroy = thread0_sync_destroy,
	.sync_enqueue = thread0_sync_enqueue,
	.sync_get_next = thread0_sync_get_next,
	.sync_get_uth = thread0_sync_get_uth,
	.sync_swap = thread0_sync_swap,
	.sync_is_empty = thread0_sync_is_empty,
};

struct schedule_ops *sched_ops __attribute__((weak)) = &thread0_2ls_ops;
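
/* A full 2LS overrides thread0 by providing a strong definition of sched_ops
 * that points at its own ops table.  A minimal sketch (names hypothetical,
 * not from this repo):
 *
 *	struct schedule_ops my_2ls_ops = {
 *		.sched_init = my_sched_init,
 *		.sched_entry = my_sched_entry,
 *		... (the remaining mandatory ops) ...
 *	};
 *	struct schedule_ops *sched_ops = &my_2ls_ops;
 *
 * The strong symbol wins at link time, so this file's scheduler only runs in
 * programs that don't link in a multithreaded 2LS. */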

/* externed into uthread.c */
struct uthread *thread0_uth;

/* Our thread0 is allocated as just a bare struct uthread (see
 * thread0_sched_init()), so we don't attach this mgmt info to it directly.
 * Since we only have the one thread, a single static struct is fine. */
struct thread0_info {
	bool				is_blocked;
};
static struct thread0_info thread0_info;
static struct event_queue *sysc_evq;

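/* Handler for sysc_evq: a completed syscall means thread0 can run again.  We
 * only track one bit of state, so we ignore the message contents. */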
void thread0_handle_syscall(struct event_msg *ev_msg,
                            unsigned int ev_type, void *data)
{
	thread0_info.is_blocked = FALSE;
}

static void thread0_sched_init(void)
{
	int ret;

	ret = posix_memalign((void**)&thread0_uth, __alignof__(struct uthread),
	                     sizeof(struct uthread));
	assert(!ret);
	memset(thread0_uth, 0, sizeof(struct uthread)); /* aggressively 0 for bugs */
	memset(&thread0_info, 0, sizeof(thread0_info));
	/* we don't care about the message, so don't bother with a UCQ */
	sysc_evq = get_eventq(EV_MBOX_BITMAP);
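	/* EVENT_INDIR has the kernel send an indirection event pointing at
	 * this queue; EVENT_WAKEUP makes sure a sleeping process gets woken
	 * to handle it.  (Flag intent summarized here; see the event code
	 * for the authoritative semantics.) */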
	sysc_evq->ev_flags = EVENT_INDIR | EVENT_WAKEUP;
	uthread_2ls_init(thread0_uth, thread0_handle_syscall, NULL);
}

/* Thread0 scheduler ops (for processes that haven't linked in a full 2LS) */
static void thread0_sched_entry(void)
{
	/* TODO: support signal handling whenever we run a uthread */
	if (current_uthread) {
		uthread_prep_pending_signals(current_uthread);
		run_current_uthread();
		assert(0);
	}
	while (1) {
		if (!thread0_info.is_blocked) {
			uthread_prep_pending_signals(thread0_uth);
			run_uthread(thread0_uth);
			assert(0);
		}
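		/* Nothing to run: yield to the kernel until something
		 * happens, then process any pending events (e.g. a syscall
		 * completion that marks thread0 runnable) and loop around. */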
		sys_yield(FALSE);
		handle_events(0);
	}
}

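/* Called from uthread.c when thread0 blocks on a syscall.  Mark the thread
 * blocked, then register our event queue with the syscall; if registration
 * fails, the syscall already completed and the thread is runnable again. */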
static void thread0_thread_blockon_sysc(struct uthread *uthread, void *arg)
{
	struct syscall *sysc = (struct syscall*)arg;

	thread0_thread_has_blocked(uthread, 0);
	if (!register_evq(sysc, sysc_evq))
		thread0_thread_runnable(uthread);
}

static void refl_error(struct uthread *uth, unsigned int trap_nr,
		       unsigned int err, unsigned long aux)
{
	printf("Thread has unhandled fault: %d, err: %d, aux: 0x%lx\n",
	       trap_nr, err, aux);
	/* Note that uthread.c already copied out our ctx into the uth struct */
	print_user_context(&uth->u_ctx);
	printf("Turn on printx to spew unhandled, malignant trap info\n");
	exit(-1);
}

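/* For faults on VMR-backed addresses (e.g. mmapped files), ask the kernel to
 * populate the page asynchronously and block the uthread on that syscall.
 * Returns FALSE for faults we can't handle, which the caller treats as
 * fatal. */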
static bool handle_page_fault(struct uthread *uth, unsigned int err,
			      unsigned long aux)
{
	if (!(err & PF_VMR_BACKED))
		return FALSE;
	syscall_async(&uth->local_sysc, SYS_populate_va, aux, 1);
	__block_uthread_on_async_sysc(uth);
	return TRUE;
}

static void thread0_thread_refl_fault(struct uthread *uth,
				      struct user_context *ctx)
{
	unsigned int trap_nr = __arch_refl_get_nr(ctx);
	unsigned int err = __arch_refl_get_err(ctx);
	unsigned long aux = __arch_refl_get_aux(ctx);

	assert(ctx->type == ROS_HW_CTX);
	switch (trap_nr) {
	case HW_TRAP_PAGE_FAULT:
		if (!handle_page_fault(uth, err, aux))
			refl_error(uth, trap_nr, err, aux);
		break;
	default:
		refl_error(uth, trap_nr, err, aux);
	}
}

static void thread0_thread_runnable(struct uthread *uth)
{
	thread0_info.is_blocked = FALSE;
}

static void thread0_thread_has_blocked(struct uthread *uth, int flags)
{
	assert(!thread0_info.is_blocked);
	thread0_info.is_blocked = TRUE;
}

/* Actually, a 2LS only needs to implement this if it calls
 * uth_2ls_thread_exit().  Keep it here to catch bugs. */
static void thread0_thread_exited(struct uthread *uth)
{
	assert(0);
}

static struct uthread *thread0_thread_create(void *(*func)(void *), void *arg)
{
	panic("Thread0 sched asked to create more threads!");
}

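/* Sync ops: with a single thread there's nothing to queue, so most of these
 * are no-ops and blocked-ness lives in thread0_info instead.  sync_init
 * poisons the storage (0x5a) so that any code that wrongly treats a thread0
 * sync object as a real queue fails loudly. */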
static void thread0_sync_init(uth_sync_t *s)
{
	memset(s, 0x5a, sizeof(uth_sync_t));
}

static void thread0_sync_destroy(uth_sync_t *s)
{
}

static void thread0_sync_enqueue(struct uthread *uth, uth_sync_t *s)
{
}

static struct uthread *thread0_sync_get_next(uth_sync_t *s)
{
	if (thread0_info.is_blocked) {
		/* Note we don't clear is_blocked.  Runnable does that, and it
		 * should be called before the next get_next (since we only
		 * have one thread). */
		return thread0_uth;
	} else {
		return NULL;
	}
}

static bool thread0_sync_get_uth(uth_sync_t *s, struct uthread *uth)
{
	assert(uth == thread0_uth);
	if (thread0_info.is_blocked) {
		/* Note we don't clear is_blocked.  Runnable does that. */
		return TRUE;
	}
	return FALSE;
}

static void thread0_sync_swap(uth_sync_t *a, uth_sync_t *b)
{
}

static bool thread0_sync_is_empty(uth_sync_t *s)
{
	return !thread0_info.is_blocked;
}