parlib: Always use the default mutex/cv interface
[akaros.git] / user / parlib / thread0_sched.c
1 /* Copyright (c) 2015 Google, Inc.
2  * Barret Rhoden <brho@cs.berkeley.edu>
3  * See LICENSE for details.
4  *
5  * thread0_sched: a basic scheduler for thread0, used by SCPs without a
6  * multithreaded 2LS linked in.
7  *
8  * This is closely coupled with uthread.c */
9
10 #include <ros/arch/membar.h>
11 #include <parlib/arch/atomic.h>
12 #include <parlib/parlib.h>
13 #include <parlib/vcore.h>
14 #include <parlib/uthread.h>
15 #include <parlib/event.h>
16 #include <parlib/arch/trap.h>
17 #include <stdlib.h>
18
/* Forward declarations for the 2LS ops installed in thread0_2ls_ops below.
 * All are static: this scheduler is private to parlib and only exposed via
 * the ops table. */
static void thread0_sched_entry(void);
static void thread0_thread_blockon_sysc(struct uthread *uthread, void *sysc);
static void thread0_thread_refl_fault(struct uthread *uth,
                                      struct user_context *ctx);
static void thread0_thread_runnable(struct uthread *uth);
static void thread0_thread_has_blocked(struct uthread *uth, uth_sync_t sync,
                                       int flags);
/* Sync-object ops: with only one thread, these are trivial (see below). */
static uth_sync_t thread0_sync_alloc(void);
static void thread0_sync_free(uth_sync_t);
static struct uthread *thread0_sync_get_next(uth_sync_t);
static bool thread0_sync_get_uth(uth_sync_t s, struct uthread *uth);
30
/* externed into uthread.c */
struct schedule_ops thread0_2ls_ops = {
        .sched_entry = thread0_sched_entry,
        .thread_blockon_sysc = thread0_thread_blockon_sysc,
        .thread_refl_fault = thread0_thread_refl_fault,
        .thread_runnable = thread0_thread_runnable,
        /* pause and runnable are the same op here: both just clear
         * is_blocked, since there is nothing else to resume. */
        .thread_paused = thread0_thread_runnable,
        .thread_has_blocked = thread0_thread_has_blocked,
        .sync_alloc = thread0_sync_alloc,
        .sync_free = thread0_sync_free,
        .sync_get_next = thread0_sync_get_next,
        .sync_get_uth = thread0_sync_get_uth,
};
44
/* externed into uthread.c */
struct uthread *thread0_uth;

/* Our thread0 is actually allocated in uthread as just a struct uthread, so we
 * don't actually attach this mgmt info to it.  But since we just have one
 * thread, it doesn't matter. */
struct thread0_info {
        bool                                            is_blocked;
};
/* The single, global instance of the above per-thread state. */
static struct thread0_info thread0_info;
/* Event queue used to learn that thread0's blocking syscall completed. */
static struct event_queue *sysc_evq;
56
57 void thread0_handle_syscall(struct event_msg *ev_msg,
58                             unsigned int ev_type, void *data)
59 {
60         thread0_info.is_blocked = FALSE;
61 }
62
63 void thread0_lib_init(void)
64 {
65         memset(&thread0_info, 0, sizeof(thread0_info));
66         /* we don't care about the message, so don't bother with a UCQ */
67         sysc_evq = get_eventq(EV_MBOX_BITMAP);
68         sysc_evq->ev_flags = EVENT_INDIR | EVENT_WAKEUP;
69 }
70
71 /* Thread0 scheduler ops (for processes that haven't linked in a full 2LS) */
72 static void thread0_sched_entry(void)
73 {
74         /* TODO: support signal handling whenever we run a uthread */
75         if (current_uthread) {
76                 uthread_prep_pending_signals(current_uthread);
77                 run_current_uthread();
78                 assert(0);
79         }
80         while (1) {
81                 if (!thread0_info.is_blocked) {
82                         uthread_prep_pending_signals(thread0_uth);
83                         run_uthread(thread0_uth);
84                         assert(0);
85                 }
86                 sys_yield(FALSE);
87                 handle_events(0);
88         }
89 }
90
91 static void thread0_thread_blockon_sysc(struct uthread *uthread, void *arg)
92 {
93         struct syscall *sysc = (struct syscall*)arg;
94         thread0_thread_has_blocked(uthread, NULL, 0);
95         if (!register_evq(sysc, sysc_evq))
96                 thread0_thread_runnable(uthread);
97 }
98
99 static void refl_error(struct uthread *uth, unsigned int trap_nr,
100                        unsigned int err, unsigned long aux)
101 {
102         printf("Thread has unhandled fault: %d, err: %d, aux: %p\n",
103                trap_nr, err, aux);
104         /* Note that uthread.c already copied out our ctx into the uth
105          * struct */
106         print_user_context(&uth->u_ctx);
107         printf("Turn on printx to spew unhandled, malignant trap info\n");
108         exit(-1);
109 }
110
111 static bool handle_page_fault(struct uthread *uth, unsigned int err,
112                               unsigned long aux)
113 {
114         if (!(err & PF_VMR_BACKED))
115                 return FALSE;
116         syscall_async(&uth->local_sysc, SYS_populate_va, aux, 1);
117         __block_uthread_on_async_sysc(uth);
118         return TRUE;
119 }
120
121 static void thread0_thread_refl_fault(struct uthread *uth,
122                                       struct user_context *ctx)
123 {
124         unsigned int trap_nr = __arch_refl_get_nr(ctx);
125         unsigned int err = __arch_refl_get_err(ctx);
126         unsigned long aux = __arch_refl_get_aux(ctx);
127
128         assert(ctx->type == ROS_HW_CTX);
129         switch (trap_nr) {
130         case HW_TRAP_PAGE_FAULT:
131                 if (!handle_page_fault(uth, err, aux))
132                         refl_error(uth, trap_nr, err, aux);
133                 break;
134         default:
135                 refl_error(uth, trap_nr, err, aux);
136         }
137 }
138
139 static void thread0_thread_runnable(struct uthread *uth)
140 {
141         thread0_info.is_blocked = FALSE;
142 }
143
144 static void thread0_thread_has_blocked(struct uthread *uth, uth_sync_t sync,
145                                        int flags)
146 {
147         assert(!thread0_info.is_blocked);
148         thread0_info.is_blocked = TRUE;
149 }
150
151 static uth_sync_t thread0_sync_alloc(void)
152 {
153         return (void*)0xf00baa;
154 }
155
156 static void thread0_sync_free(uth_sync_t s)
157 {
158 }
159
160 static struct uthread *thread0_sync_get_next(uth_sync_t s)
161 {
162         if (thread0_info.is_blocked) {
163                 /* Note we don't clear is_blocked.  Runnable does that, which should be
164                  * called before the next get_next (since we have only one thread). */
165                 return thread0_uth;
166         } else {
167                 return NULL;
168         }
169 }
170
171 static bool thread0_sync_get_uth(uth_sync_t s, struct uthread *uth)
172 {
173         assert(uth == thread0_uth);
174         if (thread0_info.is_blocked) {
175                 /* Note we don't clear is_blocked.  Runnable does that. */
176                 return TRUE;
177         }
178         return FALSE;
179 }