user/parlib/thread0_sched.c
/* Copyright (c) 2015 Google, Inc.
 * Barret Rhoden <brho@cs.berkeley.edu>
 * See LICENSE for details.
 *
 * thread0_sched: a basic scheduler for thread0, used by SCPs without a
 * multithreaded 2LS linked in.
 *
 * This is closely coupled with uthread.c */

#include <ros/arch/membar.h>
#include <parlib/arch/atomic.h>
#include <parlib/parlib.h>
#include <parlib/vcore.h>
#include <parlib/uthread.h>
#include <parlib/event.h>
#include <parlib/arch/trap.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static void thread0_sched_entry(void);
static void thread0_thread_blockon_sysc(struct uthread *uthread, void *sysc);
static void thread0_thread_refl_fault(struct uthread *uth,
                                      struct user_context *ctx);
static void thread0_thread_runnable(struct uthread *uth);
static void thread0_thread_has_blocked(struct uthread *uth, int flags);
static uth_mutex_t thread0_mtx_alloc(void);
static void thread0_mtx_free(uth_mutex_t m);
static void thread0_mtx_lock(uth_mutex_t m);
static void thread0_mtx_unlock(uth_mutex_t m);

/* externed into uthread.c */
struct schedule_ops thread0_2ls_ops = {
        .sched_entry = thread0_sched_entry,
        .thread_blockon_sysc = thread0_thread_blockon_sysc,
        .thread_refl_fault = thread0_thread_refl_fault,
        .thread_runnable = thread0_thread_runnable,
        .thread_paused = thread0_thread_runnable,
        .thread_has_blocked = thread0_thread_has_blocked,
        .mutex_alloc = thread0_mtx_alloc,
        .mutex_free = thread0_mtx_free,
        .mutex_lock = thread0_mtx_lock,
        .mutex_unlock = thread0_mtx_unlock,
};

/* externed into uthread.c */
struct uthread *thread0_uth;

/* Our thread0 is actually allocated in uthread as just a struct uthread, so we
 * don't actually attach this mgmt info to it.  But since we just have one
 * thread, it doesn't matter. */
struct thread0_info {
        bool                                            is_blocked;
};
static struct thread0_info thread0_info;
static struct event_queue *sysc_evq;

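/* Handler run when a syscall thread0 blocked on completes: clear the blocked
 * flag so sched_entry will run thread0 again.  We don't look at the message;
 * its arrival is the only signal we need. */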
void thread0_handle_syscall(struct event_msg *ev_msg,
                            unsigned int ev_type, void *data)
{
        thread0_info.is_blocked = FALSE;
}

void thread0_lib_init(void)
{
        memset(&thread0_info, 0, sizeof(thread0_info));
        /* we don't care about the message, so don't bother with a UCQ */
        sysc_evq = get_eventq(EV_MBOX_BITMAP);
        sysc_evq->ev_flags = EVENT_INDIR | EVENT_WAKEUP;
}

/* Thread0 scheduler ops (for processes that haven't linked in a full 2LS) */
static void thread0_sched_entry(void)
{
        /* TODO: support signal handling whenever we run a uthread */
        if (current_uthread) {
                uthread_prep_pending_signals(current_uthread);
                run_current_uthread();
                assert(0);
        }
        while (1) {
                if (!thread0_info.is_blocked) {
                        uthread_prep_pending_signals(thread0_uth);
                        run_uthread(thread0_uth);
                        assert(0);
                }
                sys_yield(FALSE);
                handle_events(0);
        }
}

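/* Called via the 2LS ops when thread0 blocks on a syscall that isn't done yet.
 * Mark the thread blocked and ask for an event on sysc_evq when the syscall
 * completes; if registration fails, the syscall already finished, so the
 * thread is runnable right away. */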
static void thread0_thread_blockon_sysc(struct uthread *uthread, void *arg)
{
        struct syscall *sysc = (struct syscall*)arg;
        thread0_thread_has_blocked(uthread, 0);
        if (!register_evq(sysc, sysc_evq))
                thread0_thread_runnable(uthread);
}

static void refl_error(struct uthread *uth, unsigned int trap_nr,
                       unsigned int err, unsigned long aux)
{
        printf("Thread has unhandled fault: %d, err: %d, aux: %p\n",
               trap_nr, err, aux);
        /* Note that uthread.c already copied out our ctx into the uth
         * struct */
        print_user_context(&uth->u_ctx);
        printf("Turn on printx to spew unhandled, malignant trap info\n");
        exit(-1);
}

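/* Try to handle a reflected page fault.  Faults on VMR-backed regions (e.g. an
 * mmapped file) are fixed by asking the kernel to populate the faulting page
 * with an async SYS_populate_va and blocking the uthread on that syscall.
 * Returns FALSE for faults we can't handle. */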
static bool handle_page_fault(struct uthread *uth, unsigned int err,
                              unsigned long aux)
{
        if (!(err & PF_VMR_BACKED))
                return FALSE;
        syscall_async(&uth->local_sysc, SYS_populate_va, aux, 1);
        __block_uthread_on_async_sysc(uth);
        return TRUE;
}

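/* Handle a fault the kernel reflected back to us.  Hardware page faults get a
 * chance to be serviced; any other trap (or an unfixable fault) is fatal. */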
static void thread0_thread_refl_fault(struct uthread *uth,
                                      struct user_context *ctx)
{
        unsigned int trap_nr = __arch_refl_get_nr(ctx);
        unsigned int err = __arch_refl_get_err(ctx);
        unsigned long aux = __arch_refl_get_aux(ctx);

        assert(ctx->type == ROS_HW_CTX);
        switch (trap_nr) {
        case HW_TRAP_PAGE_FAULT:
                if (!handle_page_fault(uth, err, aux))
                        refl_error(uth, trap_nr, err, aux);
                break;
        default:
                refl_error(uth, trap_nr, err, aux);
        }
}

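/* Runnable and paused both just clear the blocked flag; sched_entry will pick
 * thread0 up on its next pass. */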
static void thread0_thread_runnable(struct uthread *uth)
{
        thread0_info.is_blocked = FALSE;
}

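/* Record that our one thread is blocked; sched_entry will yield until an event
 * handler clears this. */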
static void thread0_thread_has_blocked(struct uthread *uth, int flags)
{
        thread0_info.is_blocked = TRUE;
}

/* We only have one thread, so we don't *need* mutexes.  But we'll use a bool to
 * catch code that could deadlock itself. */
static uth_mutex_t thread0_mtx_alloc(void)
{
        bool *mtx = malloc(sizeof(bool));

        assert(mtx);
        *mtx = FALSE;
        return (uth_mutex_t)mtx;
}

static void thread0_mtx_free(uth_mutex_t m)
{
        bool *mtx = (bool*)m;

        assert(*mtx == FALSE);
        free((void*)m);
}

static void thread0_mtx_lock(uth_mutex_t m)
{
        bool *mtx = (bool*)m;

        assert(*mtx == FALSE);
        *mtx = TRUE;
}

static void thread0_mtx_unlock(uth_mutex_t m)
{
        bool *mtx = (bool*)m;

        assert(*mtx == TRUE);
        *mtx = FALSE;
}