user/parlib/thread0_sched.c
/* Copyright (c) 2015 Google, Inc.
 * Barret Rhoden <brho@cs.berkeley.edu>
 * See LICENSE for details.
 *
 * thread0_sched: a basic scheduler for thread0, used by SCPs without a
 * multithreaded 2LS linked in.
 *
 * This is closely coupled with uthread.c */

#include <ros/arch/membar.h>
#include <parlib/arch/atomic.h>
#include <parlib/parlib.h>
#include <parlib/vcore.h>
#include <parlib/uthread.h>
#include <parlib/event.h>
#include <parlib/arch/trap.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static void thread0_sched_entry(void);
static void thread0_thread_blockon_sysc(struct uthread *uthread, void *sysc);
static void thread0_thread_refl_fault(struct uthread *uth,
                                      struct user_context *ctx);
static void thread0_thread_runnable(struct uthread *uth);
static void thread0_thread_has_blocked(struct uthread *uth, int flags);
static uth_mutex_t thread0_mtx_alloc(void);
static void thread0_mtx_free(uth_mutex_t m);
static void thread0_mtx_lock(uth_mutex_t m);
static void thread0_mtx_unlock(uth_mutex_t m);

/* externed into uthread.c */
struct schedule_ops thread0_2ls_ops = {
        .sched_entry = thread0_sched_entry,
        .thread_blockon_sysc = thread0_thread_blockon_sysc,
        .thread_refl_fault = thread0_thread_refl_fault,
        .thread_runnable = thread0_thread_runnable,
        .thread_paused = thread0_thread_runnable,
        .thread_has_blocked = thread0_thread_has_blocked,
        .mutex_alloc = thread0_mtx_alloc,
        .mutex_free = thread0_mtx_free,
        .mutex_lock = thread0_mtx_lock,
        .mutex_unlock = thread0_mtx_unlock,
};

/* externed into uthread.c */
struct uthread *thread0_uth;

/* Our thread0 is actually allocated in uthread.c as just a struct uthread, so
 * we don't attach this mgmt info to it.  Since we only have the one thread,
 * that doesn't matter. */
struct thread0_info {
        bool                                            is_blocked;
};
static struct thread0_info thread0_info;
static struct event_queue *sysc_evq;

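/* Handler for EV_SYSCALL: the kernel sends this event when a syscall we
 * blocked on completes, so mark thread0 runnable again. */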
static void thread0_handle_syscall(struct event_msg *ev_msg,
                                   unsigned int ev_type, void *data)
{
        thread0_info.is_blocked = FALSE;
}

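/* One-time init: set up the event queue used for syscall completion events
 * and register the EV_SYSCALL handler. */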
void thread0_lib_init(void)
{
        memset(&thread0_info, 0, sizeof(thread0_info));
        /* we don't care about the message, so don't bother with a UCQ */
        sysc_evq = get_eventq(EV_MBOX_BITMAP);
        sysc_evq->ev_flags = EVENT_INDIR | EVENT_WAKEUP;
        register_ev_handler(EV_SYSCALL, thread0_handle_syscall, 0);
}

/* Thread0 scheduler ops (for processes that haven't linked in a full 2LS) */
static void thread0_sched_entry(void)
{
        /* TODO: support signal handling whenever we run a uthread */
        if (current_uthread) {
                uthread_prep_pending_signals(current_uthread);
                run_current_uthread();
                assert(0);
        }
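        /* No current uthread: run thread0 if it's runnable, otherwise yield
         * and handle events until something unblocks it. */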
        while (1) {
                if (!thread0_info.is_blocked) {
                        uthread_prep_pending_signals(thread0_uth);
                        run_uthread(thread0_uth);
                        assert(0);
                }
                sys_yield(FALSE);
                handle_events(0);
        }
}

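/* Called when thread0 blocks on a syscall: mark it blocked and register our
 * event queue with the syscall.  If register_evq() fails, the syscall already
 * completed, so just mark the thread runnable again. */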
static void thread0_thread_blockon_sysc(struct uthread *uthread, void *arg)
{
        struct syscall *sysc = (struct syscall*)arg;
        thread0_thread_has_blocked(uthread, 0);
        if (!register_evq(sysc, sysc_evq))
                thread0_thread_runnable(uthread);
}

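/* Fatal path for a reflected fault we can't handle: print the fault info and
 * the saved user context, then exit. */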
static void refl_error(struct uthread *uth, unsigned int trap_nr,
                       unsigned int err, unsigned long aux)
{
        printf("Thread has unhandled fault: %u, err: %u, aux: 0x%lx\n",
               trap_nr, err, aux);
        /* Note that uthread.c already copied out our ctx into the uth
         * struct */
        print_user_context(&uth->u_ctx);
        printf("Turn on printx to spew unhandled, malignant trap info\n");
        exit(-1);
}

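/* Soft page-fault handler: faults on VMR-backed regions can be serviced by
 * asking the kernel to populate the VA.  Fire an async SYS_populate_va and
 * block the uthread on it; return FALSE for faults we can't handle here. */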
static bool handle_page_fault(struct uthread *uth, unsigned int err,
                              unsigned long aux)
{
        if (!(err & PF_VMR_BACKED))
                return FALSE;
        syscall_async(&uth->local_sysc, SYS_populate_va, aux, 1);
        __block_uthread_on_async_sysc(uth);
        return TRUE;
}

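/* Handles faults the kernel reflected back to us.  Page faults may be
 * recoverable via handle_page_fault(); everything else is fatal. */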
static void thread0_thread_refl_fault(struct uthread *uth,
                                      struct user_context *ctx)
{
        unsigned int trap_nr = __arch_refl_get_nr(ctx);
        unsigned int err = __arch_refl_get_err(ctx);
        unsigned long aux = __arch_refl_get_aux(ctx);

        assert(ctx->type == ROS_HW_CTX);
        switch (trap_nr) {
        case HW_TRAP_PAGE_FAULT:
                if (!handle_page_fault(uth, err, aux))
                        refl_error(uth, trap_nr, err, aux);
                break;
        default:
                refl_error(uth, trap_nr, err, aux);
        }
}

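/* With just one thread, runnable/has_blocked only need to toggle thread0's
 * blocked flag, which the sched_entry loop checks. */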
static void thread0_thread_runnable(struct uthread *uth)
{
        thread0_info.is_blocked = FALSE;
}

static void thread0_thread_has_blocked(struct uthread *uth, int flags)
{
        thread0_info.is_blocked = TRUE;
}

/* We only have one thread, so we don't need mutexes */
static uth_mutex_t thread0_mtx_alloc(void)
{
        /* Returning something non-zero, in case someone compares it to 0 */
        return (uth_mutex_t)0x1234;
}

static void thread0_mtx_free(uth_mutex_t m)
{
}

static void thread0_mtx_lock(uth_mutex_t m)
{
}

static void thread0_mtx_unlock(uth_mutex_t m)
{
}