/* Copyright (c) 2015 Google Inc.
 * Kevin Klues <klueska@cs.berkeley.edu>
 * See LICENSE for details.
 */

#include <libc-symbols.h>
#include <ros/common.h>
#include <ros/trapframe.h>
#include <ros/syscall.h>
#include <parlib/stdio.h>
#include <parlib/assert.h>
#include <parlib/spinlock.h>
#include <parlib/timing.h>
#include <parlib/uthread.h>
#include <parlib/dtls.h>
#include <stdbool.h>

/* Here we define functions and variables that are really defined in parlib,
 * but that libc needs in order to link. We weak-alias them here so that the
 * parlib definitions will override them later.
 *
 * Unfortunately, this trick only works so long as we leave parlib as a static
 * library. If we ever decide to make parlib a .so, then we will have to revisit
 * this and use function pointers at runtime or something similar.
 *
 * This also doesn't work for ld.so, which doesn't link against parlib.  That's
 * probably a good thing (uthread constructors would be a mess for ld, I bet).
 * But it does mean that these stubs need to actually do something for
 * functions that ld.so calls.
 *
 * Also, be careful and paranoid.  If you change or add functions in here,
 * recompile apps that link against libc - even if they link dynamically.
 * Otherwise, when they link against libc.so, *libc itself* (not the actual
 * program) would not find the parlib functions - it would still use these
 * stubs.  I don't have a good explanation for it, but that's what seemed to
 * happen.
 *
 * For example, if you write(2, "foo\n", 4) on every lock acquisition, you'll
 * see one foo per process, which I think comes from ld (back when it used
 * spin_pdr locks for the rtld lock).  Later callers of spin_pdr_lock, whether
 * from the app, parlib, or libc, do not output foo.  This is not the case if
 * the application was built before this change and not rebuilt (e.g. bash,
 * ssh, etc.). */
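
/* A rough sketch of the override pattern described above (the symbol names
 * below are made up purely for illustration and do not exist in this file):
 *
 *      // libc (this file): a weak stub that parlib is expected to replace
 *      void __foo(void) { assert(0); }
 *      weak_alias(__foo, foo)          // 'foo' becomes a weak alias of __foo
 *
 *      // parlib (static library): the real, strong definition
 *      void foo(void) { ...real implementation... }
 *
 * When an app links statically against both libc and parlib, the linker
 * resolves 'foo' to parlib's strong definition, so even calls made from
 * inside libc end up in parlib rather than in the weak stub. */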

__thread int __weak_vcoreid = 0;
extern __thread int __vcoreid __attribute__ ((weak, alias ("__weak_vcoreid")));

__thread bool __weak_vcore_context = FALSE;
extern __thread bool __vcore_context
       __attribute__ ((weak, alias ("__weak_vcore_context")));

int __akaros_printf(const char *format, ...)
{
        assert(0);
        return -1;
}
weak_alias(__akaros_printf, akaros_printf)

int __trace_printf(const char *format, ...)
{
        assert(0);
        return -1;
}
weak_alias(__trace_printf, trace_printf)

void __print_user_context(struct user_context *ctx)
{
        assert(0);
}
weak_alias(__print_user_context, print_user_context)

void ___assert_failed(const char *file, int line, const char *msg)
{
        breakpoint();
        abort();
}
weak_alias(___assert_failed, _assert_failed)

uint64_t __nsec2tsc(uint64_t nsec)
{
        assert(0);
}
weak_alias(__nsec2tsc, nsec2tsc)

uint64_t __tsc2nsec(uint64_t tsc_time)
{
        assert(0);
}
weak_alias(__tsc2nsec, tsc2nsec)

void __spin_pdr_init(struct spin_pdr_lock *pdr_lock)
{
        assert(0);
}
weak_alias(__spin_pdr_init, spin_pdr_init)

bool __spin_pdr_trylock(struct spin_pdr_lock *pdr_lock)
{
        assert(0);
}
weak_alias(__spin_pdr_trylock, spin_pdr_trylock)

void __spin_pdr_lock(struct spin_pdr_lock *pdr_lock)
{
        assert(0);
}
weak_alias(__spin_pdr_lock, spin_pdr_lock)

void __spin_pdr_unlock(struct spin_pdr_lock *pdr_lock)
{
        assert(0);
}
weak_alias(__spin_pdr_unlock, spin_pdr_unlock)

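/* Unlike the assert(0) stubs, the next two fall back to something that works
 * on its own - presumably so that code which never links against parlib (e.g.
 * ld.so), or an SCP running before parlib is available, still behaves sanely.
 * See the header comment above. */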
void __cpu_relax_vc(uint32_t vcoreid)
{
        cpu_relax();
}
weak_alias(__cpu_relax_vc, cpu_relax_vc)

void __uthread_sched_yield(void)
{
        /* In the off-chance we're called before parlib is available, we'll do
         * the single-threaded, SCP yield. */
        ros_syscall(SYS_proc_yield, TRUE, 0, 0, 0, 0, 0);
}
weak_alias(__uthread_sched_yield, uthread_sched_yield)

void __uth_mutex_init(uth_mutex_t *m)
{
        assert(0);
}
weak_alias(__uth_mutex_init, uth_mutex_init)

void __uth_mutex_destroy(uth_mutex_t *m)
{
        assert(0);
}
weak_alias(__uth_mutex_destroy, uth_mutex_destroy)

void __uth_mutex_lock(uth_mutex_t *m)
{
        assert(0);
}
weak_alias(__uth_mutex_lock, uth_mutex_lock)

bool __uth_mutex_trylock(uth_mutex_t *m)
{
        assert(0);
}
weak_alias(__uth_mutex_trylock, uth_mutex_trylock)

void __uth_mutex_unlock(uth_mutex_t *m)
{
        assert(0);
}
weak_alias(__uth_mutex_unlock, uth_mutex_unlock)

void __uth_recurse_mutex_init(uth_recurse_mutex_t *r_m)
{
        assert(0);
}
weak_alias(__uth_recurse_mutex_init, uth_recurse_mutex_init)

void __uth_recurse_mutex_destroy(uth_recurse_mutex_t *r_m)
{
        assert(0);
}
weak_alias(__uth_recurse_mutex_destroy, uth_recurse_mutex_destroy)

void __uth_recurse_mutex_lock(uth_recurse_mutex_t *r_m)
{
        assert(0);
}
weak_alias(__uth_recurse_mutex_lock, uth_recurse_mutex_lock)

bool __uth_recurse_mutex_trylock(uth_recurse_mutex_t *r_m)
{
        assert(0);
}
weak_alias(__uth_recurse_mutex_trylock, uth_recurse_mutex_trylock)

void __uth_recurse_mutex_unlock(uth_recurse_mutex_t *r_m)
{
        assert(0);
}
weak_alias(__uth_recurse_mutex_unlock, uth_recurse_mutex_unlock)

void __uth_rwlock_init(uth_rwlock_t *rwl)
{
        assert(0);
}
weak_alias(__uth_rwlock_init, uth_rwlock_init)

void __uth_rwlock_destroy(uth_rwlock_t *rwl)
{
        assert(0);
}
weak_alias(__uth_rwlock_destroy, uth_rwlock_destroy)

void __uth_rwlock_rdlock(uth_rwlock_t *rwl)
{
        assert(0);
}
weak_alias(__uth_rwlock_rdlock, uth_rwlock_rdlock)

bool __uth_rwlock_try_rdlock(uth_rwlock_t *rwl)
{
        assert(0);
}
weak_alias(__uth_rwlock_try_rdlock, uth_rwlock_try_rdlock)

void __uth_rwlock_wrlock(uth_rwlock_t *rwl)
{
        assert(0);
}
weak_alias(__uth_rwlock_wrlock, uth_rwlock_wrlock)

bool __uth_rwlock_try_wrlock(uth_rwlock_t *rwl)
{
        assert(0);
}
weak_alias(__uth_rwlock_try_wrlock, uth_rwlock_try_wrlock)

void __uth_rwlock_unlock(uth_rwlock_t *rwl)
{
        assert(0);
}
weak_alias(__uth_rwlock_unlock, uth_rwlock_unlock)

dtls_key_t __dtls_key_create(dtls_dtor_t dtor)
{
        assert(0);
}
weak_alias(__dtls_key_create, dtls_key_create)

void __set_dtls(dtls_key_t key, const void *dtls)
{
        assert(0);
}
weak_alias(__set_dtls, set_dtls)

void *__get_dtls(dtls_key_t key)
{
        assert(0);
}
weak_alias(__get_dtls, get_dtls)