#include <parlib/parlib.h>
#include <parlib/vcore.h>
#include <parlib/serialize.h>
#include <parlib/assert.h>
#include <parlib/stdio.h>
#include <errno.h>	/* ENOMEM in sys_proc_create() */
#include <stdarg.h>	/* va_list in the syscall_async*() variants */

int sys_proc_destroy(int pid, int exitcode)
{
	return ros_syscall(SYS_proc_destroy, pid, exitcode, 0, 0, 0, 0);
}

size_t sys_getpcoreid(void)
{
	return ros_syscall(SYS_getpcoreid, 0, 0, 0, 0, 0, 0);
}

int sys_null(void)
{
	return ros_syscall(SYS_null, 0, 0, 0, 0, 0, 0);
}

ssize_t sys_shared_page_alloc(void **addr, pid_t p2,
			      int p1_flags, int p2_flags)
{
	return ros_syscall(SYS_shared_page_alloc, addr, p2, p1_flags, p2_flags,
			   0, 0);
}

ssize_t sys_shared_page_free(void *addr, pid_t p2)
{
	return ros_syscall(SYS_shared_page_free, addr, p2, 0, 0, 0, 0);
}

void sys_reboot(void)
{
	ros_syscall(SYS_reboot, 0, 0, 0, 0, 0, 0);
}

void sys_yield(bool being_nice)
{
	ros_syscall(SYS_proc_yield, being_nice, 0, 0, 0, 0, 0);
}

int sys_proc_create(const char *path, size_t path_l, char *const argv[],
		    char *const envp[], int flags)
{
	struct serialized_data *sd = serialize_argv_envp(argv, envp);

	if (!sd) {
		errno = ENOMEM;
		return -1;
	}
	int ret = ros_syscall(SYS_proc_create, path, path_l,
			      sd->buf, sd->len, flags, 0);
	free_serialized_data(sd);
	return ret;
}

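/* A minimal usage sketch, compiled out: create and start a child process.
 * The "/bin/hello" path and the empty envp are hypothetical; strlen() is from
 * <string.h>. */
#if 0
	char *c_argv[] = {"hello", 0};
	char *c_envp[] = {0};
	int kid = sys_proc_create("/bin/hello", strlen("/bin/hello"),
				  c_argv, c_envp, 0);

	if (kid < 0)
		perror("proc_create");
	else
		sys_proc_run(kid);
#endif
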
int sys_proc_run(int pid)
{
	return ros_syscall(SYS_proc_run, pid, 0, 0, 0, 0, 0);
}

void *sys_mmap(void *addr, size_t length, int prot, int flags,
	       int fd, size_t offset)
{
	return (void*)ros_syscall(SYS_mmap, addr, length, prot, flags, fd,
				  offset);
}

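/* A minimal usage sketch, compiled out: an anonymous mapping through the raw
 * wrapper.  Normal code should go through libc's mmap(); the PROT_*/MAP_*
 * constants are from <sys/mman.h>.  On error the syscall returns -1, which is
 * MAP_FAILED once cast to a pointer. */
#if 0
	void *page = sys_mmap(0, PGSIZE, PROT_READ | PROT_WRITE,
			      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (page == MAP_FAILED)
		perror("mmap");
#endif
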
int sys_provision(int pid, unsigned int res_type, long res_val)
{
	return ros_syscall(SYS_provision, pid, res_type, res_val, 0, 0, 0);
}

int sys_notify(int pid, unsigned int ev_type, struct event_msg *u_msg)
{
	return ros_syscall(SYS_notify, pid, ev_type, u_msg, 0, 0, 0);
}

int sys_self_notify(uint32_t vcoreid, unsigned int ev_type,
		    struct event_msg *u_msg, bool priv)
{
	return ros_syscall(SYS_self_notify, vcoreid, ev_type, u_msg, priv, 0,
			   0);
}

int sys_send_event(struct event_queue *ev_q, struct event_msg *ev_msg,
		   uint32_t vcoreid)
{
	return ros_syscall(SYS_send_event, ev_q, ev_msg, vcoreid, 0, 0, 0);
}

int sys_halt_core(unsigned long usec)
{
	return ros_syscall(SYS_halt_core, usec, 0, 0, 0, 0, 0);
}

void *sys_init_arsc(void)
{
	return (void*)ros_syscall(SYS_init_arsc, 0, 0, 0, 0, 0, 0);
}

int sys_block(unsigned long usec)
{
	return ros_syscall(SYS_block, usec, 0, 0, 0, 0, 0);
}

/* enable_my_notif tells the kernel whether or not it is okay to turn on notifs
 * when our calling vcore 'yields'. This controls whether or not the vcore will
 * get started from vcore_entry() or not, and whether or not remote cores need
 * to sys_change_vcore to preempt-recover the calling vcore. Only set this to
 * FALSE if you are unable to handle starting fresh at vcore_entry(). One
 * example of this is in mcs_pdr_locks.
 *
 * Will return:
 *	0 if we successfully changed to the target vcore.
 *	-EBUSY if the target vcore is already mapped (a good kind of failure)
 *	-EAGAIN if we failed for some other reason and need to try again. For
 *	example, the caller could be preempted, and we never even attempted to
 *	change.
 *	-EINVAL some userspace bug */
int sys_change_vcore(uint32_t vcoreid, bool enable_my_notif)
{
	/* Since we might be asking to start up on a fresh stack (if
	 * enable_my_notif), we need to use some non-stack memory for the
	 * struct sysc.  Our vcore could get restarted before the syscall
	 * finishes (after unlocking the proc, before finish_sysc()), and the
	 * act of finishing would write onto our stack.  Thus we use the
	 * per-vcore struct. */
	int flags;

	/* Sanity check.  Uthreads can call this, but only when notifs are
	 * disabled. */
	assert(!notif_is_enabled(vcore_id()));
	/* Need to wait while a previous syscall is not done or locked.  Since
	 * this should only be called from VC ctx, we'll just spin.  Should be
	 * extremely rare.  Note flags is initialized to SC_DONE. */
	do {
		cpu_relax();
		flags = atomic_read(&__vcore_one_sysc.flags);
	} while (!(flags & SC_DONE) || flags & SC_K_LOCK);
	__vcore_one_sysc.num = SYS_change_vcore;
	__vcore_one_sysc.arg0 = vcoreid;
	__vcore_one_sysc.arg1 = enable_my_notif;
	/* keep in sync with glibc sysdeps/ros/syscall.c */
	__ros_arch_syscall((long)&__vcore_one_sysc, 1);
	/* If we returned, either we wanted to (!enable_my_notif) or we failed.
	 * Need to wait until the sysc is finished to find out why.  Again,
	 * it's okay to just spin. */
	do {
		cpu_relax();
		flags = atomic_read(&__vcore_one_sysc.flags);
	} while (!(flags & SC_DONE) || flags & SC_K_LOCK);
	return __vcore_one_sysc.retval;
}

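/* A hypothetical sketch, compiled out: driving sys_change_vcore() from vcore
 * context.  On success with enable_my_notif set, the call does not return
 * (we restart at vcore_entry()); returning means we should check why and
 * possibly retry.  'target_vcoreid' is illustrative. */
#if 0
	int ret;

	do {
		ret = sys_change_vcore(target_vcoreid, TRUE);
	} while (ret == -EAGAIN);
	/* -EBUSY: the target was already mapped/running, which is fine.
	 * -EINVAL: a bug in the caller. */
#endif
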
int sys_change_to_m(void)
{
	return ros_syscall(SYS_change_to_m, 0, 0, 0, 0, 0, 0);
}

int sys_poke_ksched(int pid, unsigned int res_type)
{
	return ros_syscall(SYS_poke_ksched, pid, res_type, 0, 0, 0, 0);
}

int sys_abort_sysc(struct syscall *sysc)
{
	return ros_syscall(SYS_abort_sysc, sysc, 0, 0, 0, 0, 0);
}

int sys_abort_sysc_fd(int fd)
{
	return ros_syscall(SYS_abort_sysc_fd, fd, 0, 0, 0, 0, 0);
}

int sys_tap_fds(struct fd_tap_req *tap_reqs, size_t nr_reqs)
{
	return ros_syscall(SYS_tap_fds, tap_reqs, nr_reqs, 0, 0, 0, 0);
}

void syscall_async(struct syscall *sysc, unsigned long num, ...)
{
	va_list args;

	sysc->num = num;
	atomic_set(&sysc->flags, 0);
	sysc->ev_q = 0;		/* not necessary, but good for debugging */
	/* This is a little dangerous, since we'll usually pull more args than
	 * were passed in, ultimately reading gibberish off the stack. */
	va_start(args, num);
	sysc->arg0 = va_arg(args, long);
	sysc->arg1 = va_arg(args, long);
	sysc->arg2 = va_arg(args, long);
	sysc->arg3 = va_arg(args, long);
	sysc->arg4 = va_arg(args, long);
	sysc->arg5 = va_arg(args, long);
	va_end(args);
	__ros_arch_syscall((long)sysc, 1);
}

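/* A minimal sketch, compiled out: issue an async syscall and poll for
 * completion.  The syscall struct must stay alive until SC_DONE is set;
 * SYS_block's usec argument (5000) is arbitrary. */
#if 0
	struct syscall sysc = {0};

	syscall_async(&sysc, SYS_block, 5000);
	while (!(atomic_read(&sysc.flags) & SC_DONE))
		cpu_relax();
#endif
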
void syscall_async_evq(struct syscall *sysc, struct event_queue *evq,
		       unsigned long num, ...)
{
	va_list args;

	sysc->num = num;
	atomic_set(&sysc->flags, SC_UEVENT);
	sysc->ev_q = evq;
	/* This is a little dangerous, since we'll usually pull more args than
	 * were passed in, ultimately reading gibberish off the stack. */
	va_start(args, num);
	sysc->arg0 = va_arg(args, long);
	sysc->arg1 = va_arg(args, long);
	sysc->arg2 = va_arg(args, long);
	sysc->arg3 = va_arg(args, long);
	sysc->arg4 = va_arg(args, long);
	sysc->arg5 = va_arg(args, long);
	va_end(args);
	__ros_arch_syscall((long)sysc, 1);
}

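/* A hypothetical sketch, compiled out: as above, but ask the kernel to post a
 * completion event instead of polling.  SC_UEVENT (set by syscall_async_evq())
 * makes the kernel fire sysc->ev_q when the syscall finishes; the
 * get_eventq()/EV_MBOX_UCQ/EVENT_IPI setup is from parlib's event code and
 * may differ in detail. */
#if 0
	struct event_queue *evq = get_eventq(EV_MBOX_UCQ);
	struct syscall sysc = {0};

	evq->ev_flags = EVENT_IPI;
	evq->ev_vcore = vcore_id();
	syscall_async_evq(&sysc, evq, SYS_block, 5000);
	/* ... do other work; an EV_SYSCALL event arrives when the sysc is
	 * done ... */
#endif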