/* akaros/user/parlib/syscall.c */
   1// System call stubs.
   2
   3#include <parlib/parlib.h>
   4#include <parlib/vcore.h>
   5#include <parlib/serialize.h>
   6#include <parlib/assert.h>
   7#include <parlib/stdio.h>
   8
   9int sys_proc_destroy(int pid, int exitcode)
  10{
  11        return ros_syscall(SYS_proc_destroy, pid, exitcode, 0, 0, 0, 0);
  12}
  13
  14size_t sys_getpcoreid(void)
  15{
  16         return ros_syscall(SYS_getpcoreid, 0, 0, 0, 0, 0, 0);
  17}
  18
  19int sys_null(void)
  20{
  21        return ros_syscall(SYS_null, 0, 0, 0, 0, 0, 0);
  22}
  23
  24ssize_t sys_shared_page_alloc(void** addr, pid_t p2, int p1_flags, int p2_flags)
  25{
  26        return ros_syscall(SYS_shared_page_alloc, addr, p2, p1_flags, p2_flags,
  27                           0, 0);
  28}
  29
  30ssize_t sys_shared_page_free(void* addr, pid_t p2) 
  31{
  32        return ros_syscall(SYS_shared_page_free, addr, p2, 0, 0, 0, 0);
  33}
  34
  35void sys_reboot(void)
  36{
  37        ros_syscall(SYS_reboot, 0, 0, 0, 0, 0, 0);
  38}
  39
  40void sys_yield(bool being_nice)
  41{
  42        ros_syscall(SYS_proc_yield, being_nice, 0, 0, 0, 0, 0);
  43}
  44
  45int sys_proc_create(const char *path, size_t path_l, char *const argv[],
  46                    char *const envp[], int flags)
  47{
  48        struct serialized_data *sd = serialize_argv_envp(argv, envp);
  49
  50        if (!sd) {
  51                errno = ENOMEM;
  52                return -1;
  53        }
  54        int ret = ros_syscall(SYS_proc_create, path, path_l,
  55                              sd->buf, sd->len, flags, 0);
  56        free_serialized_data(sd);
  57        return ret;
  58}
  59
  60int sys_proc_run(int pid)
  61{
  62        return ros_syscall(SYS_proc_run, pid, 0, 0, 0, 0, 0);
  63}
  64
  65void *sys_mmap(void *addr, size_t length, int prot, int flags, int fd,
  66               size_t offset)
  67{
  68        return (void*)ros_syscall(SYS_mmap, addr, length, prot, flags, fd,
  69                                  offset);
  70}
  71
  72int sys_provision(int pid, unsigned int res_type, long res_val)
  73{
  74        return ros_syscall(SYS_provision, pid, res_type, res_val, 0, 0, 0);
  75}
  76
  77int sys_notify(int pid, unsigned int ev_type, struct event_msg *u_msg)
  78{
  79        return ros_syscall(SYS_notify, pid, ev_type, u_msg, 0, 0, 0);
  80}
  81
  82int sys_self_notify(uint32_t vcoreid, unsigned int ev_type,
  83                    struct event_msg *u_msg, bool priv)
  84{
  85        return ros_syscall(SYS_self_notify, vcoreid, ev_type, u_msg, priv, 0,
  86                           0);
  87}
  88
  89int sys_send_event(struct event_queue *ev_q, struct event_msg *ev_msg,
  90                   uint32_t vcoreid)
  91{
  92        return ros_syscall(SYS_send_event, ev_q, ev_msg, vcoreid, 0, 0, 0);
  93}
  94
  95int sys_halt_core(unsigned long usec)
  96{
  97        return ros_syscall(SYS_halt_core, usec, 0, 0, 0, 0, 0);
  98}
  99
 100void *sys_init_arsc()
 101{
 102        return (void*)ros_syscall(SYS_init_arsc, 0, 0, 0, 0, 0, 0);
 103}
 104
 105int sys_block(unsigned long usec)
 106{
 107        return ros_syscall(SYS_block, usec, 0, 0, 0, 0, 0);
 108}
 109
/* Atomically changes the calling vcore to the target vcore.
 *
 * enable_my_notif tells the kernel whether or not it is okay to turn on notifs
 * when our calling vcore 'yields'.  This controls whether or not the vcore will
 * get started from vcore_entry() or not, and whether or not remote cores need
 * to sys_change_vcore to preempt-recover the calling vcore.  Only set this to
 * FALSE if you are unable to handle starting fresh at vcore_entry().  One
 * example of this is in mcs_pdr_locks.
 *
 * Will return:
 *	0 if we successfully changed to the target vcore.
 *	-EBUSY if the target vcore is already mapped (a good kind of failure)
 *	-EAGAIN if we failed for some other reason and need to try again.  For
 *	example, the caller could be preempted, and we never even attempted to
 *	change.
 *	-EINVAL some userspace bug */
int sys_change_vcore(uint32_t vcoreid, bool enable_my_notif)
{
	/* Since we might be asking to start up on a fresh stack (if
	 * enable_my_notif), we need to use some non-stack memory for the struct
	 * sysc.  Our vcore could get restarted before the syscall finishes
	 * (after unlocking the proc, before finish_sysc()), and the act of
	 * finishing would write onto our stack.  Thus we use the per-vcore
	 * struct. */
	int flags;

	/* Sanity check.  Uthreads can call this, but only when notifs disabled.
	 */
	assert(!notif_is_enabled(vcore_id()));
	/* Need to wait while a previous syscall is not done or locked.  Since
	 * this should only be called from VC ctx, we'll just spin.  Should be
	 * extremely rare.  Note flags is initialized to SC_DONE. */
	do {
		cpu_relax();
		flags = atomic_read(&__vcore_one_sysc.flags);
	} while (!(flags & SC_DONE) || flags & SC_K_LOCK);
	__vcore_one_sysc.num = SYS_change_vcore;
	__vcore_one_sysc.arg0 = vcoreid;
	__vcore_one_sysc.arg1 = enable_my_notif;
	/* Issue the raw syscall directly (not ros_syscall), since we must use
	 * the per-vcore struct rather than a stack-local one.
	 * keep in sync with glibc sysdeps/ros/syscall.c */
	__ros_arch_syscall((long)&__vcore_one_sysc, 1);
	/* If we returned, either we wanted to (!enable_my_notif) or we failed.
	 * Need to wait til the sysc is finished to find out why.  Again, its
	 * okay to just spin. */
	do {
		cpu_relax();
		flags = atomic_read(&__vcore_one_sysc.flags);
	} while (!(flags & SC_DONE) || flags & SC_K_LOCK);
	return __vcore_one_sysc.retval;
}
 158
 159int sys_change_to_m(void)
 160{
 161        return ros_syscall(SYS_change_to_m, 0, 0, 0, 0, 0, 0);
 162}
 163
 164int sys_poke_ksched(int pid, unsigned int res_type)
 165{
 166        return ros_syscall(SYS_poke_ksched, pid, res_type, 0, 0, 0, 0);
 167}
 168
 169int sys_abort_sysc(struct syscall *sysc)
 170{
 171        return ros_syscall(SYS_abort_sysc, sysc, 0, 0, 0, 0, 0);
 172}
 173
 174int sys_abort_sysc_fd(int fd)
 175{
 176        return ros_syscall(SYS_abort_sysc_fd, fd, 0, 0, 0, 0, 0);
 177}
 178
 179int sys_tap_fds(struct fd_tap_req *tap_reqs, size_t nr_reqs)
 180{
 181        return ros_syscall(SYS_tap_fds, tap_reqs, nr_reqs, 0, 0, 0, 0);
 182}
 183
 184void syscall_async(struct syscall *sysc, unsigned long num, ...)
 185{
 186        va_list args;
 187
 188        sysc->num = num;
 189        sysc->flags = 0;
 190        sysc->ev_q = 0;         /* not necessary, but good for debugging */
 191        /* This is a little dangerous, since we'll usually pull more args than
 192         * were passed in, ultimately reading gibberish off the stack. */
 193        va_start(args, num);
 194        sysc->arg0 = va_arg(args, long);
 195        sysc->arg1 = va_arg(args, long);
 196        sysc->arg2 = va_arg(args, long);
 197        sysc->arg3 = va_arg(args, long);
 198        sysc->arg4 = va_arg(args, long);
 199        sysc->arg5 = va_arg(args, long);
 200        va_end(args);
 201        __ros_arch_syscall((long)sysc, 1);
 202}
 203
 204void syscall_async_evq(struct syscall *sysc, struct event_queue *evq,
 205                       unsigned long num, ...)
 206{
 207        va_list args;
 208
 209        sysc->num = num;
 210        atomic_set(&sysc->flags, SC_UEVENT);
 211        sysc->ev_q = evq;
 212        /* This is a little dangerous, since we'll usually pull more args than
 213         * were passed in, ultimately reading gibberish off the stack. */
 214        va_start(args, num);
 215        sysc->arg0 = va_arg(args, long);
 216        sysc->arg1 = va_arg(args, long);
 217        sysc->arg2 = va_arg(args, long);
 218        sysc->arg3 = va_arg(args, long);
 219        sysc->arg4 = va_arg(args, long);
 220        sysc->arg5 = va_arg(args, long);
 221        va_end(args);
 222        __ros_arch_syscall((long)sysc, 1);
 223}
 224