x86: cleans up syscalls, fixes pop_ros_tf() (XCC)
authorBarret Rhoden <brho@cs.berkeley.edu>
Fri, 12 Nov 2010 04:22:25 +0000 (20:22 -0800)
committerKevin Klues <klueska@cs.berkeley.edu>
Thu, 3 Nov 2011 00:35:56 +0000 (17:35 -0700)
Rebuild your cross compiler.

Since we do async syscalls, pop_ros_tf() needed to be fixed for the cases
where it calls sys_self_notify() from within the asm.

Side note: I probably wouldn't have noticed this for a while had I not
been cleaning out the errno and extra registers from
__ros_arch_syscall(), which arguably wasn't a big deal.  Turns out it
was worth the time after all.

kern/arch/i686/ros/bits/syscall.h
kern/arch/i686/ros/syscall.h
user/include/i686/vcore.h

index edc4694..f073d67 100644 (file)
 #include <ros/common.h>
 #include <assert.h>
 
-// TODO: fix sysenter to take all 5 params
-static inline intreg_t __syscall_sysenter(uint16_t num, intreg_t a1,
-                                    intreg_t a2, intreg_t a3,
-                                    intreg_t a4, intreg_t a5, intreg_t* err_loc)
+static inline intreg_t __syscall_sysenter(uintreg_t a0, uintreg_t a1)
 {
-       assert(!a5);    /* sysenter doesn't handle 5 arguments yet */
-       // The kernel clobbers ecx and edx => put them in clobber list.
-       // ebx is handled specially because of a glibc register
-       // allocation problem (not enough registers).
+       /* The kernel clobbers ecx, so we save it manually. */
        intreg_t ret = 0;
-       intreg_t err = 0;
-       asm volatile (""
-                     "  pushl %%ebx;        "
-                     "  movl %5, %%ebx;     "
-                     "  pushl %%ecx;        "
+       asm volatile ("  pushl %%ecx;        "
                      "  pushl %%edx;        "
-                     "  pushl %%esi;        "
                      "  pushl %%ebp;        "
                      "  movl %%esp, %%ebp;  "
                      "  leal 1f, %%edx;     "
                      "  sysenter;           "
                      "1:                    "
                      "  popl %%ebp;         "
-                     "  movl %%esi, %1;     "
-                     "  popl %%esi;         "
                      "  popl %%edx;         "
                      "  popl %%ecx;         "
-                     "  popl %%ebx;         "
-                     : "=a" (ret),
-                       "=m" (err)
-                     : "a" (num),
-                       "S" (a1),
-                       "c" (a2),
-                       "r" (a3),
-                       "D" (a4)
+                     : "=a" (ret)
+                     : "a" (a0),
+                       "S" (a1)
                      : "cc", "memory");
-       // TODO: gut errno 
-       if(err != 0 && err_loc != NULL)
-               *err_loc = err;
        return ret;
 }
 
-static inline intreg_t __syscall_trap(uint16_t num, intreg_t a1,
-                             intreg_t a2, intreg_t a3,
-                             intreg_t a4, intreg_t a5, intreg_t* err_loc)
+static inline intreg_t __syscall_trap(uintreg_t a0, uintreg_t a1)
 {
        intreg_t ret;
-       intreg_t err;
-
-       // Generic system call: pass system call number in AX,
-       // up to five parameters in DX, CX, BX, DI, SI.
-       // Interrupt kernel with T_SYSCALL.
-       //
-       // The "volatile" tells the assembler not to optimize
-       // this instruction away just because we don't use the
-       // return value.
-       //
-       // The last clause tells the assembler that this can
-       // potentially change the condition codes and arbitrary
-       // memory locations.
 
        /* If you change this, change pop_ros_tf() */
-       asm volatile(""
-                    " int %2"
-                    : "=a" (ret),
-                      "=S" (err)
+       asm volatile("int %1"
+                    : "=a" (ret)
                     : "i" (T_SYSCALL),
-                      "a" (num),
-                      "d" (a1),
-                      "c" (a2),
-                      "b" (a3),
-                      "D" (a4),
-                      "S" (a5)
+                      "a" (a0),
+                      "d" (a1)
                     : "cc", "memory");
-       // TODO: gut errno 
-       if(err != 0 && err_loc != NULL)
-               *err_loc = err;
        return ret;
 }
 
index 99243a4..ed01c31 100644 (file)
@@ -7,14 +7,13 @@
 
 #include <ros/arch/bits/syscall.h>
 
-/* Traditional interface, though this should only be used for *syscalls */
-static inline long __ros_arch_syscall(long _num, long _a0, long _a1, long _a2,
-                                      long _a3, long _a4)
+static inline long __ros_arch_syscall(long _a0, long _a1, long _a2, long _a3,
+                                      long _a4, long _a5)
 {
        #ifdef __CONFIG_SYSCALL_TRAP__
-               return __syscall_trap(_num, _a0, _a1, _a2, _a3, _a4, 0);
+               return __syscall_trap(_a0, _a1);
        #else
-               return __syscall_sysenter(_num, _a0, _a1, _a2, _a3, _a4, 0);
+               return __syscall_sysenter(_a0, _a1);
        #endif
 }
 
index 54fd7ed..5ccbbe7 100644 (file)
@@ -29,7 +29,7 @@ extern __thread int __vcoreid;
  * Target ESP -> |   u_thread's old stuff   |
  *               |   new eip                |
  *               |   eax save space         |
- *               |   vcoreid                |
+ *               |   &sysc (async syscall)  |
  *               |   notif_pending_loc      |
  *               |   notif_enabled_loc      |
  *
@@ -37,15 +37,21 @@ extern __thread int __vcoreid;
  * notifications, and when it gets resumed it can ultimately run the new
  * context.  Enough state is saved in the running context and stack to continue
  * running. */
+#include <stdio.h>
 static inline void pop_ros_tf(struct user_trapframe *tf, uint32_t vcoreid)
 {
+       struct syscall sysc = {0};
        struct preempt_data *vcpd = &__procdata.vcore_preempt_data[vcoreid];
+       /* need to prep the async sysc in case we need to notify ourselves */
+       sysc.num = SYS_self_notify;
+       sysc.arg0 = vcoreid;    /* arg1 and 2 already = 0 (null notif, no u_ne) */
        if (!tf->tf_cs) { /* sysenter TF.  esp and eip are in other regs. */
                tf->tf_esp = tf->tf_regs.reg_ebp;
                tf->tf_eip = tf->tf_regs.reg_edx;
        }
+
        asm volatile ("movl %2,-0x04(%1);    " /* push the PC */
-                     "movl %3,-0x0c(%1);    " /* room for eax, push vcoreid */
+                     "movl %3,-0x0c(%1);    " /* room for eax, push &sysc */
                      "movl %4,-0x10(%1);    " /* push notif_pending loc */
                      "movl %5,-0x14(%1);    " /* push notif_enabled loc */
                      "movl %0,%%esp;        " /* pop the real tf */
@@ -64,30 +70,21 @@ static inline void pop_ros_tf(struct user_trapframe *tf, uint32_t vcoreid)
                      "jz 1f;                " /* if not pending, skip syscall */
                      "popfl;                " /* restore eflags */
                      "movb $0x00,(%%eax);   " /* clear pending */
-                     "pushl %%edx;          " /* save edx, syscall arg1 */
-                     "pushl %%ecx;          " /* save ecx, syscall arg2 */
-                     "pushl %%ebx;          " /* save ebx, syscall arg3 */
-                     "pushl %%esi;          " /* will be clobbered for errno */
-                     "addl $0x10,%%esp;     " /* move back over the 4 push's */
-                     "popl %%edx;           " /* vcoreid, arg1 */
-                     "subl $0x14,%%esp;     " /* jump back to after the 4 push's */
-                     "movl $0x0,%%ecx;      " /* send the null notif, arg2 */
-                     "movl $0x0,%%ebx;      " /* no u_ne message, arg3 */
-                     "movl %6,%%eax;        " /* syscall num */
-                     "int %7;               " /* fire the syscall */
-                     "popl %%esi;           " /* restore regs after syscall */
-                     "popl %%ebx;           "
-                     "popl %%ecx;           "
-                     "popl %%edx;           "
+                                 /* Actual syscall.  Note we don't wait on the async call */
+                     "popl %%eax;           " /* &sysc, trap arg0 */
+                     "pushl %%edx;          " /* save edx, will be trap arg1 */
+                     "movl $0x1,%%edx;      " /* sending one async syscall: arg1 */
+                     "int %6;               " /* fire the syscall */
+                     "popl %%edx;           " /* restore regs after syscall */
                      "jmp 2f;               " /* skip 1:, already popped */
-                     "1: popfl;             " /* restore eflags */
-                     "2: popl %%eax;        " /* discard vcoreid */
-                     "popl %%eax;           " /* restore tf's %eax */
+                     "1: popfl;             " /* restore eflags (on non-sc path) */
+                                 "popl %%eax;           " /* discard &sysc (on non-sc path) */
+                     "2: popl %%eax;        " /* restore tf's %eax (both paths) */
                      "ret;                  " /* return to the new PC */
                      :
-                     : "g"(tf), "r"(tf->tf_esp), "r"(tf->tf_eip), "r"(vcoreid),
+                     : "g"(tf), "r"(tf->tf_esp), "r"(tf->tf_eip), "r"(&sysc),
                        "r"(&vcpd->notif_pending), "r"(&vcpd->notif_enabled),
-                       "i"(SYS_self_notify), "i"(T_SYSCALL)
+                       "i"(T_SYSCALL)
                      : "memory");
 }