Have #ip's protocol ctl()s throw errors
[akaros.git] kern/include/kthread.h
index 11b6c75..0b9f4bf 100644
@@ -6,13 +6,17 @@
  * reason, usually during blocking IO operations.  Check out
  * Documentation/kthreads.txt for more info than you care about. */
 
-#ifndef ROS_KERN_KTHREAD_H
-#define ROS_KERN_KTHREAD_H
+#pragma once
 
 #include <ros/common.h>
 #include <trap.h>
 #include <sys/queue.h>
 #include <atomic.h>
+#include <setjmp.h>
+
+struct errbuf {
+       struct jmpbuf jmpbuf;
+};
 
 struct proc;
 struct kthread;
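
The new struct errbuf is what lets the #ip protocol ctl()s from the commit title throw errors: it wraps a jmpbuf, and the Plan 9 style waserror()/error()/poperror()/nexterror() macros (defined in the 9ns error-handling code, not in this header; signatures approximate) setjmp()/longjmp() through it, while the kthread->errbuf field below presumably tracks the active buffer so it can be saved and restored when a kthread blocks. A rough sketch of the intended pattern, with a hypothetical ctl handler:

    /* Caller of a protocol ctl(), hypothetical: */
    if (waserror()) {
        /* an error() in the callee longjmp'd back to this setjmp();
         * clean up, then re-throw to the next errbuf on the stack */
        nexterror();
    }
    ipctl(conv, cmd);   /* may call error("bad ctl request") internally */
    poperror();         /* pop this errbuf once the risky region is done */
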
@@ -20,19 +24,29 @@ struct semaphore;
 TAILQ_HEAD(kthread_tailq, kthread);
 TAILQ_HEAD(semaphore_tailq, semaphore);
 
+#define GENBUF_SZ 128  /* plan9 uses this as a scratch space, per syscall */
+
+#define KTH_IS_KTASK                   (1 << 0)
+#define KTH_SAVE_ADDR_SPACE            (1 << 1)
+#define KTH_KTASK_FLAGS                        (KTH_IS_KTASK)
+#define KTH_DEFAULT_FLAGS              (KTH_SAVE_ADDR_SPACE)
+
 /* This captures the essence of a kernel context that we want to suspend.  When
  * a kthread is running, we make sure its stacktop is the default kernel stack,
  * meaning it will receive the interrupts from userspace. */
 struct kthread {
-       struct kernel_ctx                       context;
+       struct jmpbuf                           context;
        uintptr_t                                       stacktop;
        struct proc                                     *proc;
        struct syscall                          *sysc;
-       void                                            *errbuf;        /* TODO: avoiding include loops */
+       struct errbuf                           *errbuf;
        TAILQ_ENTRY(kthread)            link;
        /* ID, other shit, etc */
-       bool                                            is_ktask;       /* default is FALSE */
+       int                                                     flags;
        char                                            *name;
+       char                                            generic_buf[GENBUF_SZ];
+       struct systrace_record          *trace;
+       struct systrace_record          *strace;
 };
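
The single is_ktask bool becomes a flags word so a kthread can carry more than one property. Reading just the defines above, KTH_DEFAULT_FLAGS marks an ordinary syscall kthread that must save and restore its process's address space, while KTH_KTASK_FLAGS marks a ktask with no process behind it. A minimal sketch of how a call site might consult the new field (the surrounding code is hypothetical):

    /* was:  if (!kthread->is_ktask) { ...save the address space... } */
    if (kth->flags & KTH_SAVE_ADDR_SPACE) {
        /* remember the proc so restart_kthread() can switch back to
         * its address space when this kthread resumes */
    }
    if (kth->flags & KTH_IS_KTASK) {
        /* no process context; skip the save/restore entirely */
    }
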
 
 /* Semaphore for kthreads to sleep on.  0 or less means you need to sleep */
@@ -50,6 +64,23 @@ struct semaphore {
 #endif
 };
 
+/* omitted elements (the sem debug stuff) are initialized to 0 */
+#define SEMAPHORE_INITIALIZER(name, n)                                         \
+{                                                                              \
+    .waiters    = TAILQ_HEAD_INITIALIZER((name).waiters),                      \
+    .nr_signals = (n),                                                         \
+    .lock       = SPINLOCK_INITIALIZER,                                        \
+    .irq_okay   = FALSE,                                                       \
+}
+
+#define SEMAPHORE_INITIALIZER_IRQSAVE(name, n)                                 \
+{                                                                              \
+    .waiters    = TAILQ_HEAD_INITIALIZER((name).waiters),                      \
+    .nr_signals = (n),                                                         \
+    .lock       = SPINLOCK_INITIALIZER_IRQSAVE,                                \
+    .irq_okay   = TRUE,                                                        \
+}
+
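
These initializers let semaphores be set up statically at compile time instead of through a runtime sem_init() call. A sketch of the intended use, with made-up names; sem_down()/sem_up() are declared elsewhere in this header and are the same calls the qlock wrappers at the bottom of this diff map to:

    /* Starts at 0, so the first sem_down() sleeps until someone ups it. */
    static struct semaphore work_ready = SEMAPHORE_INITIALIZER(work_ready, 0);

    static void consumer(void)
    {
        sem_down(&work_ready);      /* block until a producer posts */
        /* ... handle the work ... */
    }

    static void producer(void)
    {
        /* ... queue the work ... */
        sem_up(&work_ready);        /* wake one sleeping consumer */
    }
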
 struct cond_var {
        struct semaphore                        sem;
        spinlock_t                                      *lock;          /* usually points to internal_ */
@@ -58,6 +89,17 @@ struct cond_var {
        bool                                            irq_okay;
 };
 
+struct cv_lookup_elm {
+       TAILQ_ENTRY(cv_lookup_elm)      link;
+       TAILQ_ENTRY(cv_lookup_elm)      abortall_link;          /* only used in abort_all */
+       struct cond_var                         *cv;
+       struct kthread                          *kthread;
+       struct syscall                          *sysc;
+       struct proc                                     *proc;
+       atomic_t                                        abort_in_progress;      /* 0 = no */
+};
+TAILQ_HEAD(cv_lookup_tailq, cv_lookup_elm);
+
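
A cv_lookup_elm is how a blocking syscall advertises which cond_var it is sleeping on, so that abort_sysc()/abort_all_sysc() (declared further down in this diff) can find the kthread and wake it. A rough sketch of the sleeper side, with the cv locking elided; cv_wait() is declared elsewhere in this header, and condition_is_met() stands in for whatever the caller actually waits on:

    static void abortable_sleep(struct cond_var *cv)
    {
        struct cv_lookup_elm cle;

        __reg_abortable_cv(&cle, cv);   /* abort_sysc() can now find this sleeper */
        while (!condition_is_met()) {
            if (should_abort(&cle))
                break;                  /* our syscall was flagged for abort; bail */
            cv_wait(cv);                /* block until cv_signal()/cv_broadcast() */
        }
        dereg_abortable_cv(&cle);       /* must happen before cle goes out of scope */
    }
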
 uintptr_t get_kstack(void);
 void put_kstack(uintptr_t stacktop);
 uintptr_t *kstack_bottom_addr(uintptr_t stacktop);
@@ -66,7 +108,14 @@ struct kthread *__kthread_zalloc(void);
 void restart_kthread(struct kthread *kthread);
 void kthread_runnable(struct kthread *kthread);
 void kthread_yield(void);
+void kthread_usleep(uint64_t usec);
 void ktask(char *name, void (*fn)(void*), void *arg);
+
+static inline bool is_ktask(struct kthread *kthread)
+{
+       return kthread->flags & KTH_IS_KTASK;
+}
+
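
ktask() spawns a kernel-only thread of control, and kthread_usleep() lets it block for a while without spinning. A sketch of a fire-and-forget background task; the function, its name string, and the one-second period are made up:

    static void gc_worker(void *arg)
    {
        while (1) {
            /* ... periodic housekeeping ... */
            kthread_usleep(1000000);    /* sleep ~1 sec; blocks only this kthread */
        }
    }

    /* from some init path: */
    ktask("gc worker", gc_worker, NULL);
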
 /* Debugging */
 void check_poison(char *msg);
 
@@ -98,4 +147,21 @@ void cv_broadcast(struct cond_var *cv);
 void cv_signal_irqsave(struct cond_var *cv, int8_t *irq_state);
 void cv_broadcast_irqsave(struct cond_var *cv, int8_t *irq_state);
 
-#endif /* ROS_KERN_KTHREAD_H */
+bool abort_sysc(struct proc *p, struct syscall *sysc);
+void abort_all_sysc(struct proc *p);
+int abort_all_sysc_fd(struct proc *p, int fd);
+void __reg_abortable_cv(struct cv_lookup_elm *cle, struct cond_var *cv);
+void dereg_abortable_cv(struct cv_lookup_elm *cle);
+bool should_abort(struct cv_lookup_elm *cle);
+
+uintptr_t switch_to_ktask(void);
+void switch_back_from_ktask(uintptr_t old_ret);
+
+/* qlocks are plan9's binary semaphores, implemented here as wrappers around
+ * our sems.  Not sure yet if they'll need irqsave or normal sems. */
+typedef struct semaphore qlock_t;
+#define qlock_init(x) sem_init((x), 1)
+#define qlock(x) sem_down(x)
+#define qunlock(x) sem_up(x)
+#define canqlock(x) sem_trydown(x)
+#define QLOCK_INITIALIZER(name) SEMAPHORE_INITIALIZER(name, 1)
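
A sketch of the qlock wrappers in use; the lock name and the code it protects are hypothetical. Since a qlock is just a (non-irqsave, as wired up here) semaphore, holders may block, so it should not be taken from IRQ context:

    static qlock_t ipctl_qlock = QLOCK_INITIALIZER(ipctl_qlock);

    static void do_ctl_work(void)
    {
        qlock(&ipctl_qlock);
        /* ... mutate shared protocol state ... */
        qunlock(&ipctl_qlock);
    }

    /* non-blocking attempt: */
    if (canqlock(&ipctl_qlock)) {
        /* got the lock */
        qunlock(&ipctl_qlock);
    }
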