akaros/kern/include/kthread.h
/* Copyright (c) 2010-13 The Regents of the University of California
 * Barret Rhoden <brho@cs.berkeley.edu>
 * See LICENSE for details.
 *
 * Kernel threading.  These are for blocking within the kernel for whatever
 * reason, usually during blocking IO operations.  Check out
 * Documentation/kthreads.txt for more info than you care about. */

#pragma once

#include <ros/common.h>
#include <ros/syscall.h>
#include <sys/queue.h>
#include <atomic.h>
#include <setjmp.h>

struct errbuf {
        struct jmpbuf jmpbuf;
};

struct proc;
struct kthread;
struct kth_db_info;
TAILQ_HEAD(kthread_tailq, kthread);
TAILQ_HEAD(kth_db_tailq, kth_db_info);

#define GENBUF_SZ 128   /* plan9 uses this as a scratch space, per syscall */

#define KTH_IS_KTASK                    (1 << 0)
#define KTH_SAVE_ADDR_SPACE             (1 << 1)
#define KTH_IS_RCU_KTASK                (1 << 2)

/* These flag sets are for toggling between ktasks and default/process ktasks */
/* These are the flags for *any* ktask */
#define KTH_KTASK_FLAGS                 (KTH_IS_KTASK)
/* These are the flags used for normal process context */
#define KTH_DEFAULT_FLAGS               (KTH_SAVE_ADDR_SPACE)

/* This captures the essence of a kernel context that we want to suspend.  When
 * a kthread is running, we make sure its stacktop is the default kernel stack,
 * meaning it will receive the interrupts from userspace. */
struct kthread {
        struct jmpbuf                   context;
        uintptr_t                       stacktop;
        struct proc                     *proc;
        struct syscall                  *sysc;
        struct errbuf                   *errbuf;
        TAILQ_ENTRY(kthread)            link;
        /* ID, etc. */
        int                             flags;
        char                            *name;
        char                            generic_buf[GENBUF_SZ];
        int                             errno;
        char                            errstr[MAX_ERRSTR_LEN];
        struct systrace_record          *strace;
};

#define KTH_DB_SEM                      1
#define KTH_DB_CV                       2

#ifdef CONFIG_SEMAPHORE_DEBUG

struct kth_db_info {
        TAILQ_ENTRY(kth_db_info)        link;
        unsigned int                    type;
        bool                            on_list;
};

#define KTH_DB_INIT .db         = { .type = KTH_DB_SEM },

#else

struct kth_db_info {
};

#define KTH_DB_INIT

#endif


/* Semaphore for kthreads to sleep on.  0 or less means you need to sleep */
struct semaphore {
        struct kth_db_info              db;
        struct kthread_tailq            waiters;
        int                             nr_signals;
        spinlock_t                      lock;
};

#define SEMAPHORE_INITIALIZER(name, n)                                         \
{                                                                              \
    .waiters    = TAILQ_HEAD_INITIALIZER((name).waiters),                      \
    .nr_signals = (n),                                                         \
    .lock       = SPINLOCK_INITIALIZER,                                        \
    KTH_DB_INIT                                                                \
}

#define SEMAPHORE_INITIALIZER_IRQSAVE(name, n)                                 \
{                                                                              \
    .waiters    = TAILQ_HEAD_INITIALIZER((name).waiters),                      \
    .nr_signals = (n),                                                         \
    .lock       = SPINLOCK_INITIALIZER_IRQSAVE,                                \
    KTH_DB_INIT                                                                \
}
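
/* Example (sketch, not part of the API): the typical blocking-IO pattern these
 * sems support.  A kthread initializes a semaphore to 0, kicks off the IO, and
 * sleeps in sem_down() until the completion path signals it.  The names
 * my_done_sem, my_block_until_done(), and my_irq_handler() are hypothetical;
 * the irqsave variants are used here since the signaler runs in IRQ context.
 *
 *      static struct semaphore my_done_sem =
 *              SEMAPHORE_INITIALIZER_IRQSAVE(my_done_sem, 0);
 *
 *      static void my_block_until_done(void)
 *      {
 *              // start the IO, then sleep until my_irq_handler() runs
 *              sem_down_irqsave(&my_done_sem);
 *      }
 *
 *      static void my_irq_handler(void)
 *      {
 *              sem_up_irqsave(&my_done_sem);   // wakes one sleeping kthread
 *      }
 */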

struct cond_var {
        struct kth_db_info              db;
        struct kthread_tailq            waiters;
        spinlock_t                      *lock;  /* usually points to internal */
        spinlock_t                      internal_lock;
        unsigned long                   nr_waiters;
};

struct cv_lookup_elm {
        TAILQ_ENTRY(cv_lookup_elm)      link;
        TAILQ_ENTRY(cv_lookup_elm)      abortall_link;
        struct cond_var                 *cv;
        struct kthread                  *kthread;
        struct syscall                  *sysc;
        struct proc                     *proc;
        atomic_t                        abort_in_progress;      /* 0 = no */
};
TAILQ_HEAD(cv_lookup_tailq, cv_lookup_elm);

uintptr_t get_kstack(void);
void put_kstack(uintptr_t stacktop);
uintptr_t *kstack_bottom_addr(uintptr_t stacktop);
void kthread_init(void);
struct kthread *__kthread_zalloc(void);
void __use_real_kstack(void (*f)(void *arg));
void restart_kthread(struct kthread *kthread);
void kthread_runnable(struct kthread *kthread);
void kthread_yield(void);
void kthread_usleep(uint64_t usec);
void ktask(char *name, void (*fn)(void*), void *arg);
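
/* Example (sketch): launching a background kernel task.  ktask() runs fn(arg)
 * in its own kthread as a ktask (no saved process address space, per
 * KTH_KTASK_FLAGS above), and kthread_usleep() lets it block between rounds.
 * my_poller() and my_dev are hypothetical.
 *
 *      static void my_poller(void *arg)
 *      {
 *              while (1) {
 *                      // poll the device described by arg
 *                      kthread_usleep(1000000);        // ~1 sec
 *              }
 *      }
 *
 *      ktask("my poller", my_poller, my_dev);
 */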

static inline bool is_ktask(struct kthread *kthread)
{
        return kthread->flags & KTH_IS_KTASK;
}

static inline bool is_rcu_ktask(struct kthread *kthread)
{
        return kthread->flags & KTH_IS_RCU_KTASK;
}

void sem_init(struct semaphore *sem, int signals);
void sem_init_irqsave(struct semaphore *sem, int signals);
bool sem_trydown_bulk(struct semaphore *sem, int nr_signals);
bool sem_trydown(struct semaphore *sem);
void sem_down_bulk(struct semaphore *sem, int nr_signals);
void sem_down(struct semaphore *sem);
bool sem_up(struct semaphore *sem);
bool sem_trydown_bulk_irqsave(struct semaphore *sem, int nr_signals);
bool sem_trydown_irqsave(struct semaphore *sem);
void sem_down_bulk_irqsave(struct semaphore *sem, int nr_signals);
void sem_down_irqsave(struct semaphore *sem);
bool sem_up_irqsave(struct semaphore *sem);
void print_db_blk_info(pid_t pid);

void cv_init(struct cond_var *cv);
void cv_init_irqsave(struct cond_var *cv);
void cv_init_with_lock(struct cond_var *cv, spinlock_t *lock);
void cv_init_irqsave_with_lock(struct cond_var *cv, spinlock_t *lock);
void cv_lock(struct cond_var *cv);
void cv_unlock(struct cond_var *cv);
void cv_lock_irqsave(struct cond_var *cv, int8_t *irq_state);
void cv_unlock_irqsave(struct cond_var *cv, int8_t *irq_state);
void cv_wait_and_unlock(struct cond_var *cv);   /* does not mess with irqs */
void cv_wait(struct cond_var *cv);
void __cv_signal(struct cond_var *cv);
void __cv_broadcast(struct cond_var *cv);
void cv_signal(struct cond_var *cv);
void cv_broadcast(struct cond_var *cv);
void cv_signal_irqsave(struct cond_var *cv, int8_t *irq_state);
void cv_broadcast_irqsave(struct cond_var *cv, int8_t *irq_state);
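
/* Example (sketch): the usual condition-variable protocol, using the default
 * internal lock.  my_cv and my_state are hypothetical, the cv was set up
 * earlier with cv_init(&my_cv), and the waiter rechecks its condition in a
 * loop (cv_wait() drops the lock while asleep and re-takes it on wakeup).
 *
 *      // waiter
 *      cv_lock(&my_cv);
 *      while (!my_state.ready)
 *              cv_wait(&my_cv);
 *      // ... consume my_state under the lock ...
 *      cv_unlock(&my_cv);
 *
 *      // signaler
 *      cv_lock(&my_cv);
 *      my_state.ready = true;
 *      __cv_signal(&my_cv);    // lock already held, so the __ variant
 *      cv_unlock(&my_cv);
 */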

bool abort_sysc(struct proc *p, uintptr_t sysc);
void abort_all_sysc(struct proc *p);
int abort_all_sysc_fd(struct proc *p, int fd);
void __reg_abortable_cv(struct cv_lookup_elm *cle, struct cond_var *cv);
void dereg_abortable_cv(struct cv_lookup_elm *cle);
bool should_abort(struct cv_lookup_elm *cle);
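
/* Example (sketch): roughly how an abortable sleep ties itself to a CV so that
 * abort_sysc() and friends can find and wake it.  The exact locking and error
 * handling live in kthread.c and its callers; condition_is_met() is
 * hypothetical.
 *
 *      struct cv_lookup_elm cle;
 *
 *      cv_lock(cv);
 *      __reg_abortable_cv(&cle, cv);   // now visible to aborters
 *      while (!condition_is_met()) {
 *              if (should_abort(&cle))
 *                      break;          // unwind and report the abort
 *              cv_wait(cv);
 *      }
 *      cv_unlock(cv);
 *      dereg_abortable_cv(&cle);       // waits out any in-flight abort
 */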

uintptr_t switch_to_ktask(void);
void switch_back_from_ktask(uintptr_t old_ret);
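
/* Example (sketch): temporarily run as a ktask, e.g. for work that must not
 * depend on the current process's address space, then restore the previous
 * state.  The return value is an opaque token to hand back to
 * switch_back_from_ktask().
 *
 *      uintptr_t old_kth = switch_to_ktask();
 *      // ... do work that must not rely on user memory / 'current' ...
 *      switch_back_from_ktask(old_kth);
 */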

/* qlocks are plan9's binary semaphores, which are wrappers around our sems.
 * Not sure if they'll need irqsave or normal sems. */
typedef struct semaphore qlock_t;
#define qlock_init(x) sem_init((x), 1)
#define qlock(x) sem_down(x)
#define qunlock(x) sem_up(x)
#define canqlock(x) sem_trydown(x)
#define QLOCK_INITIALIZER(name) SEMAPHORE_INITIALIZER(name, 1)

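/* Example (sketch): qlocks used in the usual plan9 style; my_qlock and the
 * state it guards are hypothetical.  Since qlocks are sems, it is fine to
 * block while holding one.
 *
 *      static qlock_t my_qlock = QLOCK_INITIALIZER(my_qlock);
 *
 *      qlock(&my_qlock);
 *      // ... touch the guarded state; blocking here is OK ...
 *      qunlock(&my_qlock);
 *
 *      if (canqlock(&my_qlock)) {
 *              // got the lock without blocking
 *              qunlock(&my_qlock);
 *      }
 */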