[akaros.git] / kern/include/kthread.h
/* Copyright (c) 2010-13 The Regents of the University of California
 * Barret Rhoden <brho@cs.berkeley.edu>
 * See LICENSE for details.
 *
 * Kernel threading.  These are for blocking within the kernel for whatever
 * reason, usually during blocking IO operations.  Check out
 * Documentation/kthreads.txt for more info than you care about. */

#pragma once

#include <ros/common.h>
#include <ros/syscall.h>
#include <trap.h>
#include <sys/queue.h>
#include <atomic.h>
#include <setjmp.h>

struct errbuf {
	struct jmpbuf jmpbuf;
};

struct proc;
struct kthread;
struct semaphore;
TAILQ_HEAD(kthread_tailq, kthread);
TAILQ_HEAD(semaphore_tailq, semaphore);

#define GENBUF_SZ 128	/* plan9 uses this as a scratch space, per syscall */

#define KTH_IS_KTASK		(1 << 0)
#define KTH_SAVE_ADDR_SPACE	(1 << 1)
#define KTH_KTASK_FLAGS		(KTH_IS_KTASK)
#define KTH_DEFAULT_FLAGS	(KTH_SAVE_ADDR_SPACE)

/* This captures the essence of a kernel context that we want to suspend.  When
 * a kthread is running, we make sure its stacktop is the default kernel stack,
 * meaning it will receive the interrupts from userspace. */
struct kthread {
	struct jmpbuf			context;
	uintptr_t			stacktop;
	struct proc			*proc;
	struct syscall			*sysc;
	struct errbuf			*errbuf;
	TAILQ_ENTRY(kthread)		link;
	/* ID, other state, etc. */
	int				flags;
	char				*name;
	char				generic_buf[GENBUF_SZ];
	int				errno;
	char				errstr[MAX_ERRSTR_LEN];
	struct systrace_record		*strace;
};
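
/* A rough, conceptual sketch of how these get used (not the literal kthread.c
 * code): a blocking path saves the current kernel context into
 * kthread->context and gives up its stack, and a wakeup later restarts it:
 *
 *	struct kthread *kth = __kthread_zalloc();
 *
 *	kth->stacktop = get_kstack();
 *	kth->flags = KTH_DEFAULT_FLAGS;		// or KTH_KTASK_FLAGS
 *	if (!setjmp(&kth->context)) {
 *		// queue kth somewhere (sem, CV, alarm) and switch stacks;
 *		// when someone pops back in via restart_kthread(), we
 *		// resume right after the setjmp
 *	}
 *
 * Wakeup side:
 *
 *	kthread_runnable(kth);	// eventually leads to restart_kthread(kth)
 */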

/* Semaphore for kthreads to sleep on.  0 or less means you need to sleep */
struct semaphore {
	struct kthread_tailq		waiters;
	int				nr_signals;
	spinlock_t			lock;
	bool				irq_okay;
#ifdef CONFIG_SEMAPHORE_DEBUG
	TAILQ_ENTRY(semaphore)		link;
	bool				is_on_list;	/* would like better sys/queue.h */
#endif
};
65
66 /* omitted elements (the sem debug stuff) are initialized to 0 */
67 #define SEMAPHORE_INITIALIZER(name, n)                                         \
68 {                                                                              \
69     .waiters    = TAILQ_HEAD_INITIALIZER((name).waiters),                      \
70         .nr_signals = (n),                                                         \
71     .lock       = SPINLOCK_INITIALIZER,                                        \
72     .irq_okay   = FALSE,                                                       \
73 }
74
75 #define SEMAPHORE_INITIALIZER_IRQSAVE(name, n)                                 \
76 {                                                                              \
77     .waiters    = TAILQ_HEAD_INITIALIZER((name).waiters),                      \
78         .nr_signals = (n),                                                         \
79     .lock       = SPINLOCK_INITIALIZER_IRQSAVE,                                \
80     .irq_okay   = TRUE,                                                        \
81 }
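
/* A minimal usage sketch (hypothetical driver code, not from the tree): a
 * static sem starts at 0, the blocking path downs it, and the completion path
 * ups it.  Use the _IRQSAVE flavor if the up can come from IRQ context.
 *
 *	static struct semaphore done_sem = SEMAPHORE_INITIALIZER(done_sem, 0);
 *
 *	void wait_for_io(void)
 *	{
 *		start_the_io();		// hypothetical helper
 *		sem_down(&done_sem);	// sleeps this kthread if nr_signals <= 0
 *	}
 *
 *	void io_done(void)
 *	{
 *		sem_up(&done_sem);	// wakes one waiter, if any
 *	}
 *
 * Dynamically allocated sems use sem_init(&sem, 0) / sem_init_irqsave()
 * instead of the static initializers. */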

struct cond_var {
	struct semaphore		sem;
	spinlock_t			*lock;		/* usually points to internal_ */
	spinlock_t			internal_lock;
	unsigned long			nr_waiters;
	bool				irq_okay;
};

struct cv_lookup_elm {
	TAILQ_ENTRY(cv_lookup_elm)	link;
	TAILQ_ENTRY(cv_lookup_elm)	abortall_link;	/* only used in abort_all */
	struct cond_var			*cv;
	struct kthread			*kthread;
	struct syscall			*sysc;
	struct proc			*proc;
	atomic_t			abort_in_progress;	/* 0 = no */
};
TAILQ_HEAD(cv_lookup_tailq, cv_lookup_elm);

uintptr_t get_kstack(void);
void put_kstack(uintptr_t stacktop);
uintptr_t *kstack_bottom_addr(uintptr_t stacktop);
void kthread_init(void);
struct kthread *__kthread_zalloc(void);
void __use_real_kstack(void (*f)(void *arg));
void restart_kthread(struct kthread *kthread);
void kthread_runnable(struct kthread *kthread);
void kthread_yield(void);
void kthread_usleep(uint64_t usec);
void ktask(char *name, void (*fn)(void*), void *arg);

static inline bool is_ktask(struct kthread *kthread)
{
	return kthread->flags & KTH_IS_KTASK;
}
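
/* ktasks are kthreads not tied to a user process (KTH_KTASK_FLAGS, i.e. no
 * KTH_SAVE_ADDR_SPACE).  A sketch of launching one; the worker function and
 * its argument are made up:
 *
 *	static void flush_worker(void *arg)
 *	{
 *		struct my_dev *dev = arg;	// hypothetical device struct
 *
 *		while (1) {
 *			kthread_usleep(1000000);	// ~1 sec
 *			// ... periodic work; free to block on sems/CVs ...
 *		}
 *	}
 *
 *	ktask("flush_worker", flush_worker, dev);
 */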

void sem_init(struct semaphore *sem, int signals);
void sem_init_irqsave(struct semaphore *sem, int signals);
bool sem_trydown(struct semaphore *sem);
void sem_down(struct semaphore *sem);
bool sem_up(struct semaphore *sem);
bool sem_trydown_irqsave(struct semaphore *sem, int8_t *irq_state);
void sem_down_irqsave(struct semaphore *sem, int8_t *irq_state);
bool sem_up_irqsave(struct semaphore *sem, int8_t *irq_state);
void print_all_sem_info(pid_t pid);
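
/* The _irqsave variants take a caller-provided int8_t that tracks the IRQ
 * enable state across the call (the usual disable_irqsave / enable_irqsave
 * convention); a calling-convention sketch with a made-up sem name:
 *
 *	int8_t irq_state = 0;
 *
 *	sem_down_irqsave(&some_irqsave_sem, &irq_state);
 *
 * and, from the wakeup side (possibly an IRQ handler, hence irq_okay):
 *
 *	int8_t irq_state = 0;
 *
 *	sem_up_irqsave(&some_irqsave_sem, &irq_state);
 */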

void cv_init(struct cond_var *cv);
void cv_init_irqsave(struct cond_var *cv);
void cv_init_with_lock(struct cond_var *cv, spinlock_t *lock);
void cv_init_irqsave_with_lock(struct cond_var *cv, spinlock_t *lock);
void cv_lock(struct cond_var *cv);
void cv_unlock(struct cond_var *cv);
void cv_lock_irqsave(struct cond_var *cv, int8_t *irq_state);
void cv_unlock_irqsave(struct cond_var *cv, int8_t *irq_state);
void cv_wait_and_unlock(struct cond_var *cv);	/* does not mess with irqs */
void cv_wait(struct cond_var *cv);
void __cv_signal(struct cond_var *cv);
void __cv_broadcast(struct cond_var *cv);
void cv_signal(struct cond_var *cv);
void cv_broadcast(struct cond_var *cv);
void cv_signal_irqsave(struct cond_var *cv, int8_t *irq_state);
void cv_broadcast_irqsave(struct cond_var *cv, int8_t *irq_state);
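
/* Standard condition-variable pattern, sketched with a hypothetical flag.  Two
 * assumptions here: cv_wait() returns with the cv lock reacquired (unlike
 * cv_wait_and_unlock()), and the double-underscore signal/broadcast variants
 * are for callers already holding the cv lock.
 *
 *	static struct cond_var state_cv;
 *	static bool state_ready;
 *
 *	// init once: cv_init(&state_cv);
 *
 *	// waiter
 *	cv_lock(&state_cv);
 *	while (!state_ready)
 *		cv_wait(&state_cv);
 *	cv_unlock(&state_cv);
 *
 *	// waker
 *	cv_lock(&state_cv);
 *	state_ready = TRUE;
 *	__cv_broadcast(&state_cv);	// lock already held
 *	cv_unlock(&state_cv);
 */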

bool abort_sysc(struct proc *p, struct syscall *sysc);
void abort_all_sysc(struct proc *p);
int abort_all_sysc_fd(struct proc *p, int fd);
void __reg_abortable_cv(struct cv_lookup_elm *cle, struct cond_var *cv);
void dereg_abortable_cv(struct cv_lookup_elm *cle);
bool should_abort(struct cv_lookup_elm *cle);
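
/* These support aborting syscalls blocked on a CV (e.g. via abort_sysc()).
 * Roughly, the sleeper registers a cv_lookup_elm around its sleep and checks
 * should_abort(); a hypothetical sketch (see the existing users, e.g.
 * rendez sleeps, for the exact locking and ordering rules):
 *
 *	struct cv_lookup_elm cle;
 *
 *	__reg_abortable_cv(&cle, &my_cv);
 *	// ... cv_lock/cv_wait loop, checking should_abort(&cle) each pass
 *	//     and bailing out of the sleep if it returns TRUE ...
 *	dereg_abortable_cv(&cle);
 */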

uintptr_t switch_to_ktask(void);
void switch_back_from_ktask(uintptr_t old_ret);

/* qlocks are plan9's binary semaphores, which are wrappers around our sems.
 * Not sure if they'll need irqsave or normal sems. */
typedef struct semaphore qlock_t;
#define qlock_init(x) sem_init((x), 1)
#define qlock(x) sem_down(x)
#define qunlock(x) sem_up(x)
#define canqlock(x) sem_trydown(x)
#define QLOCK_INITIALIZER(name) SEMAPHORE_INITIALIZER(name, 1)
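
/* Minimal qlock sketch (hypothetical lock name): a qlock is just a sem
 * initialized to 1, so lock/unlock is down/up:
 *
 *	static qlock_t my_qlock = QLOCK_INITIALIZER(my_qlock);
 *
 *	qlock(&my_qlock);
 *	// ... critical section, may block ...
 *	qunlock(&my_qlock);
 *
 *	// non-blocking attempt:
 *	if (canqlock(&my_qlock)) {
 *		// got it
 *		qunlock(&my_qlock);
 *	}
 */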