/* kern/include/kthread.h */

/* Copyright (c) 2010-13 The Regents of the University of California
 * Barret Rhoden <brho@cs.berkeley.edu>
 * See LICENSE for details.
 *
 * Kernel threading.  These are for blocking within the kernel for whatever
 * reason, usually during blocking IO operations.  Check out
 * Documentation/kthreads.txt for more info than you care about. */

#pragma once

#include <ros/common.h>
#include <trap.h>
#include <sys/queue.h>
#include <atomic.h>
#include <setjmp.h>

struct errbuf {
        struct jmpbuf jmpbuf;
};

struct proc;
struct kthread;
struct semaphore;
TAILQ_HEAD(kthread_tailq, kthread);
TAILQ_HEAD(semaphore_tailq, semaphore);

#define GENBUF_SZ 128   /* plan9 uses this as a scratch space, per syscall */

#define KTH_IS_KTASK                    (1 << 0)
#define KTH_SAVE_ADDR_SPACE             (1 << 1)
#define KTH_KTASK_FLAGS                 (KTH_IS_KTASK)
#define KTH_DEFAULT_FLAGS               (KTH_SAVE_ADDR_SPACE)

/* This captures the essence of a kernel context that we want to suspend.  When
 * a kthread is running, we make sure its stacktop is the default kernel stack,
 * meaning it will receive the interrupts from userspace. */
struct kthread {
        struct jmpbuf                   context;
        uintptr_t                       stacktop;
        struct proc                     *proc;
        struct syscall                  *sysc;
        struct errbuf                   *errbuf;
        TAILQ_ENTRY(kthread)            link;
        /* ID, other shit, etc */
        int                             flags;
        char                            *name;
        char                            generic_buf[GENBUF_SZ];
        struct systrace_record          *trace;
        struct systrace_record          *strace;
};
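
/* Illustrative sketch (not part of this header): the usual lifecycle is that a
 * running kernel context blocks, e.g. in sem_down(), which suspends its state
 * into a struct kthread; once someone wakes it (sem_up() and, eventually,
 * restart_kthread()), the saved context resumes.  Roughly:
 *
 *      sem_down(&example_sem);  // may suspend this context into a kthread
 *      // ... resumes here after a matching sem_up() wakes the kthread
 *
 * example_sem is hypothetical; see Documentation/kthreads.txt for the real
 * story. */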

/* Semaphore for kthreads to sleep on.  0 or less means you need to sleep */
struct semaphore {
        struct kthread_tailq            waiters;
        int                             nr_signals;
        spinlock_t                      lock;
        bool                            irq_okay;
#ifdef CONFIG_SEMAPHORE_DEBUG
        TAILQ_ENTRY(semaphore)          link;
        bool                            is_on_list;     /* would like better sys/queue.h */
        uintptr_t                       bt_pc;          /* program counter of last down */
        uintptr_t                       bt_fp;          /* frame pointer of last down */
        uint32_t                        calling_core;
#endif
};

/* omitted elements (the sem debug stuff) are initialized to 0 */
#define SEMAPHORE_INITIALIZER(name, n)                                         \
{                                                                              \
        .waiters    = TAILQ_HEAD_INITIALIZER((name).waiters),                  \
        .nr_signals = (n),                                                     \
        .lock       = SPINLOCK_INITIALIZER,                                    \
        .irq_okay   = FALSE,                                                   \
}

#define SEMAPHORE_INITIALIZER_IRQSAVE(name, n)                                 \
{                                                                              \
        .waiters    = TAILQ_HEAD_INITIALIZER((name).waiters),                  \
        .nr_signals = (n),                                                     \
        .lock       = SPINLOCK_INITIALIZER_IRQSAVE,                            \
        .irq_okay   = TRUE,                                                    \
}
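
/* Usage sketch (illustrative, not part of this header): statically or
 * dynamically initialized semaphores.  Note the initializer macros take the
 * variable's own name so they can reference (name).waiters.  example_sem and
 * example_irq_sem are hypothetical.
 *
 *      static struct semaphore example_sem =
 *              SEMAPHORE_INITIALIZER(example_sem, 0);
 *      static struct semaphore example_irq_sem =
 *              SEMAPHORE_INITIALIZER_IRQSAVE(example_irq_sem, 0);
 *
 *      // or at runtime:
 *      sem_init(&example_sem, 0);
 *      sem_init_irqsave(&example_irq_sem, 0);
 */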

struct cond_var {
        struct semaphore                sem;
        spinlock_t                      *lock;          /* usually points to internal_ */
        spinlock_t                      internal_lock;
        unsigned long                   nr_waiters;
        bool                            irq_okay;
};

struct cv_lookup_elm {
        TAILQ_ENTRY(cv_lookup_elm)      link;
        TAILQ_ENTRY(cv_lookup_elm)      abortall_link;  /* only used in abort_all */
        struct cond_var                 *cv;
        struct kthread                  *kthread;
        struct syscall                  *sysc;
        struct proc                     *proc;
        atomic_t                        abort_in_progress;      /* 0 = no */
};
TAILQ_HEAD(cv_lookup_tailq, cv_lookup_elm);

uintptr_t get_kstack(void);
void put_kstack(uintptr_t stacktop);
uintptr_t *kstack_bottom_addr(uintptr_t stacktop);
void kthread_init(void);
struct kthread *__kthread_zalloc(void);
void restart_kthread(struct kthread *kthread);
void kthread_runnable(struct kthread *kthread);
void kthread_yield(void);
void kthread_usleep(uint64_t usec);
void ktask(char *name, void (*fn)(void*), void *arg);

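/* Usage sketch (illustrative): launching a ktask for background work that can
 * sleep in the kernel.  flush_fn and flush_arg are hypothetical.
 *
 *      void flush_fn(void *flush_arg)
 *      {
 *              while (1) {
 *                      kthread_usleep(1000000);        // sleep ~1 sec
 *                      // ... do periodic work ...
 *              }
 *      }
 *
 *      ktask("flusher", flush_fn, flush_arg);
 */
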
static inline bool is_ktask(struct kthread *kthread)
{
        return kthread->flags & KTH_IS_KTASK;
}

/* Debugging */
void check_poison(char *msg);

void sem_init(struct semaphore *sem, int signals);
void sem_init_irqsave(struct semaphore *sem, int signals);
bool sem_trydown(struct semaphore *sem);
void sem_down(struct semaphore *sem);
bool sem_up(struct semaphore *sem);
bool sem_trydown_irqsave(struct semaphore *sem, int8_t *irq_state);
void sem_down_irqsave(struct semaphore *sem, int8_t *irq_state);
bool sem_up_irqsave(struct semaphore *sem, int8_t *irq_state);
void print_sem_info(struct semaphore *sem);
void print_all_sem_info(void);
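
/* Usage sketch (illustrative): with a count of 1, a sem acts like a sleepable
 * mutex.  The _irqsave variants pair with sems set up via sem_init_irqsave()
 * (irq_okay = TRUE) and thread the saved IRQ state through an int8_t.
 * example_sem and example_irq_sem are the hypothetical sems from the
 * initializer sketch above, here assumed to start at 1.
 *
 *      sem_down(&example_sem);         // sleep until the sem is available
 *      // ... critical work ...
 *      sem_up(&example_sem);
 *
 *      int8_t irq_state = 0;
 *      sem_down_irqsave(&example_irq_sem, &irq_state);
 *      // ...
 *      sem_up_irqsave(&example_irq_sem, &irq_state);
 */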

void cv_init(struct cond_var *cv);
void cv_init_irqsave(struct cond_var *cv);
void cv_init_with_lock(struct cond_var *cv, spinlock_t *lock);
void cv_init_irqsave_with_lock(struct cond_var *cv, spinlock_t *lock);
void cv_lock(struct cond_var *cv);
void cv_unlock(struct cond_var *cv);
void cv_lock_irqsave(struct cond_var *cv, int8_t *irq_state);
void cv_unlock_irqsave(struct cond_var *cv, int8_t *irq_state);
void cv_wait_and_unlock(struct cond_var *cv);   /* does not mess with irqs */
void cv_wait(struct cond_var *cv);
void __cv_signal(struct cond_var *cv);
void __cv_broadcast(struct cond_var *cv);
void cv_signal(struct cond_var *cv);
void cv_broadcast(struct cond_var *cv);
void cv_signal_irqsave(struct cond_var *cv, int8_t *irq_state);
void cv_broadcast_irqsave(struct cond_var *cv, int8_t *irq_state);
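
/* Usage sketch (illustrative): the standard condition-variable wait loop.  The
 * condition must be rechecked under the cv's lock after waking, since cv_wait()
 * drops the lock while sleeping.  example_cv, cond_is_true(), and
 * cond_set_true() are hypothetical.
 *
 *      // waiter:
 *      cv_lock(&example_cv);
 *      while (!cond_is_true())
 *              cv_wait(&example_cv);
 *      cv_unlock(&example_cv);
 *
 *      // waker:
 *      cv_lock(&example_cv);
 *      cond_set_true();
 *      cv_signal(&example_cv);
 *      cv_unlock(&example_cv);
 */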

bool abort_sysc(struct proc *p, struct syscall *sysc);
void abort_all_sysc(struct proc *p);
int abort_all_sysc_fd(struct proc *p, int fd);
void __reg_abortable_cv(struct cv_lookup_elm *cle, struct cond_var *cv);
void dereg_abortable_cv(struct cv_lookup_elm *cle);
bool should_abort(struct cv_lookup_elm *cle);
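
/* Rough sketch (illustrative, inferred from the declarations above): code that
 * blocks a syscall on a cv and wants abort_sysc() to be able to wake it
 * registers a cv_lookup_elm around the sleep and checks should_abort().
 * Error handling on abort is omitted; the __ prefix suggests
 * __reg_abortable_cv() expects the cv already locked.  cle, example_cv, and
 * cond_is_true() are hypothetical.
 *
 *      struct cv_lookup_elm cle;
 *
 *      cv_lock(&example_cv);
 *      __reg_abortable_cv(&cle, &example_cv);
 *      while (!cond_is_true() && !should_abort(&cle))
 *              cv_wait(&example_cv);
 *      cv_unlock(&example_cv);
 *      dereg_abortable_cv(&cle);
 */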

uintptr_t switch_to_ktask(void);
void switch_back_from_ktask(uintptr_t old_ret);

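/* Usage sketch (illustrative): these calls pair up, with the value returned by
 * switch_to_ktask() handed back to switch_back_from_ktask() to restore the
 * previous state once the ktask-style section is done.
 *
 *      uintptr_t old_ret = switch_to_ktask();
 *      // ... code that should run as a ktask ...
 *      switch_back_from_ktask(old_ret);
 */
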
/* qlocks are plan9's binary semaphores, which are wrappers around our sems.
 * Not sure if they'll need irqsave or normal sems. */
typedef struct semaphore qlock_t;
#define qlock_init(x) sem_init((x), 1)
#define qlock(x) sem_down(x)
#define qunlock(x) sem_up(x)
#define canqlock(x) sem_trydown(x)
#define QLOCK_INITIALIZER(name) SEMAPHORE_INITIALIZER(name, 1)
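
/* Usage sketch (illustrative): since a qlock is just a sem initialized to 1,
 * it behaves like a sleepable mutex.  example_qlock is hypothetical.
 *
 *      static qlock_t example_qlock = QLOCK_INITIALIZER(example_qlock);
 *
 *      qlock(&example_qlock);
 *      // ... critical section, may block ...
 *      qunlock(&example_qlock);
 *
 *      if (canqlock(&example_qlock)) {
 *              // got it without blocking
 *              qunlock(&example_qlock);
 *      }
 */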