/* Copyright (c) 2010-13 The Regents of the University of California
 * Barret Rhoden <brho@cs.berkeley.edu>
 * See LICENSE for details.
 *
 * Kernel threading.  These are for blocking within the kernel for whatever
 * reason, usually during blocking IO operations.  Check out
 * Documentation/kthreads.txt for more info than you care about. */

#pragma once

#include <ros/common.h>
#include <trap.h>
#include <sys/queue.h>
#include <atomic.h>
#include <setjmp.h>

struct errbuf {
	struct jmpbuf jmpbuf;
};

struct proc;
struct kthread;
struct semaphore;
TAILQ_HEAD(kthread_tailq, kthread);
TAILQ_HEAD(semaphore_tailq, semaphore);

#define GENBUF_SZ 128	/* plan9 uses this as a scratch space, per syscall */

#define KTH_IS_KTASK			(1 << 0)
#define KTH_SAVE_ADDR_SPACE		(1 << 1)
#define KTH_KTASK_FLAGS			(KTH_IS_KTASK)
#define KTH_DEFAULT_FLAGS		(KTH_SAVE_ADDR_SPACE)

/* This captures the essence of a kernel context that we want to suspend.  When
 * a kthread is running, we make sure its stacktop is the default kernel stack,
 * meaning it will receive the interrupts from userspace. */
struct kthread {
	struct jmpbuf			context;
	uintptr_t			stacktop;
	struct proc			*proc;
	struct syscall			*sysc;
	struct errbuf			*errbuf;
	TAILQ_ENTRY(kthread)		link;
	/* ID, other shit, etc */
	int				flags;
	char				*name;
	char				generic_buf[GENBUF_SZ];
	struct systrace_record		*strace;
};

/* Semaphore for kthreads to sleep on.  0 or less means you need to sleep */
struct semaphore {
	struct kthread_tailq		waiters;
	int				nr_signals;
	spinlock_t			lock;
	bool				irq_okay;
#ifdef CONFIG_SEMAPHORE_DEBUG
	TAILQ_ENTRY(semaphore)		link;
	bool				is_on_list;	/* would like better sys/queue.h */
#endif
};

/* omitted elements (the sem debug stuff) are initialized to 0 */
#define SEMAPHORE_INITIALIZER(name, n)                                         \
{                                                                              \
    .waiters    = TAILQ_HEAD_INITIALIZER((name).waiters),                      \
    .nr_signals = (n),                                                         \
    .lock       = SPINLOCK_INITIALIZER,                                        \
    .irq_okay   = FALSE,                                                       \
}

#define SEMAPHORE_INITIALIZER_IRQSAVE(name, n)                                 \
{                                                                              \
    .waiters    = TAILQ_HEAD_INITIALIZER((name).waiters),                      \
    .nr_signals = (n),                                                         \
    .lock       = SPINLOCK_INITIALIZER_IRQSAVE,                                \
    .irq_okay   = TRUE,                                                        \
}
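
/* Example (a sketch, not part of this header's API): a semaphore can be
 * statically initialized with the macros above, or set up at runtime with
 * sem_init()/sem_init_irqsave() declared below.  The names 'ex_sem' and
 * 'ex_irq_sem' are illustrative only and are reused in the examples further
 * down.
 *
 *	static struct semaphore ex_sem = SEMAPHORE_INITIALIZER(ex_sem, 1);
 *	static struct semaphore ex_irq_sem =
 *				SEMAPHORE_INITIALIZER_IRQSAVE(ex_irq_sem, 0);
 */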

struct cond_var {
	struct semaphore		sem;
	spinlock_t			*lock;		/* usually points to internal_lock */
	spinlock_t			internal_lock;
	unsigned long			nr_waiters;
	bool				irq_okay;
};

struct cv_lookup_elm {
	TAILQ_ENTRY(cv_lookup_elm)	link;
	TAILQ_ENTRY(cv_lookup_elm)	abortall_link;	/* only used in abort_all */
	struct cond_var			*cv;
	struct kthread			*kthread;
	struct syscall			*sysc;
	struct proc			*proc;
	atomic_t			abort_in_progress;	/* 0 = no */
};
TAILQ_HEAD(cv_lookup_tailq, cv_lookup_elm);

uintptr_t get_kstack(void);
void put_kstack(uintptr_t stacktop);
uintptr_t *kstack_bottom_addr(uintptr_t stacktop);
void kthread_init(void);
struct kthread *__kthread_zalloc(void);
void __use_real_kstack(void (*f)(void *arg));
void restart_kthread(struct kthread *kthread);
void kthread_runnable(struct kthread *kthread);
void kthread_yield(void);
void kthread_usleep(uint64_t usec);
void ktask(char *name, void (*fn)(void*), void *arg);

static inline bool is_ktask(struct kthread *kthread)
{
	return kthread->flags & KTH_IS_KTASK;
}
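
/* Example (a sketch, based on the declarations above): ktask() runs 'fn(arg)'
 * in a kernel-only kthread; such threads carry KTH_IS_KTASK, which is what
 * is_ktask() checks.  The worker function and name below are made up for
 * illustration.
 *
 *	static void ex_worker(void *arg)
 *	{
 *		printk("ex_worker running, arg %p\n", arg);
 *	}
 *
 *	ktask("ex worker", ex_worker, NULL);
 */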

void sem_init(struct semaphore *sem, int signals);
void sem_init_irqsave(struct semaphore *sem, int signals);
bool sem_trydown(struct semaphore *sem);
void sem_down(struct semaphore *sem);
bool sem_up(struct semaphore *sem);
bool sem_trydown_irqsave(struct semaphore *sem, int8_t *irq_state);
void sem_down_irqsave(struct semaphore *sem, int8_t *irq_state);
bool sem_up_irqsave(struct semaphore *sem, int8_t *irq_state);
void print_all_sem_info(pid_t pid);
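
/* Example (a sketch): sem_down() blocks the calling kthread while the count is
 * 0 or less; sem_up() adds a signal and wakes a waiter if one is sleeping.
 * The _irqsave variants take an int8_t cookie tracking the caller's IRQ state.
 * 'ex_sem'/'ex_irq_sem' are the illustrative sems from above, and
 * do_protected_work() is a hypothetical helper.
 *
 *	sem_down(&ex_sem);
 *	do_protected_work();
 *	sem_up(&ex_sem);
 *
 *	int8_t irq_state = 0;
 *	sem_down_irqsave(&ex_irq_sem, &irq_state);
 *	do_protected_work();
 *	sem_up_irqsave(&ex_irq_sem, &irq_state);
 */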

void cv_init(struct cond_var *cv);
void cv_init_irqsave(struct cond_var *cv);
void cv_init_with_lock(struct cond_var *cv, spinlock_t *lock);
void cv_init_irqsave_with_lock(struct cond_var *cv, spinlock_t *lock);
void cv_lock(struct cond_var *cv);
void cv_unlock(struct cond_var *cv);
void cv_lock_irqsave(struct cond_var *cv, int8_t *irq_state);
void cv_unlock_irqsave(struct cond_var *cv, int8_t *irq_state);
void cv_wait_and_unlock(struct cond_var *cv);	/* does not mess with irqs */
void cv_wait(struct cond_var *cv);
void __cv_signal(struct cond_var *cv);
void __cv_broadcast(struct cond_var *cv);
void cv_signal(struct cond_var *cv);
void cv_broadcast(struct cond_var *cv);
void cv_signal_irqsave(struct cond_var *cv, int8_t *irq_state);
void cv_broadcast_irqsave(struct cond_var *cv, int8_t *irq_state);
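
/* Example (a sketch): the usual condition-variable pattern, assuming cv_wait()
 * returns with the cv's lock held again (see the implementation for the exact
 * wait/relock semantics).  'ex_cv' (set up with cv_init()) and the guarded
 * flag 'ex_ready' are illustrative names only.
 *
 *	Waiter:
 *		cv_lock(&ex_cv);
 *		while (!ex_ready)
 *			cv_wait(&ex_cv);
 *		cv_unlock(&ex_cv);
 *
 *	Signaler:
 *		cv_lock(&ex_cv);
 *		ex_ready = TRUE;
 *		cv_signal(&ex_cv);
 *		cv_unlock(&ex_cv);
 */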

bool abort_sysc(struct proc *p, struct syscall *sysc);
void abort_all_sysc(struct proc *p);
int abort_all_sysc_fd(struct proc *p, int fd);
void __reg_abortable_cv(struct cv_lookup_elm *cle, struct cond_var *cv);
void dereg_abortable_cv(struct cv_lookup_elm *cle);
bool should_abort(struct cv_lookup_elm *cle);
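
/* Example (a sketch of the abortable-sleep pattern suggested by the calls
 * above): register a cv_lookup_elm before sleeping on a CV so abort_sysc() /
 * abort_all_sysc() can find the sleeper, check should_abort() around the wait,
 * and deregister afterwards.  'ex_cv'/'ex_ready' are the illustrative names
 * from the previous example; the abort handling is caller-specific.
 *
 *	struct cv_lookup_elm cle;
 *	bool aborted;
 *
 *	__reg_abortable_cv(&cle, &ex_cv);
 *	cv_lock(&ex_cv);
 *	while (!ex_ready && !should_abort(&cle))
 *		cv_wait(&ex_cv);
 *	aborted = should_abort(&cle);
 *	cv_unlock(&ex_cv);
 *	dereg_abortable_cv(&cle);
 *	if (aborted)
 *		return -1;
 */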

uintptr_t switch_to_ktask(void);
void switch_back_from_ktask(uintptr_t old_ret);
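
/* Example (a sketch, inferred only from the signatures above): temporarily
 * treat the current kthread as a ktask, then restore whatever the previous
 * state was.  do_ktask_style_work() is a hypothetical helper.
 *
 *	uintptr_t old_kt = switch_to_ktask();
 *
 *	do_ktask_style_work();
 *	switch_back_from_ktask(old_kt);
 */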

/* qlocks are plan9's binary semaphores, which are wrappers around our sems.
 * Not sure if they'll need irqsave or normal sems. */
typedef struct semaphore qlock_t;
#define qlock_init(x) sem_init((x), 1)
#define qlock(x) sem_down(x)
#define qunlock(x) sem_up(x)
#define canqlock(x) sem_trydown(x)
#define QLOCK_INITIALIZER(name) SEMAPHORE_INITIALIZER(name, 1)
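
/* Example (a sketch): a qlock is just a count-of-one sem, so it can be
 * declared statically with QLOCK_INITIALIZER or set up with qlock_init().
 * qlock() downs a sem and can block, so callers must be able to sleep.
 * 'ex_qlock' and update_shared_state() are illustrative names.
 *
 *	static qlock_t ex_qlock = QLOCK_INITIALIZER(ex_qlock);
 *
 *	qlock(&ex_qlock);
 *	update_shared_state();
 *	qunlock(&ex_qlock);
 */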