1 /* Copyright (c) 2009, 2010 The Regents of the University of California
2 * Barret Rhoden <brho@cs.berkeley.edu>
3 * See LICENSE for details.
5 * All things processes! As we move away from the old envs to processes,
6 * we'll move things into here that are designed for multicore processes. */
8 #ifndef ROS_KERN_PROCESS_H
9 #define ROS_KERN_PROCESS_H
11 #include <ros/common.h>
12 #include <ros/notification.h>
/* Process States.  Not 100% on the names yet.  RUNNABLE_* are waiting to go to
 * RUNNING_*.  For instance, RUNNABLE_M is expecting to go to RUNNING_M.  It
 * could be waiting for its timeslice, or possibly for all the cores it asked
 * for.  You use proc_run() to transition between these states.
 *
 * Difference between the _M and the _S states:
 * - _S : legacy process mode
 * - RUNNING_M implies *guaranteed* core(s).  You can be a single core in the
 *   RUNNING_M state.  The guarantee is subject to time slicing, but when you
 *   run, you get all of your cores.
 * - The time slicing is at a coarser granularity for _M states.  This means
 *   that when you run an _S on a core, it should be interrupted/time sliced
 *   more often, which also means the core should be classified differently for
 *   a while.  Possibly even using its local APIC timer.
 * - A process in an _M state will be informed about changes to its state, e.g.,
 *   will have a handler run in the event of a page fault */
/* Process states.  Stored in a proc's 'state' field; each state is a distinct
 * bit, and __proc_set_state() is used to transition between them. */
#define PROC_CREATED			0x01
#define PROC_RUNNABLE_S			0x02
#define PROC_RUNNING_S			0x04
#define PROC_WAITING			0x08 // can split out to INT and UINT
#define PROC_DYING				0x10
#define PROC_RUNNABLE_M			0x20
#define PROC_RUNNING_M			0x40

/* Maps a PROC_* state value to a human-readable name.  Any value that is not
 * exactly one of the states above (including combined bits) yields "UNKNOWN".
 * The final fallback arm terminates the conditional chain. */
#define procstate2str(state) ((state)==PROC_CREATED ? "CREATED" : \
                              (state)==PROC_RUNNABLE_S ? "RUNNABLE_S" : \
                              (state)==PROC_RUNNING_S ? "RUNNING_S" : \
                              (state)==PROC_WAITING ? "WAITING" : \
                              (state)==PROC_DYING ? "DYING" : \
                              (state)==PROC_RUNNABLE_M ? "RUNNABLE_M" : \
                              (state)==PROC_RUNNING_M ? "RUNNING_M" : \
                                                        "UNKNOWN")
/* List type for queues of processes (e.g. the runnable list below). */
TAILQ_HEAD(proc_list, proc);		// Declares 'struct proc_list'

/* Queue of runnable processes, protected by runnablelist_lock (the LCKD
 * annotation ties the data to its lock for static checking). */
extern spinlock_t runnablelist_lock;
extern struct proc_list LCKD(&runnablelist_lock) proc_runnablelist;

/* Can use a htable iterator to iterate through all active procs */
extern struct hashtable *pid_hash;
extern spinlock_t pid_hash_lock;

/* Idle cores: ones able to be exclusively given to a process (worker cores). */
extern spinlock_t idle_lock;  // never grab this before a proc_lock
extern uint32_t LCKD(&idle_lock) (RO idlecoremap)[MAX_NUM_CPUS];
extern uint32_t LCKD(&idle_lock) num_idlecores;
/* Initializes p's procinfo region — TODO(review): confirm exact contents in
 * the implementation. */
void proc_init_procinfo(struct proc *p);

/* Process management: */
/* Allocates a new process into *pp with 'parent' as its parent; returns an
 * error_t on failure. */
error_t proc_alloc(struct proc **pp, struct proc *parent);
/* Creates a process from the program file 'prog' with the given argv/envp. */
struct proc *proc_create(struct file *prog, char **argv, char **envp);
/* Transitions p to 'state' (one of the PROC_* values); writes p->state. */
int __proc_set_state(struct proc *p, uint32_t state) WRITES(p->state);
/* Looks up a process by PID — presumably via pid_hash; NOTE(review): confirm
 * whether it returns NULL or a refcounted proc on miss/hit. */
struct proc *pid2proc(pid_t pid);
/* True if 'actor' is allowed to manage 'target' (permission check). */
bool proc_controls(struct proc *SAFE actor, struct proc *SAFE target);
/* Runs a RUNNABLE_* process (the transition function named in the state
 * comment above). */
void proc_run(struct proc *SAFE p);
/* Resumes process p on this core from the saved trapframe tf. */
void proc_restartcore(struct proc *SAFE p, trapframe_t *SAFE tf);
void proc_destroy(struct proc *SAFE p);
/* Yields the calling core back; 'being_nice' marks a voluntary yield —
 * TODO(review): confirm semantics. */
void proc_yield(struct proc *SAFE p, bool being_nice);
/* Sends notification 'notif' (with optional event 'ne') to vcore 'vcoreid'. */
void do_notify(struct proc *p, uint32_t vcoreid, unsigned int notif,
               struct notif_event *ne);
/* As do_notify, but without an explicit target vcore. */
void proc_notify(struct proc *p, unsigned int notif, struct notif_event *ne);

/* Exposed for sys_getvcoreid(), til it's unnecessary */
uint32_t proc_get_vcoreid(struct proc *SAFE p, uint32_t pcoreid);
/* Process core management.  Only call these if you are RUNNING_M or RUNNABLE_M.
 * These all adjust the vcoremap and take appropriate actions (like __startcore
 * if you were already RUNNING_M).  You could be RUNNABLE_M with no vcores when
 * these are done (basically preempted, and waiting to get run again).
 *
 * These are internal functions.  Error checking is to catch bugs, and you
 * shouldn't call these functions with parameters you are not sure about (like
 * an invalid corelist).
 *
 * They also may cause an IPI to be sent to the core it is called on.  If so,
 * the return value will be true.  Once you unlock (and enable interrupts) you
 * will be preempted, and usually lose your stack.  See __proc_kmsg_pending()
 * below for dealing with a pending kernel message after unlocking.
 *
 * WARNING: YOU MUST HOLD THE PROC_LOCK BEFORE CALLING THESE! */
/* Gives process p the additional num cores listed in corelist */
bool __proc_give_cores(struct proc *SAFE p, uint32_t *pcorelist, size_t num);
/* Makes process p's coremap look like corelist (add, remove, etc).  Not used */
bool __proc_set_allcores(struct proc *SAFE p, uint32_t *pcorelist,
                         size_t *num, amr_t message, TV(a0t) arg0,
                         TV(a1t) arg1, TV(a2t) arg2);
/* Takes from process p the num cores listed in corelist */
bool __proc_take_cores(struct proc *SAFE p, uint32_t *pcorelist,
                       size_t num, amr_t message, TV(a0t) arg0,
                       TV(a1t) arg1, TV(a2t) arg2);
/* Takes every core from p, sending 'message' with args to each one. */
bool __proc_take_allcores(struct proc *SAFE p, amr_t message, TV(a0t) arg0,
                          TV(a1t) arg1, TV(a2t) arg2);
/* Handles an IPI/kernel message left pending by the __proc_*cores calls above
 * (call this after unlocking when they returned true). */
void __proc_kmsg_pending(struct proc *p, bool ipi_pending);
/* Exposed for kern/src/resource.c for now */
/* Record / clear the vcoreid <-> pcoreid mapping for p. */
void __map_vcore(struct proc *p, uint32_t vcoreid, uint32_t pcoreid);
void __unmap_vcore(struct proc *p, uint32_t vcoreid);
/* Preemption management.  Some of these will change */
/* Warns vcoreid of an impending preemption at time 'when' — TODO(review):
 * confirm the time units/reference for 'when'. */
void __proc_preempt_warn(struct proc *p, uint32_t vcoreid, uint64_t when);
/* Warns all of p's vcores of an impending preemption at time 'when'. */
void __proc_preempt_warnall(struct proc *p, uint64_t when);
/* Preempts physical core pcoreid from p; returns whether an IPI/kernel message
 * is pending, like the __proc_*cores calls above. */
bool __proc_preempt_core(struct proc *p, uint32_t pcoreid);
bool __proc_preempt_all(struct proc *p);
/* Non-__ variants take a 'usec' delay — presumably warn, wait, then preempt;
 * NOTE(review): confirm against the implementation. */
void proc_preempt_core(struct proc *p, uint32_t pcoreid, uint64_t usec);
void proc_preempt_all(struct proc *p, uint64_t usec);
/* Allows the kernel to figure out what process is running on this core.  Can be
 * used just like a pointer to a struct proc.  Need these to be macros due to
 * some circular dependencies with smp.h.  Per-core state lives in
 * per_cpu_info[], indexed by core_id(). */
#define current per_cpu_info[core_id()].cur_proc
#define set_current_proc(p) per_cpu_info[core_id()].cur_proc = (p)
/* Allows the kernel to figure out what *user* tf is on this core's stack.  Can
 * be used just like a pointer to a struct Trapframe.  Need these to be macros
 * due to some circular dependencies with smp.h.  This is done here instead of
 * elsewhere (like trap.h) for other elliptical reasons.  Note the distinction
 * between kernel and user contexts.  The kernel always returns to its nested,
 * interrupted contexts via iret/etc.  We don't always do that for user
 * contexts. */
#define current_tf per_cpu_info[core_id()].cur_tf
/* Only a *user* trapframe may be installed: asserts !in_kernel(tf). */
#define set_current_tf(tf) ({ assert(!in_kernel(tf)); \
                              per_cpu_info[core_id()].cur_tf = (tf); })
/* Disassociates the calling core from its current process — TODO(review):
 * confirm exact semantics (refcount / address-space switch) in the .c file. */
void abandon_core(void);
/* Hold the proc_lock, since it'll use the vcoremapping to send an unmapping
 * message for the region from start to end. */
void __proc_tlbshootdown(struct proc *p, uintptr_t start, uintptr_t end);
151 /* Kernel message handlers for process management */
152 void __startcore(trapframe_t *tf, uint32_t srcid, void *a0, void *a1, void *a2);
153 void __notify(trapframe_t *tf, uint32_t srcid, void *a0, void *a1, void *a2);
154 void __preempt(trapframe_t *tf, uint32_t srcid, void *a0, void *a1, void *a2);
155 void __death(trapframe_t *tf, uint32_t srcid, void *a0, void *a1, void *a2);
156 void __tlbshootdown(struct trapframe *tf, uint32_t srcid, void *a0, void *a1,
160 void proc_init_trapframe(trapframe_t *SAFE tf, uint32_t vcoreid,
161 uint32_t entryp, uint32_t stack_top);
162 void proc_secure_trapframe(struct trapframe *tf);
163 void __abandon_core(void);
/* Debugging helpers: dump the idle-core map, all PIDs, and one proc's info. */
void print_idlecoremap(void);
void print_allpids(void);
void print_proc_info(pid_t pid);
170 #endif // !ROS_KERN_PROCESS_H