Makes kmsg code arch independent
akaros.git: kern/src/trap.c
/* Copyright (c) 2012 The Regents of the University of California
 * Barret Rhoden <brho@cs.berkeley.edu>
 * See LICENSE for details.
 *
 * Arch-independent trap handling and kernel messaging */

#include <arch/arch.h>
#include <smp.h>
#include <trap.h>
#include <stdio.h>
#include <slab.h>
#include <assert.h>
#include <kdebug.h>
#include <kmalloc.h>

struct kmem_cache *kernel_msg_cache;

void kernel_msg_init(void)
{
        kernel_msg_cache = kmem_cache_create("kernel_msgs",
                           sizeof(struct kernel_message), HW_CACHE_ALIGN, 0, 0, 0);
}

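/* Editor's note (sketch, not in the original file): this slab cache backs
 * every kernel message.  send_kernel_message() below allocates from it, and
 * the destination core frees each message after running it (see
 * handle_kmsg_ipi() and process_routine_kmsg()).  The trailing zero arguments
 * to kmem_cache_create() are presumably unused flags and ctor/dtor hooks. */
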
uint32_t send_kernel_message(uint32_t dst, amr_t pc, long arg0, long arg1,
                             long arg2, int type)
{
        kernel_message_t *k_msg;
        assert(pc);
        // note this will be freed on the destination core
        k_msg = kmem_cache_alloc(kernel_msg_cache, 0);
        k_msg->srcid = core_id();
        k_msg->dstid = dst;
        k_msg->pc = pc;
        k_msg->arg0 = arg0;
        k_msg->arg1 = arg1;
        k_msg->arg2 = arg2;
        switch (type) {
                case KMSG_IMMEDIATE:
                        spin_lock_irqsave(&per_cpu_info[dst].immed_amsg_lock);
                        STAILQ_INSERT_TAIL(&per_cpu_info[dst].immed_amsgs, k_msg, link);
                        spin_unlock_irqsave(&per_cpu_info[dst].immed_amsg_lock);
                        break;
                case KMSG_ROUTINE:
                        spin_lock_irqsave(&per_cpu_info[dst].routine_amsg_lock);
                        STAILQ_INSERT_TAIL(&per_cpu_info[dst].routine_amsgs, k_msg, link);
                        spin_unlock_irqsave(&per_cpu_info[dst].routine_amsg_lock);
                        break;
                default:
                        panic("Unknown type of kernel message!");
        }
        /* since we touched memory the other core will touch (the lock), we don't
         * need a wmb_f() */
        /* if we're sending a routine message locally, we don't want/need an IPI */
        if ((dst != k_msg->srcid) || (type == KMSG_IMMEDIATE))
                send_ipi(dst, I_KERNEL_MSG);
        return 0;
}

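/* Example (editor's sketch, not part of the original file): a caller could ask
 * a remote core to run a function by queueing a routine message.  The handler,
 * flag, and target_coreid names below are hypothetical; the calling convention
 * mirrors how pc is invoked in handle_kmsg_ipi() below.
 *
 *      static void __example_set_flag(struct trapframe *tf, uint32_t srcid,
 *                                     long arg0, long arg1, long arg2)
 *      {
 *              *(volatile long*)arg0 = arg1;
 *      }
 *
 *      volatile long example_flag = 0;
 *      send_kernel_message(target_coreid, __example_set_flag,
 *                          (long)&example_flag, 1, 0, KMSG_ROUTINE);
 */
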
/* Helper function.  Returns 0 if the list was empty. */
static kernel_message_t *get_next_amsg(struct kernel_msg_list *list_head,
                                       spinlock_t *list_lock)
{
        kernel_message_t *k_msg;
        spin_lock_irqsave(list_lock);
        k_msg = STAILQ_FIRST(list_head);
        if (k_msg)
                STAILQ_REMOVE_HEAD(list_head, link);
        spin_unlock_irqsave(list_lock);
        return k_msg;
}

/* Kernel message handler.  Extensive documentation is in
 * Documentation/kernel_messages.txt.
 *
 * In general: this processes immediate messages, then routine messages.
 * Routine messages might not return (__startcore, etc), so we need to be
 * careful about a few things.
 *
 * Note that all of this happens from interrupt context, and interrupts are
 * currently disabled for this gate.  Interrupts need to be disabled so that the
 * self-ipi doesn't preempt the execution of this kernel message. */
void handle_kmsg_ipi(struct trapframe *tf, void *data)
{
        per_cpu_info_t *myinfo = &per_cpu_info[core_id()];
        kernel_message_t msg_cp, *k_msg;

        while (1) { // will break out when there are no more messages
                /* Try to get an immediate message.  Exec and free it. */
                k_msg = get_next_amsg(&myinfo->immed_amsgs, &myinfo->immed_amsg_lock);
                if (k_msg) {
                        assert(k_msg->pc);
                        k_msg->pc(tf, k_msg->srcid, k_msg->arg0, k_msg->arg1, k_msg->arg2);
                        kmem_cache_free(kernel_msg_cache, (void*)k_msg);
                } else { // no immediate, might be a routine
                        if (in_kernel(tf))
                                return; // don't execute routine msgs if we were in the kernel
                        k_msg = get_next_amsg(&myinfo->routine_amsgs,
                                              &myinfo->routine_amsg_lock);
                        if (!k_msg) // no routines either
                                return;
                        /* copy in, and then free, in case we don't return */
                        msg_cp = *k_msg;
                        kmem_cache_free(kernel_msg_cache, (void*)k_msg);
                        /* Execute the kernel message */
                        assert(msg_cp.pc);
                        assert(msg_cp.dstid == core_id());
                        /* TODO: when batching syscalls, this should be reread from cur_tf */
                        msg_cp.pc(tf, msg_cp.srcid, msg_cp.arg0, msg_cp.arg1, msg_cp.arg2);
                }
        }
}

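/* Editor's note (sketch, not in the original file): because of the
 * in_kernel(tf) check above, a routine message that arrives while this core is
 * running kernel code stays queued.  It only runs when the kernel drains the
 * queue explicitly, e.g.
 *
 *      process_routine_kmsg(0);        // 0 means "use current_tf", see below
 *
 * or when a later kmsg IPI lands while the core is in userspace. */
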
/* Runs any outstanding routine kernel messages from within the kernel.  Will
 * make sure immediates still run first (or when they arrive, if processing a
 * bunch of these messages).  This will disable interrupts, and restore them to
 * whatever state you left them in. */
void process_routine_kmsg(struct trapframe *tf)
{
        per_cpu_info_t *myinfo = &per_cpu_info[core_id()];
        kernel_message_t msg_cp, *k_msg;
        int8_t irq_state = 0;

        disable_irqsave(&irq_state);
        /* If we were told what our TF was, use that.  o/w, go with current_tf. */
        tf = tf ? tf : current_tf;
        while (1) {
                /* Normally, we want ints disabled, so we don't have an empty self-ipi
                 * for every routine message (imagine a long list of routines).  But we
                 * do want immediates to run ahead of routines.  This enabling should
                 * work (might not in some shitty VMs).  Also note we can receive an
                 * extra self-ipi for routine messages before we turn off irqs again.
                 * Not a big deal, since we will process it right away.
                 * TODO: consider calling __kernel_message() here. */
                if (!STAILQ_EMPTY(&myinfo->immed_amsgs)) {
                        enable_irq();
                        cpu_relax();
                        disable_irq();
                }
                k_msg = get_next_amsg(&myinfo->routine_amsgs,
                                      &myinfo->routine_amsg_lock);
                if (!k_msg) {
                        enable_irqsave(&irq_state);
                        return;
                }
                /* copy in, and then free, in case we don't return */
                msg_cp = *k_msg;
                kmem_cache_free(kernel_msg_cache, (void*)k_msg);
                /* Execute the kernel message */
                assert(msg_cp.pc);
                assert(msg_cp.dstid == core_id());
                msg_cp.pc(tf, msg_cp.srcid, msg_cp.arg0, msg_cp.arg1, msg_cp.arg2);
        }
}

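/* Example (editor's sketch, placement hypothetical): a core with nothing else
 * to do can drain its routine messages before halting.  cpu_halt() here stands
 * in for whatever the arch-specific idle routine is.
 *
 *      while (1) {
 *              process_routine_kmsg(0);        // 0 falls back to current_tf
 *              cpu_halt();
 *      }
 */
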
/* extremely dangerous and racy: prints out the immed and routine kmsgs for a
 * specific core (so possibly remotely) */
void print_kmsgs(uint32_t coreid)
{
        struct per_cpu_info *pcpui = &per_cpu_info[coreid];
        void __print_kmsgs(struct kernel_msg_list *list, char *type)
        {
                char *fn_name;
                struct kernel_message *kmsg_i;
                STAILQ_FOREACH(kmsg_i, list, link) {
                        fn_name = get_fn_name((long)kmsg_i->pc);
                        printk("%s KMSG on %d from %d to run %08p(%s)\n", type,
                               kmsg_i->dstid, kmsg_i->srcid, kmsg_i->pc, fn_name);
                        kfree(fn_name);
                }
        }
        __print_kmsgs(&pcpui->immed_amsgs, "Immediate");
        __print_kmsgs(&pcpui->routine_amsgs, "Routine");
}

/* Debugging stuff */
void kmsg_queue_stat(void)
{
        struct kernel_message *kmsg;
        bool immed_emp, routine_emp;
        for (int i = 0; i < num_cpus; i++) {
                spin_lock_irqsave(&per_cpu_info[i].immed_amsg_lock);
                immed_emp = STAILQ_EMPTY(&per_cpu_info[i].immed_amsgs);
                spin_unlock_irqsave(&per_cpu_info[i].immed_amsg_lock);
                spin_lock_irqsave(&per_cpu_info[i].routine_amsg_lock);
                routine_emp = STAILQ_EMPTY(&per_cpu_info[i].routine_amsgs);
                spin_unlock_irqsave(&per_cpu_info[i].routine_amsg_lock);
                printk("Core %d's immed_emp: %d, routine_emp %d\n", i, immed_emp, routine_emp);
                if (!immed_emp) {
                        kmsg = STAILQ_FIRST(&per_cpu_info[i].immed_amsgs);
                        printk("Immed msg on core %d:\n", i);
                        printk("\tsrc:  %d\n", kmsg->srcid);
                        printk("\tdst:  %d\n", kmsg->dstid);
                        printk("\tpc:   %08p\n", kmsg->pc);
                        printk("\targ0: %08p\n", kmsg->arg0);
                        printk("\targ1: %08p\n", kmsg->arg1);
                        printk("\targ2: %08p\n", kmsg->arg2);
                }
                if (!routine_emp) {
                        kmsg = STAILQ_FIRST(&per_cpu_info[i].routine_amsgs);
                        printk("Routine msg on core %d:\n", i);
                        printk("\tsrc:  %d\n", kmsg->srcid);
                        printk("\tdst:  %d\n", kmsg->dstid);
                        printk("\tpc:   %08p\n", kmsg->pc);
                        printk("\targ0: %08p\n", kmsg->arg0);
                        printk("\targ1: %08p\n", kmsg->arg1);
                        printk("\targ2: %08p\n", kmsg->arg2);
                }
        }
}