/* kern/src/testing.c (akaros.git) — in-kernel test routines.
 * (Viewer header for this revision read "Spinlock debugging infrastructure".) */
1
2 #ifdef __SHARC__
3 #pragma nosharc
4 #endif
5
6 #include <arch/mmu.h>
7 #include <arch/arch.h>
8 #include <arch/bitmask.h>
9 #include <smp.h>
10
11 #include <ros/memlayout.h>
12 #include <ros/common.h>
13
14 #include <atomic.h>
15 #include <stdio.h>
16 #include <assert.h>
17 #include <string.h>
18 #include <testing.h>
19 #include <trap.h>
20 #include <arch/trap.h>
21 #include <process.h>
22 #include <syscall.h>
23 #include <timing.h>
24 #include <kfs.h>
25 #include <multiboot.h>
26 #include <pmap.h>
27 #include <page_alloc.h>
28 #include <pmap.h>
29 #include <slab.h>
30 #include <kmalloc.h>
31
32 #ifdef __i386__
33
/* Exercises every LAPIC IPI send mode from core 0: broadcast, all-but-self,
 * self, specific physical destinations, and logical destinations.  Each send
 * is followed by a 3s delay so the receivers' printouts don't interleave.
 * Receivers run test_hello_world_handler on vector I_TESTING.  i386-only. */
void test_ipi_sending(void)
{
	extern handler_t (CT(NUM_INTERRUPT_HANDLERS) RO interrupt_handlers)[];
	int8_t state = 0;	/* nesting depth token for enable/disable_irqsave */

	/* Route the test vector to the hello-world printer. */
	register_interrupt_handler(interrupt_handlers, I_TESTING,
	                           test_hello_world_handler, NULL);
	enable_irqsave(&state);
	cprintf("\nCORE 0 sending broadcast\n");
	send_broadcast_ipi(I_TESTING);
	udelay(3000000);
	cprintf("\nCORE 0 sending all others\n");
	send_all_others_ipi(I_TESTING);
	udelay(3000000);
	cprintf("\nCORE 0 sending self\n");
	send_self_ipi(I_TESTING);
	udelay(3000000);
	/* Targeted sends: per the messages, send_ipi's second arg selects
	 * physical (0) vs logical (1) destination mode. */
	cprintf("\nCORE 0 sending ipi to physical 1\n");
	send_ipi(0x01, 0, I_TESTING);
	udelay(3000000);
	cprintf("\nCORE 0 sending ipi to physical 2\n");
	send_ipi(0x02, 0, I_TESTING);
	udelay(3000000);
	cprintf("\nCORE 0 sending ipi to physical 3\n");
	send_ipi(0x03, 0, I_TESTING);
	udelay(3000000);
	cprintf("\nCORE 0 sending ipi to physical 15\n");
	send_ipi(0x0f, 0, I_TESTING);
	udelay(3000000);
	cprintf("\nCORE 0 sending ipi to logical 2\n");
	send_ipi(0x02, 1, I_TESTING);
	udelay(3000000);
	cprintf("\nCORE 0 sending ipi to logical 1\n");
	send_ipi(0x01, 1, I_TESTING);
	udelay(3000000);
	cprintf("\nDone!\n");
	disable_irqsave(&state);
}
72
// Note this never returns and will muck with any other timer work
/* Verifies PIT interrupts arrive through the legacy 8259 PIC path (vector
 * 0x20): programs the PIT, unmasks IRQ 0 and the LAPIC LINT0 pin, dumps the
 * PIC masks and LINT0 register for inspection, then spins forever taking
 * timer interrupts. */
void test_pic_reception(void)
{
	register_interrupt_handler(interrupt_handlers, 0x20, test_hello_world_handler, NULL);
	pit_set_timer(100,TIMER_RATEGEN); // totally arbitrary time
	pic_unmask_irq(0);
	cprintf("PIC1 Mask = 0x%04x\n", inb(PIC1_DATA));
	cprintf("PIC2 Mask = 0x%04x\n", inb(PIC2_DATA));
	unmask_lapic_lvt(LAPIC_LVT_LINT0);
	cprintf("Core %d's LINT0: 0x%08x\n", core_id(), read_mmreg32(LAPIC_LVT_LINT0));
	enable_irq();
	while(1);	/* never returns; keep servicing timer ticks */
}
86
/* Routes the PIT's IRQ 0 through the IOAPIC to core 3, lets it fire for a
 * few seconds, then unroutes it and returns.  Uses vector 0x20 with the
 * hello-world handler so core 3's receipt is visible. */
void test_ioapic_pit_reroute(void)
{
	register_interrupt_handler(interrupt_handlers, 0x20, test_hello_world_handler, NULL);
	ioapic_route_irq(0, 3);

	cprintf("Starting pit on core 3....\n");
	udelay(3000000);
	pit_set_timer(0xFFFE,TIMER_RATEGEN); // totally arbitrary time

	udelay(3000000);
	ioapic_unroute_irq(0);
	udelay(300000);
	cprintf("Masked pit. Waiting before return...\n");
	udelay(3000000);
}
102
103 #endif // __i386__
104
105
/* Asks every core to dump its info (core id, MTRRs on i386) via
 * test_print_info_handler; output is serialized by that handler's lock. */
void test_print_info(void)
{
	cprintf("\nCORE 0 asking all cores to print info:\n");
	smp_call_function_all(test_print_info_handler, NULL, 0);
	cprintf("\nDone!\n");
}
112
/* Walkthrough of the page-coloring allocators: prints cache geometry and
 * memory stats, dumps the colored free lists, then drains all of physical
 * memory through the L1/L2/L3 color-aware allocators and the global
 * allocator in turn (calling page_init() between passes to restock), and
 * finally exercises page_free()/page_alloc() round-trips on a few pages.
 * NOTE(review): leaves the allocator drained except for the pages re-freed
 * at the end — only safe from a dedicated test context. */
void test_page_coloring(void)
{
	//Print the different cache properties of our machine
	print_cache_properties("L1", &l1);
	cprintf("\n");
	print_cache_properties("L2", &l2);
	cprintf("\n");
	print_cache_properties("L3", &l3);
	cprintf("\n");

	//Print some stats about our memory
	cprintf("Max Address: %llu\n", MAX_VADDR);
	cprintf("Num Pages: %u\n", npages);

	//Declare a local variable for allocating pages
	page_t* page;

	cprintf("Contents of the page free list:\n");
	for(int i=0; i<llc_num_colors; i++) {
		cprintf("  COLOR %d:\n", i);
		LIST_FOREACH(page, &colored_page_free_list[i], page_link) {
			cprintf("    Page: %d\n", page2ppn(page));
		}
	}

	//Run through and allocate all pages through l1_page_alloc
	cprintf("Allocating from L1 page colors:\n");
	for(int i=0; i<get_cache_num_page_colors(&l1); i++) {
		cprintf("  COLOR %d:\n", i);
		while(l1_page_alloc(&page, i) != -ENOMEM)
			cprintf("    Page: %d\n", page2ppn(page));
	}

	//Put all the pages back by reinitializing
	page_init();

	//Run through and allocate all pages through l2_page_alloc
	cprintf("Allocating from L2 page colors:\n");
	for(int i=0; i<get_cache_num_page_colors(&l2); i++) {
		cprintf("  COLOR %d:\n", i);
		while(l2_page_alloc(&page, i) != -ENOMEM)
			cprintf("    Page: %d\n", page2ppn(page));
	}

	//Put all the pages back by reinitializing
	page_init();

	//Run through and allocate all pages through l3_page_alloc
	cprintf("Allocating from L3 page colors:\n");
	for(int i=0; i<get_cache_num_page_colors(&l3); i++) {
		cprintf("  COLOR %d:\n", i);
		while(l3_page_alloc(&page, i) != -ENOMEM)
			cprintf("    Page: %d\n", page2ppn(page));
	}

	//Put all the pages back by reinitializing
	page_init();

	//Run through and allocate all pages through page_alloc
	cprintf("Allocating from global allocator:\n");
	while(page_alloc(&page) != -ENOMEM)
		cprintf("    Page: %d\n", page2ppn(page));

	/* Everything is drained now, so a colored alloc must fail too. */
	if(l2_page_alloc(&page, 0) != -ENOMEM)
		cprintf("Should not get here, all pages should already be gone!\n");
	cprintf("All pages gone for sure...\n");

	//Now lets put a few pages back using page_free..
	cprintf("Reinserting pages via page_free and reallocating them...\n");
	page_free(&pages[0]);
	page_free(&pages[15]);
	page_free(&pages[7]);
	page_free(&pages[6]);
	page_free(&pages[4]);

	while(page_alloc(&page) != -ENOMEM)
		cprintf("Page: %d\n", page2ppn(page));
}
191
/* Barrier shared by test_barrier() and test_barrier_handler(). */
barrier_t test_cpu_array;

/* Initializes the barrier for all cores, then has every core run
 * test_barrier_handler (print id, barrier, rinse, repeat). */
void test_barrier(void)
{
	cprintf("Core 0 initializing barrier\n");
	init_barrier(&test_cpu_array, num_cpus);
	cprintf("Core 0 asking all cores to print ids, barrier, rinse, repeat\n");
	smp_call_function_all(test_barrier_handler, NULL, 0);
}
201
/* Exercises the nesting semantics of enable_irqsave()/disable_irqsave()
 * from every starting IRQ state.  The printd'd value is read_eflags() &
 * FL_IF, so "200" (hex) means interrupts enabled and "0" means disabled;
 * the asserts encode the same expectations.  The exact call order is the
 * test — do not reorder. */
void test_interrupts_irqsave(void)
{
	int8_t state = 0;	/* irqsave nesting token, reset per scenario */
	printd("Testing Nesting Enabling first, turning ints off:\n");
	disable_irq();
	printd("Interrupts are: %x\n", read_eflags() & FL_IF);
	assert(!irq_is_enabled());
	printd("Enabling IRQSave\n");
	enable_irqsave(&state);
	printd("Interrupts are: %x\n", read_eflags() & FL_IF);
	assert(irq_is_enabled());
	printd("Enabling IRQSave Again\n");
	enable_irqsave(&state);
	printd("Interrupts are: %x\n", read_eflags() & FL_IF);
	assert(irq_is_enabled());
	printd("Disabling IRQSave Once\n");
	disable_irqsave(&state);
	printd("Interrupts are: %x\n", read_eflags() & FL_IF);
	assert(irq_is_enabled());
	printd("Disabling IRQSave Again\n");
	disable_irqsave(&state);
	printd("Interrupts are: %x\n", read_eflags() & FL_IF);
	assert(!irq_is_enabled());
	printd("Done.  Should have been 0, 200, 200, 200, 0\n");

	printd("Testing Nesting Disabling first, turning ints on:\n");
	state = 0;
	enable_irq();
	printd("Interrupts are: %x\n", read_eflags() & FL_IF);
	assert(irq_is_enabled());
	printd("Disabling IRQSave Once\n");
	disable_irqsave(&state);
	printd("Interrupts are: %x\n", read_eflags() & FL_IF);
	assert(!irq_is_enabled());
	printd("Disabling IRQSave Again\n");
	disable_irqsave(&state);
	printd("Interrupts are: %x\n", read_eflags() & FL_IF);
	assert(!irq_is_enabled());
	printd("Enabling IRQSave Once\n");
	enable_irqsave(&state);
	printd("Interrupts are: %x\n", read_eflags() & FL_IF);
	assert(!irq_is_enabled());
	printd("Enabling IRQSave Again\n");
	enable_irqsave(&state);
	printd("Interrupts are: %x\n", read_eflags() & FL_IF);
	assert(irq_is_enabled());
	printd("Done.  Should have been 200, 0, 0, 0, 200 \n");

	/* Single-level pairs from each starting state: */
	state = 0;
	disable_irq();
	printd("Ints are off, enabling then disabling.\n");
	enable_irqsave(&state);
	printd("Interrupts are: %x\n", read_eflags() & FL_IF);
	assert(irq_is_enabled());
	disable_irqsave(&state);
	printd("Interrupts are: %x\n", read_eflags() & FL_IF);
	assert(!irq_is_enabled());
	printd("Done.  Should have been 200, 0\n");

	state = 0;
	enable_irq();
	printd("Ints are on, enabling then disabling.\n");
	enable_irqsave(&state);
	printd("Interrupts are: %x\n", read_eflags() & FL_IF);
	assert(irq_is_enabled());
	disable_irqsave(&state);
	printd("Interrupts are: %x\n", read_eflags() & FL_IF);
	assert(irq_is_enabled());
	printd("Done.  Should have been 200, 200\n");

	state = 0;
	disable_irq();
	printd("Ints are off, disabling then enabling.\n");
	disable_irqsave(&state);
	printd("Interrupts are: %x\n", read_eflags() & FL_IF);
	assert(!irq_is_enabled());
	enable_irqsave(&state);
	printd("Interrupts are: %x\n", read_eflags() & FL_IF);
	assert(!irq_is_enabled());
	printd("Done.  Should have been 0, 0\n");

	state = 0;
	enable_irq();
	printd("Ints are on, disabling then enabling.\n");
	disable_irqsave(&state);
	printd("Interrupts are: %x\n", read_eflags() & FL_IF);
	assert(!irq_is_enabled());
	enable_irqsave(&state);
	printd("Interrupts are: %x\n", read_eflags() & FL_IF);
	assert(irq_is_enabled());
	printd("Done.  Should have been 0, 200\n");

	disable_irq();
	cprintf("Passed enable_irqsave tests\n");
}
297
/* Smoke-tests the BITMASK macro family with a 67-bit mask (deliberately not
 * a multiple of the word size, to exercise the partial last word): declare,
 * clear, set/clear individual bits, copy, fill, and the IS_CLEAR predicate.
 * Results are printed for eyeball verification. */
void test_bitmasks(void)
{
#define masksize 67
	DECL_BITMASK(mask, masksize);
	printk("size of mask %d\n", sizeof(mask));
	CLR_BITMASK(mask, masksize);
	PRINT_BITMASK(mask, masksize);
	printk("cleared\n");
	SET_BITMASK_BIT(mask, 0);
	SET_BITMASK_BIT(mask, 11);
	SET_BITMASK_BIT(mask, 17);
	SET_BITMASK_BIT(mask, masksize-1);	/* last valid bit, in partial word */
	printk("bits set\n");
	PRINT_BITMASK(mask, masksize);
	DECL_BITMASK(mask2, masksize);
	COPY_BITMASK(mask2, mask, masksize);
	printk("copy of original mask, should be the same as the prev\n");
	PRINT_BITMASK(mask2, masksize);
	CLR_BITMASK_BIT(mask, 11);
	printk("11 cleared\n");
	PRINT_BITMASK(mask, masksize);
	printk("bit 17 is %d (should be 1)\n", GET_BITMASK_BIT(mask, 17));
	printk("bit 11 is %d (should be 0)\n", GET_BITMASK_BIT(mask, 11));
	FILL_BITMASK(mask, masksize);
	PRINT_BITMASK(mask, masksize);
	printk("should be all 1's, except for a few at the end\n");
	printk("Is Clear?: %d (should be 0)\n", BITMASK_IS_CLEAR(mask,masksize));
	CLR_BITMASK(mask, masksize);
	PRINT_BITMASK(mask, masksize);
	printk("Is Clear?: %d (should be 1)\n", BITMASK_IS_CLEAR(mask,masksize));
	printk("should be cleared\n");
}
330
/* Checklist under test; set by test_checklists() before the IPIs go out. */
checklist_t *RO the_global_list;

/* Per-core handler: wait a second, then check this core off the list. */
void test_checklist_handler(trapframe_t *tf, void* data)
{
	udelay(1000000);
	cprintf("down_checklist(%x,%d)\n", the_global_list, core_id());
	down_checklist(the_global_list);
}
339
/* Exercises the checklist primitives: pokes bits in a checklist's mask
 * directly, commits a mask covering num_cpus cores, has every core down the
 * list via test_checklist_handler, and waits for completion. */
void test_checklists(void)
{
	INIT_CHECKLIST(a_list, MAX_NUM_CPUS);
	the_global_list = &a_list;
	printk("Checklist Build, mask size: %d\n", sizeof(a_list.mask.bits));
	printk("mask\n");
	PRINT_BITMASK(a_list.mask.bits, a_list.mask.size);
	SET_BITMASK_BIT(a_list.mask.bits, 11);
	printk("Set bit 11\n");
	PRINT_BITMASK(a_list.mask.bits, a_list.mask.size);

	/* Start over with a clean list and a mask covering the live cores. */
	CLR_BITMASK(a_list.mask.bits, a_list.mask.size);
	INIT_CHECKLIST_MASK(a_mask, MAX_NUM_CPUS);
	FILL_BITMASK(a_mask.bits, num_cpus);
	//CLR_BITMASK_BIT(a_mask.bits, core_id());
	//SET_BITMASK_BIT(a_mask.bits, 1);
	//printk("New mask (1, 17, 25):\n");
	printk("Created new mask, filled up to num_cpus\n");
	PRINT_BITMASK(a_mask.bits, a_mask.size);
	printk("committing new mask\n");
	commit_checklist_wait(&a_list, &a_mask);
	printk("Old mask (copied onto):\n");
	PRINT_BITMASK(a_list.mask.bits, a_list.mask.size);
	//smp_call_function_single(1, test_checklist_handler, 0, 0);

	smp_call_function_all(test_checklist_handler, NULL, 0);

	printk("Waiting on checklist\n");
	waiton_checklist(&a_list);
	printk("Done Waiting!\n");

}
372
/* Shared counters for the smp_call / IPI-delivery tests below. */
atomic_t a, b, c;

/* IPI handler: atomically bumps the counter passed as 'data'.  The Ivy
 * build gets the precisely-typed signature; plain C takes void*. */
#ifdef __IVY__
void test_incrementer_handler(trapframe_t *tf, atomic_t *data)
#else
void test_incrementer_handler(trapframe_t *tf, void *data)
#endif
{
	assert(data);
	atomic_inc(data);
}
384
/* Deliberate no-op handler, used to occupy smp_call wait slots.  The nop
 * keeps the body from being optimized to nothing. */
void test_null_handler(trapframe_t *tf, void* data)
{
	asm volatile("nop");
}
389
/* End-to-end test of the smp_call_function_* API: self/all/single variants,
 * with and without waiters; a flood of non-waited increments that must all
 * land (19 per counter); and deliberate attempts to exhaust the (5) handler
 * wrapper slots to verify the insufficient-handler path.  The exact call
 * sequence and counts are the test — do not reorder. */
void test_smp_call_functions(void)
{
	int i;
	atomic_init(&a, 0);
	atomic_init(&b, 0);
	atomic_init(&c, 0);
	handler_wrapper_t *waiter0 = 0, *waiter1 = 0, *waiter2 = 0, *waiter3 = 0,
	                  *waiter4 = 0, *waiter5 = 0;
	uint8_t me = core_id();
	printk("\nCore %d: SMP Call Self (nowait):\n", me);
	printk("---------------------\n");
	smp_call_function_self(test_hello_world_handler, NULL, 0);
	printk("\nCore %d: SMP Call Self (wait):\n", me);
	printk("---------------------\n");
	smp_call_function_self(test_hello_world_handler, NULL, &waiter0);
	smp_call_wait(waiter0);
	printk("\nCore %d: SMP Call All (nowait):\n", me);
	printk("---------------------\n");
	smp_call_function_all(test_hello_world_handler, NULL, 0);
	printk("\nCore %d: SMP Call All (wait):\n", me);
	printk("---------------------\n");
	smp_call_function_all(test_hello_world_handler, NULL, &waiter0);
	smp_call_wait(waiter0);
	printk("\nCore %d: SMP Call All-Else Individually, in order (nowait):\n", me);
	printk("---------------------\n");
	for(i = 1; i < num_cpus; i++)
		smp_call_function_single(i, test_hello_world_handler, NULL, 0);
	printk("\nCore %d: SMP Call Self (wait):\n", me);
	printk("---------------------\n");
	smp_call_function_self(test_hello_world_handler, NULL, &waiter0);
	smp_call_wait(waiter0);
	printk("\nCore %d: SMP Call All-Else Individually, in order (wait):\n", me);
	printk("---------------------\n");
	for(i = 1; i < num_cpus; i++)
	{
		smp_call_function_single(i, test_hello_world_handler, NULL, &waiter0);
		smp_call_wait(waiter0);
	}
	/* 19 increments total per counter across the calls below. */
	printk("\nTesting to see if any IPI-functions are dropped when not waiting:\n");
	printk("A: %d, B: %d, C: %d (should be 0,0,0)\n", atomic_read(&a), atomic_read(&b), atomic_read(&c));
	smp_call_function_all(test_incrementer_handler, &a, 0);
	smp_call_function_all(test_incrementer_handler, &b, 0);
	smp_call_function_all(test_incrementer_handler, &c, 0);
	// if i can clobber a previous IPI, the interleaving might do it
	smp_call_function_single(1 % num_cpus, test_incrementer_handler, &a, 0);
	smp_call_function_single(2 % num_cpus, test_incrementer_handler, &b, 0);
	smp_call_function_single(3 % num_cpus, test_incrementer_handler, &c, 0);
	smp_call_function_single(4 % num_cpus, test_incrementer_handler, &a, 0);
	smp_call_function_single(5 % num_cpus, test_incrementer_handler, &b, 0);
	smp_call_function_single(6 % num_cpus, test_incrementer_handler, &c, 0);
	smp_call_function_all(test_incrementer_handler, &a, 0);
	smp_call_function_single(3 % num_cpus, test_incrementer_handler, &c, 0);
	smp_call_function_all(test_incrementer_handler, &b, 0);
	smp_call_function_single(1 % num_cpus, test_incrementer_handler, &a, 0);
	smp_call_function_all(test_incrementer_handler, &c, 0);
	smp_call_function_single(2 % num_cpus, test_incrementer_handler, &b, 0);
	// wait, so we're sure the others finish before printing.
	// without this, we could (and did) get 19,18,19, since the B_inc
	// handler didn't finish yet
	smp_call_function_self(test_null_handler, NULL, &waiter0);
	// need to grab all 5 handlers (max), since the code moves to the next free.
	smp_call_function_self(test_null_handler, NULL, &waiter1);
	smp_call_function_self(test_null_handler, NULL, &waiter2);
	smp_call_function_self(test_null_handler, NULL, &waiter3);
	smp_call_function_self(test_null_handler, NULL, &waiter4);
	smp_call_wait(waiter0);
	smp_call_wait(waiter1);
	smp_call_wait(waiter2);
	smp_call_wait(waiter3);
	smp_call_wait(waiter4);
	printk("A: %d, B: %d, C: %d (should be 19,19,19)\n", atomic_read(&a), atomic_read(&b), atomic_read(&c));
	printk("Attempting to deadlock by smp_calling with an outstanding wait:\n");
	smp_call_function_self(test_null_handler, NULL, &waiter0);
	printk("Sent one\n");
	smp_call_function_self(test_null_handler, NULL, &waiter1);
	printk("Sent two\n");
	smp_call_wait(waiter0);
	printk("Wait one\n");
	smp_call_wait(waiter1);
	printk("Wait two\n");
	printk("\tMade it through!\n");
	/* Six requests against five slots: at least one should report failure. */
	printk("Attempting to deadlock by smp_calling more than are available:\n");
	printk("\tShould see an Insufficient message and a kernel warning.\n");
	if (smp_call_function_self(test_null_handler, NULL, &waiter0))
		printk("\tInsufficient handlers to call function (0)\n");
	if (smp_call_function_self(test_null_handler, NULL, &waiter1))
		printk("\tInsufficient handlers to call function (1)\n");
	if (smp_call_function_self(test_null_handler, NULL, &waiter2))
		printk("\tInsufficient handlers to call function (2)\n");
	if (smp_call_function_self(test_null_handler, NULL, &waiter3))
		printk("\tInsufficient handlers to call function (3)\n");
	if (smp_call_function_self(test_null_handler, NULL, &waiter4))
		printk("\tInsufficient handlers to call function (4)\n");
	if (smp_call_function_self(test_null_handler, NULL, &waiter5))
		printk("\tInsufficient handlers to call function (5)\n");
	smp_call_wait(waiter0);
	smp_call_wait(waiter1);
	smp_call_wait(waiter2);
	smp_call_wait(waiter3);
	smp_call_wait(waiter4);
	smp_call_wait(waiter5);
	printk("\tMade it through!\n");

	printk("Done\n");
}
495
496 #ifdef __i386__
497 void test_lapic_status_bit(void)
498 {
499         register_interrupt_handler(interrupt_handlers, I_TESTING,
500                                    test_incrementer_handler, &a);
501         #define NUM_IPI 100000
502         atomic_set(&a,0);
503         printk("IPIs received (should be 0): %d\n", a);
504         for(int i = 0; i < NUM_IPI; i++) {
505                 send_ipi(7, 0, I_TESTING);
506                 lapic_wait_to_send();
507         }
508         // need to wait a bit to let those IPIs get there
509         udelay(5000000);
510         printk("IPIs received (should be %d): %d\n", a, NUM_IPI);
511         // hopefully that handler never fires again.  leaving it registered for now.
512 }
513 #endif // __i386__
514
/******************************************************************************/
/*            Test Measurements: Couples with measurement.c                   */
// All user processes can R/W the UGDATA page
/* The barrier and job selector live in UGDATA so the roslib_measurements
 * user processes can see them alongside the kernel. */
barrier_t*COUNT(1) bar = (barrier_t*COUNT(1))TC(UGDATA);
uint32_t*COUNT(1) job_to_run = (uint32_t*COUNT(1))TC(UGDATA + sizeof(barrier_t));
env_t* env_batch[64]; // Fairly arbitrary, just the max I plan to use.

/* Helpers for test_run_measurements */
/* Spin until every process from the previous job has exited. */
static void wait_for_all_envs_to_die(void)
{
	while (atomic_read(&num_envs))
		cpu_relax();
}
528
529 // this never returns.
// this never returns.
/* Spawns num_threads copies of roslib_measurements on cores [start_core,
 * start_core + num_threads), points them at job_num via the shared UGDATA
 * block, runs them, then fakes a roslib_null run on core 0 to re-enter the
 * manager for the next case.  Never returns (ends in panic). */
static void sync_tests(int start_core, int num_threads, int job_num)
{
	assert(start_core + num_threads <= num_cpus);
	wait_for_all_envs_to_die();
	for (int i = start_core; i < start_core + num_threads; i++)
		env_batch[i] = kfs_proc_create(kfs_lookup_path("roslib_measurements"));
	/* Load the first env's address space so we can write the shared page. */
	lcr3(env_batch[start_core]->env_cr3);
	init_barrier(bar, num_threads);
	*job_to_run = job_num;
	for (int i = start_core; i < start_core + num_threads; i++)
		smp_call_function_single(i, run_env_handler, env_batch[i], 0);
	process_workqueue();
	// we want to fake a run, to reenter manager for the next case
	env_t *env = kfs_proc_create(kfs_lookup_path("roslib_null"));
	smp_call_function_single(0, run_env_handler, env, 0);
	process_workqueue();
	panic("whoops!\n");
}
548
/* Async variant of sync_tests(): same spawn/setup, but core 0 services the
 * envs' generic syscalls in a loop until every thread has finished (each
 * finished env contributes -1 to 'count').  Never returns (ends in panic). */
static void async_tests(int start_core, int num_threads, int job_num)
{
	int count;

	assert(start_core + num_threads <= num_cpus);
	wait_for_all_envs_to_die();
	for (int i = start_core; i < start_core + num_threads; i++)
		env_batch[i] = kfs_proc_create(kfs_lookup_path("roslib_measurements"));
	printk("async_tests: checkpoint 0\n");
	lcr3(env_batch[start_core]->env_cr3);
	init_barrier(bar, num_threads);
	printk("async_tests: checkpoint 1\n");
	*job_to_run = job_num;
	for (int i = start_core; i < start_core + num_threads; i++)
		smp_call_function_single(i, run_env_handler, env_batch[i], 0);
	/* Service syscalls until all num_threads envs report done. */
	count = 0;
	while (count > -num_threads) {
		count = 0;
		for (int i = start_core; i < start_core + num_threads; i++) {
			count += process_generic_syscalls(env_batch[i], 1);
		}
		cpu_relax();
	}
	// we want to fake a run, to reenter manager for the next case
	env_t *env = kfs_proc_create(kfs_lookup_path("roslib_null"));
	smp_call_function_single(0, run_env_handler, env, 0);
	process_workqueue();
	// this all never returns
	panic("whoops!\n");
}
579
580 void test_run_measurements(uint32_t job_num)
581 {
582         switch (job_num) {
583                 case 0: // Nulls
584                         printk("Case 0:\n");
585                         async_tests(2, 1, job_num);  // start core 2, 1 core total
586                         break;
587                 case 1: // Sync
588                         printk("Case 1:\n");
589                         sync_tests(2, 1, job_num);
590                         break;
591                 case 2:
592                         printk("Case 2:\n");
593                         sync_tests(2, 2, job_num);
594                         break;
595                 case 3:
596                         printk("Case 3:\n");
597                         sync_tests(0, 3, job_num);
598                         break;
599                 case 4:
600                         printk("Case 4:\n");
601                         sync_tests(0, 4, job_num);
602                         break;
603                 case 5:
604                         printk("Case 5:\n");
605                         sync_tests(0, 5, job_num);
606                         break;
607                 case 6:
608                         printk("Case 6:\n");
609                         sync_tests(0, 6, job_num);
610                         break;
611                 case 7:
612                         printk("Case 7:\n");
613                         sync_tests(0, 7, job_num);
614                         break;
615                 case 8:
616                         printk("Case 8:\n");
617                         sync_tests(0, 8, job_num);
618                         break;
619                 case 9:
620                         printk("Case 9:\n");
621                         async_tests(2, 1, job_num);
622                         break;
623                 case 10:
624                         printk("Case 10:\n");
625                         async_tests(2, 2, job_num);
626                         break;
627                 case 11:
628                         printk("Case 11:\n");
629                         async_tests(2, 3, job_num);
630                         break;
631                 case 12:
632                         printk("Case 12:\n");
633                         async_tests(2, 4, job_num);
634                         break;
635                 case 13:
636                         printk("Case 13:\n");
637                         async_tests(2, 5, job_num);
638                         break;
639                 case 14:
640                         printk("Case 14:\n");
641                         async_tests(2, 6, job_num);
642                         break;
643                 default:
644                         warn("Invalid test number!!");
645         }
646         panic("Error in test setup!!");
647 }
648
649 /************************************************************/
650 /* ISR Handler Functions */
651
/* Generic "I got an interrupt" handler: extracts the trap number from the
 * arch-specific trapframe (tf_trapno on i386, TBR field on sparc_v8, 0
 * elsewhere) and prints it with the core id and trapframe address. */
void test_hello_world_handler(trapframe_t *tf, void* data)
{
	int trapno;
	#if defined(__i386__)
	trapno = tf->tf_trapno;
	#elif defined(__sparc_v8__)
	trapno = (tf->tbr >> 4) & 0xFF;
	#else
	trapno = 0;
	#endif

	cprintf("Incoming IRQ, ISR: %d on core %d with tf at 0x%08x\n",
	        trapno, core_id(), tf);
}
666
/* Serializes the per-core info dumps so cores don't interleave output. */
spinlock_t print_info_lock = SPINLOCK_INITIALIZER;

/* Per-core handler for test_print_info(): prints the core id and, on i386,
 * the MTRR default type plus the eight variable-range MTRR base/mask pairs
 * (MSRs 0x200-0x20f), all under the print lock. */
void test_print_info_handler(trapframe_t *tf, void* data)
{
	spin_lock_irqsave(&print_info_lock);
	cprintf("----------------------------\n");
	cprintf("This is Core %d\n", core_id());
#ifdef __i386__
	cprintf("MTRR_DEF_TYPE = 0x%08x\n", read_msr(IA32_MTRR_DEF_TYPE));
	cprintf("MTRR Phys0 Base = 0x%016llx, Mask = 0x%016llx\n",
	        read_msr(0x200), read_msr(0x201));
	cprintf("MTRR Phys1 Base = 0x%016llx, Mask = 0x%016llx\n",
	        read_msr(0x202), read_msr(0x203));
	cprintf("MTRR Phys2 Base = 0x%016llx, Mask = 0x%016llx\n",
	        read_msr(0x204), read_msr(0x205));
	cprintf("MTRR Phys3 Base = 0x%016llx, Mask = 0x%016llx\n",
	        read_msr(0x206), read_msr(0x207));
	cprintf("MTRR Phys4 Base = 0x%016llx, Mask = 0x%016llx\n",
	        read_msr(0x208), read_msr(0x209));
	cprintf("MTRR Phys5 Base = 0x%016llx, Mask = 0x%016llx\n",
	        read_msr(0x20a), read_msr(0x20b));
	cprintf("MTRR Phys6 Base = 0x%016llx, Mask = 0x%016llx\n",
	        read_msr(0x20c), read_msr(0x20d));
	cprintf("MTRR Phys7 Base = 0x%016llx, Mask = 0x%016llx\n",
	        read_msr(0x20e), read_msr(0x20f));
#endif // __i386__
	cprintf("----------------------------\n");
	spin_unlock_irqsave(&print_info_lock);
}
696
697 void test_barrier_handler(trapframe_t *tf, void* data)
698 {
699         cprintf("Round 1: Core %d\n", core_id());
700         waiton_barrier(&test_cpu_array);
701         waiton_barrier(&test_cpu_array);
702         waiton_barrier(&test_cpu_array);
703         waiton_barrier(&test_cpu_array);
704         waiton_barrier(&test_cpu_array);
705         waiton_barrier(&test_cpu_array);
706         cprintf("Round 2: Core %d\n", core_id());
707         waiton_barrier(&test_cpu_array);
708         cprintf("Round 3: Core %d\n", core_id());
709         // uncomment to see it fucked up
710         //cprintf("Round 4: Core %d\n", core_id());
711 }
712
/* Timer-test handler: decrements the waiter count so test_pit() can tell
 * the LAPIC one-shot fired.  Ivy build gets the precise signature. */
#ifdef __IVY__
static void test_waiting_handler(trapframe_t *tf, atomic_t *data)
#else
static void test_waiting_handler(trapframe_t *tf, void *data)
#endif
{
	atomic_dec(data);
}
721
722 #ifdef __i386__
/* Sanity-checks the three timing sources against each other: a 10s PIT
 * busy-wait, a 10s TSC-based udelay, then a 10s LAPIC one-shot whose expiry
 * (vector I_TESTING -> test_waiting_handler) clears the wait flag. */
void test_pit(void)
{
	cprintf("Starting test for PIT now (10s)\n");
	udelay_pit(10000000);
	cprintf("End now\n");
	cprintf("Starting test for TSC (if stable) now (10s)\n");
	udelay(10000000);
	cprintf("End now\n");

	cprintf("Starting test for LAPIC (if stable) now (10s)\n");
	enable_irq();
	lapic_set_timer(10000000, FALSE);

	atomic_t waiting;
	atomic_init(&waiting, 1);
	register_interrupt_handler(interrupt_handlers, I_TESTING,
	                           test_waiting_handler, &waiting);
	while(atomic_read(&waiting))
		cpu_relax();
	cprintf("End now\n");
}
744
745 void test_circ_buffer(void)
746 {
747         int arr[5] = {0, 1, 2, 3, 4};
748
749         for (int i = 0; i < 5; i++) {
750                 FOR_CIRC_BUFFER(i, 5, j)
751                         printk("Starting with current = %d, each value = %d\n", i, j);
752         }
753         return;
754 }
755
/* Active-message test handler: prints the receiving core, the sender, and
 * the three payload words.  Ivy build gets uint32_t args; plain C void*. */
#ifdef __IVY__
void test_am_handler(trapframe_t* tf, uint32_t srcid, uint32_t a0, uint32_t a1,
                     uint32_t a2)
#else
void test_am_handler(trapframe_t* tf, uint32_t srcid, void * a0, void * a1,
                     void * a2)
#endif
{
	printk("Received AM on core %d from core %d: arg0= 0x%08x, arg1 = "
	       "0x%08x, arg2 = 0x%08x\n", core_id(), srcid, a0, a1, a2);
	return;
}
768
769 void test_active_messages(void)
770 {
771         // basic tests, make sure we can handle a wraparound and that the error
772         // messages work.
773         printk("sending NUM_ACTIVE_MESSAGES to core 1, sending (#,deadbeef,0)\n");
774         for (int i = 0; i < NUM_ACTIVE_MESSAGES; i++)
775 #ifdef __IVY__
776                 while (send_active_message(1, test_am_handler, i, 0xdeadbeef, 0))
777                         cpu_relax();
778 #else
779                 while (send_active_message(1, test_am_handler, (void *)i,
780                                            (void *)0xdeadbeef, (void *)0))
781                         cpu_relax();
782 #endif
783         udelay(5000000);
784         printk("sending 2*NUM_ACTIVE_MESSAGES to core 1, sending (#,cafebabe,0)\n");
785         for (int i = 0; i < 2*NUM_ACTIVE_MESSAGES; i++)
786 #ifdef __IVY__
787                 while (send_active_message(1, test_am_handler, i, 0xdeadbeef, 0))
788                         cpu_relax();
789 #else
790                 while (send_active_message(1, test_am_handler, (void *)i,
791                                            (void *)0xdeadbeef, (void *)0))
792                         cpu_relax();
793 #endif
794         udelay(5000000);
795         return;
796 }
797 #endif // __i386__
798
799 static void test_single_cache(int iters, size_t size, int align, int flags,
800                               void (*ctor)(void *, size_t),
801                               void (*dtor)(void *, size_t))
802 {
803         struct kmem_cache *test_cache;
804         void *objects[iters];
805         test_cache = kmem_cache_create("test_cache", size, align, flags, ctor, dtor);
806         printk("Testing Kmem Cache:\n");
807         print_kmem_cache(test_cache);
808         for (int i = 0; i < iters; i++) {
809                 objects[i] = kmem_cache_alloc(test_cache, 0);
810                 printk("Buffer %d addr = %p\n", i, objects[i]);
811         }
812         for (int i = 0; i < iters; i++) {
813                 kmem_cache_free(test_cache, objects[i]);
814         }
815         kmem_cache_destroy(test_cache);
816         printk("\n\n\n\n");
817 }
818
/* Runs test_single_cache over a few geometries: large alignment with no
 * ctor/dtor, small alignment with a (noisy) ctor/dtor pair, and a larger
 * object size.  Uses GCC nested functions for the ctor/dtor. */
void test_slab(void)
{
	void a_ctor(void *buf, size_t size)
	{
		printk("constructin tests\n");
	}
	void a_dtor(void *buf, size_t size)
	{
		printk("destructin tests\n");
	}
	test_single_cache(10, 128, 512, 0, 0, 0);
	test_single_cache(10, 128, 4, 0, a_ctor, a_dtor);
	test_single_cache(10, 1024, 16, 0, 0, 0);
}
833
834 void test_kmalloc(void)
835 {
836         printk("Testing Kmalloc\n");
837         void *bufs[NUM_KMALLOC_CACHES + 1];     
838         size_t size;
839         for (int i = 0; i < NUM_KMALLOC_CACHES + 1; i++){
840                 size = (KMALLOC_SMALLEST << i) - KMALLOC_OFFSET;
841                 bufs[i] = kmalloc(size, 0);
842                 printk("Size %d, Addr = %p\n", size, bufs[i]);
843         }
844         for (int i = 0; i < NUM_KMALLOC_CACHES; i++) {
845                 printk("Freeing buffer %d\n", i);
846                 kfree(bufs[i]);
847         }
848         printk("Testing a large kmalloc\n");
849         size = (KMALLOC_LARGEST << 2);
850         bufs[0] = kmalloc(size, 0);
851         printk("Size %d, Addr = %p\n", size, bufs[0]);
852         kfree(bufs[0]);
853 }
854