Bare support for dispatching parallel processes
[akaros.git] / kern / src / testing.c
1
2 #ifdef __SHARC__
3 #pragma nosharc
4 #endif
5
6 #include <arch/mmu.h>
7 #include <arch/arch.h>
8 #include <smp.h>
9
10 #include <ros/memlayout.h>
11
12 #include <atomic.h>
13 #include <stdio.h>
14 #include <assert.h>
15 #include <string.h>
16 #include <testing.h>
17 #include <trap.h>
18 #include <arch/trap.h>
19 #include <process.h>
20 #include <syscall.h>
21 #include <timing.h>
22 #include <kfs.h>
23 #include <multiboot.h>
24 #include <pmap.h>
25 #include <page_alloc.h>
26
27 #ifdef __i386__
28
/* Smoke-test for the x86 LAPIC IPI send paths: broadcast, all-but-self, self,
 * directed physical-mode, and directed logical-mode deliveries.  Every send is
 * followed by a 3s delay so the hello-world handler output from the target
 * core(s) can be read before the next send fires.  Interrupts are enabled via
 * irqsave so this core can receive its own IPIs; the previous IRQ state is
 * restored on exit.  Destinations 1/2/3/15 are hardcoded APIC IDs — presumably
 * chosen for the test rig's topology; verify against the machine in use. */
void test_ipi_sending(void)
{
	extern handler_t (COUNT(NUM_INTERRUPT_HANDLERS) interrupt_handlers)[];
	int8_t state = 0;

	/* Route vector I_TESTING to the simple "hello world" printer so each
	 * delivery is visible on the console. */
	register_interrupt_handler(interrupt_handlers, I_TESTING,
	                           test_hello_world_handler, NULL);
	enable_irqsave(&state);
	cprintf("\nCORE 0 sending broadcast\n");
	send_broadcast_ipi(I_TESTING);
	udelay(3000000);
	cprintf("\nCORE 0 sending all others\n");
	send_all_others_ipi(I_TESTING);
	udelay(3000000);
	cprintf("\nCORE 0 sending self\n");
	send_self_ipi(I_TESTING);
	udelay(3000000);
	/* Directed sends: second arg 0 = physical destination mode. */
	cprintf("\nCORE 0 sending ipi to physical 1\n");
	send_ipi(0x01, 0, I_TESTING);
	udelay(3000000);
	cprintf("\nCORE 0 sending ipi to physical 2\n");
	send_ipi(0x02, 0, I_TESTING);
	udelay(3000000);
	cprintf("\nCORE 0 sending ipi to physical 3\n");
	send_ipi(0x03, 0, I_TESTING);
	udelay(3000000);
	cprintf("\nCORE 0 sending ipi to physical 15\n");
	send_ipi(0x0f, 0, I_TESTING);
	udelay(3000000);
	/* Second arg 1 = logical destination mode. */
	cprintf("\nCORE 0 sending ipi to logical 2\n");
	send_ipi(0x02, 1, I_TESTING);
	udelay(3000000);
	cprintf("\nCORE 0 sending ipi to logical 1\n");
	send_ipi(0x01, 1, I_TESTING);
	udelay(3000000);
	cprintf("\nDone!\n");
	disable_irqsave(&state);
}
67
// Note this never returns and will muck with any other timer work
/* PIC/PIT reception test: hooks vector 0x20 (presumably IRQ0 after the PIC
 * remap — confirm against the trap setup), programs the PIT as a rate
 * generator, unmasks IRQ0, dumps the PIC masks and this core's LINT0 entry,
 * then spins forever with interrupts on so handler output can be observed. */
void test_pic_reception(void)
{
	register_interrupt_handler(interrupt_handlers, 0x20, test_hello_world_handler, NULL);
	pit_set_timer(100,TIMER_RATEGEN); // totally arbitrary time
	pic_unmask_irq(0);
	/* Show what is currently masked on both PICs. */
	cprintf("PIC1 Mask = 0x%04x\n", inb(PIC1_DATA));
	cprintf("PIC2 Mask = 0x%04x\n", inb(PIC2_DATA));
	unmask_lapic_lvt(LAPIC_LVT_LINT0);
	cprintf("Core %d's LINT0: 0x%08x\n", core_id(), read_mmreg32(LAPIC_LVT_LINT0));
	enable_irq();
	while(1);
}
81
82 #endif // __i386__
83
/* Broadcasts test_print_info_handler to every core (including this one) so
 * each dumps its id and, on x86, its MTRR configuration.  Fire-and-forget:
 * the final "Done!" only means the calls were sent, not that they finished. */
void test_print_info(void)
{
	cprintf("\nCORE 0 asking all cores to print info:\n");
	smp_call_function_all(test_print_info_handler, NULL, 0);
	cprintf("\nDone!\n");
}
90
91 void test_page_coloring(void) 
92 {
93         //Print the different cache properties of our machine
94         print_cache_properties("L1", &l1);
95         cprintf("\n");
96         print_cache_properties("L2", &l2);
97         cprintf("\n");
98         print_cache_properties("L3", &l3);
99         cprintf("\n");
100
101         //Print some stats about our memory
102         cprintf("Max Address: %llu\n", MAX_VADDR);
103         cprintf("Num Pages: %u\n", npages);
104
105         //Declare a local variable for allocating pages 
106         page_t* page;
107
108         //Run through and allocate all pages through l1_page_alloc
109         cprintf("Allocating from L1 page colors:\n");
110         for(int i=0; i<get_cache_num_page_colors(&l1); i++) {
111                 cprintf("  COLOR %d:\n", i);
112                 while(l1_page_alloc(&page, i) != -ENOMEM)
113                         cprintf("    Page: %d\n", page2ppn(page));
114         }
115
116         //Put all the pages back by reinitializing
117         page_init();
118         
119         //Run through and allocate all pages through l2_page_alloc
120         cprintf("Allocating from L2 page colors:\n");
121         for(int i=0; i<get_cache_num_page_colors(&l2); i++) {
122                 cprintf("  COLOR %d:\n", i);
123                 while(l2_page_alloc(&page, i) != -ENOMEM)
124                         cprintf("    Page: %d\n", page2ppn(page));
125         }
126
127         //Put all the pages back by reinitializing
128         page_init();
129         
130         //Run through and allocate all pages through l3_page_alloc
131         cprintf("Allocating from L3 page colors:\n");
132         for(int i=0; i<get_cache_num_page_colors(&l3); i++) {
133                 cprintf("  COLOR %d:\n", i);
134                 while(l3_page_alloc(&page, i) != -ENOMEM)
135                         cprintf("    Page: %d\n", page2ppn(page));
136         }
137         
138         //Put all the pages back by reinitializing
139         page_init();
140         
141         //Run through and allocate all pages through page_alloc
142         cprintf("Allocating from global allocator:\n");
143         while(page_alloc(&page) != -ENOMEM)
144                 cprintf("    Page: %d\n", page2ppn(page));
145         
146         if(l2_page_alloc(&page, 0) != -ENOMEM)
147                 cprintf("Should not get here, all pages should already be gone!\n");
148         cprintf("All pages gone for sure...\n");
149         
150         //Now lets put a few pages back using page_free..
151         cprintf("Reinserting pages via page_free and reallocating them...\n");
152         page_free(&pages[0]);
153         page_free(&pages[15]);
154         page_free(&pages[7]);
155         page_free(&pages[6]);
156         page_free(&pages[4]);
157
158         while(page_alloc(&page) != -ENOMEM)
159                 cprintf("Page: %d\n", page2ppn(page));  
160 }
161
extern uint8_t num_cpus;
barrier_t test_cpu_array;	// barrier shared with test_barrier_handler on all cores

/* Initializes the shared barrier for all cores, then broadcasts
 * test_barrier_handler, which makes each core print its id and hit the
 * barrier several times.  Fire-and-forget: returns once the calls are sent. */
void test_barrier(void)
{
	cprintf("Core 0 initializing barrier\n");
	init_barrier(&test_cpu_array, num_cpus);
	cprintf("Core 0 asking all cores to print ids, barrier, rinse, repeat\n");
	smp_call_function_all(test_barrier_handler, NULL, 0);
}
172
/* Verifies the nesting semantics of enable_irqsave()/disable_irqsave(): each
 * pair tracks depth in the caller-provided int8_t cookie, so only the
 * outermost call in a matched sequence actually flips the IRQ flag.  Every
 * scenario below sets a known starting IRQ state, runs a sequence of
 * save/restore calls, and asserts the flag after each step.  The printd
 * narration (FL_IF = 0x200 when enabled) is only visible in debug builds.
 * Exact statement order is the test — do not reorder. */
void test_interrupts_irqsave(void)
{
	int8_t state = 0;
	/* Scenario 1: start disabled, nest two enables then two disables. */
	printd("Testing Nesting Enabling first, turning ints off:\n");
	disable_irq();
	printd("Interrupts are: %x\n", read_eflags() & FL_IF);
	assert(!irq_is_enabled());
	printd("Enabling IRQSave\n");
	enable_irqsave(&state);
	printd("Interrupts are: %x\n", read_eflags() & FL_IF);
	assert(irq_is_enabled());
	printd("Enabling IRQSave Again\n");
	enable_irqsave(&state);
	printd("Interrupts are: %x\n", read_eflags() & FL_IF);
	assert(irq_is_enabled());
	printd("Disabling IRQSave Once\n");
	disable_irqsave(&state);
	printd("Interrupts are: %x\n", read_eflags() & FL_IF);
	assert(irq_is_enabled());
	printd("Disabling IRQSave Again\n");
	disable_irqsave(&state);
	printd("Interrupts are: %x\n", read_eflags() & FL_IF);
	assert(!irq_is_enabled());
	printd("Done.  Should have been 0, 200, 200, 200, 0\n");

	/* Scenario 2: start enabled, nest two disables then two enables. */
	printd("Testing Nesting Disabling first, turning ints on:\n");
	state = 0;
	enable_irq();
	printd("Interrupts are: %x\n", read_eflags() & FL_IF);
	assert(irq_is_enabled());
	printd("Disabling IRQSave Once\n");
	disable_irqsave(&state);
	printd("Interrupts are: %x\n", read_eflags() & FL_IF);
	assert(!irq_is_enabled());
	printd("Disabling IRQSave Again\n");
	disable_irqsave(&state);
	printd("Interrupts are: %x\n", read_eflags() & FL_IF);
	assert(!irq_is_enabled());
	printd("Enabling IRQSave Once\n");
	enable_irqsave(&state);
	printd("Interrupts are: %x\n", read_eflags() & FL_IF);
	assert(!irq_is_enabled());
	printd("Enabling IRQSave Again\n");
	enable_irqsave(&state);
	printd("Interrupts are: %x\n", read_eflags() & FL_IF);
	assert(irq_is_enabled());
	printd("Done.  Should have been 200, 0, 0, 0, 200 \n");

	/* Scenario 3: start disabled, single enable/disable pair restores off. */
	state = 0;
	disable_irq();
	printd("Ints are off, enabling then disabling.\n");
	enable_irqsave(&state);
	printd("Interrupts are: %x\n", read_eflags() & FL_IF);
	assert(irq_is_enabled());
	disable_irqsave(&state);
	printd("Interrupts are: %x\n", read_eflags() & FL_IF);
	assert(!irq_is_enabled());
	printd("Done.  Should have been 200, 0\n");

	/* Scenario 4: start enabled, enable/disable pair leaves them on. */
	state = 0;
	enable_irq();
	printd("Ints are on, enabling then disabling.\n");
	enable_irqsave(&state);
	printd("Interrupts are: %x\n", read_eflags() & FL_IF);
	assert(irq_is_enabled());
	disable_irqsave(&state);
	printd("Interrupts are: %x\n", read_eflags() & FL_IF);
	assert(irq_is_enabled());
	printd("Done.  Should have been 200, 200\n");

	/* Scenario 5: start disabled, disable/enable pair stays off. */
	state = 0;
	disable_irq();
	printd("Ints are off, disabling then enabling.\n");
	disable_irqsave(&state);
	printd("Interrupts are: %x\n", read_eflags() & FL_IF);
	assert(!irq_is_enabled());
	enable_irqsave(&state);
	printd("Interrupts are: %x\n", read_eflags() & FL_IF);
	assert(!irq_is_enabled());
	printd("Done.  Should have been 0, 0\n");

	/* Scenario 6: start enabled, disable/enable pair restores on. */
	state = 0;
	enable_irq();
	printd("Ints are on, disabling then enabling.\n");
	disable_irqsave(&state);
	printd("Interrupts are: %x\n", read_eflags() & FL_IF);
	assert(!irq_is_enabled());
	enable_irqsave(&state);
	printd("Interrupts are: %x\n", read_eflags() & FL_IF);
	assert(irq_is_enabled());
	printd("Done.  Should have been 0, 200\n");

	/* Leave interrupts off, matching the kernel's expectation here. */
	disable_irq();
	cprintf("Passed enable_irqsave tests\n");
}
268
/* Exercises the BITMASK macro family on a 67-bit mask — deliberately not a
 * multiple of 8, so the partial last byte paths get covered.  Walks through
 * declare, clear, set/get/clear individual bits, copy, fill, and the
 * is-clear predicate, printing the mask after each step for eyeballing. */
void test_bitmasks(void)
{
#define masksize 67
	DECL_BITMASK(mask, masksize);
	/* sizeof reveals how many bytes the macro actually reserved. */
	printk("size of mask %d\n", sizeof(mask));
	CLR_BITMASK(mask, masksize);
	PRINT_BITMASK(mask, masksize);
	printk("cleared\n");
	/* Set bits at the start, middle, and the very last valid index. */
	SET_BITMASK_BIT(mask, 0);
	SET_BITMASK_BIT(mask, 11);
	SET_BITMASK_BIT(mask, 17);
	SET_BITMASK_BIT(mask, masksize-1);
	printk("bits set\n");
	PRINT_BITMASK(mask, masksize);
	DECL_BITMASK(mask2, masksize);
	COPY_BITMASK(mask2, mask, masksize);
	printk("copy of original mask, should be the same as the prev\n");
	PRINT_BITMASK(mask2, masksize);
	CLR_BITMASK_BIT(mask, 11);
	printk("11 cleared\n");
	PRINT_BITMASK(mask, masksize);
	printk("bit 17 is %d (should be 1)\n", GET_BITMASK_BIT(mask, 17));
	printk("bit 11 is %d (should be 0)\n", GET_BITMASK_BIT(mask, 11));
	/* FILL sets every valid bit; bits past masksize in the final byte stay
	 * clear, hence "except for a few at the end" when printed. */
	FILL_BITMASK(mask, masksize);
	PRINT_BITMASK(mask, masksize);
	printk("should be all 1's, except for a few at the end\n");
	printk("Is Clear?: %d (should be 0)\n", BITMASK_IS_CLEAR(mask,masksize));
	CLR_BITMASK(mask, masksize);
	PRINT_BITMASK(mask, masksize);
	printk("Is Clear?: %d (should be 1)\n", BITMASK_IS_CLEAR(mask,masksize));
	printk("should be cleared\n");
}
301
checklist_t* the_global_list;	// checklist shared between test_checklists and its handler

/* Per-core handler for test_checklists: after a 1s delay (to stagger the
 * cores), checks this core off the shared checklist. */
void test_checklist_handler(trapframe_t *tf, void* data)
{
	udelay(1000000);
	cprintf("down_checklist(%x,%d)\n", the_global_list, core_id());
	down_checklist(the_global_list);
}
310
extern uint8_t num_cpus;

/* Exercises the checklist primitive: builds a checklist sized for
 * MAX_NUM_CPUS, pokes its underlying bitmask directly, commits a mask
 * covering the first num_cpus cores, then broadcasts
 * test_checklist_handler so every core checks itself off, and waits for
 * the list to empty.  The checklist lives on this stack, so this function
 * must not return before waiton_checklist() completes (it doesn't). */
void test_checklists(void)
{
	INIT_CHECKLIST(a_list, MAX_NUM_CPUS);
	the_global_list = &a_list;
	printk("Checklist Build, mask size: %d\n", sizeof(a_list.mask.bits));
	printk("mask\n");
	PRINT_BITMASK(a_list.mask.bits, a_list.mask.size);
	/* Direct bitmask poke, just to show the mask plumbing works. */
	SET_BITMASK_BIT(a_list.mask.bits, 11);
	printk("Set bit 11\n");
	PRINT_BITMASK(a_list.mask.bits, a_list.mask.size);

	CLR_BITMASK(a_list.mask.bits, a_list.mask.size);
	/* Build the mask of cores we expect to answer: 0..num_cpus-1. */
	INIT_CHECKLIST_MASK(a_mask, MAX_NUM_CPUS);
	FILL_BITMASK(a_mask.bits, num_cpus);
	//CLR_BITMASK_BIT(a_mask.bits, core_id());
	//SET_BITMASK_BIT(a_mask.bits, 1);
	//printk("New mask (1, 17, 25):\n");
	printk("Created new mask, filled up to num_cpus\n");
	PRINT_BITMASK(a_mask.bits, a_mask.size);
	printk("committing new mask\n");
	/* Commit copies our mask into the list (waiting for any prior commit). */
	commit_checklist_wait(&a_list, &a_mask);
	printk("Old mask (copied onto):\n");
	PRINT_BITMASK(a_list.mask.bits, a_list.mask.size);
	//smp_call_function_single(1, test_checklist_handler, 0, 0);

	smp_call_function_all(test_checklist_handler, NULL, 0);

	printk("Waiting on checklist\n");
	waiton_checklist(&a_list);
	printk("Done Waiting!\n");

}
345
atomic_t a, b, c;	// shared counters for the smp_call and LAPIC tests below

/* IPI handler that atomically increments the counter passed via data.
 * Used to count deliveries in test_smp_call_functions and
 * test_lapic_status_bit. */
void test_incrementer_handler(trapframe_t *tf, atomic_t* data)
{
	assert(data);
	atomic_inc(data);
}
353
/* Deliberate no-op IPI handler — used where only the delivery/wait
 * machinery is under test.  The nop keeps the body non-empty. */
void test_null_handler(trapframe_t *tf, void* data)
{
	asm volatile("nop");
}
358
/* Exercises the smp_call_function_* machinery end to end: self/all/single
 * targets, wait vs. no-wait variants, delivery counting under heavy
 * interleaving, and the deadlock/exhaustion behavior of the (apparently
 * five-deep — confirm against smp.c) pool of handler-wrapper slots.
 * Exact call order matters throughout; do not reorder. */
void test_smp_call_functions(void)
{
	int i;
	atomic_init(&a, 0);
	atomic_init(&b, 0);
	atomic_init(&c, 0);
	handler_wrapper_t *waiter0 = 0, *waiter1 = 0, *waiter2 = 0, *waiter3 = 0,
	                  *waiter4 = 0, *waiter5 = 0;
	uint8_t me = core_id();
	/* Basic targeting: self, all, and each other core individually,
	 * first fire-and-forget, then with a waiter. */
	printk("\nCore %d: SMP Call Self (nowait):\n", me);
	printk("---------------------\n");
	smp_call_function_self(test_hello_world_handler, NULL, 0);
	printk("\nCore %d: SMP Call Self (wait):\n", me);
	printk("---------------------\n");
	smp_call_function_self(test_hello_world_handler, NULL, &waiter0);
	smp_call_wait(waiter0);
	printk("\nCore %d: SMP Call All (nowait):\n", me);
	printk("---------------------\n");
	smp_call_function_all(test_hello_world_handler, NULL, 0);
	printk("\nCore %d: SMP Call All (wait):\n", me);
	printk("---------------------\n");
	smp_call_function_all(test_hello_world_handler, NULL, &waiter0);
	smp_call_wait(waiter0);
	printk("\nCore %d: SMP Call All-Else Individually, in order (nowait):\n", me);
	printk("---------------------\n");
	for(i = 1; i < num_cpus; i++)
		smp_call_function_single(i, test_hello_world_handler, NULL, 0);
	printk("\nCore %d: SMP Call Self (wait):\n", me);
	printk("---------------------\n");
	smp_call_function_self(test_hello_world_handler, NULL, &waiter0);
	smp_call_wait(waiter0);
	printk("\nCore %d: SMP Call All-Else Individually, in order (wait):\n", me);
	printk("---------------------\n");
	for(i = 1; i < num_cpus; i++)
	{
		smp_call_function_single(i, test_hello_world_handler, NULL, &waiter0);
		smp_call_wait(waiter0);
	}
	/* Delivery-count test: 6 broadcasts + 9 singles of the incrementer,
	 * heavily interleaved, must yield exactly 19 increments per counter
	 * on the expected rig — no drops, no clobbers. */
	printk("\nTesting to see if any IPI-functions are dropped when not waiting:\n");
	printk("A: %d, B: %d, C: %d (should be 0,0,0)\n", atomic_read(&a), atomic_read(&b), atomic_read(&c));
	smp_call_function_all(test_incrementer_handler, &a, 0);
	smp_call_function_all(test_incrementer_handler, &b, 0);
	smp_call_function_all(test_incrementer_handler, &c, 0);
	// if i can clobber a previous IPI, the interleaving might do it
	smp_call_function_single(1 % num_cpus, test_incrementer_handler, &a, 0);
	smp_call_function_single(2 % num_cpus, test_incrementer_handler, &b, 0);
	smp_call_function_single(3 % num_cpus, test_incrementer_handler, &c, 0);
	smp_call_function_single(4 % num_cpus, test_incrementer_handler, &a, 0);
	smp_call_function_single(5 % num_cpus, test_incrementer_handler, &b, 0);
	smp_call_function_single(6 % num_cpus, test_incrementer_handler, &c, 0);
	smp_call_function_all(test_incrementer_handler, &a, 0);
	smp_call_function_single(3 % num_cpus, test_incrementer_handler, &c, 0);
	smp_call_function_all(test_incrementer_handler, &b, 0);
	smp_call_function_single(1 % num_cpus, test_incrementer_handler, &a, 0);
	smp_call_function_all(test_incrementer_handler, &c, 0);
	smp_call_function_single(2 % num_cpus, test_incrementer_handler, &b, 0);
	// wait, so we're sure the others finish before printing.
	// without this, we could (and did) get 19,18,19, since the B_inc
	// handler didn't finish yet
	smp_call_function_self(test_null_handler, NULL, &waiter0);
	// need to grab all 5 handlers (max), since the code moves to the next free.
	smp_call_function_self(test_null_handler, NULL, &waiter1);
	smp_call_function_self(test_null_handler, NULL, &waiter2);
	smp_call_function_self(test_null_handler, NULL, &waiter3);
	smp_call_function_self(test_null_handler, NULL, &waiter4);
	smp_call_wait(waiter0);
	smp_call_wait(waiter1);
	smp_call_wait(waiter2);
	smp_call_wait(waiter3);
	smp_call_wait(waiter4);
	printk("A: %d, B: %d, C: %d (should be 19,19,19)\n", atomic_read(&a), atomic_read(&b), atomic_read(&c));
	/* Two outstanding sends before any wait must not deadlock. */
	printk("Attempting to deadlock by smp_calling with an outstanding wait:\n");
	smp_call_function_self(test_null_handler, NULL, &waiter0);
	printk("Sent one\n");
	smp_call_function_self(test_null_handler, NULL, &waiter1);
	printk("Sent two\n");
	smp_call_wait(waiter0);
	printk("Wait one\n");
	smp_call_wait(waiter1);
	printk("Wait two\n");
	printk("\tMade it through!\n");
	/* Oversubscribe the wrapper pool: the sixth request should fail with
	 * a nonzero return rather than hang. */
	printk("Attempting to deadlock by smp_calling more than are available:\n");
	printk("\tShould see an Insufficient message and a kernel warning.\n");
	if (smp_call_function_self(test_null_handler, NULL, &waiter0))
		printk("\tInsufficient handlers to call function (0)\n");
	if (smp_call_function_self(test_null_handler, NULL, &waiter1))
		printk("\tInsufficient handlers to call function (1)\n");
	if (smp_call_function_self(test_null_handler, NULL, &waiter2))
		printk("\tInsufficient handlers to call function (2)\n");
	if (smp_call_function_self(test_null_handler, NULL, &waiter3))
		printk("\tInsufficient handlers to call function (3)\n");
	if (smp_call_function_self(test_null_handler, NULL, &waiter4))
		printk("\tInsufficient handlers to call function (4)\n");
	if (smp_call_function_self(test_null_handler, NULL, &waiter5))
		printk("\tInsufficient handlers to call function (5)\n");
	smp_call_wait(waiter0);
	smp_call_wait(waiter1);
	smp_call_wait(waiter2);
	smp_call_wait(waiter3);
	smp_call_wait(waiter4);
	smp_call_wait(waiter5);
	printk("\tMade it through!\n");

	printk("Done\n");
}
464
#ifdef __i386__
/* Stress-tests LAPIC delivery-status polling: sends NUM_IPI test IPIs to the
 * core with physical APIC ID 7 (hardcoded — assumes that core exists on the
 * test rig; TODO confirm), polling lapic_wait_to_send() between sends, then
 * reports how many the incrementer handler actually counted.  The received
 * total should equal NUM_IPI if no IPIs were dropped. */
void test_lapic_status_bit(void)
{
	register_interrupt_handler(interrupt_handlers, I_TESTING,
	                           test_incrementer_handler, &a);
	#define NUM_IPI 100000
	atomic_set(&a,0);
	printk("IPIs received (should be 0): %d\n", atomic_read(&a));
	for(int i = 0; i < NUM_IPI; i++) {
		send_ipi(7, 0, I_TESTING);
		lapic_wait_to_send();
	}
	// need to wait a bit to let those IPIs get there
	udelay(5000000);
	/* Bug fix: the expected count (NUM_IPI) and the observed count were
	 * swapped in this printk, and the atomic was passed raw instead of
	 * through atomic_read(). */
	printk("IPIs received (should be %d): %d\n", NUM_IPI, atomic_read(&a));
	// hopefully that handler never fires again.  leaving it registered for now.
}
#endif // __i386__
483
484 /******************************************************************************/
485 /*            Test Measurements: Couples with measurement.c                   */
486 // All user processes can R/W the UGDATA page
487 barrier_t*COUNT(1) bar = (barrier_t*COUNT(1))TC(UGDATA);
488 uint32_t*COUNT(1) job_to_run = (uint32_t*COUNT(1))TC(UGDATA + sizeof(barrier_t));
489 env_t* env_batch[64]; // Fairly arbitrary, just the max I plan to use.
490
491 /* Helpers for test_run_measurements */
492 static void wait_for_all_envs_to_die(void)
493 {
494         while (atomic_read(&num_envs))
495                 cpu_relax();
496 }
497
// this never returns.
/* Launches num_threads synchronous measurement envs on cores
 * [start_core, start_core + num_threads), after waiting for any previous
 * envs to exit.  job_num is published to the UGDATA page so the user-side
 * measurement code knows which test to run; the shared barrier is sized to
 * match.  Finishes by faking a null-env run on core 0 to bounce back into
 * the manager, then panics if control ever returns. */
static void sync_tests(int start_core, int num_threads, int job_num)
{
	assert(start_core + num_threads <= num_cpus);
	wait_for_all_envs_to_die();
	for (int i = start_core; i < start_core + num_threads; i++)
		env_batch[i] = kfs_proc_create(kfs_lookup_path("roslib_measurements"));
	/* Switch to the first env's address space so the UGDATA writes below
	 * land where the user processes will read them. */
	lcr3(env_batch[start_core]->env_cr3);
	init_barrier(bar, num_threads);
	*job_to_run = job_num;
	for (int i = start_core; i < start_core + num_threads; i++)
		smp_call_function_single(i, run_env_handler, env_batch[i], 0);
	process_workqueue();
	// we want to fake a run, to reenter manager for the next case
	env_t *env = kfs_proc_create(kfs_lookup_path("roslib_null"));
	smp_call_function_single(0, run_env_handler, env, 0);
	process_workqueue();
	panic("whoops!\n");
}
517
/* Async variant of sync_tests: same env setup, but after launching the
 * envs it services their generic syscalls in a polling loop until every
 * env signals completion.  process_generic_syscalls() apparently returns a
 * negative value per finished env — TODO confirm against syscall.c; the
 * loop exits once the summed count reaches -num_threads.  Never returns. */
static void async_tests(int start_core, int num_threads, int job_num)
{
	int count;

	assert(start_core + num_threads <= num_cpus);
	wait_for_all_envs_to_die();
	for (int i = start_core; i < start_core + num_threads; i++)
		env_batch[i] = kfs_proc_create(kfs_lookup_path("roslib_measurements"));
	printk("async_tests: checkpoint 0\n");
	/* Switch into the first env's address space before touching UGDATA. */
	lcr3(env_batch[start_core]->env_cr3);
	init_barrier(bar, num_threads);
	printk("async_tests: checkpoint 1\n");
	*job_to_run = job_num;
	for (int i = start_core; i < start_core + num_threads; i++)
		smp_call_function_single(i, run_env_handler, env_batch[i], 0);
	/* Service syscalls (up to 1 per env per pass) until all envs report done. */
	count = 0;
	while (count > -num_threads) {
		count = 0;
		for (int i = start_core; i < start_core + num_threads; i++) {
			count += process_generic_syscalls(env_batch[i], 1);
		}
		cpu_relax();
	}
	// we want to fake a run, to reenter manager for the next case
	env_t *env = kfs_proc_create(kfs_lookup_path("roslib_null"));
	smp_call_function_single(0, run_env_handler, env, 0);
	process_workqueue();
	// this all never returns
	panic("whoops!\n");
}
548
/* Dispatches one measurement job by number: cases 1-8 run sync_tests with
 * growing thread counts, cases 0 and 9-14 run async_tests.  Both helpers
 * never return on success, so reaching the final panic means either an
 * invalid job_num or a broken test setup. */
void test_run_measurements(uint32_t job_num)
{
	switch (job_num) {
		case 0: // Nulls
			printk("Case 0:\n");
			async_tests(2, 1, job_num);  // start core 2, 1 core total
			break;
		case 1: // Sync
			printk("Case 1:\n");
			sync_tests(2, 1, job_num);
			break;
		case 2:
			printk("Case 2:\n");
			sync_tests(2, 2, job_num);
			break;
		case 3:
			printk("Case 3:\n");
			sync_tests(0, 3, job_num);
			break;
		case 4:
			printk("Case 4:\n");
			sync_tests(0, 4, job_num);
			break;
		case 5:
			printk("Case 5:\n");
			sync_tests(0, 5, job_num);
			break;
		case 6:
			printk("Case 6:\n");
			sync_tests(0, 6, job_num);
			break;
		case 7:
			printk("Case 7:\n");
			sync_tests(0, 7, job_num);
			break;
		case 8:
			printk("Case 8:\n");
			sync_tests(0, 8, job_num);
			break;
		case 9:
			printk("Case 9:\n");
			async_tests(2, 1, job_num);
			break;
		case 10:
			printk("Case 10:\n");
			async_tests(2, 2, job_num);
			break;
		case 11:
			printk("Case 11:\n");
			async_tests(2, 3, job_num);
			break;
		case 12:
			printk("Case 12:\n");
			async_tests(2, 4, job_num);
			break;
		case 13:
			printk("Case 13:\n");
			async_tests(2, 5, job_num);
			break;
		case 14:
			printk("Case 14:\n");
			async_tests(2, 6, job_num);
			break;
		default:
			warn("Invalid test number!!");
	}
	/* Only reachable if a case returned (it shouldn't) or job_num was bad. */
	panic("Error in test setup!!");
}
617
618 /************************************************************/
619 /* ISR Handler Functions */
620
621 void test_hello_world_handler(trapframe_t *tf, void* data)
622 {
623         int trapno;
624         #if defined(__i386__)
625         trapno = tf->tf_trapno;
626         #elif defined(__sparc_v8__)
627         trapno = (tf->tbr >> 4) & 0xFF;
628         #else
629         trapno = 0;
630         #endif
631
632         cprintf("Incoming IRQ, ISR: %d on core %d with tf at 0x%08x\n",
633                 trapno, core_id(), tf);
634 }
635
uint32_t print_info_lock = 0;	// serializes the multi-line dumps across cores

/* Per-core handler for test_print_info: prints this core's id and, on x86,
 * its MTRR default type plus the eight variable-range MTRR base/mask MSR
 * pairs (0x200-0x20f).  The whole dump is held under an irqsave spinlock so
 * concurrent cores don't interleave their output. */
void test_print_info_handler(trapframe_t *tf, void* data)
{
	spin_lock_irqsave(&print_info_lock);
	cprintf("----------------------------\n");
	cprintf("This is Core %d\n", core_id());
#ifdef __i386__
	cprintf("MTRR_DEF_TYPE = 0x%08x\n", read_msr(IA32_MTRR_DEF_TYPE));
	cprintf("MTRR Phys0 Base = 0x%016llx, Mask = 0x%016llx\n",
	        read_msr(0x200), read_msr(0x201));
	cprintf("MTRR Phys1 Base = 0x%016llx, Mask = 0x%016llx\n",
	        read_msr(0x202), read_msr(0x203));
	cprintf("MTRR Phys2 Base = 0x%016llx, Mask = 0x%016llx\n",
	        read_msr(0x204), read_msr(0x205));
	cprintf("MTRR Phys3 Base = 0x%016llx, Mask = 0x%016llx\n",
	        read_msr(0x206), read_msr(0x207));
	cprintf("MTRR Phys4 Base = 0x%016llx, Mask = 0x%016llx\n",
	        read_msr(0x208), read_msr(0x209));
	cprintf("MTRR Phys5 Base = 0x%016llx, Mask = 0x%016llx\n",
	        read_msr(0x20a), read_msr(0x20b));
	cprintf("MTRR Phys6 Base = 0x%016llx, Mask = 0x%016llx\n",
	        read_msr(0x20c), read_msr(0x20d));
	cprintf("MTRR Phys7 Base = 0x%016llx, Mask = 0x%016llx\n",
	        read_msr(0x20e), read_msr(0x20f));
#endif // __i386__
	cprintf("----------------------------\n");
	spin_unlock_irqsave(&print_info_lock);
}
665
666 void test_barrier_handler(trapframe_t *tf, void* data)
667 {
668         cprintf("Round 1: Core %d\n", core_id());
669         waiton_barrier(&test_cpu_array);
670         waiton_barrier(&test_cpu_array);
671         waiton_barrier(&test_cpu_array);
672         waiton_barrier(&test_cpu_array);
673         waiton_barrier(&test_cpu_array);
674         waiton_barrier(&test_cpu_array);
675         cprintf("Round 2: Core %d\n", core_id());
676         waiton_barrier(&test_cpu_array);
677         cprintf("Round 3: Core %d\n", core_id());
678         // uncomment to see it fucked up
679         //cprintf("Round 4: Core %d\n", core_id());
680 }
681
/* Decrements the atomic counter passed as data; test_pit uses it to detect
 * that the LAPIC timer interrupt fired.  HANDLER_ATOMIC presumably marks
 * the statement for the Sharc/Ivy checker — TODO confirm its semantics. */
static void test_waiting_handler(trapframe_t *tf, atomic_t * data)
{
	{HANDLER_ATOMIC atomic_dec(data);}
}
686
#ifdef __i386__
/* Compares the three x86 delay/timer sources, each for a nominal 10s:
 * a PIT-based busy delay, the TSC-based udelay, and a one-shot LAPIC
 * timer whose expiry is detected via test_waiting_handler decrementing
 * an atomic flag.  Eyeball test: each "End now" should land ~10s after
 * its "Starting" line.  Leaves the I_TESTING handler registered. */
void test_pit(void)
{
	cprintf("Starting test for PIT now (10s)\n");
	udelay_pit(10000000);
	cprintf("End now\n");
	cprintf("Starting test for TSC (if stable) now (10s)\n");
	udelay(10000000);
	cprintf("End now\n");

	cprintf("Starting test for LAPIC (if stable) now (10s)\n");
	enable_irq();
	lapic_set_timer(10000000, FALSE);

	/* Handler decrements this to 0 when the timer interrupt arrives;
	 * note it is registered after the timer is armed — fine for a 10s
	 * one-shot, but racy for short timeouts. */
	atomic_t waiting;
	atomic_init(&waiting, 1);
	register_interrupt_handler(interrupt_handlers, I_TESTING,
	                           test_waiting_handler, &waiting);
	while(atomic_read(&waiting))
		cpu_relax();
	cprintf("End now\n");
}
#endif // __i386__