/* kern/src/testing.c
 * Kernel self-tests.  Major reworking to integrate cache coloring into the
 * kernel. */
1
2 #include <arch/mmu.h>
3 #include <arch/arch.h>
4 #include <smp.h>
5
6 #include <ros/memlayout.h>
7
8 #include <atomic.h>
9 #include <stdio.h>
10 #include <assert.h>
11 #include <string.h>
12 #include <testing.h>
13 #include <trap.h>
14 #include <process.h>
15 #include <syscall.h>
16 #include <timing.h>
17 #include <kfs.h>
18 #include <multiboot.h>
19 #include <pmap.h>
20 #include <page_alloc.h>
21
22 #define test_vector 0xeb
23
24 #ifdef __i386__
25
26 void test_ipi_sending(void)
27 {
28         extern handler_t (COUNT(NUM_INTERRUPT_HANDLERS) interrupt_handlers)[];
29         int8_t state = 0;
30
31         register_interrupt_handler(interrupt_handlers, test_vector,
32                                    test_hello_world_handler, NULL);
33         enable_irqsave(&state);
34         cprintf("\nCORE 0 sending broadcast\n");
35         send_broadcast_ipi(test_vector);
36         udelay(3000000);
37         cprintf("\nCORE 0 sending all others\n");
38         send_all_others_ipi(test_vector);
39         udelay(3000000);
40         cprintf("\nCORE 0 sending self\n");
41         send_self_ipi(test_vector);
42         udelay(3000000);
43         cprintf("\nCORE 0 sending ipi to physical 1\n");
44         send_ipi(0x01, 0, test_vector);
45         udelay(3000000);
46         cprintf("\nCORE 0 sending ipi to physical 2\n");
47         send_ipi(0x02, 0, test_vector);
48         udelay(3000000);
49         cprintf("\nCORE 0 sending ipi to physical 3\n");
50         send_ipi(0x03, 0, test_vector);
51         udelay(3000000);
52         cprintf("\nCORE 0 sending ipi to physical 15\n");
53         send_ipi(0x0f, 0, test_vector);
54         udelay(3000000);
55         cprintf("\nCORE 0 sending ipi to logical 2\n");
56         send_ipi(0x02, 1, test_vector);
57         udelay(3000000);
58         cprintf("\nCORE 0 sending ipi to logical 1\n");
59         send_ipi(0x01, 1, test_vector);
60         udelay(3000000);
61         cprintf("\nDone!\n");
62         disable_irqsave(&state);
63 }
64
// Note this never returns and will muck with any other timer work
/* Tests reception of PIC-routed interrupts: hooks vector 0x20 (remapped
 * IRQ0) to the hello-world handler, programs the PIT to fire periodically,
 * unmasks the line at both the PIC and the LAPIC, and spins forever so the
 * periodic handler output can be observed. */
void test_pic_reception(void)
{
	register_interrupt_handler(interrupt_handlers, 0x20, test_hello_world_handler, NULL);
	pit_set_timer(100,TIMER_RATEGEN); // totally arbitrary time
	pic_unmask_irq(0);
	/* Dump both PIC masks so we can confirm IRQ0 is actually unmasked. */
	cprintf("PIC1 Mask = 0x%04x\n", inb(PIC1_DATA));
	cprintf("PIC2 Mask = 0x%04x\n", inb(PIC2_DATA));
	/* Unmask LINT0 — presumably the LAPIC pin the PIC output is wired
	 * through on this platform; confirm against the platform wiring. */
	unmask_lapic_lvt(LAPIC_LVT_LINT0);
	cprintf("Core %d's LINT0: 0x%08x\n", core_id(), read_mmreg32(LAPIC_LVT_LINT0));
	enable_irq();
	while(1);	// never returns; the interrupt handler does all the work
}
78
79 #endif // __i386__
80
/* Broadcasts test_print_info_handler to every core (fire-and-forget), so
 * each core dumps its own id and MTRR state.  Output from different cores is
 * serialized by the handler's spinlock. */
void test_print_info(void)
{
	cprintf("\nCORE 0 asking all cores to print info:\n");
	smp_call_function_all(test_print_info_handler, NULL, 0);
	cprintf("\nDone!\n");
}
87
/* Exhaustively exercises the colored page allocators: drains every color of
 * the L1, L2, and L3 allocators in turn (reinitializing all physical memory
 * with page_init() between rounds), then drains the global allocator,
 * verifies everything is gone, and finally frees a handful of specific pages
 * and reallocates them.  All checking is by eyeball of the printed PPNs. */
void test_page_coloring(void) 
{
	//Print the different cache properties of our machine
	print_cache_properties("L1", &l1);
	cprintf("\n");
	print_cache_properties("L2", &l2);
	cprintf("\n");
	print_cache_properties("L3", &l3);
	cprintf("\n");

	//Print some stats about our memory
	cprintf("Max Address: %u\n", MAX_VADDR);
	cprintf("Num Pages: %u\n", npages);

	//Declare a local variable for allocating pages 
	page_t* page;

	//Run through and allocate all pages through l1_page_alloc
	//(each color is drained until the allocator reports -ENOMEM)
	cprintf("Allocating from L1 page colors:\n");
	for(int i=0; i<get_cache_num_page_colors(&l1); i++) {
		cprintf("  COLOR %d:\n", i);
		while(l1_page_alloc(&page, i) != -ENOMEM)
			cprintf("    Page: %d\n", page2ppn(page));
	}

	//Put all the pages back by reinitializing
	page_init();
	
	//Run through and allocate all pages through l2_page_alloc
	cprintf("Allocating from L2 page colors:\n");
	for(int i=0; i<get_cache_num_page_colors(&l2); i++) {
		cprintf("  COLOR %d:\n", i);
		while(l2_page_alloc(&page, i) != -ENOMEM)
			cprintf("    Page: %d\n", page2ppn(page));
	}

	//Put all the pages back by reinitializing
	page_init();
	
	//Run through and allocate all pages through l3_page_alloc
	cprintf("Allocating from L3 page colors:\n");
	for(int i=0; i<get_cache_num_page_colors(&l3); i++) {
		cprintf("  COLOR %d:\n", i);
		while(l3_page_alloc(&page, i) != -ENOMEM)
			cprintf("    Page: %d\n", page2ppn(page));
	}
	
	//Put all the pages back by reinitializing
	page_init();
	
	//Run through and allocate all pages through page_alloc
	cprintf("Allocating from global allocator:\n");
	while(page_alloc(&page) != -ENOMEM)
		cprintf("    Page: %d\n", page2ppn(page));
	
	//All allocators draw from the same pool, so a colored alloc must now
	//fail as well — success here means the allocators are out of sync.
	if(l2_page_alloc(&page, 0) != -ENOMEM)
		cprintf("Should not get here, all pages should already be gone!\n");
	cprintf("All pages gone for sure...\n");
	
	//Now lets put a few pages back using page_free..
	cprintf("Reinserting pages via page_free and reallocating them...\n");
	page_free(&pages[0]);
	page_free(&pages[15]);
	page_free(&pages[7]);
	page_free(&pages[6]);
	page_free(&pages[4]);

	//Only the five pages just freed should come back out of page_alloc
	while(page_alloc(&page) != -ENOMEM)
		cprintf("Page: %d\n", page2ppn(page));	
}
158
159 extern uint8_t num_cpus;
160 barrier_t test_cpu_array;
161
/* Exercises the barrier primitive: core 0 sets up a barrier sized for all
 * cpus, then broadcasts test_barrier_handler (fire-and-forget), which makes
 * every core print and rendezvous at the barrier several times. */
void test_barrier(void)
{
	cprintf("Core 0 initializing barrier\n");
	init_barrier(&test_cpu_array, num_cpus);
	cprintf("Core 0 asking all cores to print ids, barrier, rinse, repeat\n");
	smp_call_function_all(test_barrier_handler, NULL, 0);
}
169
170 void test_interrupts_irqsave(void)
171 {
172         int8_t state = 0;
173         printd("Testing Nesting Enabling first, turning ints off:\n");
174         disable_irq();
175         printd("Interrupts are: %x\n", read_eflags() & FL_IF);
176         assert(!irq_is_enabled());
177         printd("Enabling IRQSave\n");
178         enable_irqsave(&state);
179         printd("Interrupts are: %x\n", read_eflags() & FL_IF);
180         assert(irq_is_enabled());
181         printd("Enabling IRQSave Again\n");
182         enable_irqsave(&state);
183         printd("Interrupts are: %x\n", read_eflags() & FL_IF);
184         assert(irq_is_enabled());
185         printd("Disabling IRQSave Once\n");
186         disable_irqsave(&state);
187         printd("Interrupts are: %x\n", read_eflags() & FL_IF);
188         assert(irq_is_enabled());
189         printd("Disabling IRQSave Again\n");
190         disable_irqsave(&state);
191         printd("Interrupts are: %x\n", read_eflags() & FL_IF);
192         assert(!irq_is_enabled());
193         printd("Done.  Should have been 0, 200, 200, 200, 0\n");
194
195         printd("Testing Nesting Disabling first, turning ints on:\n");
196         state = 0;
197         enable_irq();
198         printd("Interrupts are: %x\n", read_eflags() & FL_IF);
199         assert(irq_is_enabled());
200         printd("Disabling IRQSave Once\n");
201         disable_irqsave(&state);
202         printd("Interrupts are: %x\n", read_eflags() & FL_IF);
203         assert(!irq_is_enabled());
204         printd("Disabling IRQSave Again\n");
205         disable_irqsave(&state);
206         printd("Interrupts are: %x\n", read_eflags() & FL_IF);
207         assert(!irq_is_enabled());
208         printd("Enabling IRQSave Once\n");
209         enable_irqsave(&state);
210         printd("Interrupts are: %x\n", read_eflags() & FL_IF);
211         assert(!irq_is_enabled());
212         printd("Enabling IRQSave Again\n");
213         enable_irqsave(&state);
214         printd("Interrupts are: %x\n", read_eflags() & FL_IF);
215         assert(irq_is_enabled());
216         printd("Done.  Should have been 200, 0, 0, 0, 200 \n");
217
218         state = 0;
219         disable_irq();
220         printd("Ints are off, enabling then disabling.\n");
221         enable_irqsave(&state);
222         printd("Interrupts are: %x\n", read_eflags() & FL_IF);
223         assert(irq_is_enabled());
224         disable_irqsave(&state);
225         printd("Interrupts are: %x\n", read_eflags() & FL_IF);
226         assert(!irq_is_enabled());
227         printd("Done.  Should have been 200, 0\n");
228
229         state = 0;
230         enable_irq();
231         printd("Ints are on, enabling then disabling.\n");
232         enable_irqsave(&state);
233         printd("Interrupts are: %x\n", read_eflags() & FL_IF);
234         assert(irq_is_enabled());
235         disable_irqsave(&state);
236         printd("Interrupts are: %x\n", read_eflags() & FL_IF);
237         assert(irq_is_enabled());
238         printd("Done.  Should have been 200, 200\n");
239
240         state = 0;
241         disable_irq();
242         printd("Ints are off, disabling then enabling.\n");
243         disable_irqsave(&state);
244         printd("Interrupts are: %x\n", read_eflags() & FL_IF);
245         assert(!irq_is_enabled());
246         enable_irqsave(&state);
247         printd("Interrupts are: %x\n", read_eflags() & FL_IF);
248         assert(!irq_is_enabled());
249         printd("Done.  Should have been 0, 0\n");
250
251         state = 0;
252         enable_irq();
253         printd("Ints are on, disabling then enabling.\n");
254         disable_irqsave(&state);
255         printd("Interrupts are: %x\n", read_eflags() & FL_IF);
256         assert(!irq_is_enabled());
257         enable_irqsave(&state);
258         printd("Interrupts are: %x\n", read_eflags() & FL_IF);
259         assert(irq_is_enabled());
260         printd("Done.  Should have been 0, 200\n");
261
262         disable_irq();
263         cprintf("Passed enable_irqsave tests\n");
264 }
265
266 void test_bitmasks(void)
267 {
268 #define masksize 67
269         DECL_BITMASK(mask, masksize);
270         printk("size of mask %d\n", sizeof(mask));
271         CLR_BITMASK(mask, masksize);
272         PRINT_BITMASK(mask, masksize);
273         printk("cleared\n");
274         SET_BITMASK_BIT(mask, 0);
275         SET_BITMASK_BIT(mask, 11);
276         SET_BITMASK_BIT(mask, 17);
277         SET_BITMASK_BIT(mask, masksize-1);
278         printk("bits set\n");
279         PRINT_BITMASK(mask, masksize);
280         DECL_BITMASK(mask2, masksize);
281         COPY_BITMASK(mask2, mask, masksize);
282         printk("copy of original mask, should be the same as the prev\n");
283         PRINT_BITMASK(mask2, masksize);
284         CLR_BITMASK_BIT(mask, 11);
285         printk("11 cleared\n");
286         PRINT_BITMASK(mask, masksize);
287         printk("bit 17 is %d (should be 1)\n", GET_BITMASK_BIT(mask, 17));
288         printk("bit 11 is %d (should be 0)\n", GET_BITMASK_BIT(mask, 11));
289         FILL_BITMASK(mask, masksize);
290         PRINT_BITMASK(mask, masksize);
291         printk("should be all 1's, except for a few at the end\n");
292         printk("Is Clear?: %d (should be 0)\n", BITMASK_IS_CLEAR(mask,masksize));
293         CLR_BITMASK(mask, masksize);
294         PRINT_BITMASK(mask, masksize);
295         printk("Is Clear?: %d (should be 1)\n", BITMASK_IS_CLEAR(mask,masksize));
296         printk("should be cleared\n");
297 }
298
299 checklist_t* the_global_list;
300
/* SMP handler: after a one-second delay (so cores visibly finish at
 * different times), checks this core off the global checklist. */
void test_checklist_handler(trapframe_t *tf, void* data)
{
	udelay(1000000);
	cprintf("down_checklist(%x,%d)\n", the_global_list, core_id());
	down_checklist(the_global_list);
}
307
308 extern uint8_t num_cpus;
309
/* Exercises the checklist primitive: pokes bits in the list's bitmask
 * directly, commits a mask covering all online cores, has every core check
 * itself off via the IPI'd handler, and waits for completion. */
void test_checklists(void)
{
	INIT_CHECKLIST(a_list, MAX_NUM_CPUS);
	the_global_list = &a_list;
	printk("Checklist Build, mask size: %d\n", sizeof(a_list.mask.bits));
	printk("mask\n");
	PRINT_BITMASK(a_list.mask.bits, a_list.mask.size);
	SET_BITMASK_BIT(a_list.mask.bits, 11);
	printk("Set bit 11\n");
	PRINT_BITMASK(a_list.mask.bits, a_list.mask.size);

	CLR_BITMASK(a_list.mask.bits, a_list.mask.size);
	INIT_CHECKLIST_MASK(a_mask, MAX_NUM_CPUS);
	/* Only cores that actually exist should be on the commit mask. */
	FILL_BITMASK(a_mask.bits, num_cpus);
	//CLR_BITMASK_BIT(a_mask.bits, core_id());
	//SET_BITMASK_BIT(a_mask.bits, 1);
	//printk("New mask (1, 17, 25):\n");
	printk("Created new mask, filled up to num_cpus\n");
	PRINT_BITMASK(a_mask.bits, a_mask.size);
	printk("committing new mask\n");
	commit_checklist_wait(&a_list, &a_mask);
	printk("Old mask (copied onto):\n");
	PRINT_BITMASK(a_list.mask.bits, a_list.mask.size);
	//smp_call_function_single(1, test_checklist_handler, 0, 0);

	/* Every core downs the checklist from the handler (after a delay). */
	smp_call_function_all(test_checklist_handler, NULL, 0);

	printk("Waiting on checklist\n");
	waiton_checklist(&a_list);
	printk("Done Waiting!\n");

}
342
343 atomic_t a, b, c;
344
/* SMP handler: atomically increments the counter passed via 'data'.
 * Used below to detect dropped IPI-function calls. */
void test_incrementer_handler(trapframe_t *tf, atomic_t* data)
{
	assert(data);
	atomic_inc(data);
}
350
/* SMP handler that does nothing; used purely so the caller can wait on its
 * completion (a cheap cross-core synchronization point). */
void test_null_handler(trapframe_t *tf, void* data)
{
	asm volatile("nop");
}
355
/* Runs the gamut of the smp_call_function_* interface: self/all/single, with
 * and without waiting; then floods the system with increment-IPIs to check
 * that none are dropped; finally stresses the limited pool of handler
 * wrappers, including deliberately over-subscribing it. */
void test_smp_call_functions(void)
{
	int i;
	atomic_init(&a, 0);
	atomic_init(&b, 0);
	atomic_init(&c, 0);
	/* One pointer per handler-wrapper slot we might hold at once (up to 6). */
	handler_wrapper_t *waiter0 = 0, *waiter1 = 0, *waiter2 = 0, *waiter3 = 0,
	                  *waiter4 = 0, *waiter5 = 0;
	uint8_t me = core_id();
	printk("\nCore %d: SMP Call Self (nowait):\n", me);
	printk("---------------------\n");
	smp_call_function_self(test_hello_world_handler, NULL, 0);
	printk("\nCore %d: SMP Call Self (wait):\n", me);
	printk("---------------------\n");
	smp_call_function_self(test_hello_world_handler, NULL, &waiter0);
	smp_call_wait(waiter0);
	printk("\nCore %d: SMP Call All (nowait):\n", me);
	printk("---------------------\n");
	smp_call_function_all(test_hello_world_handler, NULL, 0);
	printk("\nCore %d: SMP Call All (wait):\n", me);
	printk("---------------------\n");
	smp_call_function_all(test_hello_world_handler, NULL, &waiter0);
	smp_call_wait(waiter0);
	printk("\nCore %d: SMP Call All-Else Individually, in order (nowait):\n", me);
	printk("---------------------\n");
	for(i = 1; i < num_cpus; i++)
		smp_call_function_single(i, test_hello_world_handler, NULL, 0);
	printk("\nCore %d: SMP Call Self (wait):\n", me);
	printk("---------------------\n");
	smp_call_function_self(test_hello_world_handler, NULL, &waiter0);
	smp_call_wait(waiter0);
	printk("\nCore %d: SMP Call All-Else Individually, in order (wait):\n", me);
	printk("---------------------\n");
	for(i = 1; i < num_cpus; i++)
	{
		smp_call_function_single(i, test_hello_world_handler, NULL, &waiter0);
		smp_call_wait(waiter0);
	}
	/* Each counter gets 3 'all' broadcasts (num_cpus increments apiece) and
	 * 3 targeted singles; the expected total of 19 below assumes 8 cpus. */
	printk("\nTesting to see if any IPI-functions are dropped when not waiting:\n");
	printk("A: %d, B: %d, C: %d (should be 0,0,0)\n", atomic_read(&a), atomic_read(&b), atomic_read(&c));
	smp_call_function_all(test_incrementer_handler, &a, 0);
	smp_call_function_all(test_incrementer_handler, &b, 0);
	smp_call_function_all(test_incrementer_handler, &c, 0);
	// if i can clobber a previous IPI, the interleaving might do it
	smp_call_function_single(1 % num_cpus, test_incrementer_handler, &a, 0);
	smp_call_function_single(2 % num_cpus, test_incrementer_handler, &b, 0);
	smp_call_function_single(3 % num_cpus, test_incrementer_handler, &c, 0);
	smp_call_function_single(4 % num_cpus, test_incrementer_handler, &a, 0);
	smp_call_function_single(5 % num_cpus, test_incrementer_handler, &b, 0);
	smp_call_function_single(6 % num_cpus, test_incrementer_handler, &c, 0);
	smp_call_function_all(test_incrementer_handler, &a, 0);
	smp_call_function_single(3 % num_cpus, test_incrementer_handler, &c, 0);
	smp_call_function_all(test_incrementer_handler, &b, 0);
	smp_call_function_single(1 % num_cpus, test_incrementer_handler, &a, 0);
	smp_call_function_all(test_incrementer_handler, &c, 0);
	smp_call_function_single(2 % num_cpus, test_incrementer_handler, &b, 0);
	// wait, so we're sure the others finish before printing.
	// without this, we could (and did) get 19,18,19, since the B_inc
	// handler didn't finish yet
	smp_call_function_self(test_null_handler, NULL, &waiter0);
	// need to grab all 5 handlers (max), since the code moves to the next free.
	smp_call_function_self(test_null_handler, NULL, &waiter1);
	smp_call_function_self(test_null_handler, NULL, &waiter2);
	smp_call_function_self(test_null_handler, NULL, &waiter3);
	smp_call_function_self(test_null_handler, NULL, &waiter4);
	smp_call_wait(waiter0);
	smp_call_wait(waiter1);
	smp_call_wait(waiter2);
	smp_call_wait(waiter3);
	smp_call_wait(waiter4);
	printk("A: %d, B: %d, C: %d (should be 19,19,19)\n", atomic_read(&a), atomic_read(&b), atomic_read(&c));
	/* Two outstanding wrappers at once must not deadlock the caller. */
	printk("Attempting to deadlock by smp_calling with an outstanding wait:\n");
	smp_call_function_self(test_null_handler, NULL, &waiter0);
	printk("Sent one\n");
	smp_call_function_self(test_null_handler, NULL, &waiter1);
	printk("Sent two\n");
	smp_call_wait(waiter0);
	printk("Wait one\n");
	smp_call_wait(waiter1);
	printk("Wait two\n");
	printk("\tMade it through!\n");
	/* Requesting a 6th wrapper should fail gracefully, not hang. */
	printk("Attempting to deadlock by smp_calling more than are available:\n");
	printk("\tShould see an Insufficient message and a kernel warning.\n");
	if (smp_call_function_self(test_null_handler, NULL, &waiter0))
		printk("\tInsufficient handlers to call function (0)\n");
	if (smp_call_function_self(test_null_handler, NULL, &waiter1))
		printk("\tInsufficient handlers to call function (1)\n");
	if (smp_call_function_self(test_null_handler, NULL, &waiter2))
		printk("\tInsufficient handlers to call function (2)\n");
	if (smp_call_function_self(test_null_handler, NULL, &waiter3))
		printk("\tInsufficient handlers to call function (3)\n");
	if (smp_call_function_self(test_null_handler, NULL, &waiter4))
		printk("\tInsufficient handlers to call function (4)\n");
	if (smp_call_function_self(test_null_handler, NULL, &waiter5))
		printk("\tInsufficient handlers to call function (5)\n");
	smp_call_wait(waiter0);
	smp_call_wait(waiter1);
	smp_call_wait(waiter2);
	smp_call_wait(waiter3);
	smp_call_wait(waiter4);
	smp_call_wait(waiter5);
	printk("\tMade it through!\n");

	printk("Done\n");
}
461
462 #ifdef __i386__
463 void test_lapic_status_bit(void)
464 {
465         register_interrupt_handler(interrupt_handlers, test_vector,
466                                    test_incrementer_handler, &a);
467         #define NUM_IPI 100000
468         atomic_set(&a,0);
469         printk("IPIs received (should be 0): %d\n", a);
470         for(int i = 0; i < NUM_IPI; i++) {
471                 send_ipi(7, 0, test_vector);
472                 lapic_wait_to_send();
473         }
474         // need to wait a bit to let those IPIs get there
475         udelay(5000000);
476         printk("IPIs received (should be %d): %d\n", a, NUM_IPI);
477         // hopefully that handler never fires again.  leaving it registered for now.
478 }
479 #endif // __i386__
480
481 /******************************************************************************/
482 /*            Test Measurements: Couples with measurement.c                   */
483 // All user processes can R/W the UGDATA page
484 barrier_t*COUNT(1) bar = (barrier_t*COUNT(1))TC(UGDATA);
485 uint32_t*COUNT(1) job_to_run = (uint32_t*COUNT(1))TC(UGDATA + sizeof(barrier_t));
486 env_t* env_batch[64]; // Fairly arbitrary, just the max I plan to use.
487
488 /* Helpers for test_run_measurements */
489 static void wait_for_all_envs_to_die(void)
490 {
491         while (atomic_read(&num_envs))
492                 cpu_relax();
493 }
494
// this never returns.
/* Spawns 'num_threads' copies of the roslib_measurements binary on cores
 * [start_core, start_core + num_threads), publishes 'job_num' through the
 * shared UGDATA page, and runs them via the workqueue.  A dummy roslib_null
 * env is then run on core 0 to bounce back into the manager for the next
 * case.  Panics if control ever falls through. */
static void sync_tests(int start_core, int num_threads, int job_num)
{
	assert(start_core + num_threads <= num_cpus);
	wait_for_all_envs_to_die();
	for (int i = start_core; i < start_core + num_threads; i++)
		env_batch[i] = kfs_proc_create(kfs_lookup_path("roslib_measurements"));
	/* Switch into one env's address space so the UGDATA writes below land
	 * in the page all the test envs share. */
	lcr3(env_batch[start_core]->env_cr3);
	init_barrier(bar, num_threads);
	*job_to_run = job_num;
	for (int i = start_core; i < start_core + num_threads; i++)
		smp_call_function_single(i, run_env_handler, env_batch[i], 0);
	process_workqueue();
	// we want to fake a run, to reenter manager for the next case
	env_t *env = kfs_proc_create(kfs_lookup_path("roslib_null"));
	smp_call_function_single(0, run_env_handler, env, 0);
	process_workqueue();
	panic("whoops!\n");
}
514
515 static void async_tests(int start_core, int num_threads, int job_num)
516 {
517         int count;
518
519         assert(start_core + num_threads <= num_cpus);
520         wait_for_all_envs_to_die();
521         for (int i = start_core; i < start_core + num_threads; i++)
522                 env_batch[i] = kfs_proc_create(kfs_lookup_path("roslib_measurements"));
523         printk("async_tests: checkpoint 0\n");
524         lcr3(env_batch[start_core]->env_cr3);
525         init_barrier(bar, num_threads);
526         printk("async_tests: checkpoint 1\n");
527         *job_to_run = job_num;
528         for (int i = start_core; i < start_core + num_threads; i++)
529                 smp_call_function_single(i, run_env_handler, env_batch[i], 0);
530         count = 0;
531         while (count > -num_threads) {
532                 count = 0;
533                 for (int i = start_core; i < start_core + num_threads; i++) {
534                         count += process_generic_syscalls(env_batch[i], 1);
535                 }
536                 cpu_relax();
537         }
538         // we want to fake a run, to reenter manager for the next case
539         env_t *env = kfs_proc_create(kfs_lookup_path("roslib_null"));
540         smp_call_function_single(0, run_env_handler, env, 0);
541         process_workqueue();
542         // this all never returns
543         panic("whoops!\n");
544 }
545
546 void test_run_measurements(uint32_t job_num)
547 {
548         switch (job_num) {
549                 case 0: // Nulls
550                         printk("Case 0:\n");
551                         async_tests(2, 1, job_num);  // start core 2, 1 core total
552                         break;
553                 case 1: // Sync
554                         printk("Case 1:\n");
555                         sync_tests(2, 1, job_num);
556                         break;
557                 case 2:
558                         printk("Case 2:\n");
559                         sync_tests(2, 2, job_num);
560                         break;
561                 case 3:
562                         printk("Case 3:\n");
563                         sync_tests(0, 3, job_num);
564                         break;
565                 case 4:
566                         printk("Case 4:\n");
567                         sync_tests(0, 4, job_num);
568                         break;
569                 case 5:
570                         printk("Case 5:\n");
571                         sync_tests(0, 5, job_num);
572                         break;
573                 case 6:
574                         printk("Case 6:\n");
575                         sync_tests(0, 6, job_num);
576                         break;
577                 case 7:
578                         printk("Case 7:\n");
579                         sync_tests(0, 7, job_num);
580                         break;
581                 case 8:
582                         printk("Case 8:\n");
583                         sync_tests(0, 8, job_num);
584                         break;
585                 case 9:
586                         printk("Case 9:\n");
587                         async_tests(2, 1, job_num);
588                         break;
589                 case 10:
590                         printk("Case 10:\n");
591                         async_tests(2, 2, job_num);
592                         break;
593                 case 11:
594                         printk("Case 11:\n");
595                         async_tests(2, 3, job_num);
596                         break;
597                 case 12:
598                         printk("Case 12:\n");
599                         async_tests(2, 4, job_num);
600                         break;
601                 case 13:
602                         printk("Case 13:\n");
603                         async_tests(2, 5, job_num);
604                         break;
605                 case 14:
606                         printk("Case 14:\n");
607                         async_tests(2, 6, job_num);
608                         break;
609                 default:
610                         warn("Invalid test number!!");
611         }
612         panic("Error in test setup!!");
613 }
614
615 /************************************************************/
616 /* ISR Handler Functions */
617
/* Generic ISR used by most tests: prints the vector number (where the arch
 * exposes it in the trapframe), the handling core, and the trapframe
 * address, so delivery of each interrupt is visible. */
void test_hello_world_handler(trapframe_t *tf, void* data)
{
	int trapno;
	#if defined(__i386__)
	trapno = tf->tf_trapno;
	#elif defined(__sparc_v8__)
	trapno = (tf->tbr >> 4) & 0xFF;	// trap type field extracted from TBR
	#else
	trapno = 0;	// unknown arch: no vector info available
	#endif

	cprintf("Incoming IRQ, ISR: %d on core %d with tf at 0x%08x\n",
		trapno, core_id(), tf);
}
632
633 uint32_t print_info_lock = 0;
634
635 void test_print_info_handler(trapframe_t *tf, void* data)
636 {
637         spin_lock_irqsave(&print_info_lock);
638         cprintf("----------------------------\n");
639         cprintf("This is Core %d\n", core_id());
640 #ifdef __i386__
641         cprintf("MTRR_DEF_TYPE = 0x%08x\n", read_msr(IA32_MTRR_DEF_TYPE));
642         cprintf("MTRR Phys0 Base = 0x%016llx, Mask = 0x%016llx\n",
643                 read_msr(0x200), read_msr(0x201));
644         cprintf("MTRR Phys1 Base = 0x%016llx, Mask = 0x%016llx\n",
645                 read_msr(0x202), read_msr(0x203));
646         cprintf("MTRR Phys2 Base = 0x%016llx, Mask = 0x%016llx\n",
647                 read_msr(0x204), read_msr(0x205));
648         cprintf("MTRR Phys3 Base = 0x%016llx, Mask = 0x%016llx\n",
649                 read_msr(0x206), read_msr(0x207));
650         cprintf("MTRR Phys4 Base = 0x%016llx, Mask = 0x%016llx\n",
651                 read_msr(0x208), read_msr(0x209));
652         cprintf("MTRR Phys5 Base = 0x%016llx, Mask = 0x%016llx\n",
653                 read_msr(0x20a), read_msr(0x20b));
654         cprintf("MTRR Phys6 Base = 0x%016llx, Mask = 0x%016llx\n",
655                 read_msr(0x20c), read_msr(0x20d));
656         cprintf("MTRR Phys7 Base = 0x%016llx, Mask = 0x%016llx\n",
657                 read_msr(0x20e), read_msr(0x20f));
658 #endif // __i386__
659         cprintf("----------------------------\n");
660         spin_unlock_irqsave(&print_info_lock);
661 }
662
663 void test_barrier_handler(trapframe_t *tf, void* data)
664 {
665         cprintf("Round 1: Core %d\n", core_id());
666         waiton_barrier(&test_cpu_array);
667         waiton_barrier(&test_cpu_array);
668         waiton_barrier(&test_cpu_array);
669         waiton_barrier(&test_cpu_array);
670         waiton_barrier(&test_cpu_array);
671         waiton_barrier(&test_cpu_array);
672         cprintf("Round 2: Core %d\n", core_id());
673         waiton_barrier(&test_cpu_array);
674         cprintf("Round 3: Core %d\n", core_id());
675         // uncomment to see it fucked up
676         //cprintf("Round 4: Core %d\n", core_id());
677 }
678
/* ISR for test_pit(): decrements the wait counter so the spinning caller
 * knows the interrupt fired.  HANDLER_ATOMIC presumably marks the region
 * for the checker/annotation system — see its definition. */
static void test_waiting_handler(trapframe_t *tf, atomic_t * data)
{
	{HANDLER_ATOMIC atomic_dec(data);}
}
683
684 #ifdef __i386__
/* Compares the timing sources: busy-waits 10s on the PIT, 10s on the TSC,
 * then arms the LAPIC timer and spins until its interrupt flips the flag.
 * NOTE(review): the handler is registered *after* lapic_set_timer(); this
 * relies on the 10s timer not firing before registration completes —
 * confirm that ordering is intentional. */
void test_pit(void)
{
	cprintf("Starting test for PIT now (10s)\n");
	udelay_pit(10000000);
	cprintf("End now\n");
	cprintf("Starting test for TSC (if stable) now (10s)\n");
	udelay(10000000);
	cprintf("End now\n");

	cprintf("Starting test for LAPIC (if stable) now (10s)\n");
	enable_irq();
	lapic_set_timer(10000000, FALSE);	// FALSE presumably = non-periodic; check lapic_set_timer()

	atomic_t waiting;
	atomic_init(&waiting, 1);
	register_interrupt_handler(interrupt_handlers, test_vector,
	                           test_waiting_handler, &waiting);
	while(atomic_read(&waiting))	// cleared by test_waiting_handler
		cpu_relax();
	cprintf("End now\n");
}
706 #endif // __i386__