/* kern/src/testing.c - in-kernel test routines (Akaros) */

#ifdef __SHARC__
#pragma nosharc
#endif

#include <arch/mmu.h>
#include <arch/arch.h>
#include <smp.h>

#include <ros/memlayout.h>

#include <atomic.h>
#include <stdio.h>
#include <assert.h>
#include <string.h>
#include <testing.h>
#include <trap.h>
#include <process.h>
#include <syscall.h>
#include <timing.h>
#include <kfs.h>
#include <multiboot.h>
#include <pmap.h>
#include <page_alloc.h>

#define test_vector 0xeb

#ifdef __i386__

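/* Exercises the various IPI-sending paths (broadcast, all-others, self, and
 * targeted physical/logical destinations).  test_hello_world_handler is
 * registered on test_vector, so each receiving core should print a line. */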
void test_ipi_sending(void)
{
        extern handler_t (COUNT(NUM_INTERRUPT_HANDLERS) interrupt_handlers)[];
        int8_t state = 0;

        register_interrupt_handler(interrupt_handlers, test_vector,
                                   test_hello_world_handler, NULL);
        enable_irqsave(&state);
        cprintf("\nCORE 0 sending broadcast\n");
        send_broadcast_ipi(test_vector);
        udelay(3000000);
        cprintf("\nCORE 0 sending all others\n");
        send_all_others_ipi(test_vector);
        udelay(3000000);
        cprintf("\nCORE 0 sending self\n");
        send_self_ipi(test_vector);
        udelay(3000000);
        cprintf("\nCORE 0 sending ipi to physical 1\n");
        send_ipi(0x01, 0, test_vector);
        udelay(3000000);
        cprintf("\nCORE 0 sending ipi to physical 2\n");
        send_ipi(0x02, 0, test_vector);
        udelay(3000000);
        cprintf("\nCORE 0 sending ipi to physical 3\n");
        send_ipi(0x03, 0, test_vector);
        udelay(3000000);
        cprintf("\nCORE 0 sending ipi to physical 15\n");
        send_ipi(0x0f, 0, test_vector);
        udelay(3000000);
        cprintf("\nCORE 0 sending ipi to logical 2\n");
        send_ipi(0x02, 1, test_vector);
        udelay(3000000);
        cprintf("\nCORE 0 sending ipi to logical 1\n");
        send_ipi(0x01, 1, test_vector);
        udelay(3000000);
        cprintf("\nDone!\n");
        disable_irqsave(&state);
}

// Note this never returns and will muck with any other timer work
void test_pic_reception(void)
{
        register_interrupt_handler(interrupt_handlers, 0x20, test_hello_world_handler, NULL);
        pit_set_timer(100,TIMER_RATEGEN); // totally arbitrary time
        pic_unmask_irq(0);
        cprintf("PIC1 Mask = 0x%04x\n", inb(PIC1_DATA));
        cprintf("PIC2 Mask = 0x%04x\n", inb(PIC2_DATA));
        unmask_lapic_lvt(LAPIC_LVT_LINT0);
        cprintf("Core %d's LINT0: 0x%08x\n", core_id(), read_mmreg32(LAPIC_LVT_LINT0));
        enable_irq();
        while(1);
}

#endif // __i386__

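/* Asks every core to dump some per-core info (MTRR state on x86) via
 * test_print_info_handler. */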
void test_print_info(void)
{
        cprintf("\nCORE 0 asking all cores to print info:\n");
        smp_call_function_all(test_print_info_handler, NULL, 0);
        cprintf("\nDone!\n");
}

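/* Walks the page-coloring allocators: drains every color of the L1/L2/L3
 * allocators and then the global allocator, frees a handful of pages via
 * page_free(), and reallocates them to check they come back. */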
void test_page_coloring(void)
{
        //Print the different cache properties of our machine
        print_cache_properties("L1", &l1);
        cprintf("\n");
        print_cache_properties("L2", &l2);
        cprintf("\n");
        print_cache_properties("L3", &l3);
        cprintf("\n");

        //Print some stats about our memory
        cprintf("Max Address: %llu\n", MAX_VADDR);
        cprintf("Num Pages: %u\n", npages);

        //Declare a local variable for allocating pages
        page_t* page;

        //Run through and allocate all pages through l1_page_alloc
        cprintf("Allocating from L1 page colors:\n");
        for(int i=0; i<get_cache_num_page_colors(&l1); i++) {
                cprintf("  COLOR %d:\n", i);
                while(l1_page_alloc(&page, i) != -ENOMEM)
                        cprintf("    Page: %d\n", page2ppn(page));
        }

        //Put all the pages back by reinitializing
        page_init();

        //Run through and allocate all pages through l2_page_alloc
        cprintf("Allocating from L2 page colors:\n");
        for(int i=0; i<get_cache_num_page_colors(&l2); i++) {
                cprintf("  COLOR %d:\n", i);
                while(l2_page_alloc(&page, i) != -ENOMEM)
                        cprintf("    Page: %d\n", page2ppn(page));
        }

        //Put all the pages back by reinitializing
        page_init();

        //Run through and allocate all pages through l3_page_alloc
        cprintf("Allocating from L3 page colors:\n");
        for(int i=0; i<get_cache_num_page_colors(&l3); i++) {
                cprintf("  COLOR %d:\n", i);
                while(l3_page_alloc(&page, i) != -ENOMEM)
                        cprintf("    Page: %d\n", page2ppn(page));
        }

        //Put all the pages back by reinitializing
        page_init();

        //Run through and allocate all pages through page_alloc
        cprintf("Allocating from global allocator:\n");
        while(page_alloc(&page) != -ENOMEM)
                cprintf("    Page: %d\n", page2ppn(page));

        if(l2_page_alloc(&page, 0) != -ENOMEM)
                cprintf("Should not get here, all pages should already be gone!\n");
        cprintf("All pages gone for sure...\n");

        //Now lets put a few pages back using page_free..
        cprintf("Reinserting pages via page_free and reallocating them...\n");
        page_free(&pages[0]);
        page_free(&pages[15]);
        page_free(&pages[7]);
        page_free(&pages[6]);
        page_free(&pages[4]);

        while(page_alloc(&page) != -ENOMEM)
                cprintf("Page: %d\n", page2ppn(page));
}

extern uint8_t num_cpus;
barrier_t test_cpu_array;

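/* Initializes the shared barrier for num_cpus cores and has every core print
 * its id and spin on the barrier a few rounds (see test_barrier_handler). */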
void test_barrier(void)
{
        cprintf("Core 0 initializing barrier\n");
        init_barrier(&test_cpu_array, num_cpus);
        cprintf("Core 0 asking all cores to print ids, barrier, rinse, repeat\n");
        smp_call_function_all(test_barrier_handler, NULL, 0);
}

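/* Checks the nesting semantics of enable_irqsave()/disable_irqsave() for each
 * combination of initial interrupt state and call order.  The printd() output
 * shows EFLAGS.IF (0x200 when set); the asserts are the real test. */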
void test_interrupts_irqsave(void)
{
        int8_t state = 0;
        printd("Testing Nesting Enabling first, turning ints off:\n");
        disable_irq();
        printd("Interrupts are: %x\n", read_eflags() & FL_IF);
        assert(!irq_is_enabled());
        printd("Enabling IRQSave\n");
        enable_irqsave(&state);
        printd("Interrupts are: %x\n", read_eflags() & FL_IF);
        assert(irq_is_enabled());
        printd("Enabling IRQSave Again\n");
        enable_irqsave(&state);
        printd("Interrupts are: %x\n", read_eflags() & FL_IF);
        assert(irq_is_enabled());
        printd("Disabling IRQSave Once\n");
        disable_irqsave(&state);
        printd("Interrupts are: %x\n", read_eflags() & FL_IF);
        assert(irq_is_enabled());
        printd("Disabling IRQSave Again\n");
        disable_irqsave(&state);
        printd("Interrupts are: %x\n", read_eflags() & FL_IF);
        assert(!irq_is_enabled());
        printd("Done.  Should have been 0, 200, 200, 200, 0\n");

        printd("Testing Nesting Disabling first, turning ints on:\n");
        state = 0;
        enable_irq();
        printd("Interrupts are: %x\n", read_eflags() & FL_IF);
        assert(irq_is_enabled());
        printd("Disabling IRQSave Once\n");
        disable_irqsave(&state);
        printd("Interrupts are: %x\n", read_eflags() & FL_IF);
        assert(!irq_is_enabled());
        printd("Disabling IRQSave Again\n");
        disable_irqsave(&state);
        printd("Interrupts are: %x\n", read_eflags() & FL_IF);
        assert(!irq_is_enabled());
        printd("Enabling IRQSave Once\n");
        enable_irqsave(&state);
        printd("Interrupts are: %x\n", read_eflags() & FL_IF);
        assert(!irq_is_enabled());
        printd("Enabling IRQSave Again\n");
        enable_irqsave(&state);
        printd("Interrupts are: %x\n", read_eflags() & FL_IF);
        assert(irq_is_enabled());
        printd("Done.  Should have been 200, 0, 0, 0, 200 \n");

        state = 0;
        disable_irq();
        printd("Ints are off, enabling then disabling.\n");
        enable_irqsave(&state);
        printd("Interrupts are: %x\n", read_eflags() & FL_IF);
        assert(irq_is_enabled());
        disable_irqsave(&state);
        printd("Interrupts are: %x\n", read_eflags() & FL_IF);
        assert(!irq_is_enabled());
        printd("Done.  Should have been 200, 0\n");

        state = 0;
        enable_irq();
        printd("Ints are on, enabling then disabling.\n");
        enable_irqsave(&state);
        printd("Interrupts are: %x\n", read_eflags() & FL_IF);
        assert(irq_is_enabled());
        disable_irqsave(&state);
        printd("Interrupts are: %x\n", read_eflags() & FL_IF);
        assert(irq_is_enabled());
        printd("Done.  Should have been 200, 200\n");

        state = 0;
        disable_irq();
        printd("Ints are off, disabling then enabling.\n");
        disable_irqsave(&state);
        printd("Interrupts are: %x\n", read_eflags() & FL_IF);
        assert(!irq_is_enabled());
        enable_irqsave(&state);
        printd("Interrupts are: %x\n", read_eflags() & FL_IF);
        assert(!irq_is_enabled());
        printd("Done.  Should have been 0, 0\n");

        state = 0;
        enable_irq();
        printd("Ints are on, disabling then enabling.\n");
        disable_irqsave(&state);
        printd("Interrupts are: %x\n", read_eflags() & FL_IF);
        assert(!irq_is_enabled());
        enable_irqsave(&state);
        printd("Interrupts are: %x\n", read_eflags() & FL_IF);
        assert(irq_is_enabled());
        printd("Done.  Should have been 0, 200\n");

        disable_irq();
        cprintf("Passed enable_irqsave tests\n");
}

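/* Sanity-checks the BITMASK macros (set/clear/copy/fill/is-clear) on a mask
 * whose size (67 bits) is not a multiple of the underlying word size. */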
void test_bitmasks(void)
{
#define masksize 67
        DECL_BITMASK(mask, masksize);
        printk("size of mask %d\n", sizeof(mask));
        CLR_BITMASK(mask, masksize);
        PRINT_BITMASK(mask, masksize);
        printk("cleared\n");
        SET_BITMASK_BIT(mask, 0);
        SET_BITMASK_BIT(mask, 11);
        SET_BITMASK_BIT(mask, 17);
        SET_BITMASK_BIT(mask, masksize-1);
        printk("bits set\n");
        PRINT_BITMASK(mask, masksize);
        DECL_BITMASK(mask2, masksize);
        COPY_BITMASK(mask2, mask, masksize);
        printk("copy of original mask, should be the same as the prev\n");
        PRINT_BITMASK(mask2, masksize);
        CLR_BITMASK_BIT(mask, 11);
        printk("11 cleared\n");
        PRINT_BITMASK(mask, masksize);
        printk("bit 17 is %d (should be 1)\n", GET_BITMASK_BIT(mask, 17));
        printk("bit 11 is %d (should be 0)\n", GET_BITMASK_BIT(mask, 11));
        FILL_BITMASK(mask, masksize);
        PRINT_BITMASK(mask, masksize);
        printk("should be all 1's, except for a few at the end\n");
        printk("Is Clear?: %d (should be 0)\n", BITMASK_IS_CLEAR(mask,masksize));
        CLR_BITMASK(mask, masksize);
        PRINT_BITMASK(mask, masksize);
        printk("Is Clear?: %d (should be 1)\n", BITMASK_IS_CLEAR(mask,masksize));
        printk("should be cleared\n");
}

checklist_t* the_global_list;

void test_checklist_handler(trapframe_t *tf, void* data)
{
        udelay(1000000);
        cprintf("down_checklist(%x,%d)\n", the_global_list, core_id());
        down_checklist(the_global_list);
}

extern uint8_t num_cpus;

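/* Builds a checklist sized for MAX_NUM_CPUS, commits a mask covering the
 * first num_cpus cores, then has every core down the list via
 * test_checklist_handler and waits for it to clear. */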
void test_checklists(void)
{
        INIT_CHECKLIST(a_list, MAX_NUM_CPUS);
        the_global_list = &a_list;
        printk("Checklist Build, mask size: %d\n", sizeof(a_list.mask.bits));
        printk("mask\n");
        PRINT_BITMASK(a_list.mask.bits, a_list.mask.size);
        SET_BITMASK_BIT(a_list.mask.bits, 11);
        printk("Set bit 11\n");
        PRINT_BITMASK(a_list.mask.bits, a_list.mask.size);

        CLR_BITMASK(a_list.mask.bits, a_list.mask.size);
        INIT_CHECKLIST_MASK(a_mask, MAX_NUM_CPUS);
        FILL_BITMASK(a_mask.bits, num_cpus);
        //CLR_BITMASK_BIT(a_mask.bits, core_id());
        //SET_BITMASK_BIT(a_mask.bits, 1);
        //printk("New mask (1, 17, 25):\n");
        printk("Created new mask, filled up to num_cpus\n");
        PRINT_BITMASK(a_mask.bits, a_mask.size);
        printk("committing new mask\n");
        commit_checklist_wait(&a_list, &a_mask);
        printk("Old mask (copied onto):\n");
        PRINT_BITMASK(a_list.mask.bits, a_list.mask.size);
        //smp_call_function_single(1, test_checklist_handler, 0, 0);

        smp_call_function_all(test_checklist_handler, NULL, 0);

        printk("Waiting on checklist\n");
        waiton_checklist(&a_list);
        printk("Done Waiting!\n");
}

atomic_t a, b, c;

void test_incrementer_handler(trapframe_t *tf, atomic_t* data)
{
        assert(data);
        atomic_inc(data);
}

void test_null_handler(trapframe_t *tf, void* data)
{
        asm volatile("nop");
}

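/* Exercises smp_call_function_{self,all,single} with and without waiters,
 * checks that back-to-back calls are not dropped (the 19,19,19 counts assume
 * an 8-core machine), and pokes at the handler-wrapper exhaustion paths. */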
void test_smp_call_functions(void)
{
        int i;
        atomic_init(&a, 0);
        atomic_init(&b, 0);
        atomic_init(&c, 0);
        handler_wrapper_t *waiter0 = 0, *waiter1 = 0, *waiter2 = 0, *waiter3 = 0,
                          *waiter4 = 0, *waiter5 = 0;
        uint8_t me = core_id();
        printk("\nCore %d: SMP Call Self (nowait):\n", me);
        printk("---------------------\n");
        smp_call_function_self(test_hello_world_handler, NULL, 0);
        printk("\nCore %d: SMP Call Self (wait):\n", me);
        printk("---------------------\n");
        smp_call_function_self(test_hello_world_handler, NULL, &waiter0);
        smp_call_wait(waiter0);
        printk("\nCore %d: SMP Call All (nowait):\n", me);
        printk("---------------------\n");
        smp_call_function_all(test_hello_world_handler, NULL, 0);
        printk("\nCore %d: SMP Call All (wait):\n", me);
        printk("---------------------\n");
        smp_call_function_all(test_hello_world_handler, NULL, &waiter0);
        smp_call_wait(waiter0);
        printk("\nCore %d: SMP Call All-Else Individually, in order (nowait):\n", me);
        printk("---------------------\n");
        for(i = 1; i < num_cpus; i++)
                smp_call_function_single(i, test_hello_world_handler, NULL, 0);
        printk("\nCore %d: SMP Call Self (wait):\n", me);
        printk("---------------------\n");
        smp_call_function_self(test_hello_world_handler, NULL, &waiter0);
        smp_call_wait(waiter0);
        printk("\nCore %d: SMP Call All-Else Individually, in order (wait):\n", me);
        printk("---------------------\n");
        for(i = 1; i < num_cpus; i++)
        {
                smp_call_function_single(i, test_hello_world_handler, NULL, &waiter0);
                smp_call_wait(waiter0);
        }
        printk("\nTesting to see if any IPI-functions are dropped when not waiting:\n");
        printk("A: %d, B: %d, C: %d (should be 0,0,0)\n", atomic_read(&a), atomic_read(&b), atomic_read(&c));
        smp_call_function_all(test_incrementer_handler, &a, 0);
        smp_call_function_all(test_incrementer_handler, &b, 0);
        smp_call_function_all(test_incrementer_handler, &c, 0);
        // if i can clobber a previous IPI, the interleaving might do it
        smp_call_function_single(1 % num_cpus, test_incrementer_handler, &a, 0);
        smp_call_function_single(2 % num_cpus, test_incrementer_handler, &b, 0);
        smp_call_function_single(3 % num_cpus, test_incrementer_handler, &c, 0);
        smp_call_function_single(4 % num_cpus, test_incrementer_handler, &a, 0);
        smp_call_function_single(5 % num_cpus, test_incrementer_handler, &b, 0);
        smp_call_function_single(6 % num_cpus, test_incrementer_handler, &c, 0);
        smp_call_function_all(test_incrementer_handler, &a, 0);
        smp_call_function_single(3 % num_cpus, test_incrementer_handler, &c, 0);
        smp_call_function_all(test_incrementer_handler, &b, 0);
        smp_call_function_single(1 % num_cpus, test_incrementer_handler, &a, 0);
        smp_call_function_all(test_incrementer_handler, &c, 0);
        smp_call_function_single(2 % num_cpus, test_incrementer_handler, &b, 0);
        // wait, so we're sure the others finish before printing.
        // without this, we could (and did) get 19,18,19, since the B_inc
        // handler didn't finish yet
        smp_call_function_self(test_null_handler, NULL, &waiter0);
        // need to grab all 5 handlers (max), since the code moves to the next free.
        smp_call_function_self(test_null_handler, NULL, &waiter1);
        smp_call_function_self(test_null_handler, NULL, &waiter2);
        smp_call_function_self(test_null_handler, NULL, &waiter3);
        smp_call_function_self(test_null_handler, NULL, &waiter4);
        smp_call_wait(waiter0);
        smp_call_wait(waiter1);
        smp_call_wait(waiter2);
        smp_call_wait(waiter3);
        smp_call_wait(waiter4);
        printk("A: %d, B: %d, C: %d (should be 19,19,19)\n", atomic_read(&a), atomic_read(&b), atomic_read(&c));
        printk("Attempting to deadlock by smp_calling with an outstanding wait:\n");
        smp_call_function_self(test_null_handler, NULL, &waiter0);
        printk("Sent one\n");
        smp_call_function_self(test_null_handler, NULL, &waiter1);
        printk("Sent two\n");
        smp_call_wait(waiter0);
        printk("Wait one\n");
        smp_call_wait(waiter1);
        printk("Wait two\n");
        printk("\tMade it through!\n");
        printk("Attempting to deadlock by smp_calling more than are available:\n");
        printk("\tShould see an Insufficient message and a kernel warning.\n");
        if (smp_call_function_self(test_null_handler, NULL, &waiter0))
                printk("\tInsufficient handlers to call function (0)\n");
        if (smp_call_function_self(test_null_handler, NULL, &waiter1))
                printk("\tInsufficient handlers to call function (1)\n");
        if (smp_call_function_self(test_null_handler, NULL, &waiter2))
                printk("\tInsufficient handlers to call function (2)\n");
        if (smp_call_function_self(test_null_handler, NULL, &waiter3))
                printk("\tInsufficient handlers to call function (3)\n");
        if (smp_call_function_self(test_null_handler, NULL, &waiter4))
                printk("\tInsufficient handlers to call function (4)\n");
        if (smp_call_function_self(test_null_handler, NULL, &waiter5))
                printk("\tInsufficient handlers to call function (5)\n");
        smp_call_wait(waiter0);
        smp_call_wait(waiter1);
        smp_call_wait(waiter2);
        smp_call_wait(waiter3);
        smp_call_wait(waiter4);
        smp_call_wait(waiter5);
        printk("\tMade it through!\n");

        printk("Done\n");
}

#ifdef __i386__
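/* Hammers one core (physical APIC id 7) with NUM_IPI IPIs, throttling on the
 * LAPIC delivery-status bit via lapic_wait_to_send(), and counts receptions
 * with test_incrementer_handler. */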
void test_lapic_status_bit(void)
{
        register_interrupt_handler(interrupt_handlers, test_vector,
                                   test_incrementer_handler, &a);
        #define NUM_IPI 100000
        atomic_set(&a,0);
        printk("IPIs received (should be 0): %d\n", atomic_read(&a));
        for(int i = 0; i < NUM_IPI; i++) {
                send_ipi(7, 0, test_vector);
                lapic_wait_to_send();
        }
        // need to wait a bit to let those IPIs get there
        udelay(5000000);
        // expected count first, then the observed count
        printk("IPIs received (should be %d): %d\n", NUM_IPI, atomic_read(&a));
        // hopefully that handler never fires again.  leaving it registered for now.
}
#endif // __i386__

/******************************************************************************/
/*            Test Measurements: Couples with measurement.c                  */
// All user processes can R/W the UGDATA page
barrier_t*COUNT(1) bar = (barrier_t*COUNT(1))TC(UGDATA);
uint32_t*COUNT(1) job_to_run = (uint32_t*COUNT(1))TC(UGDATA + sizeof(barrier_t));
env_t* env_batch[64]; // Fairly arbitrary, just the max I plan to use.

/* Helpers for test_run_measurements */
static void wait_for_all_envs_to_die(void)
{
        while (atomic_read(&num_envs))
                cpu_relax();
}

// this never returns.
static void sync_tests(int start_core, int num_threads, int job_num)
{
        assert(start_core + num_threads <= num_cpus);
        wait_for_all_envs_to_die();
        for (int i = start_core; i < start_core + num_threads; i++)
                env_batch[i] = kfs_proc_create(kfs_lookup_path("roslib_measurements"));
        lcr3(env_batch[start_core]->env_cr3);
        init_barrier(bar, num_threads);
        *job_to_run = job_num;
        for (int i = start_core; i < start_core + num_threads; i++)
                smp_call_function_single(i, run_env_handler, env_batch[i], 0);
        process_workqueue();
        // we want to fake a run, to reenter manager for the next case
        env_t *env = kfs_proc_create(kfs_lookup_path("roslib_null"));
        smp_call_function_single(0, run_env_handler, env, 0);
        process_workqueue();
        panic("whoops!\n");
}

static void async_tests(int start_core, int num_threads, int job_num)
{
        int count;

        assert(start_core + num_threads <= num_cpus);
        wait_for_all_envs_to_die();
        for (int i = start_core; i < start_core + num_threads; i++)
                env_batch[i] = kfs_proc_create(kfs_lookup_path("roslib_measurements"));
        printk("async_tests: checkpoint 0\n");
        lcr3(env_batch[start_core]->env_cr3);
        init_barrier(bar, num_threads);
        printk("async_tests: checkpoint 1\n");
        *job_to_run = job_num;
        for (int i = start_core; i < start_core + num_threads; i++)
                smp_call_function_single(i, run_env_handler, env_batch[i], 0);
        count = 0;
        while (count > -num_threads) {
                count = 0;
                for (int i = start_core; i < start_core + num_threads; i++) {
                        count += process_generic_syscalls(env_batch[i], 1);
                }
                cpu_relax();
        }
        // we want to fake a run, to reenter manager for the next case
        env_t *env = kfs_proc_create(kfs_lookup_path("roslib_null"));
        smp_call_function_single(0, run_env_handler, env, 0);
        process_workqueue();
        // this all never returns
        panic("whoops!\n");
}

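/* Dispatches one of the measurement jobs from measurement.c: case 0 (nulls)
 * and cases 9-14 use async_tests, cases 1-8 use sync_tests, on varying core
 * counts.  Neither helper returns. */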
void test_run_measurements(uint32_t job_num)
{
        switch (job_num) {
                case 0: // Nulls
                        printk("Case 0:\n");
                        async_tests(2, 1, job_num);  // start core 2, 1 core total
                        break;
                case 1: // Sync
                        printk("Case 1:\n");
                        sync_tests(2, 1, job_num);
                        break;
                case 2:
                        printk("Case 2:\n");
                        sync_tests(2, 2, job_num);
                        break;
                case 3:
                        printk("Case 3:\n");
                        sync_tests(0, 3, job_num);
                        break;
                case 4:
                        printk("Case 4:\n");
                        sync_tests(0, 4, job_num);
                        break;
                case 5:
                        printk("Case 5:\n");
                        sync_tests(0, 5, job_num);
                        break;
                case 6:
                        printk("Case 6:\n");
                        sync_tests(0, 6, job_num);
                        break;
                case 7:
                        printk("Case 7:\n");
                        sync_tests(0, 7, job_num);
                        break;
                case 8:
                        printk("Case 8:\n");
                        sync_tests(0, 8, job_num);
                        break;
                case 9:
                        printk("Case 9:\n");
                        async_tests(2, 1, job_num);
                        break;
                case 10:
                        printk("Case 10:\n");
                        async_tests(2, 2, job_num);
                        break;
                case 11:
                        printk("Case 11:\n");
                        async_tests(2, 3, job_num);
                        break;
                case 12:
                        printk("Case 12:\n");
                        async_tests(2, 4, job_num);
                        break;
                case 13:
                        printk("Case 13:\n");
                        async_tests(2, 5, job_num);
                        break;
                case 14:
                        printk("Case 14:\n");
                        async_tests(2, 6, job_num);
                        break;
                default:
                        warn("Invalid test number!!");
        }
        panic("Error in test setup!!");
}

/************************************************************/
/* ISR Handler Functions */

void test_hello_world_handler(trapframe_t *tf, void* data)
{
        int trapno;
        #if defined(__i386__)
        trapno = tf->tf_trapno;
        #elif defined(__sparc_v8__)
        trapno = (tf->tbr >> 4) & 0xFF;
        #else
        trapno = 0;
        #endif

        cprintf("Incoming IRQ, ISR: %d on core %d with tf at 0x%08x\n",
                trapno, core_id(), tf);
}

uint32_t print_info_lock = 0;

void test_print_info_handler(trapframe_t *tf, void* data)
{
        spin_lock_irqsave(&print_info_lock);
        cprintf("----------------------------\n");
        cprintf("This is Core %d\n", core_id());
#ifdef __i386__
        cprintf("MTRR_DEF_TYPE = 0x%08x\n", read_msr(IA32_MTRR_DEF_TYPE));
        cprintf("MTRR Phys0 Base = 0x%016llx, Mask = 0x%016llx\n",
                read_msr(0x200), read_msr(0x201));
        cprintf("MTRR Phys1 Base = 0x%016llx, Mask = 0x%016llx\n",
                read_msr(0x202), read_msr(0x203));
        cprintf("MTRR Phys2 Base = 0x%016llx, Mask = 0x%016llx\n",
                read_msr(0x204), read_msr(0x205));
        cprintf("MTRR Phys3 Base = 0x%016llx, Mask = 0x%016llx\n",
                read_msr(0x206), read_msr(0x207));
        cprintf("MTRR Phys4 Base = 0x%016llx, Mask = 0x%016llx\n",
                read_msr(0x208), read_msr(0x209));
        cprintf("MTRR Phys5 Base = 0x%016llx, Mask = 0x%016llx\n",
                read_msr(0x20a), read_msr(0x20b));
        cprintf("MTRR Phys6 Base = 0x%016llx, Mask = 0x%016llx\n",
                read_msr(0x20c), read_msr(0x20d));
        cprintf("MTRR Phys7 Base = 0x%016llx, Mask = 0x%016llx\n",
                read_msr(0x20e), read_msr(0x20f));
#endif // __i386__
        cprintf("----------------------------\n");
        spin_unlock_irqsave(&print_info_lock);
}

void test_barrier_handler(trapframe_t *tf, void* data)
{
        cprintf("Round 1: Core %d\n", core_id());
        waiton_barrier(&test_cpu_array);
        waiton_barrier(&test_cpu_array);
        waiton_barrier(&test_cpu_array);
        waiton_barrier(&test_cpu_array);
        waiton_barrier(&test_cpu_array);
        waiton_barrier(&test_cpu_array);
        cprintf("Round 2: Core %d\n", core_id());
        waiton_barrier(&test_cpu_array);
        cprintf("Round 3: Core %d\n", core_id());
        // uncomment to see it fucked up
        //cprintf("Round 4: Core %d\n", core_id());
}

static void test_waiting_handler(trapframe_t *tf, atomic_t * data)
{
        {HANDLER_ATOMIC atomic_dec(data);}
}

#ifdef __i386__
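/* Spends ~10s each busy-waiting on the PIT, the TSC, and the LAPIC one-shot
 * timer.  The LAPIC leg waits for test_waiting_handler (registered on
 * test_vector, which the timer is presumably configured to use) to clear the
 * waiting count. */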
void test_pit(void)
{
        cprintf("Starting test for PIT now (10s)\n");
        udelay_pit(10000000);
        cprintf("End now\n");
        cprintf("Starting test for TSC (if stable) now (10s)\n");
        udelay(10000000);
        cprintf("End now\n");

        cprintf("Starting test for LAPIC (if stable) now (10s)\n");
        enable_irq();
        lapic_set_timer(10000000, FALSE);

        atomic_t waiting;
        atomic_init(&waiting, 1);
        register_interrupt_handler(interrupt_handlers, test_vector,
                                   test_waiting_handler, &waiting);
        while(atomic_read(&waiting))
                cpu_relax();
        cprintf("End now\n");
}
#endif // __i386__