[akaros.git] / kern / src / testing.c

#include <arch/mmu.h>
#include <arch/arch.h>
#include <smp.h>

#include <ros/memlayout.h>

#include <atomic.h>
#include <stdio.h>
#include <assert.h>
#include <string.h>
#include <testing.h>
#include <trap.h>
#include <process.h>
#include <syscall.h>
#include <timing.h>
#include <kfs.h>
#include <multiboot.h>
#include <pmap.h>
#include <page_alloc.h>

#define test_vector 0xeb

#ifdef __i386__

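/* Exercises the x86 IPI send paths: broadcast, all-but-self, self, and
 * targeted sends to physical and logical APIC destinations, each separated by
 * a long udelay so the hello-world handler output from one send can be seen
 * before the next.  The targeted sends assume the machine actually has cores
 * at those APIC ids (e.g. physical 15). */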
void test_ipi_sending(void)
{
        extern handler_t (COUNT(NUM_INTERRUPT_HANDLERS) interrupt_handlers)[];
        int8_t state = 0;

        register_interrupt_handler(interrupt_handlers, test_vector,
                                   test_hello_world_handler, NULL);
        enable_irqsave(&state);
        cprintf("\nCORE 0 sending broadcast\n");
        send_broadcast_ipi(test_vector);
        udelay(3000000);
        cprintf("\nCORE 0 sending all others\n");
        send_all_others_ipi(test_vector);
        udelay(3000000);
        cprintf("\nCORE 0 sending self\n");
        send_self_ipi(test_vector);
        udelay(3000000);
        cprintf("\nCORE 0 sending ipi to physical 1\n");
        send_ipi(0x01, 0, test_vector);
        udelay(3000000);
        cprintf("\nCORE 0 sending ipi to physical 2\n");
        send_ipi(0x02, 0, test_vector);
        udelay(3000000);
        cprintf("\nCORE 0 sending ipi to physical 3\n");
        send_ipi(0x03, 0, test_vector);
        udelay(3000000);
        cprintf("\nCORE 0 sending ipi to physical 15\n");
        send_ipi(0x0f, 0, test_vector);
        udelay(3000000);
        cprintf("\nCORE 0 sending ipi to logical 2\n");
        send_ipi(0x02, 1, test_vector);
        udelay(3000000);
        cprintf("\nCORE 0 sending ipi to logical 1\n");
        send_ipi(0x01, 1, test_vector);
        udelay(3000000);
        cprintf("\nDone!\n");
        disable_irqsave(&state);
}

// Note this never returns and will muck with any other timer work
void test_pic_reception(void)
{
        register_interrupt_handler(interrupt_handlers, 0x20,
                                   test_hello_world_handler, NULL);
        pit_set_timer(100, TIMER_RATEGEN); // totally arbitrary time
        pic_unmask_irq(0);
        cprintf("PIC1 Mask = 0x%04x\n", inb(PIC1_DATA));
        cprintf("PIC2 Mask = 0x%04x\n", inb(PIC2_DATA));
        unmask_lapic_lvt(LAPIC_LVT_LINT0);
        cprintf("Core %d's LINT0: 0x%08x\n", core_id(),
                read_mmreg32(LAPIC_LVT_LINT0));
        enable_irq();
        while(1);
}

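/* Routes IRQ 0 (the PIT) through the IOAPIC to core 3, lets it fire for a
 * while, then unroutes it before returning.  Assumes at least four cores are
 * present. */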
void test_ioapic_pit_reroute(void)
{
        register_interrupt_handler(interrupt_handlers, 0x20,
                                   test_hello_world_handler, NULL);
        ioapic_route_irq(0, 3);

        cprintf("Starting pit on core 3....\n");
        udelay(3000000);
        pit_set_timer(0xFFFE, TIMER_RATEGEN); // totally arbitrary time

        udelay(3000000);
        ioapic_unroute_irq(0);
        udelay(300000);
        cprintf("Masked pit. Waiting before return...\n");
        udelay(3000000);
}

#endif // __i386__


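/* Asks every core (via an SMP call) to dump its id and, on x86, its MTRR
 * state; see test_print_info_handler() below. */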
void test_print_info(void)
{
        cprintf("\nCORE 0 asking all cores to print info:\n");
        smp_call_function_all(test_print_info_handler, NULL, 0);
        cprintf("\nDone!\n");
}

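/* Drains the color-aware page allocators (L1, L2, L3) and then the global
 * allocator, printing each page as it goes, and finally frees and reallocates
 * a handful of pages to sanity check page_free(). */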
void test_page_coloring(void)
{
        // Print the different cache properties of our machine
        print_cache_properties("L1", &l1);
        cprintf("\n");
        print_cache_properties("L2", &l2);
        cprintf("\n");
        print_cache_properties("L3", &l3);
        cprintf("\n");

        // Print some stats about our memory
        cprintf("Max Address: %llu\n", MAX_VADDR);
        cprintf("Num Pages: %u\n", npages);

        // Declare a local variable for allocating pages
        page_t* page;

        // Run through and allocate all pages through l1_page_alloc
        cprintf("Allocating from L1 page colors:\n");
        for (int i = 0; i < get_cache_num_page_colors(&l1); i++) {
                cprintf("  COLOR %d:\n", i);
                while (l1_page_alloc(&page, i) != -ENOMEM)
                        cprintf("    Page: %d\n", page2ppn(page));
        }

        // Put all the pages back by reinitializing
        page_init();

        // Run through and allocate all pages through l2_page_alloc
        cprintf("Allocating from L2 page colors:\n");
        for (int i = 0; i < get_cache_num_page_colors(&l2); i++) {
                cprintf("  COLOR %d:\n", i);
                while (l2_page_alloc(&page, i) != -ENOMEM)
                        cprintf("    Page: %d\n", page2ppn(page));
        }

        // Put all the pages back by reinitializing
        page_init();

        // Run through and allocate all pages through l3_page_alloc
        cprintf("Allocating from L3 page colors:\n");
        for (int i = 0; i < get_cache_num_page_colors(&l3); i++) {
                cprintf("  COLOR %d:\n", i);
                while (l3_page_alloc(&page, i) != -ENOMEM)
                        cprintf("    Page: %d\n", page2ppn(page));
        }

        // Put all the pages back by reinitializing
        page_init();

        // Run through and allocate all pages through page_alloc
        cprintf("Allocating from global allocator:\n");
        while (page_alloc(&page) != -ENOMEM)
                cprintf("    Page: %d\n", page2ppn(page));

        if (l2_page_alloc(&page, 0) != -ENOMEM)
                cprintf("Should not get here, all pages should already be gone!\n");
        cprintf("All pages gone for sure...\n");

        // Now let's put a few pages back using page_free..
        cprintf("Reinserting pages via page_free and reallocating them...\n");
        page_free(&pages[0]);
        page_free(&pages[15]);
        page_free(&pages[7]);
        page_free(&pages[6]);
        page_free(&pages[4]);

        while (page_alloc(&page) != -ENOMEM)
                cprintf("Page: %d\n", page2ppn(page));
}

extern uint8_t num_cpus;
barrier_t test_cpu_array;

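/* Initializes a barrier for all cores and has each core print its id, wait on
 * the barrier, and repeat (see test_barrier_handler() below). */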
void test_barrier(void)
{
        cprintf("Core 0 initializing barrier\n");
        init_barrier(&test_cpu_array, num_cpus);
        cprintf("Core 0 asking all cores to print ids, barrier, rinse, repeat\n");
        smp_call_function_all(test_barrier_handler, NULL, 0);
}

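/* Checks the nesting semantics of enable_irqsave()/disable_irqsave(): the
 * int8_t state tracks nesting depth, and the interrupt setting should only
 * revert once the matching outermost call is made.  The "0 / 200" values in
 * the printd output are the raw EFLAGS.IF bit (0x200). */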
void test_interrupts_irqsave(void)
{
        int8_t state = 0;
        printd("Testing Nesting Enabling first, turning ints off:\n");
        disable_irq();
        printd("Interrupts are: %x\n", read_eflags() & FL_IF);
        assert(!irq_is_enabled());
        printd("Enabling IRQSave\n");
        enable_irqsave(&state);
        printd("Interrupts are: %x\n", read_eflags() & FL_IF);
        assert(irq_is_enabled());
        printd("Enabling IRQSave Again\n");
        enable_irqsave(&state);
        printd("Interrupts are: %x\n", read_eflags() & FL_IF);
        assert(irq_is_enabled());
        printd("Disabling IRQSave Once\n");
        disable_irqsave(&state);
        printd("Interrupts are: %x\n", read_eflags() & FL_IF);
        assert(irq_is_enabled());
        printd("Disabling IRQSave Again\n");
        disable_irqsave(&state);
        printd("Interrupts are: %x\n", read_eflags() & FL_IF);
        assert(!irq_is_enabled());
        printd("Done.  Should have been 0, 200, 200, 200, 0\n");

        printd("Testing Nesting Disabling first, turning ints on:\n");
        state = 0;
        enable_irq();
        printd("Interrupts are: %x\n", read_eflags() & FL_IF);
        assert(irq_is_enabled());
        printd("Disabling IRQSave Once\n");
        disable_irqsave(&state);
        printd("Interrupts are: %x\n", read_eflags() & FL_IF);
        assert(!irq_is_enabled());
        printd("Disabling IRQSave Again\n");
        disable_irqsave(&state);
        printd("Interrupts are: %x\n", read_eflags() & FL_IF);
        assert(!irq_is_enabled());
        printd("Enabling IRQSave Once\n");
        enable_irqsave(&state);
        printd("Interrupts are: %x\n", read_eflags() & FL_IF);
        assert(!irq_is_enabled());
        printd("Enabling IRQSave Again\n");
        enable_irqsave(&state);
        printd("Interrupts are: %x\n", read_eflags() & FL_IF);
        assert(irq_is_enabled());
        printd("Done.  Should have been 200, 0, 0, 0, 200\n");

        state = 0;
        disable_irq();
        printd("Ints are off, enabling then disabling.\n");
        enable_irqsave(&state);
        printd("Interrupts are: %x\n", read_eflags() & FL_IF);
        assert(irq_is_enabled());
        disable_irqsave(&state);
        printd("Interrupts are: %x\n", read_eflags() & FL_IF);
        assert(!irq_is_enabled());
        printd("Done.  Should have been 200, 0\n");

        state = 0;
        enable_irq();
        printd("Ints are on, enabling then disabling.\n");
        enable_irqsave(&state);
        printd("Interrupts are: %x\n", read_eflags() & FL_IF);
        assert(irq_is_enabled());
        disable_irqsave(&state);
        printd("Interrupts are: %x\n", read_eflags() & FL_IF);
        assert(irq_is_enabled());
        printd("Done.  Should have been 200, 200\n");

        state = 0;
        disable_irq();
        printd("Ints are off, disabling then enabling.\n");
        disable_irqsave(&state);
        printd("Interrupts are: %x\n", read_eflags() & FL_IF);
        assert(!irq_is_enabled());
        enable_irqsave(&state);
        printd("Interrupts are: %x\n", read_eflags() & FL_IF);
        assert(!irq_is_enabled());
        printd("Done.  Should have been 0, 0\n");

        state = 0;
        enable_irq();
        printd("Ints are on, disabling then enabling.\n");
        disable_irqsave(&state);
        printd("Interrupts are: %x\n", read_eflags() & FL_IF);
        assert(!irq_is_enabled());
        enable_irqsave(&state);
        printd("Interrupts are: %x\n", read_eflags() & FL_IF);
        assert(irq_is_enabled());
        printd("Done.  Should have been 0, 200\n");

        disable_irq();
        cprintf("Passed enable_irqsave tests\n");
}

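/* Walks the BITMASK macros on a 67-bit mask: clear, set, copy, get, fill, and
 * the IS_CLEAR check, printing the mask after each step. */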
void test_bitmasks(void)
{
#define masksize 67
        DECL_BITMASK(mask, masksize);
        printk("size of mask %d\n", sizeof(mask));
        CLR_BITMASK(mask, masksize);
        PRINT_BITMASK(mask, masksize);
        printk("cleared\n");
        SET_BITMASK_BIT(mask, 0);
        SET_BITMASK_BIT(mask, 11);
        SET_BITMASK_BIT(mask, 17);
        SET_BITMASK_BIT(mask, masksize-1);
        printk("bits set\n");
        PRINT_BITMASK(mask, masksize);
        DECL_BITMASK(mask2, masksize);
        COPY_BITMASK(mask2, mask, masksize);
        printk("copy of original mask, should be the same as the prev\n");
        PRINT_BITMASK(mask2, masksize);
        CLR_BITMASK_BIT(mask, 11);
        printk("11 cleared\n");
        PRINT_BITMASK(mask, masksize);
        printk("bit 17 is %d (should be 1)\n", GET_BITMASK_BIT(mask, 17));
        printk("bit 11 is %d (should be 0)\n", GET_BITMASK_BIT(mask, 11));
        FILL_BITMASK(mask, masksize);
        PRINT_BITMASK(mask, masksize);
        printk("should be all 1's, except for a few at the end\n");
        printk("Is Clear?: %d (should be 0)\n", BITMASK_IS_CLEAR(mask, masksize));
        CLR_BITMASK(mask, masksize);
        PRINT_BITMASK(mask, masksize);
        printk("Is Clear?: %d (should be 1)\n", BITMASK_IS_CLEAR(mask, masksize));
        printk("should be cleared\n");
}

checklist_t* the_global_list;

void test_checklist_handler(trapframe_t *tf, void* data)
{
        udelay(1000000);
        cprintf("down_checklist(%x,%d)\n", the_global_list, core_id());
        down_checklist(the_global_list);
}

extern uint8_t num_cpus;

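/* Builds a checklist sized for MAX_NUM_CPUS, commits a mask covering the
 * first num_cpus cores, then has every core down the checklist from an IPI
 * handler while core 0 waits on it. */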
void test_checklists(void)
{
        INIT_CHECKLIST(a_list, MAX_NUM_CPUS);
        the_global_list = &a_list;
        printk("Checklist built, mask size: %d\n", sizeof(a_list.mask.bits));
        printk("mask\n");
        PRINT_BITMASK(a_list.mask.bits, a_list.mask.size);
        SET_BITMASK_BIT(a_list.mask.bits, 11);
        printk("Set bit 11\n");
        PRINT_BITMASK(a_list.mask.bits, a_list.mask.size);

        CLR_BITMASK(a_list.mask.bits, a_list.mask.size);
        INIT_CHECKLIST_MASK(a_mask, MAX_NUM_CPUS);
        FILL_BITMASK(a_mask.bits, num_cpus);
        //CLR_BITMASK_BIT(a_mask.bits, core_id());
        //SET_BITMASK_BIT(a_mask.bits, 1);
        //printk("New mask (1, 17, 25):\n");
        printk("Created new mask, filled up to num_cpus\n");
        PRINT_BITMASK(a_mask.bits, a_mask.size);
        printk("committing new mask\n");
        commit_checklist_wait(&a_list, &a_mask);
        printk("Old mask (copied onto):\n");
        PRINT_BITMASK(a_list.mask.bits, a_list.mask.size);
        //smp_call_function_single(1, test_checklist_handler, 0, 0);

        smp_call_function_all(test_checklist_handler, NULL, 0);

        printk("Waiting on checklist\n");
        waiton_checklist(&a_list);
        printk("Done Waiting!\n");
}

atomic_t a, b, c;

void test_incrementer_handler(trapframe_t *tf, atomic_t* data)
{
        assert(data);
        atomic_inc(data);
}

void test_null_handler(trapframe_t *tf, void* data)
{
        asm volatile("nop");
}

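/* Exercises smp_call_function_{self,all,single} with and without wrappers,
 * then checks that back-to-back calls are not dropped by counting handler
 * runs in atomics a, b, and c.  Each counter is hit by two _all calls and
 * three _single calls, so the expected value is 2 * num_cpus + 3; the
 * "should be 19" message assumes an 8-core machine.  The final section
 * deliberately tries to exhaust the pool of handler wrappers. */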
void test_smp_call_functions(void)
{
        int i;
        atomic_init(&a, 0);
        atomic_init(&b, 0);
        atomic_init(&c, 0);
        handler_wrapper_t *waiter0 = 0, *waiter1 = 0, *waiter2 = 0, *waiter3 = 0,
                          *waiter4 = 0, *waiter5 = 0;
        uint8_t me = core_id();
        printk("\nCore %d: SMP Call Self (nowait):\n", me);
        printk("---------------------\n");
        smp_call_function_self(test_hello_world_handler, NULL, 0);
        printk("\nCore %d: SMP Call Self (wait):\n", me);
        printk("---------------------\n");
        smp_call_function_self(test_hello_world_handler, NULL, &waiter0);
        smp_call_wait(waiter0);
        printk("\nCore %d: SMP Call All (nowait):\n", me);
        printk("---------------------\n");
        smp_call_function_all(test_hello_world_handler, NULL, 0);
        printk("\nCore %d: SMP Call All (wait):\n", me);
        printk("---------------------\n");
        smp_call_function_all(test_hello_world_handler, NULL, &waiter0);
        smp_call_wait(waiter0);
        printk("\nCore %d: SMP Call All-Else Individually, in order (nowait):\n", me);
        printk("---------------------\n");
        for (i = 1; i < num_cpus; i++)
                smp_call_function_single(i, test_hello_world_handler, NULL, 0);
        printk("\nCore %d: SMP Call Self (wait):\n", me);
        printk("---------------------\n");
        smp_call_function_self(test_hello_world_handler, NULL, &waiter0);
        smp_call_wait(waiter0);
        printk("\nCore %d: SMP Call All-Else Individually, in order (wait):\n", me);
        printk("---------------------\n");
        for (i = 1; i < num_cpus; i++) {
                smp_call_function_single(i, test_hello_world_handler, NULL, &waiter0);
                smp_call_wait(waiter0);
        }
        printk("\nTesting to see if any IPI-functions are dropped when not waiting:\n");
        printk("A: %d, B: %d, C: %d (should be 0,0,0)\n",
               atomic_read(&a), atomic_read(&b), atomic_read(&c));
        smp_call_function_all(test_incrementer_handler, &a, 0);
        smp_call_function_all(test_incrementer_handler, &b, 0);
        smp_call_function_all(test_incrementer_handler, &c, 0);
        // if i can clobber a previous IPI, the interleaving might do it
        smp_call_function_single(1 % num_cpus, test_incrementer_handler, &a, 0);
        smp_call_function_single(2 % num_cpus, test_incrementer_handler, &b, 0);
        smp_call_function_single(3 % num_cpus, test_incrementer_handler, &c, 0);
        smp_call_function_single(4 % num_cpus, test_incrementer_handler, &a, 0);
        smp_call_function_single(5 % num_cpus, test_incrementer_handler, &b, 0);
        smp_call_function_single(6 % num_cpus, test_incrementer_handler, &c, 0);
        smp_call_function_all(test_incrementer_handler, &a, 0);
        smp_call_function_single(3 % num_cpus, test_incrementer_handler, &c, 0);
        smp_call_function_all(test_incrementer_handler, &b, 0);
        smp_call_function_single(1 % num_cpus, test_incrementer_handler, &a, 0);
        smp_call_function_all(test_incrementer_handler, &c, 0);
        smp_call_function_single(2 % num_cpus, test_incrementer_handler, &b, 0);
        // wait, so we're sure the others finish before printing.
        // without this, we could (and did) get 19,18,19, since the B_inc
        // handler didn't finish yet
        smp_call_function_self(test_null_handler, NULL, &waiter0);
        // need to grab all 5 handlers (max), since the code moves to the next free.
        smp_call_function_self(test_null_handler, NULL, &waiter1);
        smp_call_function_self(test_null_handler, NULL, &waiter2);
        smp_call_function_self(test_null_handler, NULL, &waiter3);
        smp_call_function_self(test_null_handler, NULL, &waiter4);
        smp_call_wait(waiter0);
        smp_call_wait(waiter1);
        smp_call_wait(waiter2);
        smp_call_wait(waiter3);
        smp_call_wait(waiter4);
        printk("A: %d, B: %d, C: %d (should be 19,19,19)\n",
               atomic_read(&a), atomic_read(&b), atomic_read(&c));
        printk("Attempting to deadlock by smp_calling with an outstanding wait:\n");
        smp_call_function_self(test_null_handler, NULL, &waiter0);
        printk("Sent one\n");
        smp_call_function_self(test_null_handler, NULL, &waiter1);
        printk("Sent two\n");
        smp_call_wait(waiter0);
        printk("Wait one\n");
        smp_call_wait(waiter1);
        printk("Wait two\n");
        printk("\tMade it through!\n");
        printk("Attempting to deadlock by smp_calling more than are available:\n");
        printk("\tShould see an Insufficient message and a kernel warning.\n");
        if (smp_call_function_self(test_null_handler, NULL, &waiter0))
                printk("\tInsufficient handlers to call function (0)\n");
        if (smp_call_function_self(test_null_handler, NULL, &waiter1))
                printk("\tInsufficient handlers to call function (1)\n");
        if (smp_call_function_self(test_null_handler, NULL, &waiter2))
                printk("\tInsufficient handlers to call function (2)\n");
        if (smp_call_function_self(test_null_handler, NULL, &waiter3))
                printk("\tInsufficient handlers to call function (3)\n");
        if (smp_call_function_self(test_null_handler, NULL, &waiter4))
                printk("\tInsufficient handlers to call function (4)\n");
        if (smp_call_function_self(test_null_handler, NULL, &waiter5))
                printk("\tInsufficient handlers to call function (5)\n");
        smp_call_wait(waiter0);
        smp_call_wait(waiter1);
        smp_call_wait(waiter2);
        smp_call_wait(waiter3);
        smp_call_wait(waiter4);
        smp_call_wait(waiter5);
        printk("\tMade it through!\n");

        printk("Done\n");
}

#ifdef __i386__
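/* Stress-tests the LAPIC delivery-status bit (via lapic_wait_to_send()) by
 * firing NUM_IPI IPIs at physical APIC id 7 and counting receptions with
 * test_incrementer_handler.  Assumes a core with APIC id 7 exists to receive
 * them. */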
void test_lapic_status_bit(void)
{
        register_interrupt_handler(interrupt_handlers, test_vector,
                                   test_incrementer_handler, &a);
        #define NUM_IPI 100000
        atomic_set(&a, 0);
        printk("IPIs received (should be 0): %d\n", atomic_read(&a));
        for (int i = 0; i < NUM_IPI; i++) {
                send_ipi(7, 0, test_vector);
                lapic_wait_to_send();
        }
        // need to wait a bit to let those IPIs get there
        udelay(5000000);
        printk("IPIs received (should be %d): %d\n", NUM_IPI, atomic_read(&a));
        // hopefully that handler never fires again.  leaving it registered for now.
}
#endif // __i386__

/******************************************************************************/
/*            Test Measurements: Couples with measurement.c                   */
// All user processes can R/W the UGDATA page
barrier_t*COUNT(1) bar = (barrier_t*COUNT(1))TC(UGDATA);
uint32_t*COUNT(1) job_to_run = (uint32_t*COUNT(1))TC(UGDATA + sizeof(barrier_t));
env_t* env_batch[64]; // Fairly arbitrary, just the max I plan to use.

/* Helpers for test_run_measurements */
static void wait_for_all_envs_to_die(void)
{
        while (atomic_read(&num_envs))
                cpu_relax();
}

// this never returns.
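// Spawns num_threads copies of roslib_measurements on cores [start_core,
// start_core + num_threads), publishes job_num through the shared UGDATA
// page, kicks each core with run_env_handler, and finally fakes a run of
// roslib_null to reenter the manager.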
static void sync_tests(int start_core, int num_threads, int job_num)
{
        assert(start_core + num_threads <= num_cpus);
        wait_for_all_envs_to_die();
        for (int i = start_core; i < start_core + num_threads; i++)
                env_batch[i] = kfs_proc_create(kfs_lookup_path("roslib_measurements"));
        lcr3(env_batch[start_core]->env_cr3);
        init_barrier(bar, num_threads);
        *job_to_run = job_num;
        for (int i = start_core; i < start_core + num_threads; i++)
                smp_call_function_single(i, run_env_handler, env_batch[i], 0);
        process_workqueue();
        // we want to fake a run, to reenter manager for the next case
        env_t *env = kfs_proc_create(kfs_lookup_path("roslib_null"));
        smp_call_function_single(0, run_env_handler, env, 0);
        process_workqueue();
        panic("whoops!\n");
}

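/* Like sync_tests(), but before faking the roslib_null run it spins, summing
 * the return values of process_generic_syscalls() for each env, until the sum
 * drops to -num_threads.  Also never returns. */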
static void async_tests(int start_core, int num_threads, int job_num)
{
        int count;

        assert(start_core + num_threads <= num_cpus);
        wait_for_all_envs_to_die();
        for (int i = start_core; i < start_core + num_threads; i++)
                env_batch[i] = kfs_proc_create(kfs_lookup_path("roslib_measurements"));
        printk("async_tests: checkpoint 0\n");
        lcr3(env_batch[start_core]->env_cr3);
        init_barrier(bar, num_threads);
        printk("async_tests: checkpoint 1\n");
        *job_to_run = job_num;
        for (int i = start_core; i < start_core + num_threads; i++)
                smp_call_function_single(i, run_env_handler, env_batch[i], 0);
        count = 0;
        while (count > -num_threads) {
                count = 0;
                for (int i = start_core; i < start_core + num_threads; i++) {
                        count += process_generic_syscalls(env_batch[i], 1);
                }
                cpu_relax();
        }
        // we want to fake a run, to reenter manager for the next case
        env_t *env = kfs_proc_create(kfs_lookup_path("roslib_null"));
        smp_call_function_single(0, run_env_handler, env, 0);
        process_workqueue();
        // this all never returns
        panic("whoops!\n");
}

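/* Dispatches one of the measurement jobs from measurement.c: cases 1-8 run
 * sync_tests() and cases 0 and 9-14 run async_tests(), with varying start
 * cores and thread counts.  The helpers never return, so reaching the panic()
 * after the switch means the setup failed. */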
void test_run_measurements(uint32_t job_num)
{
        switch (job_num) {
                case 0: // Nulls
                        printk("Case 0:\n");
                        async_tests(2, 1, job_num);  // start core 2, 1 core total
                        break;
                case 1: // Sync
                        printk("Case 1:\n");
                        sync_tests(2, 1, job_num);
                        break;
                case 2:
                        printk("Case 2:\n");
                        sync_tests(2, 2, job_num);
                        break;
                case 3:
                        printk("Case 3:\n");
                        sync_tests(0, 3, job_num);
                        break;
                case 4:
                        printk("Case 4:\n");
                        sync_tests(0, 4, job_num);
                        break;
                case 5:
                        printk("Case 5:\n");
                        sync_tests(0, 5, job_num);
                        break;
                case 6:
                        printk("Case 6:\n");
                        sync_tests(0, 6, job_num);
                        break;
                case 7:
                        printk("Case 7:\n");
                        sync_tests(0, 7, job_num);
                        break;
                case 8:
                        printk("Case 8:\n");
                        sync_tests(0, 8, job_num);
                        break;
                case 9:
                        printk("Case 9:\n");
                        async_tests(2, 1, job_num);
                        break;
                case 10:
                        printk("Case 10:\n");
                        async_tests(2, 2, job_num);
                        break;
                case 11:
                        printk("Case 11:\n");
                        async_tests(2, 3, job_num);
                        break;
                case 12:
                        printk("Case 12:\n");
                        async_tests(2, 4, job_num);
                        break;
                case 13:
                        printk("Case 13:\n");
                        async_tests(2, 5, job_num);
                        break;
                case 14:
                        printk("Case 14:\n");
                        async_tests(2, 6, job_num);
                        break;
                default:
                        warn("Invalid test number!!");
        }
        panic("Error in test setup!!");
}

/************************************************************/
/* ISR Handler Functions */

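/* Generic IRQ handler: prints the trap number (where the arch exposes it),
 * the core it ran on, and the trapframe address. */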
void test_hello_world_handler(trapframe_t *tf, void* data)
{
        int trapno;
        #if defined(__i386__)
        trapno = tf->tf_trapno;
        #elif defined(__sparc_v8__)
        trapno = (tf->tbr >> 4) & 0xFF;
        #else
        trapno = 0;
        #endif

        cprintf("Incoming IRQ, ISR: %d on core %d with tf at 0x%08x\n",
                trapno, core_id(), tf);
}

uint32_t print_info_lock = 0;

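/* Per-core handler for test_print_info(): prints the core id and, on x86, the
 * MTRR default type plus the eight variable-range MTRR base/mask MSR pairs
 * (0x200-0x20f), serialized by print_info_lock. */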
void test_print_info_handler(trapframe_t *tf, void* data)
{
        spin_lock_irqsave(&print_info_lock);
        cprintf("----------------------------\n");
        cprintf("This is Core %d\n", core_id());
#ifdef __i386__
        cprintf("MTRR_DEF_TYPE = 0x%08x\n", read_msr(IA32_MTRR_DEF_TYPE));
        cprintf("MTRR Phys0 Base = 0x%016llx, Mask = 0x%016llx\n",
                read_msr(0x200), read_msr(0x201));
        cprintf("MTRR Phys1 Base = 0x%016llx, Mask = 0x%016llx\n",
                read_msr(0x202), read_msr(0x203));
        cprintf("MTRR Phys2 Base = 0x%016llx, Mask = 0x%016llx\n",
                read_msr(0x204), read_msr(0x205));
        cprintf("MTRR Phys3 Base = 0x%016llx, Mask = 0x%016llx\n",
                read_msr(0x206), read_msr(0x207));
        cprintf("MTRR Phys4 Base = 0x%016llx, Mask = 0x%016llx\n",
                read_msr(0x208), read_msr(0x209));
        cprintf("MTRR Phys5 Base = 0x%016llx, Mask = 0x%016llx\n",
                read_msr(0x20a), read_msr(0x20b));
        cprintf("MTRR Phys6 Base = 0x%016llx, Mask = 0x%016llx\n",
                read_msr(0x20c), read_msr(0x20d));
        cprintf("MTRR Phys7 Base = 0x%016llx, Mask = 0x%016llx\n",
                read_msr(0x20e), read_msr(0x20f));
#endif // __i386__
        cprintf("----------------------------\n");
        spin_unlock_irqsave(&print_info_lock);
}

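/* Per-core handler for test_barrier(): each core prints and then waits on the
 * shared barrier repeatedly between prints, exercising reuse of the same
 * barrier across rounds. */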
void test_barrier_handler(trapframe_t *tf, void* data)
{
        cprintf("Round 1: Core %d\n", core_id());
        waiton_barrier(&test_cpu_array);
        waiton_barrier(&test_cpu_array);
        waiton_barrier(&test_cpu_array);
        waiton_barrier(&test_cpu_array);
        waiton_barrier(&test_cpu_array);
        waiton_barrier(&test_cpu_array);
        cprintf("Round 2: Core %d\n", core_id());
        waiton_barrier(&test_cpu_array);
        cprintf("Round 3: Core %d\n", core_id());
        // uncomment to see it fucked up
        //cprintf("Round 4: Core %d\n", core_id());
}

static void test_waiting_handler(trapframe_t *tf, atomic_t * data)
{
        {HANDLER_ATOMIC atomic_dec(data);}
}

#ifdef __i386__
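/* Spot-checks the time sources: a 10s delay on the PIT, then the TSC, then
 * the LAPIC timer, the last detected by an interrupt handler that clears the
 * 'waiting' atomic.  Assumes the LAPIC timer is wired to fire test_vector. */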
void test_pit(void)
{
        cprintf("Starting test for PIT now (10s)\n");
        udelay_pit(10000000);
        cprintf("End now\n");
        cprintf("Starting test for TSC (if stable) now (10s)\n");
        udelay(10000000);
        cprintf("End now\n");

        cprintf("Starting test for LAPIC (if stable) now (10s)\n");
        enable_irq();
        lapic_set_timer(10000000, FALSE);

        atomic_t waiting;
        atomic_init(&waiting, 1);
        register_interrupt_handler(interrupt_handlers, test_vector,
                                   test_waiting_handler, &waiting);
        while (atomic_read(&waiting))
                cpu_relax();
        cprintf("End now\n");
}
#endif // __i386__