Merge branch 'sparc-dev' of ssh://waterman@scm.millennium.berkeley.edu/project/cs...
[akaros.git] / kern / src / testing.c
1
2 #ifdef __SHARC__
3 #pragma nosharc
4 #endif
5
6 #include <arch/mmu.h>
7 #include <arch/arch.h>
8 #include <arch/bitmask.h>
9 #include <smp.h>
10
11 #include <ros/memlayout.h>
12 #include <ros/common.h>
13
14 #include <atomic.h>
15 #include <stdio.h>
16 #include <assert.h>
17 #include <string.h>
18 #include <testing.h>
19 #include <trap.h>
20 #include <arch/trap.h>
21 #include <process.h>
22 #include <syscall.h>
23 #include <timing.h>
24 #include <kfs.h>
25 #include <multiboot.h>
26 #include <pmap.h>
27 #include <page_alloc.h>
28 #include <pmap.h>
29 #include <slab.h>
30 #include <kmalloc.h>
31
32 #define l1 (available_caches.l1)
33 #define l2 (available_caches.l2)
34 #define l3 (available_caches.l3)
35
36 #ifdef __i386__
37
/* Exercises the x86 IPI send paths: broadcast, all-but-self, self, and
 * directed sends in both physical and logical destination mode.  Each IPI
 * raises vector I_TESTING, whose handler prints a hello-world line on the
 * receiving core, so success is judged by watching the console. */
void test_ipi_sending(void)
{
	/* The arch's shared interrupt dispatch table (Ivy annotations: a
	 * read-only, fixed-size array of handlers). */
	extern handler_t (CT(NUM_INTERRUPT_HANDLERS) RO interrupt_handlers)[];
	int8_t state = 0;

	register_interrupt_handler(interrupt_handlers, I_TESTING,
	                           test_hello_world_handler, NULL);
	/* IRQs must be on for this core to take its own self-IPI. */
	enable_irqsave(&state);
	cprintf("\nCORE 0 sending broadcast\n");
	send_broadcast_ipi(I_TESTING);
	udelay(3000000);	/* ~3s between cases keeps the output readable */
	cprintf("\nCORE 0 sending all others\n");
	send_all_others_ipi(I_TESTING);
	udelay(3000000);
	cprintf("\nCORE 0 sending self\n");
	send_self_ipi(I_TESTING);
	udelay(3000000);
	/* Directed sends: send_ipi(dest, dest_mode, vector); mode 0 = physical */
	cprintf("\nCORE 0 sending ipi to physical 1\n");
	send_ipi(0x01, 0, I_TESTING);
	udelay(3000000);
	cprintf("\nCORE 0 sending ipi to physical 2\n");
	send_ipi(0x02, 0, I_TESTING);
	udelay(3000000);
	cprintf("\nCORE 0 sending ipi to physical 3\n");
	send_ipi(0x03, 0, I_TESTING);
	udelay(3000000);
	/* Physical 15 likely has no core behind it -- should be silently eaten
	 * or harmless; NOTE(review): confirm expected behavior for absent APICs */
	cprintf("\nCORE 0 sending ipi to physical 15\n");
	send_ipi(0x0f, 0, I_TESTING);
	udelay(3000000);
	/* mode 1 = logical destination mode */
	cprintf("\nCORE 0 sending ipi to logical 2\n");
	send_ipi(0x02, 1, I_TESTING);
	udelay(3000000);
	cprintf("\nCORE 0 sending ipi to logical 1\n");
	send_ipi(0x01, 1, I_TESTING);
	udelay(3000000);
	cprintf("\nDone!\n");
	disable_irqsave(&state);
}
76
// Note this never returns and will muck with any other timer work
/* Manual bring-up test for legacy PIC interrupt delivery: hooks vector 0x20
 * (IRQ 0, the PIT), starts the PIT, unmasks IRQ 0 on the PIC, dumps both
 * PIC masks and this core's LAPIC LINT0 entry, then spins forever taking
 * timer ticks. */
void test_pic_reception(void)
{
	register_interrupt_handler(interrupt_handlers, 0x20, test_hello_world_handler, NULL);
	pit_set_timer(100,TIMER_RATEGEN); // totally arbitrary time
	pic_unmask_irq(0);
	cprintf("PIC1 Mask = 0x%04x\n", inb(PIC1_DATA));
	cprintf("PIC2 Mask = 0x%04x\n", inb(PIC2_DATA));
	/* presumably the PIC's output is wired to this core's LINT0 -- confirm
	 * against the platform's interrupt routing */
	unmask_lapic_lvt(LAPIC_LVT_LINT0);
	cprintf("Core %d's LINT0: 0x%08x\n", core_id(), read_mmreg32(LAPIC_LVT_LINT0));
	enable_irq();
	while(1);	/* spin and take ticks; never returns */
}
90
/* Routes the PIT (IRQ 0, vector 0x20) through the IOAPIC to core 3, lets it
 * tick for a few seconds (hello-world handler prints on core 3), then
 * unroutes the IRQ and pauses before returning so any in-flight interrupts
 * drain. */
void test_ioapic_pit_reroute(void)
{
	register_interrupt_handler(interrupt_handlers, 0x20, test_hello_world_handler, NULL);
	ioapic_route_irq(0, 3);	/* IRQ 0 -> core 3 */

	cprintf("Starting pit on core 3....\n");
	udelay(3000000);
	pit_set_timer(0xFFFE,TIMER_RATEGEN); // totally arbitrary time

	udelay(3000000);
	ioapic_unroute_irq(0);
	udelay(300000);
	cprintf("Masked pit. Waiting before return...\n");
	udelay(3000000);
}
106
107 #endif // __i386__
108
109
/* Broadcasts test_print_info_handler to all cores (no-wait), making each
 * dump its id and, on x86, its MTRR state. */
void test_print_info(void)
{
	cprintf("\nCORE 0 asking all cores to print info:\n");
	smp_call_function_all(test_print_info_handler, NULL, 0);
	cprintf("\nDone!\n");
}
116
/* Page-coloring allocator walkthrough -- currently disabled.
 *
 * The original body (a long tour of colored_page_alloc()/page_init() that
 * drained every free list per cache level and destructively reinitialized
 * the page allocator) was entirely commented out, leaving an empty stub
 * wrapped around ~80 lines of dead code.  The dead code is removed here;
 * recover it from version control if this test is ever resurrected against
 * the current page-allocation API. */
void test_page_coloring(void)
{
	/* TODO: reinstate a coloring test against the current allocator. */
}
199
200 void test_color_alloc() {
201         size_t checkpoint = 0;
202         uint8_t* colors_map = kmalloc(BYTES_FOR_BITMASK(llc_cache->num_colors), 0);
203         cache_color_alloc(l2, colors_map);
204         cache_color_alloc(l3, colors_map);
205         cache_color_alloc(l3, colors_map);
206         cache_color_alloc(l2, colors_map);
207         cache_color_free(llc_cache, colors_map);
208         cache_color_free(llc_cache, colors_map);
209         cache_color_free(llc_cache, colors_map);
210         cache_color_free(llc_cache, colors_map);
211         cache_color_free(llc_cache, colors_map);
212         cache_color_free(llc_cache, colors_map);
213         cache_color_free(llc_cache, colors_map);
214         cache_color_free(llc_cache, colors_map);
215         cache_color_free(llc_cache, colors_map);
216         cache_color_free(llc_cache, colors_map);
217         cache_color_free(llc_cache, colors_map);
218         cache_color_free(llc_cache, colors_map);
219         cache_color_free(llc_cache, colors_map);
220         cache_color_free(llc_cache, colors_map);
221         cache_color_free(llc_cache, colors_map);
222         cache_color_free(llc_cache, colors_map);
223         cache_color_free(l2, colors_map);
224         cache_color_free(llc_cache, colors_map);
225         cache_color_free(llc_cache, colors_map);
226
227 print_cache_colors:
228         printk("L1 free colors, tot colors: %d\n", l1->num_colors);
229         PRINT_BITMASK(l1->free_colors_map, l1->num_colors);
230         printk("L2 free colors, tot colors: %d\n", l2->num_colors);
231         PRINT_BITMASK(l2->free_colors_map, l2->num_colors);
232         printk("L3 free colors, tot colors: %d\n", l3->num_colors);
233         PRINT_BITMASK(l3->free_colors_map, l3->num_colors);
234         printk("Process allocated colors\n");
235         PRINT_BITMASK(colors_map, llc_cache->num_colors);
236         printk("test_color_alloc() complete!\n");
237 }
238
/* Barrier shared by test_barrier() and test_barrier_handler(). */
barrier_t test_cpu_array;

/* Initializes test_cpu_array for num_cpus participants, then broadcasts
 * test_barrier_handler (no-wait); each core prints and crosses the barrier
 * in several rounds (see the handler for the round structure). */
void test_barrier(void)
{
	cprintf("Core 0 initializing barrier\n");
	init_barrier(&test_cpu_array, num_cpus);
	cprintf("Core 0 asking all cores to print ids, barrier, rinse, repeat\n");
	smp_call_function_all(test_barrier_handler, NULL, 0);
}
248
/* Validates the enable_irqsave()/disable_irqsave() nesting protocol against
 * the live interrupt flag.  Each scenario seeds 'state' to 0, forces IRQs
 * to a known setting, performs a sequence of irqsave calls, and asserts the
 * resulting IRQ state after each step.  The printd lines show the raw FL_IF
 * bit when debug printing is compiled in (hence the "0 / 200" expectations).
 * NOTE(review): read_eflags()/FL_IF are x86-isms but this function is not
 * inside the __i386__ guard -- confirm other arches provide equivalents. */
void test_interrupts_irqsave(void)
{
	int8_t state = 0;
	/* Scenario: ints off; enable twice, disable twice.  The outermost
	 * disable should restore the original (off) state. */
	printd("Testing Nesting Enabling first, turning ints off:\n");
	disable_irq();
	printd("Interrupts are: %x\n", read_eflags() & FL_IF);
	assert(!irq_is_enabled());
	printd("Enabling IRQSave\n");
	enable_irqsave(&state);
	printd("Interrupts are: %x\n", read_eflags() & FL_IF);
	assert(irq_is_enabled());
	printd("Enabling IRQSave Again\n");
	enable_irqsave(&state);
	printd("Interrupts are: %x\n", read_eflags() & FL_IF);
	assert(irq_is_enabled());
	printd("Disabling IRQSave Once\n");
	disable_irqsave(&state);
	printd("Interrupts are: %x\n", read_eflags() & FL_IF);
	assert(irq_is_enabled());
	printd("Disabling IRQSave Again\n");
	disable_irqsave(&state);
	printd("Interrupts are: %x\n", read_eflags() & FL_IF);
	assert(!irq_is_enabled());
	printd("Done.  Should have been 0, 200, 200, 200, 0\n");

	/* Scenario: ints on; disable twice, enable twice.  The outermost
	 * enable should restore the original (on) state. */
	printd("Testing Nesting Disabling first, turning ints on:\n");
	state = 0;
	enable_irq();
	printd("Interrupts are: %x\n", read_eflags() & FL_IF);
	assert(irq_is_enabled());
	printd("Disabling IRQSave Once\n");
	disable_irqsave(&state);
	printd("Interrupts are: %x\n", read_eflags() & FL_IF);
	assert(!irq_is_enabled());
	printd("Disabling IRQSave Again\n");
	disable_irqsave(&state);
	printd("Interrupts are: %x\n", read_eflags() & FL_IF);
	assert(!irq_is_enabled());
	printd("Enabling IRQSave Once\n");
	enable_irqsave(&state);
	printd("Interrupts are: %x\n", read_eflags() & FL_IF);
	assert(!irq_is_enabled());
	printd("Enabling IRQSave Again\n");
	enable_irqsave(&state);
	printd("Interrupts are: %x\n", read_eflags() & FL_IF);
	assert(irq_is_enabled());
	printd("Done.  Should have been 200, 0, 0, 0, 200 \n");

	/* Scenario: ints off; one enable then its matching disable. */
	state = 0;
	disable_irq();
	printd("Ints are off, enabling then disabling.\n");
	enable_irqsave(&state);
	printd("Interrupts are: %x\n", read_eflags() & FL_IF);
	assert(irq_is_enabled());
	disable_irqsave(&state);
	printd("Interrupts are: %x\n", read_eflags() & FL_IF);
	assert(!irq_is_enabled());
	printd("Done.  Should have been 200, 0\n");

	/* Scenario: ints on; enable/disable pair must leave ints on. */
	state = 0;
	enable_irq();
	printd("Ints are on, enabling then disabling.\n");
	enable_irqsave(&state);
	printd("Interrupts are: %x\n", read_eflags() & FL_IF);
	assert(irq_is_enabled());
	disable_irqsave(&state);
	printd("Interrupts are: %x\n", read_eflags() & FL_IF);
	assert(irq_is_enabled());
	printd("Done.  Should have been 200, 200\n");

	/* Scenario: ints off; disable/enable pair must leave ints off. */
	state = 0;
	disable_irq();
	printd("Ints are off, disabling then enabling.\n");
	disable_irqsave(&state);
	printd("Interrupts are: %x\n", read_eflags() & FL_IF);
	assert(!irq_is_enabled());
	enable_irqsave(&state);
	printd("Interrupts are: %x\n", read_eflags() & FL_IF);
	assert(!irq_is_enabled());
	printd("Done.  Should have been 0, 0\n");

	/* Scenario: ints on; disable then enable restores on. */
	state = 0;
	enable_irq();
	printd("Ints are on, disabling then enabling.\n");
	disable_irqsave(&state);
	printd("Interrupts are: %x\n", read_eflags() & FL_IF);
	assert(!irq_is_enabled());
	enable_irqsave(&state);
	printd("Interrupts are: %x\n", read_eflags() & FL_IF);
	assert(irq_is_enabled());
	printd("Done.  Should have been 0, 200\n");

	disable_irq();
	cprintf("Passed enable_irqsave tests\n");
}
344
345 void test_bitmasks(void)
346 {
347 #define masksize 67
348         DECL_BITMASK(mask, masksize);
349         printk("size of mask %d\n", sizeof(mask));
350         CLR_BITMASK(mask, masksize);
351         PRINT_BITMASK(mask, masksize);
352         printk("cleared\n");
353         SET_BITMASK_BIT(mask, 0);
354         SET_BITMASK_BIT(mask, 11);
355         SET_BITMASK_BIT(mask, 17);
356         SET_BITMASK_BIT(mask, masksize-1);
357         printk("bits set\n");
358         PRINT_BITMASK(mask, masksize);
359         DECL_BITMASK(mask2, masksize);
360         COPY_BITMASK(mask2, mask, masksize);
361         printk("copy of original mask, should be the same as the prev\n");
362         PRINT_BITMASK(mask2, masksize);
363         CLR_BITMASK_BIT(mask, 11);
364         printk("11 cleared\n");
365         PRINT_BITMASK(mask, masksize);
366         printk("bit 17 is %d (should be 1)\n", GET_BITMASK_BIT(mask, 17));
367         printk("bit 11 is %d (should be 0)\n", GET_BITMASK_BIT(mask, 11));
368         FILL_BITMASK(mask, masksize);
369         PRINT_BITMASK(mask, masksize);
370         printk("should be all 1's, except for a few at the end\n");
371         printk("Is Clear?: %d (should be 0)\n", BITMASK_IS_CLEAR(mask,masksize));
372         CLR_BITMASK(mask, masksize);
373         PRINT_BITMASK(mask, masksize);
374         printk("Is Clear?: %d (should be 1)\n", BITMASK_IS_CLEAR(mask,masksize));
375         printk("should be cleared\n");
376 }
377
/* Checklist shared between test_checklists() (which commits it) and the
 * per-core handler below (which checks in). */
checklist_t *RO the_global_list;

/* Per-core IPI handler: after a ~1s delay, this core downs (checks itself
 * off) the global checklist. */
void test_checklist_handler(trapframe_t *tf, void* data)
{
	udelay(1000000);
	cprintf("down_checklist(%x,%d)\n", the_global_list, core_id());
	down_checklist(the_global_list);
}
386
/* Exercises checklists: builds one sized for MAX_NUM_CPUS, pokes its mask
 * directly, then commits a mask covering the first num_cpus cores, sends
 * test_checklist_handler to every core (each downs the list ~1s later),
 * and waits for the list to drain. */
void test_checklists(void)
{
	INIT_CHECKLIST(a_list, MAX_NUM_CPUS);
	the_global_list = &a_list;
	printk("Checklist Build, mask size: %d\n", sizeof(a_list.mask.bits));
	printk("mask\n");
	PRINT_BITMASK(a_list.mask.bits, a_list.mask.size);
	SET_BITMASK_BIT(a_list.mask.bits, 11);
	printk("Set bit 11\n");
	PRINT_BITMASK(a_list.mask.bits, a_list.mask.size);

	/* Start over with a clean list mask, then commit a real one. */
	CLR_BITMASK(a_list.mask.bits, a_list.mask.size);
	INIT_CHECKLIST_MASK(a_mask, MAX_NUM_CPUS);
	FILL_BITMASK(a_mask.bits, num_cpus);
	//CLR_BITMASK_BIT(a_mask.bits, core_id());
	//SET_BITMASK_BIT(a_mask.bits, 1);
	//printk("New mask (1, 17, 25):\n");
	printk("Created new mask, filled up to num_cpus\n");
	PRINT_BITMASK(a_mask.bits, a_mask.size);
	printk("committing new mask\n");
	commit_checklist_wait(&a_list, &a_mask);
	printk("Old mask (copied onto):\n");
	PRINT_BITMASK(a_list.mask.bits, a_list.mask.size);
	//smp_call_function_single(1, test_checklist_handler, 0, 0);

	smp_call_function_all(test_checklist_handler, NULL, 0);

	printk("Waiting on checklist\n");
	waiton_checklist(&a_list);
	printk("Done Waiting!\n");

}
419
/* Shared counters used by the smp_call / IPI stress tests below. */
atomic_t a, b, c;

/* IPI/smp_call handler: atomically increments the counter passed as data.
 * Under Ivy the parameter is typed; otherwise it is the generic void*. */
#ifdef __IVY__
void test_incrementer_handler(trapframe_t *tf, atomic_t *data)
#else
void test_incrementer_handler(trapframe_t *tf, void *data)
#endif
{
	assert(data);
	atomic_inc(data);
}
431
/* Do-nothing handler, used to occupy/flush smp_call wrapper slots. */
void test_null_handler(trapframe_t *tf, void* data)
{
	asm volatile("nop");
}
436
/* End-to-end exercise of the smp_call_function_* API: self/all/single
 * variants with and without wait handles, a no-wait stress run that must
 * leave counters a/b/c at 19 each (i.e. no IPI-functions dropped or
 * clobbered), and finally the wrapper-exhaustion paths, which should warn
 * rather than deadlock. */
void test_smp_call_functions(void)
{
	int i;
	atomic_init(&a, 0);
	atomic_init(&b, 0);
	atomic_init(&c, 0);
	/* Up to 6 outstanding wait handles; the API hands back a wrapper via
	 * the last parameter when it is non-zero. */
	handler_wrapper_t *waiter0 = 0, *waiter1 = 0, *waiter2 = 0, *waiter3 = 0,
	                  *waiter4 = 0, *waiter5 = 0;
	uint8_t me = core_id();
	printk("\nCore %d: SMP Call Self (nowait):\n", me);
	printk("---------------------\n");
	smp_call_function_self(test_hello_world_handler, NULL, 0);
	printk("\nCore %d: SMP Call Self (wait):\n", me);
	printk("---------------------\n");
	smp_call_function_self(test_hello_world_handler, NULL, &waiter0);
	smp_call_wait(waiter0);
	printk("\nCore %d: SMP Call All (nowait):\n", me);
	printk("---------------------\n");
	smp_call_function_all(test_hello_world_handler, NULL, 0);
	printk("\nCore %d: SMP Call All (wait):\n", me);
	printk("---------------------\n");
	smp_call_function_all(test_hello_world_handler, NULL, &waiter0);
	smp_call_wait(waiter0);
	printk("\nCore %d: SMP Call All-Else Individually, in order (nowait):\n", me);
	printk("---------------------\n");
	for(i = 1; i < num_cpus; i++)
		smp_call_function_single(i, test_hello_world_handler, NULL, 0);
	printk("\nCore %d: SMP Call Self (wait):\n", me);
	printk("---------------------\n");
	smp_call_function_self(test_hello_world_handler, NULL, &waiter0);
	smp_call_wait(waiter0);
	printk("\nCore %d: SMP Call All-Else Individually, in order (wait):\n", me);
	printk("---------------------\n");
	for(i = 1; i < num_cpus; i++)
	{
		smp_call_function_single(i, test_hello_world_handler, NULL, &waiter0);
		smp_call_wait(waiter0);
	}
	printk("\nTesting to see if any IPI-functions are dropped when not waiting:\n");
	printk("A: %d, B: %d, C: %d (should be 0,0,0)\n", atomic_read(&a), atomic_read(&b), atomic_read(&c));
	/* No-wait stress: a and b and c each get num_cpus*3 + 4 increments
	 * (all-calls plus singles), interleaved to try to clobber in-flight
	 * IPIs.  With 5 cpus that's the 19,19,19 below. */
	smp_call_function_all(test_incrementer_handler, &a, 0);
	smp_call_function_all(test_incrementer_handler, &b, 0);
	smp_call_function_all(test_incrementer_handler, &c, 0);
	// if i can clobber a previous IPI, the interleaving might do it
	smp_call_function_single(1 % num_cpus, test_incrementer_handler, &a, 0);
	smp_call_function_single(2 % num_cpus, test_incrementer_handler, &b, 0);
	smp_call_function_single(3 % num_cpus, test_incrementer_handler, &c, 0);
	smp_call_function_single(4 % num_cpus, test_incrementer_handler, &a, 0);
	smp_call_function_single(5 % num_cpus, test_incrementer_handler, &b, 0);
	smp_call_function_single(6 % num_cpus, test_incrementer_handler, &c, 0);
	smp_call_function_all(test_incrementer_handler, &a, 0);
	smp_call_function_single(3 % num_cpus, test_incrementer_handler, &c, 0);
	smp_call_function_all(test_incrementer_handler, &b, 0);
	smp_call_function_single(1 % num_cpus, test_incrementer_handler, &a, 0);
	smp_call_function_all(test_incrementer_handler, &c, 0);
	smp_call_function_single(2 % num_cpus, test_incrementer_handler, &b, 0);
	// wait, so we're sure the others finish before printing.
	// without this, we could (and did) get 19,18,19, since the B_inc
	// handler didn't finish yet
	smp_call_function_self(test_null_handler, NULL, &waiter0);
	// need to grab all 5 handlers (max), since the code moves to the next free.
	smp_call_function_self(test_null_handler, NULL, &waiter1);
	smp_call_function_self(test_null_handler, NULL, &waiter2);
	smp_call_function_self(test_null_handler, NULL, &waiter3);
	smp_call_function_self(test_null_handler, NULL, &waiter4);
	smp_call_wait(waiter0);
	smp_call_wait(waiter1);
	smp_call_wait(waiter2);
	smp_call_wait(waiter3);
	smp_call_wait(waiter4);
	printk("A: %d, B: %d, C: %d (should be 19,19,19)\n", atomic_read(&a), atomic_read(&b), atomic_read(&c));
	printk("Attempting to deadlock by smp_calling with an outstanding wait:\n");
	smp_call_function_self(test_null_handler, NULL, &waiter0);
	printk("Sent one\n");
	smp_call_function_self(test_null_handler, NULL, &waiter1);
	printk("Sent two\n");
	smp_call_wait(waiter0);
	printk("Wait one\n");
	smp_call_wait(waiter1);
	printk("Wait two\n");
	printk("\tMade it through!\n");
	/* Requesting more waited-on calls than there are wrappers: the extras
	 * should fail (non-zero return) rather than hang. */
	printk("Attempting to deadlock by smp_calling more than are available:\n");
	printk("\tShould see an Insufficient message and a kernel warning.\n");
	if (smp_call_function_self(test_null_handler, NULL, &waiter0))
		printk("\tInsufficient handlers to call function (0)\n");
	if (smp_call_function_self(test_null_handler, NULL, &waiter1))
		printk("\tInsufficient handlers to call function (1)\n");
	if (smp_call_function_self(test_null_handler, NULL, &waiter2))
		printk("\tInsufficient handlers to call function (2)\n");
	if (smp_call_function_self(test_null_handler, NULL, &waiter3))
		printk("\tInsufficient handlers to call function (3)\n");
	if (smp_call_function_self(test_null_handler, NULL, &waiter4))
		printk("\tInsufficient handlers to call function (4)\n");
	if (smp_call_function_self(test_null_handler, NULL, &waiter5))
		printk("\tInsufficient handlers to call function (5)\n");
	smp_call_wait(waiter0);
	smp_call_wait(waiter1);
	smp_call_wait(waiter2);
	smp_call_wait(waiter3);
	smp_call_wait(waiter4);
	smp_call_wait(waiter5);
	printk("\tMade it through!\n");

	printk("Done\n");
}
542
543 #ifdef __i386__
544 void test_lapic_status_bit(void)
545 {
546         register_interrupt_handler(interrupt_handlers, I_TESTING,
547                                    test_incrementer_handler, &a);
548         #define NUM_IPI 100000
549         atomic_set(&a,0);
550         printk("IPIs received (should be 0): %d\n", a);
551         for(int i = 0; i < NUM_IPI; i++) {
552                 send_ipi(7, 0, I_TESTING);
553                 lapic_wait_to_send();
554         }
555         // need to wait a bit to let those IPIs get there
556         udelay(5000000);
557         printk("IPIs received (should be %d): %d\n", a, NUM_IPI);
558         // hopefully that handler never fires again.  leaving it registered for now.
559 }
560 #endif // __i386__
561
562 /******************************************************************************/
563 /*            Test Measurements: Couples with measurement.c                   */
// All user processes can R/W the UGDATA page
/* Rendezvous state shared with the userspace measurement program: a barrier
 * and a job selector, both placed in the UGDATA page. */
barrier_t*COUNT(1) bar = (barrier_t*COUNT(1))TC(UGDATA);
uint32_t*COUNT(1) job_to_run = (uint32_t*COUNT(1))TC(UGDATA + sizeof(barrier_t));
env_t* env_batch[64]; // Fairly arbitrary, just the max I plan to use.
568
569 /* Helpers for test_run_measurements */
570 static void wait_for_all_envs_to_die(void)
571 {
572         while (atomic_read(&num_envs))
573                 cpu_relax();
574 }
575
// this never returns.
/* Launches num_threads copies of roslib_measurements on cores
 * [start_core, start_core + num_threads), synchronized via the UGDATA
 * barrier, with *job_to_run selecting the measurement case.  Blocks in
 * process_workqueue(), then fakes one roslib_null run on core 0 to
 * re-enter the manager for the next case, and finally panics -- treat
 * as noreturn. */
static void sync_tests(int start_core, int num_threads, int job_num)
{
	assert(start_core + num_threads <= num_cpus);
	wait_for_all_envs_to_die();
	for (int i = start_core; i < start_core + num_threads; i++)
		env_batch[i] = kfs_proc_create(kfs_lookup_path("roslib_measurements"));
	/* presumably switches address spaces so the UGDATA writes below land
	 * in the new envs' shared page -- TODO confirm */
	lcr3(env_batch[start_core]->env_cr3);
	init_barrier(bar, num_threads);
	*job_to_run = job_num;
	for (int i = start_core; i < start_core + num_threads; i++)
		smp_call_function_single(i, run_env_handler, env_batch[i], 0);
	process_workqueue();
	// we want to fake a run, to reenter manager for the next case
	env_t *env = kfs_proc_create(kfs_lookup_path("roslib_null"));
	smp_call_function_single(0, run_env_handler, env, 0);
	process_workqueue();
	panic("whoops!\n");
}
595
/* Async variant of sync_tests(): launches num_threads roslib_measurements
 * envs on cores [start_core, start_core + num_threads) and services their
 * generic syscalls from this core until all of them finish, then fakes a
 * roslib_null run to re-enter the manager.  Never returns. */
static void async_tests(int start_core, int num_threads, int job_num)
{
	int count;

	assert(start_core + num_threads <= num_cpus);
	wait_for_all_envs_to_die();
	for (int i = start_core; i < start_core + num_threads; i++)
		env_batch[i] = kfs_proc_create(kfs_lookup_path("roslib_measurements"));
	printk("async_tests: checkpoint 0\n");
	/* presumably switches address spaces so the UGDATA writes below land
	 * in the new envs' shared page -- TODO confirm */
	lcr3(env_batch[start_core]->env_cr3);
	init_barrier(bar, num_threads);
	printk("async_tests: checkpoint 1\n");
	*job_to_run = job_num;
	for (int i = start_core; i < start_core + num_threads; i++)
		smp_call_function_single(i, run_env_handler, env_batch[i], 0);
	/* Service one syscall per env per pass until the summed returns reach
	 * -num_threads -- presumably -1 per finished env; confirm against
	 * process_generic_syscalls(). */
	count = 0;
	while (count > -num_threads) {
		count = 0;
		for (int i = start_core; i < start_core + num_threads; i++) {
			count += process_generic_syscalls(env_batch[i], 1);
		}
		cpu_relax();
	}
	// we want to fake a run, to reenter manager for the next case
	env_t *env = kfs_proc_create(kfs_lookup_path("roslib_null"));
	smp_call_function_single(0, run_env_handler, env, 0);
	process_workqueue();
	// this all never returns
	panic("whoops!\n");
}
626
627 void test_run_measurements(uint32_t job_num)
628 {
629         switch (job_num) {
630                 case 0: // Nulls
631                         printk("Case 0:\n");
632                         async_tests(2, 1, job_num);  // start core 2, 1 core total
633                         break;
634                 case 1: // Sync
635                         printk("Case 1:\n");
636                         sync_tests(2, 1, job_num);
637                         break;
638                 case 2:
639                         printk("Case 2:\n");
640                         sync_tests(2, 2, job_num);
641                         break;
642                 case 3:
643                         printk("Case 3:\n");
644                         sync_tests(0, 3, job_num);
645                         break;
646                 case 4:
647                         printk("Case 4:\n");
648                         sync_tests(0, 4, job_num);
649                         break;
650                 case 5:
651                         printk("Case 5:\n");
652                         sync_tests(0, 5, job_num);
653                         break;
654                 case 6:
655                         printk("Case 6:\n");
656                         sync_tests(0, 6, job_num);
657                         break;
658                 case 7:
659                         printk("Case 7:\n");
660                         sync_tests(0, 7, job_num);
661                         break;
662                 case 8:
663                         printk("Case 8:\n");
664                         sync_tests(0, 8, job_num);
665                         break;
666                 case 9:
667                         printk("Case 9:\n");
668                         async_tests(2, 1, job_num);
669                         break;
670                 case 10:
671                         printk("Case 10:\n");
672                         async_tests(2, 2, job_num);
673                         break;
674                 case 11:
675                         printk("Case 11:\n");
676                         async_tests(2, 3, job_num);
677                         break;
678                 case 12:
679                         printk("Case 12:\n");
680                         async_tests(2, 4, job_num);
681                         break;
682                 case 13:
683                         printk("Case 13:\n");
684                         async_tests(2, 5, job_num);
685                         break;
686                 case 14:
687                         printk("Case 14:\n");
688                         async_tests(2, 6, job_num);
689                         break;
690                 default:
691                         warn("Invalid test number!!");
692         }
693         panic("Error in test setup!!");
694 }
695
696 /************************************************************/
697 /* ISR Handler Functions */
698
/* Generic test IRQ handler: prints the trap/vector number, the receiving
 * core, and the trapframe address.  Trap-number extraction is per-arch. */
void test_hello_world_handler(trapframe_t *tf, void* data)
{
	int trapno;
	#if defined(__i386__)
	trapno = tf->tf_trapno;
	#elif defined(__sparc_v8__)
	trapno = (tf->tbr >> 4) & 0xFF;	/* tt field of the trap base register */
	#else
	trapno = 0;	/* unknown arch: no trap number available */
	#endif

	cprintf("Incoming IRQ, ISR: %d on core %d with tf at 0x%08x\n",
	        trapno, core_id(), tf);
}
713
/* Serializes the per-core info dumps below so output doesn't interleave. */
uint32_t print_info_lock = 0;

/* Per-core handler for test_print_info(): under the lock, prints this
 * core's id and, on x86, the MTRR default-type MSR plus the eight
 * variable-range base/mask MSR pairs (0x200-0x20f). */
void test_print_info_handler(trapframe_t *tf, void* data)
{
	spin_lock_irqsave(&print_info_lock);
	cprintf("----------------------------\n");
	cprintf("This is Core %d\n", core_id());
#ifdef __i386__
	cprintf("MTRR_DEF_TYPE = 0x%08x\n", read_msr(IA32_MTRR_DEF_TYPE));
	cprintf("MTRR Phys0 Base = 0x%016llx, Mask = 0x%016llx\n",
	        read_msr(0x200), read_msr(0x201));
	cprintf("MTRR Phys1 Base = 0x%016llx, Mask = 0x%016llx\n",
	        read_msr(0x202), read_msr(0x203));
	cprintf("MTRR Phys2 Base = 0x%016llx, Mask = 0x%016llx\n",
	        read_msr(0x204), read_msr(0x205));
	cprintf("MTRR Phys3 Base = 0x%016llx, Mask = 0x%016llx\n",
	        read_msr(0x206), read_msr(0x207));
	cprintf("MTRR Phys4 Base = 0x%016llx, Mask = 0x%016llx\n",
	        read_msr(0x208), read_msr(0x209));
	cprintf("MTRR Phys5 Base = 0x%016llx, Mask = 0x%016llx\n",
	        read_msr(0x20a), read_msr(0x20b));
	cprintf("MTRR Phys6 Base = 0x%016llx, Mask = 0x%016llx\n",
	        read_msr(0x20c), read_msr(0x20d));
	cprintf("MTRR Phys7 Base = 0x%016llx, Mask = 0x%016llx\n",
	        read_msr(0x20e), read_msr(0x20f));
#endif // __i386__
	cprintf("----------------------------\n");
	spin_unlock_irqsave(&print_info_lock);
}
743
744 void test_barrier_handler(trapframe_t *tf, void* data)
745 {
746         cprintf("Round 1: Core %d\n", core_id());
747         waiton_barrier(&test_cpu_array);
748         waiton_barrier(&test_cpu_array);
749         waiton_barrier(&test_cpu_array);
750         waiton_barrier(&test_cpu_array);
751         waiton_barrier(&test_cpu_array);
752         waiton_barrier(&test_cpu_array);
753         cprintf("Round 2: Core %d\n", core_id());
754         waiton_barrier(&test_cpu_array);
755         cprintf("Round 3: Core %d\n", core_id());
756         // uncomment to see it fucked up
757         //cprintf("Round 4: Core %d\n", core_id());
758 }
759
#ifdef __IVY__
static void test_waiting_handler(trapframe_t *tf, atomic_t *data)
#else
static void test_waiting_handler(trapframe_t *tf, void *data)
#endif
{
	// Release a poller (see test_pit): 'data' is the atomic counter the
	// caller registered with this handler and is spinning on.
	atomic_dec(data);
}
768
#ifdef __i386__
// Eyeball-test the x86 time sources back to back: a PIT busy-wait, a
// TSC-based udelay, and a LAPIC timer delivered as the I_TESTING vector.
// Each leg should take ~10 seconds of wall-clock time.
void test_pit(void)
{
	cprintf("Starting test for PIT now (10s)\n");
	udelay_pit(10000000);
	cprintf("End now\n");
	cprintf("Starting test for TSC (if stable) now (10s)\n");
	udelay(10000000);
	cprintf("End now\n");

	cprintf("Starting test for LAPIC (if stable) now (10s)\n");
	enable_irq();
	// NOTE(review): FALSE presumably selects one-shot (non-periodic) mode —
	// confirm against lapic_set_timer's declaration.
	lapic_set_timer(10000000, FALSE);

	// When the timer fires, test_waiting_handler decrements 'waiting',
	// which breaks the spin loop below.
	atomic_t waiting;
	atomic_init(&waiting, 1);
	register_interrupt_handler(interrupt_handlers, I_TESTING,
	                           test_waiting_handler, &waiting);
	while(atomic_read(&waiting))
		cpu_relax();
	cprintf("End now\n");
}
791
792 void test_circ_buffer(void)
793 {
794         int arr[5] = {0, 1, 2, 3, 4};
795
796         for (int i = 0; i < 5; i++) {
797                 FOR_CIRC_BUFFER(i, 5, j)
798                         printk("Starting with current = %d, each value = %d\n", i, j);
799         }
800         return;
801 }
802
#ifdef __IVY__
void test_am_handler(trapframe_t* tf, uint32_t srcid, uint32_t a0, uint32_t a1,
                     uint32_t a2)
#else
void test_am_handler(trapframe_t* tf, uint32_t srcid, void * a0, void * a1,
                     void * a2)
#endif
{
	// Active-message echo: print the sender core and the three payload args.
	// Under Ivy the payload is uint32_t; otherwise it arrives as void* and is
	// printed via %08x (assumes 32-bit pointers — TODO confirm on 64-bit).
	printk("Received AM on core %d from core %d: arg0= 0x%08x, arg1 = "
	       "0x%08x, arg2 = 0x%08x\n", core_id(), srcid, a0, a1, a2);
	return;
}
815
816 void test_active_messages(void)
817 {
818         // basic tests, make sure we can handle a wraparound and that the error
819         // messages work.
820         printk("sending NUM_ACTIVE_MESSAGES to core 1, sending (#,deadbeef,0)\n");
821         for (int i = 0; i < NUM_ACTIVE_MESSAGES; i++)
822 #ifdef __IVY__
823                 while (send_active_message(1, test_am_handler, i, 0xdeadbeef, 0))
824                         cpu_relax();
825 #else
826                 while (send_active_message(1, test_am_handler, (void *)i,
827                                            (void *)0xdeadbeef, (void *)0))
828                         cpu_relax();
829 #endif
830         udelay(5000000);
831         printk("sending 2*NUM_ACTIVE_MESSAGES to core 1, sending (#,cafebabe,0)\n");
832         for (int i = 0; i < 2*NUM_ACTIVE_MESSAGES; i++)
833 #ifdef __IVY__
834                 while (send_active_message(1, test_am_handler, i, 0xdeadbeef, 0))
835                         cpu_relax();
836 #else
837                 while (send_active_message(1, test_am_handler, (void *)i,
838                                            (void *)0xdeadbeef, (void *)0))
839                         cpu_relax();
840 #endif
841         udelay(5000000);
842         return;
843 }
844 #endif // __i386__
845
/* Create a throwaway kmem cache with the given object geometry, allocate and
 * print 'iters' objects from it, then free them all and destroy the cache. */
static void test_single_cache(int iters, size_t size, int align, int flags,
                              void (*ctor)(void *, size_t),
                              void (*dtor)(void *, size_t))
{
	void *bufs[iters];
	struct kmem_cache *cache =
		kmem_cache_create("test_cache", size, align, flags, ctor, dtor);

	printk("Testing Kmem Cache:\n");
	print_kmem_cache(cache);
	for (int idx = 0; idx < iters; idx++) {
		bufs[idx] = kmem_cache_alloc(cache, 0);
		printk("Buffer %d addr = %p\n", idx, bufs[idx]);
	}
	for (int idx = 0; idx < iters; idx++)
		kmem_cache_free(cache, bufs[idx]);
	kmem_cache_destroy(cache);
	printk("\n\n\n\n");
}
865
/* Smoke-test the slab allocator with a few cache geometries, both with and
 * without ctor/dtor callbacks (GCC nested functions, consistent with the
 * file's existing usage). */
void test_slab(void)
{
	void noisy_ctor(void *buf, size_t size)
	{
		printk("constructin tests\n");
	}
	void noisy_dtor(void *buf, size_t size)
	{
		printk("destructin tests\n");
	}

	test_single_cache(10, 128, 512, 0, 0, 0);
	test_single_cache(10, 128, 4, 0, noisy_ctor, noisy_dtor);
	test_single_cache(10, 1024, 16, 0, 0, 0);
}
880
881 void test_kmalloc(void)
882 {
883         printk("Testing Kmalloc\n");
884         void *bufs[NUM_KMALLOC_CACHES + 1];     
885         size_t size;
886         for (int i = 0; i < NUM_KMALLOC_CACHES + 1; i++){
887                 size = (KMALLOC_SMALLEST << i) - KMALLOC_OFFSET;
888                 bufs[i] = kmalloc(size, 0);
889                 printk("Size %d, Addr = %p\n", size, bufs[i]);
890         }
891         for (int i = 0; i < NUM_KMALLOC_CACHES; i++) {
892                 printk("Freeing buffer %d\n", i);
893                 kfree(bufs[i]);
894         }
895         printk("Testing a large kmalloc\n");
896         size = (KMALLOC_LARGEST << 2);
897         bufs[0] = kmalloc(size, 0);
898         printk("Size %d, Addr = %p\n", size, bufs[0]);
899         kfree(bufs[0]);
900 }
901