Adding allocation of colors for processes
[akaros.git] / kern / src / testing.c
1
2 #ifdef __SHARC__
3 #pragma nosharc
4 #endif
5
6 #include <arch/mmu.h>
7 #include <arch/arch.h>
8 #include <arch/bitmask.h>
9 #include <smp.h>
10
11 #include <ros/memlayout.h>
12 #include <ros/common.h>
13
14 #include <atomic.h>
15 #include <stdio.h>
16 #include <assert.h>
17 #include <string.h>
18 #include <testing.h>
19 #include <trap.h>
20 #include <arch/trap.h>
21 #include <process.h>
22 #include <syscall.h>
23 #include <timing.h>
24 #include <kfs.h>
25 #include <multiboot.h>
26 #include <pmap.h>
27 #include <page_alloc.h>
28 #include <pmap.h>
29 #include <slab.h>
30 #include <kmalloc.h>
31
32 #define l1 (available_caches.l1)
33 #define l2 (available_caches.l2)
34 #define l3 (available_caches.l3)
35
36 #ifdef __i386__
37
38 void test_ipi_sending(void)
39 {
40         extern handler_t (CT(NUM_INTERRUPT_HANDLERS) RO interrupt_handlers)[];
41         int8_t state = 0;
42
43         register_interrupt_handler(interrupt_handlers, I_TESTING,
44                                    test_hello_world_handler, NULL);
45         enable_irqsave(&state);
46         cprintf("\nCORE 0 sending broadcast\n");
47         send_broadcast_ipi(I_TESTING);
48         udelay(3000000);
49         cprintf("\nCORE 0 sending all others\n");
50         send_all_others_ipi(I_TESTING);
51         udelay(3000000);
52         cprintf("\nCORE 0 sending self\n");
53         send_self_ipi(I_TESTING);
54         udelay(3000000);
55         cprintf("\nCORE 0 sending ipi to physical 1\n");
56         send_ipi(0x01, 0, I_TESTING);
57         udelay(3000000);
58         cprintf("\nCORE 0 sending ipi to physical 2\n");
59         send_ipi(0x02, 0, I_TESTING);
60         udelay(3000000);
61         cprintf("\nCORE 0 sending ipi to physical 3\n");
62         send_ipi(0x03, 0, I_TESTING);
63         udelay(3000000);
64         cprintf("\nCORE 0 sending ipi to physical 15\n");
65         send_ipi(0x0f, 0, I_TESTING);
66         udelay(3000000);
67         cprintf("\nCORE 0 sending ipi to logical 2\n");
68         send_ipi(0x02, 1, I_TESTING);
69         udelay(3000000);
70         cprintf("\nCORE 0 sending ipi to logical 1\n");
71         send_ipi(0x01, 1, I_TESTING);
72         udelay(3000000);
73         cprintf("\nDone!\n");
74         disable_irqsave(&state);
75 }
76
/* Routes the PIT tick (IRQ 0 -> vector 0x20) through the legacy PIC to this
 * core and spins forever, so each tick prints via test_hello_world_handler.
 * Also dumps the PIC masks and this core's LINT0 entry for debugging.
 * Note this never returns and will muck with any other timer work. */
void test_pic_reception(void)
{
	register_interrupt_handler(interrupt_handlers, 0x20, test_hello_world_handler, NULL);
	pit_set_timer(100,TIMER_RATEGEN); // totally arbitrary time
	pic_unmask_irq(0);
	cprintf("PIC1 Mask = 0x%04x\n", inb(PIC1_DATA));
	cprintf("PIC2 Mask = 0x%04x\n", inb(PIC2_DATA));
	unmask_lapic_lvt(LAPIC_LVT_LINT0);
	cprintf("Core %d's LINT0: 0x%08x\n", core_id(), read_mmreg32(LAPIC_LVT_LINT0));
	enable_irq();
	while(1);	// spin forever; ticks are serviced via interrupts
}
90
/* Routes the PIT (IRQ 0) through the IOAPIC to core 3, lets it tick for a
 * while, then unroutes it and returns.  Uses udelay()s to make the window
 * where core 3 receives ticks observable in the console output. */
void test_ioapic_pit_reroute(void) 
{
	register_interrupt_handler(interrupt_handlers, 0x20, test_hello_world_handler, NULL);
	ioapic_route_irq(0, 3); 

	cprintf("Starting pit on core 3....\n");
	udelay(3000000);
	pit_set_timer(0xFFFE,TIMER_RATEGEN); // totally arbitrary time
	
	// let core 3 take a few ticks before masking
	udelay(3000000);
	ioapic_unroute_irq(0);
	udelay(300000);
	cprintf("Masked pit. Waiting before return...\n");
	udelay(3000000);
}
106
107 #endif // __i386__
108
109
/* Broadcasts test_print_info_handler to all cores (no-wait), so each core
 * dumps its id and, on x86, its MTRR state. */
void test_print_info(void)
{
	cprintf("\nCORE 0 asking all cores to print info:\n");
	smp_call_function_all(test_print_info_handler, NULL, 0);
	cprintf("\nDone!\n");
}
116
/* Exercises the page-coloring allocators end to end: prints cache properties,
 * walks the colored free lists, then drains every color through the L1, L2,
 * and L3 allocators (resetting with page_init() between rounds) and finally
 * through the global allocator.  Ends by freeing a handful of specific pages
 * and reallocating them to sanity-check page_free().
 * Destructive: leaves the page allocator in a page_init()-reset state plus
 * the stray frees at the end. */
void test_page_coloring(void) 
{
	//Print the different cache properties of our machine
	print_cache_properties("L1", l1);
	cprintf("\n");
	print_cache_properties("L2", l2);
	cprintf("\n");
	print_cache_properties("L3", l3);
	cprintf("\n");

	//Print some stats about our memory
	cprintf("Max Address: %llu\n", MAX_VADDR);
	cprintf("Num Pages: %u\n", npages);

	//Declare a local variable for allocating pages 
	page_t* page;

	cprintf("Contents of the page free list:\n");
	for(int i=0; i<llc_cache->num_colors; i++) {
		cprintf("  COLOR %d:\n", i);
		LIST_FOREACH(page, &colored_page_free_list[i], page_link) {
			cprintf("    Page: %d\n", page2ppn(page));
		}
	}

	//Run through and allocate all pages through l1_page_alloc
	cprintf("Allocating from L1 page colors:\n");
	for(int i=0; i<get_cache_num_page_colors(l1); i++) {
		cprintf("  COLOR %d:\n", i);
		while(l1_page_alloc(&page, i) != -ENOMEM)
			cprintf("    Page: %d\n", page2ppn(page));
	}

	//Put all the pages back by reinitializing
	page_init();

	//Run through and allocate all pages through l2_page_alloc
	cprintf("Allocating from L2 page colors:\n");
	for(int i=0; i<get_cache_num_page_colors(l2); i++) {
		cprintf("  COLOR %d:\n", i);
		while(l2_page_alloc(&page, i) != -ENOMEM)
			cprintf("    Page: %d\n", page2ppn(page));
	}

	//Put all the pages back by reinitializing
	page_init();

	//Run through and allocate all pages through l3_page_alloc
	cprintf("Allocating from L3 page colors:\n");
	for(int i=0; i<get_cache_num_page_colors(l3); i++) {
		cprintf("  COLOR %d:\n", i);
		while(l3_page_alloc(&page, i) != -ENOMEM)
			cprintf("    Page: %d\n", page2ppn(page));
	}

	//Put all the pages back by reinitializing
	page_init();

	//Run through and allocate all pages through page_alloc
	cprintf("Allocating from global allocator:\n");
	while(page_alloc(&page) != -ENOMEM)
		cprintf("    Page: %d\n", page2ppn(page));

	// every page is allocated now, so a color-specific alloc must fail too
	if(l2_page_alloc(&page, 0) != -ENOMEM)
		cprintf("Should not get here, all pages should already be gone!\n");
	cprintf("All pages gone for sure...\n");

	//Now lets put a few pages back using page_free..
	cprintf("Reinserting pages via page_free and reallocating them...\n");
	page_free(&pages[0]);
	page_free(&pages[15]);
	page_free(&pages[7]);
	page_free(&pages[6]);
	page_free(&pages[4]);

	// should print exactly the five pages freed above (allocator order)
	while(page_alloc(&page) != -ENOMEM)
		cprintf("Page: %d\n", page2ppn(page));	
}
195
196 void test_color_alloc() {
197         size_t checkpoint = 0;
198         struct proc* p = kfs_proc_create(kfs_lookup_path("parlib_matrix"));
199         cache_color_alloc(l2, p);
200         cache_color_alloc(l3, p);
201         cache_color_alloc(l3, p);
202         cache_color_alloc(l2, p);
203         cache_color_free(llc_cache, p);
204         cache_color_free(llc_cache, p);
205         cache_color_free(llc_cache, p);
206         cache_color_free(llc_cache, p);
207         cache_color_free(llc_cache, p);
208         cache_color_free(llc_cache, p);
209         cache_color_free(llc_cache, p);
210         cache_color_free(llc_cache, p);
211         cache_color_free(llc_cache, p);
212         cache_color_free(llc_cache, p);
213         cache_color_free(llc_cache, p);
214         cache_color_free(llc_cache, p);
215         cache_color_free(llc_cache, p);
216         cache_color_free(llc_cache, p);
217         cache_color_free(llc_cache, p);
218         cache_color_free(llc_cache, p);
219         cache_color_free(l2, p);
220         cache_color_free(llc_cache, p);
221         cache_color_free(llc_cache, p);
222         printk("L1 free colors, tot colors: %d\n", l1->num_colors);
223         PRINT_BITMASK(l1->free_colors_map, l1->num_colors);
224         printk("L2 free colors, tot colors: %d\n", l2->num_colors);
225         PRINT_BITMASK(l2->free_colors_map, l2->num_colors);
226         printk("L3 free colors, tot colors: %d\n", l3->num_colors);
227         PRINT_BITMASK(l3->free_colors_map, l3->num_colors);
228         printk("Process allocated colors\n");
229         PRINT_BITMASK(p->cache_colors_map, llc_cache->num_colors);
230         printk("test_color_alloc() complete!\n");
231 }
232
233 barrier_t test_cpu_array;
234
/* Initializes the global test_cpu_array barrier for all cores, then
 * broadcasts test_barrier_handler so every core prints and barriers through
 * several rounds (see test_barrier_handler below). */
void test_barrier(void)
{
	cprintf("Core 0 initializing barrier\n");
	init_barrier(&test_cpu_array, num_cpus);
	cprintf("Core 0 asking all cores to print ids, barrier, rinse, repeat\n");
	smp_call_function_all(test_barrier_handler, NULL, 0);
}
242
/* Verifies the nesting semantics of enable_irqsave()/disable_irqsave():
 * each enable/disable pair must restore the interrupt state that was in
 * effect when the pair began, across every combination of starting state
 * and nesting order.  The printd lines show eflags.IF (0 or 0x200) and the
 * asserts pin the expected state at each step.  Exercise order matters --
 * do not reorder statements. */
void test_interrupts_irqsave(void)
{
	int8_t state = 0;
	printd("Testing Nesting Enabling first, turning ints off:\n");
	disable_irq();
	printd("Interrupts are: %x\n", read_eflags() & FL_IF);
	assert(!irq_is_enabled());
	printd("Enabling IRQSave\n");
	enable_irqsave(&state);
	printd("Interrupts are: %x\n", read_eflags() & FL_IF);
	assert(irq_is_enabled());
	printd("Enabling IRQSave Again\n");
	enable_irqsave(&state);
	printd("Interrupts are: %x\n", read_eflags() & FL_IF);
	assert(irq_is_enabled());
	printd("Disabling IRQSave Once\n");
	disable_irqsave(&state);
	printd("Interrupts are: %x\n", read_eflags() & FL_IF);
	assert(irq_is_enabled());
	printd("Disabling IRQSave Again\n");
	disable_irqsave(&state);
	printd("Interrupts are: %x\n", read_eflags() & FL_IF);
	assert(!irq_is_enabled());
	printd("Done.  Should have been 0, 200, 200, 200, 0\n");

	printd("Testing Nesting Disabling first, turning ints on:\n");
	state = 0;	// fresh nesting state for each scenario
	enable_irq();
	printd("Interrupts are: %x\n", read_eflags() & FL_IF);
	assert(irq_is_enabled());
	printd("Disabling IRQSave Once\n");
	disable_irqsave(&state);
	printd("Interrupts are: %x\n", read_eflags() & FL_IF);
	assert(!irq_is_enabled());
	printd("Disabling IRQSave Again\n");
	disable_irqsave(&state);
	printd("Interrupts are: %x\n", read_eflags() & FL_IF);
	assert(!irq_is_enabled());
	printd("Enabling IRQSave Once\n");
	enable_irqsave(&state);
	printd("Interrupts are: %x\n", read_eflags() & FL_IF);
	assert(!irq_is_enabled());
	printd("Enabling IRQSave Again\n");
	enable_irqsave(&state);
	printd("Interrupts are: %x\n", read_eflags() & FL_IF);
	assert(irq_is_enabled());
	printd("Done.  Should have been 200, 0, 0, 0, 200 \n");

	state = 0;
	disable_irq();
	printd("Ints are off, enabling then disabling.\n");
	enable_irqsave(&state);
	printd("Interrupts are: %x\n", read_eflags() & FL_IF);
	assert(irq_is_enabled());
	disable_irqsave(&state);
	printd("Interrupts are: %x\n", read_eflags() & FL_IF);
	assert(!irq_is_enabled());
	printd("Done.  Should have been 200, 0\n");

	state = 0;
	enable_irq();
	printd("Ints are on, enabling then disabling.\n");
	enable_irqsave(&state);
	printd("Interrupts are: %x\n", read_eflags() & FL_IF);
	assert(irq_is_enabled());
	disable_irqsave(&state);
	printd("Interrupts are: %x\n", read_eflags() & FL_IF);
	assert(irq_is_enabled());
	printd("Done.  Should have been 200, 200\n");

	state = 0;
	disable_irq();
	printd("Ints are off, disabling then enabling.\n");
	disable_irqsave(&state);
	printd("Interrupts are: %x\n", read_eflags() & FL_IF);
	assert(!irq_is_enabled());
	enable_irqsave(&state);
	printd("Interrupts are: %x\n", read_eflags() & FL_IF);
	assert(!irq_is_enabled());
	printd("Done.  Should have been 0, 0\n");

	state = 0;
	enable_irq();
	printd("Ints are on, disabling then enabling.\n");
	disable_irqsave(&state);
	printd("Interrupts are: %x\n", read_eflags() & FL_IF);
	assert(!irq_is_enabled());
	enable_irqsave(&state);
	printd("Interrupts are: %x\n", read_eflags() & FL_IF);
	assert(irq_is_enabled());
	printd("Done.  Should have been 0, 200\n");

	disable_irq();	// leave interrupts off on exit
	cprintf("Passed enable_irqsave tests\n");
}
338
/* Smoke-tests the bitmask macros (DECL/CLR/SET/GET/COPY/FILL/IS_CLEAR) on a
 * 67-bit mask -- deliberately not a multiple of 8, to exercise the partial
 * final byte.  Results are verified by eye against the printed expectations. */
void test_bitmasks(void)
{
#define masksize 67
	DECL_BITMASK(mask, masksize);
	printk("size of mask %d\n", sizeof(mask));
	CLR_BITMASK(mask, masksize);
	PRINT_BITMASK(mask, masksize);
	printk("cleared\n");
	SET_BITMASK_BIT(mask, 0);
	SET_BITMASK_BIT(mask, 11);
	SET_BITMASK_BIT(mask, 17);
	SET_BITMASK_BIT(mask, masksize-1);	// last valid bit: boundary case
	printk("bits set\n");
	PRINT_BITMASK(mask, masksize);
	DECL_BITMASK(mask2, masksize);
	COPY_BITMASK(mask2, mask, masksize);
	printk("copy of original mask, should be the same as the prev\n");
	PRINT_BITMASK(mask2, masksize);
	CLR_BITMASK_BIT(mask, 11);
	printk("11 cleared\n");
	PRINT_BITMASK(mask, masksize);
	printk("bit 17 is %d (should be 1)\n", GET_BITMASK_BIT(mask, 17));
	printk("bit 11 is %d (should be 0)\n", GET_BITMASK_BIT(mask, 11));
	FILL_BITMASK(mask, masksize);
	PRINT_BITMASK(mask, masksize);
	printk("should be all 1's, except for a few at the end\n");
	printk("Is Clear?: %d (should be 0)\n", BITMASK_IS_CLEAR(mask,masksize));
	CLR_BITMASK(mask, masksize);
	PRINT_BITMASK(mask, masksize);
	printk("Is Clear?: %d (should be 1)\n", BITMASK_IS_CLEAR(mask,masksize));
	printk("should be cleared\n");
}
371
372 checklist_t *RO the_global_list;
373
/* Per-core IPI handler for test_checklists(): waits ~1s, then checks this
 * core off the global checklist. */
void test_checklist_handler(trapframe_t *tf, void* data)
{
	udelay(1000000);
	cprintf("down_checklist(%x,%d)\n", the_global_list, core_id());
	down_checklist(the_global_list);
}
380
/* Exercises the checklist primitives: builds a checklist sized for
 * MAX_NUM_CPUS, pokes its bitmask directly, commits a mask covering the
 * first num_cpus cores, then has every core check itself off via
 * test_checklist_handler and waits for completion.
 * NOTE: a_list is stack-allocated but published through the_global_list, so
 * this function must not return before waiton_checklist() completes (it
 * doesn't -- the wait is the last step). */
void test_checklists(void)
{
	INIT_CHECKLIST(a_list, MAX_NUM_CPUS);
	the_global_list = &a_list;
	printk("Checklist Build, mask size: %d\n", sizeof(a_list.mask.bits));
	printk("mask\n");
	PRINT_BITMASK(a_list.mask.bits, a_list.mask.size);
	SET_BITMASK_BIT(a_list.mask.bits, 11);
	printk("Set bit 11\n");
	PRINT_BITMASK(a_list.mask.bits, a_list.mask.size);

	CLR_BITMASK(a_list.mask.bits, a_list.mask.size);
	INIT_CHECKLIST_MASK(a_mask, MAX_NUM_CPUS);
	FILL_BITMASK(a_mask.bits, num_cpus);
	//CLR_BITMASK_BIT(a_mask.bits, core_id());
	//SET_BITMASK_BIT(a_mask.bits, 1);
	//printk("New mask (1, 17, 25):\n");
	printk("Created new mask, filled up to num_cpus\n");
	PRINT_BITMASK(a_mask.bits, a_mask.size);
	printk("committing new mask\n");
	commit_checklist_wait(&a_list, &a_mask);
	printk("Old mask (copied onto):\n");
	PRINT_BITMASK(a_list.mask.bits, a_list.mask.size);
	//smp_call_function_single(1, test_checklist_handler, 0, 0);

	smp_call_function_all(test_checklist_handler, NULL, 0);

	printk("Waiting on checklist\n");
	waiton_checklist(&a_list);
	printk("Done Waiting!\n");

}
413
414 atomic_t a, b, c;
415
/* IPI handler: atomically increments the counter passed in via 'data'.
 * Under Ivy (__IVY__) the parameter is typed; otherwise it is a void*. */
#ifdef __IVY__
void test_incrementer_handler(trapframe_t *tf, atomic_t *data)
#else
void test_incrementer_handler(trapframe_t *tf, void *data)
#endif
{
	assert(data);
	atomic_inc(data);
}
425
/* Do-nothing IPI handler; the nop keeps the body non-empty.  Used to consume
 * handler slots and to synchronize via waiters without side effects. */
void test_null_handler(trapframe_t *tf, void* data)
{
	asm volatile("nop");
}
430
/* Stress-tests the smp_call_function_* machinery: self/all/single calls with
 * and without waiters, dropped-message detection via three atomic counters,
 * waiter reuse, and deliberate exhaustion of the handler-wrapper slots.  The
 * exact sequence of calls matters (it probes slot reuse and interleaving);
 * do not reorder. */
void test_smp_call_functions(void)
{
	int i;
	atomic_init(&a, 0);
	atomic_init(&b, 0);
	atomic_init(&c, 0);
	handler_wrapper_t *waiter0 = 0, *waiter1 = 0, *waiter2 = 0, *waiter3 = 0,
	                  *waiter4 = 0, *waiter5 = 0;
	uint8_t me = core_id();
	printk("\nCore %d: SMP Call Self (nowait):\n", me);
	printk("---------------------\n");
	smp_call_function_self(test_hello_world_handler, NULL, 0);
	printk("\nCore %d: SMP Call Self (wait):\n", me);
	printk("---------------------\n");
	smp_call_function_self(test_hello_world_handler, NULL, &waiter0);
	smp_call_wait(waiter0);
	printk("\nCore %d: SMP Call All (nowait):\n", me);
	printk("---------------------\n");
	smp_call_function_all(test_hello_world_handler, NULL, 0);
	printk("\nCore %d: SMP Call All (wait):\n", me);
	printk("---------------------\n");
	smp_call_function_all(test_hello_world_handler, NULL, &waiter0);
	smp_call_wait(waiter0);
	printk("\nCore %d: SMP Call All-Else Individually, in order (nowait):\n", me);
	printk("---------------------\n");
	for(i = 1; i < num_cpus; i++)
		smp_call_function_single(i, test_hello_world_handler, NULL, 0);
	printk("\nCore %d: SMP Call Self (wait):\n", me);
	printk("---------------------\n");
	smp_call_function_self(test_hello_world_handler, NULL, &waiter0);
	smp_call_wait(waiter0);
	printk("\nCore %d: SMP Call All-Else Individually, in order (wait):\n", me);
	printk("---------------------\n");
	for(i = 1; i < num_cpus; i++)
	{
		smp_call_function_single(i, test_hello_world_handler, NULL, &waiter0);
		smp_call_wait(waiter0);
	}
	printk("\nTesting to see if any IPI-functions are dropped when not waiting:\n");
	printk("A: %d, B: %d, C: %d (should be 0,0,0)\n", atomic_read(&a), atomic_read(&b), atomic_read(&c));
	smp_call_function_all(test_incrementer_handler, &a, 0);
	smp_call_function_all(test_incrementer_handler, &b, 0);
	smp_call_function_all(test_incrementer_handler, &c, 0);
	// if i can clobber a previous IPI, the interleaving might do it
	smp_call_function_single(1 % num_cpus, test_incrementer_handler, &a, 0);
	smp_call_function_single(2 % num_cpus, test_incrementer_handler, &b, 0);
	smp_call_function_single(3 % num_cpus, test_incrementer_handler, &c, 0);
	smp_call_function_single(4 % num_cpus, test_incrementer_handler, &a, 0);
	smp_call_function_single(5 % num_cpus, test_incrementer_handler, &b, 0);
	smp_call_function_single(6 % num_cpus, test_incrementer_handler, &c, 0);
	smp_call_function_all(test_incrementer_handler, &a, 0);
	smp_call_function_single(3 % num_cpus, test_incrementer_handler, &c, 0);
	smp_call_function_all(test_incrementer_handler, &b, 0);
	smp_call_function_single(1 % num_cpus, test_incrementer_handler, &a, 0);
	smp_call_function_all(test_incrementer_handler, &c, 0);
	smp_call_function_single(2 % num_cpus, test_incrementer_handler, &b, 0);
	// wait, so we're sure the others finish before printing.
	// without this, we could (and did) get 19,18,19, since the B_inc
	// handler didn't finish yet
	smp_call_function_self(test_null_handler, NULL, &waiter0);
	// need to grab all 5 handlers (max), since the code moves to the next free.
	smp_call_function_self(test_null_handler, NULL, &waiter1);
	smp_call_function_self(test_null_handler, NULL, &waiter2);
	smp_call_function_self(test_null_handler, NULL, &waiter3);
	smp_call_function_self(test_null_handler, NULL, &waiter4);
	smp_call_wait(waiter0);
	smp_call_wait(waiter1);
	smp_call_wait(waiter2);
	smp_call_wait(waiter3);
	smp_call_wait(waiter4);
	printk("A: %d, B: %d, C: %d (should be 19,19,19)\n", atomic_read(&a), atomic_read(&b), atomic_read(&c));
	printk("Attempting to deadlock by smp_calling with an outstanding wait:\n");
	smp_call_function_self(test_null_handler, NULL, &waiter0);
	printk("Sent one\n");
	smp_call_function_self(test_null_handler, NULL, &waiter1);
	printk("Sent two\n");
	smp_call_wait(waiter0);
	printk("Wait one\n");
	smp_call_wait(waiter1);
	printk("Wait two\n");
	printk("\tMade it through!\n");
	printk("Attempting to deadlock by smp_calling more than are available:\n");
	printk("\tShould see an Insufficient message and a kernel warning.\n");
	if (smp_call_function_self(test_null_handler, NULL, &waiter0))
		printk("\tInsufficient handlers to call function (0)\n");
	if (smp_call_function_self(test_null_handler, NULL, &waiter1))
		printk("\tInsufficient handlers to call function (1)\n");
	if (smp_call_function_self(test_null_handler, NULL, &waiter2))
		printk("\tInsufficient handlers to call function (2)\n");
	if (smp_call_function_self(test_null_handler, NULL, &waiter3))
		printk("\tInsufficient handlers to call function (3)\n");
	if (smp_call_function_self(test_null_handler, NULL, &waiter4))
		printk("\tInsufficient handlers to call function (4)\n");
	if (smp_call_function_self(test_null_handler, NULL, &waiter5))
		printk("\tInsufficient handlers to call function (5)\n");
	smp_call_wait(waiter0);
	smp_call_wait(waiter1);
	smp_call_wait(waiter2);
	smp_call_wait(waiter3);
	smp_call_wait(waiter4);
	smp_call_wait(waiter5);
	printk("\tMade it through!\n");

	printk("Done\n");
}
536
537 #ifdef __i386__
538 void test_lapic_status_bit(void)
539 {
540         register_interrupt_handler(interrupt_handlers, I_TESTING,
541                                    test_incrementer_handler, &a);
542         #define NUM_IPI 100000
543         atomic_set(&a,0);
544         printk("IPIs received (should be 0): %d\n", a);
545         for(int i = 0; i < NUM_IPI; i++) {
546                 send_ipi(7, 0, I_TESTING);
547                 lapic_wait_to_send();
548         }
549         // need to wait a bit to let those IPIs get there
550         udelay(5000000);
551         printk("IPIs received (should be %d): %d\n", a, NUM_IPI);
552         // hopefully that handler never fires again.  leaving it registered for now.
553 }
554 #endif // __i386__
555
556 /******************************************************************************/
557 /*            Test Measurements: Couples with measurement.c                   */
558 // All user processes can R/W the UGDATA page
559 barrier_t*COUNT(1) bar = (barrier_t*COUNT(1))TC(UGDATA);
560 uint32_t*COUNT(1) job_to_run = (uint32_t*COUNT(1))TC(UGDATA + sizeof(barrier_t));
561 env_t* env_batch[64]; // Fairly arbitrary, just the max I plan to use.
562
563 /* Helpers for test_run_measurements */
564 static void wait_for_all_envs_to_die(void)
565 {
566         while (atomic_read(&num_envs))
567                 cpu_relax();
568 }
569
// this never returns.
/* Launches num_threads copies of roslib_measurements on cores
 * [start_core, start_core + num_threads), synchronized through the shared
 * UGDATA barrier, with *job_to_run selecting the measurement case.  Switches
 * to the first env's address space (lcr3) before writing the UGDATA values.
 * After the run it launches roslib_null on core 0 purely to bounce back into
 * the manager; the trailing panic is unreachable in the normal path. */
static void sync_tests(int start_core, int num_threads, int job_num)
{
	assert(start_core + num_threads <= num_cpus);
	wait_for_all_envs_to_die();
	for (int i = start_core; i < start_core + num_threads; i++)
		env_batch[i] = kfs_proc_create(kfs_lookup_path("roslib_measurements"));
	// map in the first env so the UGDATA writes below land in its space
	lcr3(env_batch[start_core]->env_cr3);
	init_barrier(bar, num_threads);
	*job_to_run = job_num;
	for (int i = start_core; i < start_core + num_threads; i++)
		smp_call_function_single(i, run_env_handler, env_batch[i], 0);
	process_workqueue();
	// we want to fake a run, to reenter manager for the next case
	env_t *env = kfs_proc_create(kfs_lookup_path("roslib_null"));
	smp_call_function_single(0, run_env_handler, env, 0);
	process_workqueue();
	panic("whoops!\n");
}
589
/* Async variant of sync_tests(): same setup (spawn roslib_measurements on
 * cores [start_core, start_core + num_threads), barrier + job number via
 * UGDATA), but instead of process_workqueue() it services each env's generic
 * syscalls in a loop until all of them report done.
 * NOTE(review): the loop exits when 'count' sums to -num_threads -- this
 * relies on process_generic_syscalls() returning -1 per finished env;
 * confirm against its contract.  Never returns (panics after the fake run). */
static void async_tests(int start_core, int num_threads, int job_num)
{
	int count;

	assert(start_core + num_threads <= num_cpus);
	wait_for_all_envs_to_die();
	for (int i = start_core; i < start_core + num_threads; i++)
		env_batch[i] = kfs_proc_create(kfs_lookup_path("roslib_measurements"));
	printk("async_tests: checkpoint 0\n");
	// map in the first env so the UGDATA writes below land in its space
	lcr3(env_batch[start_core]->env_cr3);
	init_barrier(bar, num_threads);
	printk("async_tests: checkpoint 1\n");
	*job_to_run = job_num;
	for (int i = start_core; i < start_core + num_threads; i++)
		smp_call_function_single(i, run_env_handler, env_batch[i], 0);
	count = 0;
	while (count > -num_threads) {
		count = 0;
		for (int i = start_core; i < start_core + num_threads; i++) {
			count += process_generic_syscalls(env_batch[i], 1);
		}
		cpu_relax();
	}
	// we want to fake a run, to reenter manager for the next case
	env_t *env = kfs_proc_create(kfs_lookup_path("roslib_null"));
	smp_call_function_single(0, run_env_handler, env, 0);
	process_workqueue();
	// this all never returns
	panic("whoops!\n");
}
620
/* Dispatches a measurement job by number: cases 1-8 are synchronous runs
 * (sync_tests) with growing thread counts, cases 0 and 9-14 are asynchronous
 * runs (async_tests).  Both helpers never return on success, so reaching the
 * trailing panic means the setup itself failed (or the job number was bad). */
void test_run_measurements(uint32_t job_num)
{
	switch (job_num) {
		case 0: // Nulls
			printk("Case 0:\n");
			async_tests(2, 1, job_num);  // start core 2, 1 core total
			break;
		case 1: // Sync
			printk("Case 1:\n");
			sync_tests(2, 1, job_num);
			break;
		case 2:
			printk("Case 2:\n");
			sync_tests(2, 2, job_num);
			break;
		case 3:
			printk("Case 3:\n");
			sync_tests(0, 3, job_num);
			break;
		case 4:
			printk("Case 4:\n");
			sync_tests(0, 4, job_num);
			break;
		case 5:
			printk("Case 5:\n");
			sync_tests(0, 5, job_num);
			break;
		case 6:
			printk("Case 6:\n");
			sync_tests(0, 6, job_num);
			break;
		case 7:
			printk("Case 7:\n");
			sync_tests(0, 7, job_num);
			break;
		case 8:
			printk("Case 8:\n");
			sync_tests(0, 8, job_num);
			break;
		case 9:
			printk("Case 9:\n");
			async_tests(2, 1, job_num);
			break;
		case 10:
			printk("Case 10:\n");
			async_tests(2, 2, job_num);
			break;
		case 11:
			printk("Case 11:\n");
			async_tests(2, 3, job_num);
			break;
		case 12:
			printk("Case 12:\n");
			async_tests(2, 4, job_num);
			break;
		case 13:
			printk("Case 13:\n");
			async_tests(2, 5, job_num);
			break;
		case 14:
			printk("Case 14:\n");
			async_tests(2, 6, job_num);
			break;
		default:
			warn("Invalid test number!!");
	}
	// sync_tests/async_tests never return, so this fires only on failure
	panic("Error in test setup!!");
}
689
690 /************************************************************/
691 /* ISR Handler Functions */
692
/* Generic IRQ/IPI handler that prints the trap number, receiving core, and
 * trapframe address.  The trap number extraction is arch-specific: x86 reads
 * tf_trapno, sparc decodes the tbr register, other arches report 0. */
void test_hello_world_handler(trapframe_t *tf, void* data)
{
	int trapno;
	#if defined(__i386__)
	trapno = tf->tf_trapno;
	#elif defined(__sparc_v8__)
	trapno = (tf->tbr >> 4) & 0xFF;	// trap type field of the tbr
	#else
	trapno = 0;
	#endif

	cprintf("Incoming IRQ, ISR: %d on core %d with tf at 0x%08x\n",
	        trapno, core_id(), tf);
}
707
708 uint32_t print_info_lock = 0;
709
/* Handler for test_print_info(): prints this core's id and, on x86, dumps
 * the MTRR default type plus the eight variable-range base/mask MSR pairs
 * (0x200-0x20f).  Serialized with a spinlock so cores don't interleave
 * their output. */
void test_print_info_handler(trapframe_t *tf, void* data)
{
	spin_lock_irqsave(&print_info_lock);
	cprintf("----------------------------\n");
	cprintf("This is Core %d\n", core_id());
#ifdef __i386__
	cprintf("MTRR_DEF_TYPE = 0x%08x\n", read_msr(IA32_MTRR_DEF_TYPE));
	cprintf("MTRR Phys0 Base = 0x%016llx, Mask = 0x%016llx\n",
	        read_msr(0x200), read_msr(0x201));
	cprintf("MTRR Phys1 Base = 0x%016llx, Mask = 0x%016llx\n",
	        read_msr(0x202), read_msr(0x203));
	cprintf("MTRR Phys2 Base = 0x%016llx, Mask = 0x%016llx\n",
	        read_msr(0x204), read_msr(0x205));
	cprintf("MTRR Phys3 Base = 0x%016llx, Mask = 0x%016llx\n",
	        read_msr(0x206), read_msr(0x207));
	cprintf("MTRR Phys4 Base = 0x%016llx, Mask = 0x%016llx\n",
	        read_msr(0x208), read_msr(0x209));
	cprintf("MTRR Phys5 Base = 0x%016llx, Mask = 0x%016llx\n",
	        read_msr(0x20a), read_msr(0x20b));
	cprintf("MTRR Phys6 Base = 0x%016llx, Mask = 0x%016llx\n",
	        read_msr(0x20c), read_msr(0x20d));
	cprintf("MTRR Phys7 Base = 0x%016llx, Mask = 0x%016llx\n",
	        read_msr(0x20e), read_msr(0x20f));
#endif // __i386__
	cprintf("----------------------------\n");
	spin_unlock_irqsave(&print_info_lock);
}
737
/* Handler for test_barrier(): each core prints and then synchronizes on the
 * shared test_cpu_array barrier several times in a row (repeated waits
 * exercise barrier reuse between rounds). */
void test_barrier_handler(trapframe_t *tf, void* data)
{
	cprintf("Round 1: Core %d\n", core_id());
	waiton_barrier(&test_cpu_array);
	waiton_barrier(&test_cpu_array);
	waiton_barrier(&test_cpu_array);
	waiton_barrier(&test_cpu_array);
	waiton_barrier(&test_cpu_array);
	waiton_barrier(&test_cpu_array);
	cprintf("Round 2: Core %d\n", core_id());
	waiton_barrier(&test_cpu_array);
	cprintf("Round 3: Core %d\n", core_id());
	// uncomment to demonstrate broken output when a round isn't barriered
	//cprintf("Round 4: Core %d\n", core_id());
}
753
/* IPI handler used by test_pit: decrements the atomic flag the waiter spins
 * on, signalling that the LAPIC timer interrupt fired.  The Ivy build gets
 * the precise pointer type; plain C takes void* per the handler_t ABI. */
#ifdef __IVY__
static void test_waiting_handler(trapframe_t *tf, atomic_t *data)
#else
static void test_waiting_handler(trapframe_t *tf, void *data)
#endif
{
	atomic_dec(data);
}
762
763 #ifdef __i386__
764 void test_pit(void)
765 {
766         cprintf("Starting test for PIT now (10s)\n");
767         udelay_pit(10000000);
768         cprintf("End now\n");
769         cprintf("Starting test for TSC (if stable) now (10s)\n");
770         udelay(10000000);
771         cprintf("End now\n");
772
773         cprintf("Starting test for LAPIC (if stable) now (10s)\n");
774         enable_irq();
775         lapic_set_timer(10000000, FALSE);
776
777         atomic_t waiting;
778         atomic_init(&waiting, 1);
779         register_interrupt_handler(interrupt_handlers, I_TESTING,
780                                    test_waiting_handler, &waiting);
781         while(atomic_read(&waiting))
782                 cpu_relax();
783         cprintf("End now\n");
784 }
785
786 void test_circ_buffer(void)
787 {
788         int arr[5] = {0, 1, 2, 3, 4};
789
790         for (int i = 0; i < 5; i++) {
791                 FOR_CIRC_BUFFER(i, 5, j)
792                         printk("Starting with current = %d, each value = %d\n", i, j);
793         }
794         return;
795 }
796
/* Active-message handler for test_active_messages: just logs the receiving
 * core, the sender, and the three payload words.  The Ivy build takes the
 * args as uint32_t; plain C takes void* per the active-message ABI.
 * NOTE(review): %08x on void* args assumes 32-bit pointers — fine for the
 * i386 build this sits under. */
#ifdef __IVY__
void test_am_handler(trapframe_t* tf, uint32_t srcid, uint32_t a0, uint32_t a1,
                     uint32_t a2)
#else
void test_am_handler(trapframe_t* tf, uint32_t srcid, void * a0, void * a1,
                     void * a2)
#endif
{
	printk("Received AM on core %d from core %d: arg0= 0x%08x, arg1 = "
	       "0x%08x, arg2 = 0x%08x\n", core_id(), srcid, a0, a1, a2);
	return;
}
809
810 void test_active_messages(void)
811 {
812         // basic tests, make sure we can handle a wraparound and that the error
813         // messages work.
814         printk("sending NUM_ACTIVE_MESSAGES to core 1, sending (#,deadbeef,0)\n");
815         for (int i = 0; i < NUM_ACTIVE_MESSAGES; i++)
816 #ifdef __IVY__
817                 while (send_active_message(1, test_am_handler, i, 0xdeadbeef, 0))
818                         cpu_relax();
819 #else
820                 while (send_active_message(1, test_am_handler, (void *)i,
821                                            (void *)0xdeadbeef, (void *)0))
822                         cpu_relax();
823 #endif
824         udelay(5000000);
825         printk("sending 2*NUM_ACTIVE_MESSAGES to core 1, sending (#,cafebabe,0)\n");
826         for (int i = 0; i < 2*NUM_ACTIVE_MESSAGES; i++)
827 #ifdef __IVY__
828                 while (send_active_message(1, test_am_handler, i, 0xdeadbeef, 0))
829                         cpu_relax();
830 #else
831                 while (send_active_message(1, test_am_handler, (void *)i,
832                                            (void *)0xdeadbeef, (void *)0))
833                         cpu_relax();
834 #endif
835         udelay(5000000);
836         return;
837 }
838 #endif // __i386__
839
/* Create a kmem cache with the given geometry, allocate and free 'iters'
 * objects from it (printing each address), then destroy the cache. */
static void test_single_cache(int iters, size_t size, int align, int flags,
                              void (*ctor)(void *, size_t),
                              void (*dtor)(void *, size_t))
{
	void *allocs[iters];
	struct kmem_cache *cache = kmem_cache_create("test_cache", size, align,
	                                             flags, ctor, dtor);
	printk("Testing Kmem Cache:\n");
	print_kmem_cache(cache);
	for (int i = 0; i < iters; i++) {
		allocs[i] = kmem_cache_alloc(cache, 0);
		printk("Buffer %d addr = %p\n", i, allocs[i]);
	}
	for (int i = 0; i < iters; i++)
		kmem_cache_free(cache, allocs[i]);
	kmem_cache_destroy(cache);
	printk("\n\n\n\n");
}
859
/* Drive test_single_cache with three cache geometries: oversized alignment,
 * small alignment with ctor/dtor hooks, and a larger object size.
 * Note: a_ctor/a_dtor are GCC nested functions (a GNU C extension). */
void test_slab(void)
{
	// Constructor hook: just logs that it ran.
	void a_ctor(void *buf, size_t size)
	{
		printk("constructin tests\n");
	}
	// Destructor hook: just logs that it ran.
	void a_dtor(void *buf, size_t size)
	{
		printk("destructin tests\n");
	}
	test_single_cache(10, 128, 512, 0, 0, 0);
	test_single_cache(10, 128, 4, 0, a_ctor, a_dtor);
	test_single_cache(10, 1024, 16, 0, 0, 0);
}
874
875 void test_kmalloc(void)
876 {
877         printk("Testing Kmalloc\n");
878         void *bufs[NUM_KMALLOC_CACHES + 1];     
879         size_t size;
880         for (int i = 0; i < NUM_KMALLOC_CACHES + 1; i++){
881                 size = (KMALLOC_SMALLEST << i) - KMALLOC_OFFSET;
882                 bufs[i] = kmalloc(size, 0);
883                 printk("Size %d, Addr = %p\n", size, bufs[i]);
884         }
885         for (int i = 0; i < NUM_KMALLOC_CACHES; i++) {
886                 printk("Freeing buffer %d\n", i);
887                 kfree(bufs[i]);
888         }
889         printk("Testing a large kmalloc\n");
890         size = (KMALLOC_LARGEST << 2);
891         bufs[0] = kmalloc(size, 0);
892         printk("Size %d, Addr = %p\n", size, bufs[0]);
893         kfree(bufs[0]);
894 }
895