#include <arch/bitmask.h>
#include <ros/memlayout.h>
#include <ros/common.h>
#include <arch/trap.h>
#include <multiboot.h>
#include <page_alloc.h>
#include <hashtable.h>
#define l1 (available_caches.l1)
#define l2 (available_caches.l2)
#define l3 (available_caches.l3)
void test_ipi_sending(void)
    extern handler_t (CT(NUM_INTERRUPT_HANDLERS) RO interrupt_handlers)[];
    register_interrupt_handler(interrupt_handlers, I_TESTING,
                               test_hello_world_handler, NULL);
    enable_irqsave(&state);
    cprintf("\nCORE 0 sending broadcast\n");
    send_broadcast_ipi(I_TESTING);
    cprintf("\nCORE 0 sending all others\n");
    send_all_others_ipi(I_TESTING);
    cprintf("\nCORE 0 sending self\n");
    send_self_ipi(I_TESTING);
    cprintf("\nCORE 0 sending ipi to physical 1\n");
    send_ipi(get_hw_coreid(0x01), I_TESTING);
    cprintf("\nCORE 0 sending ipi to physical 2\n");
    send_ipi(get_hw_coreid(0x02), I_TESTING);
    cprintf("\nCORE 0 sending ipi to physical 3\n");
    send_ipi(get_hw_coreid(0x03), I_TESTING);
    cprintf("\nCORE 0 sending ipi to physical 15\n");
    send_ipi(get_hw_coreid(0x0f), I_TESTING);
    cprintf("\nCORE 0 sending ipi to logical 2\n");
    send_group_ipi(0x02, I_TESTING);
    cprintf("\nCORE 0 sending ipi to logical 1\n");
    send_group_ipi(0x01, I_TESTING);
    disable_irqsave(&state);
// Note this never returns and will muck with any other timer work
void test_pic_reception(void)
    register_interrupt_handler(interrupt_handlers, 0x20, test_hello_world_handler, NULL);
    pit_set_timer(100,TIMER_RATEGEN); // totally arbitrary time
    cprintf("PIC1 Mask = 0x%04x\n", inb(PIC1_DATA));
    cprintf("PIC2 Mask = 0x%04x\n", inb(PIC2_DATA));
    unmask_lapic_lvt(LAPIC_LVT_LINT0);
    cprintf("Core %d's LINT0: 0x%08x\n", core_id(), read_mmreg32(LAPIC_LVT_LINT0));
void test_ioapic_pit_reroute(void)
    register_interrupt_handler(interrupt_handlers, 0x20, test_hello_world_handler, NULL);
    ioapic_route_irq(0, 3);
    cprintf("Starting pit on core 3....\n");
    pit_set_timer(0xFFFE,TIMER_RATEGEN); // totally arbitrary time
    ioapic_unroute_irq(0);
    cprintf("Masked pit. Waiting before return...\n");
void test_print_info(void)
    cprintf("\nCORE 0 asking all cores to print info:\n");
    smp_call_function_all(test_print_info_handler, NULL, 0);
    cprintf("\nDone!\n");
void test_page_coloring(void)
    //Print the different cache properties of our machine
    print_cache_properties("L1", l1);
    print_cache_properties("L2", l2);
    print_cache_properties("L3", l3);

    //Print some stats about our memory
    cprintf("Max Address: %llu\n", MAX_VADDR);
    cprintf("Num Pages: %u\n", npages);

    //Declare a local variable for allocating pages
    cprintf("Contents of the page free list:\n");
    for(int i=0; i<llc_cache->num_colors; i++) {
        cprintf(" COLOR %d:\n", i);
        LIST_FOREACH(page, &colored_page_free_list[i], pg_link) {
            cprintf(" Page: %d\n", page2ppn(page));

    //Run through and allocate all pages through l1_page_alloc
    cprintf("Allocating from L1 page colors:\n");
    for(int i=0; i<get_cache_num_page_colors(l1); i++) {
        cprintf(" COLOR %d:\n", i);
        while(colored_page_alloc(l1, &page, i) != -ENOMEM)
            cprintf(" Page: %d\n", page2ppn(page));

    //Put all the pages back by reinitializing
    //Run through and allocate all pages through l2_page_alloc
    cprintf("Allocating from L2 page colors:\n");
    for(int i=0; i<get_cache_num_page_colors(l2); i++) {
        cprintf(" COLOR %d:\n", i);
        while(colored_page_alloc(l2, &page, i) != -ENOMEM)
            cprintf(" Page: %d\n", page2ppn(page));

    //Put all the pages back by reinitializing
    //Run through and allocate all pages through l3_page_alloc
    cprintf("Allocating from L3 page colors:\n");
    for(int i=0; i<get_cache_num_page_colors(l3); i++) {
        cprintf(" COLOR %d:\n", i);
        while(colored_page_alloc(l3, &page, i) != -ENOMEM)
            cprintf(" Page: %d\n", page2ppn(page));

    //Put all the pages back by reinitializing
    //Run through and allocate all pages through page_alloc
    cprintf("Allocating from global allocator:\n");
    while(upage_alloc(&page) != -ENOMEM)
        cprintf(" Page: %d\n", page2ppn(page));

    if(colored_page_alloc(l2, &page, 0) != -ENOMEM)
        cprintf("Should not get here, all pages should already be gone!\n");
    cprintf("All pages gone for sure...\n");

    //Now let's put a few pages back using page_free..
    cprintf("Reinserting pages via page_free and reallocating them...\n");
    page_free(&pages[0]);
    page_free(&pages[15]);
    page_free(&pages[7]);
    page_free(&pages[6]);
    page_free(&pages[4]);

    while(upage_alloc(&page) != -ENOMEM)
        cprintf("Page: %d\n", page2ppn(page));
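/* Sketch, not part of the original tests: the invariant the coloring test
 * leans on is that every page an allocator hands out can be returned with
 * page_free() and then allocated again.  A minimal round trip, assuming the
 * page_t typedef and the single-argument upage_alloc() used above. */
static void example_page_round_trip(void)
{
    page_t *page;

    if (upage_alloc(&page) == -ENOMEM)
        return; /* no free pages available right now */
    printk("Got ppn %d, giving it back\n", page2ppn(page));
    page_free(page);
}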
void test_color_alloc() {
    size_t checkpoint = 0;
    uint8_t* colors_map = kmalloc(BYTES_FOR_BITMASK(llc_cache->num_colors), 0);
    cache_color_alloc(l2, colors_map);
    cache_color_alloc(l3, colors_map);
    cache_color_alloc(l3, colors_map);
    cache_color_alloc(l2, colors_map);
    cache_color_free(llc_cache, colors_map);
    cache_color_free(llc_cache, colors_map);
    cache_color_free(llc_cache, colors_map);
    cache_color_free(llc_cache, colors_map);
    cache_color_free(llc_cache, colors_map);
    cache_color_free(llc_cache, colors_map);
    cache_color_free(llc_cache, colors_map);
    cache_color_free(llc_cache, colors_map);
    cache_color_free(llc_cache, colors_map);
    cache_color_free(llc_cache, colors_map);
    cache_color_free(llc_cache, colors_map);
    cache_color_free(llc_cache, colors_map);
    cache_color_free(llc_cache, colors_map);
    cache_color_free(llc_cache, colors_map);
    cache_color_free(llc_cache, colors_map);
    cache_color_free(llc_cache, colors_map);
    cache_color_free(l2, colors_map);
    cache_color_free(llc_cache, colors_map);
    cache_color_free(llc_cache, colors_map);

    printk("L1 free colors, tot colors: %d\n", l1->num_colors);
    PRINT_BITMASK(l1->free_colors_map, l1->num_colors);
    printk("L2 free colors, tot colors: %d\n", l2->num_colors);
    PRINT_BITMASK(l2->free_colors_map, l2->num_colors);
    printk("L3 free colors, tot colors: %d\n", l3->num_colors);
    PRINT_BITMASK(l3->free_colors_map, l3->num_colors);
    printk("Process allocated colors\n");
    PRINT_BITMASK(colors_map, llc_cache->num_colors);
    printk("test_color_alloc() complete!\n");
barrier_t test_cpu_array;

void test_barrier(void)
    cprintf("Core 0 initializing barrier\n");
    init_barrier(&test_cpu_array, num_cpus);
    cprintf("Core 0 asking all cores to print ids, barrier, rinse, repeat\n");
    smp_call_function_all(test_barrier_handler, NULL, 0);
void test_interrupts_irqsave(void)
    printd("Testing Nesting Enabling first, turning ints off:\n");
    printd("Interrupts are: %x\n", irq_is_enabled());
    assert(!irq_is_enabled());
    printd("Enabling IRQSave\n");
    enable_irqsave(&state);
    printd("Interrupts are: %x\n", irq_is_enabled());
    assert(irq_is_enabled());
    printd("Enabling IRQSave Again\n");
    enable_irqsave(&state);
    printd("Interrupts are: %x\n", irq_is_enabled());
    assert(irq_is_enabled());
    printd("Disabling IRQSave Once\n");
    disable_irqsave(&state);
    printd("Interrupts are: %x\n", irq_is_enabled());
    assert(irq_is_enabled());
    printd("Disabling IRQSave Again\n");
    disable_irqsave(&state);
    printd("Interrupts are: %x\n", irq_is_enabled());
    assert(!irq_is_enabled());
    printd("Done. Should have been 0, 200, 200, 200, 0\n");

    printd("Testing Nesting Disabling first, turning ints on:\n");
    printd("Interrupts are: %x\n", irq_is_enabled());
    assert(irq_is_enabled());
    printd("Disabling IRQSave Once\n");
    disable_irqsave(&state);
    printd("Interrupts are: %x\n", irq_is_enabled());
    assert(!irq_is_enabled());
    printd("Disabling IRQSave Again\n");
    disable_irqsave(&state);
    printd("Interrupts are: %x\n", irq_is_enabled());
    assert(!irq_is_enabled());
    printd("Enabling IRQSave Once\n");
    enable_irqsave(&state);
    printd("Interrupts are: %x\n", irq_is_enabled());
    assert(!irq_is_enabled());
    printd("Enabling IRQSave Again\n");
    enable_irqsave(&state);
    printd("Interrupts are: %x\n", irq_is_enabled());
    assert(irq_is_enabled());
    printd("Done. Should have been 200, 0, 0, 0, 200 \n");

    printd("Ints are off, enabling then disabling.\n");
    enable_irqsave(&state);
    printd("Interrupts are: %x\n", irq_is_enabled());
    assert(irq_is_enabled());
    disable_irqsave(&state);
    printd("Interrupts are: %x\n", irq_is_enabled());
    assert(!irq_is_enabled());
    printd("Done. Should have been 200, 0\n");

    printd("Ints are on, enabling then disabling.\n");
    enable_irqsave(&state);
    printd("Interrupts are: %x\n", irq_is_enabled());
    assert(irq_is_enabled());
    disable_irqsave(&state);
    printd("Interrupts are: %x\n", irq_is_enabled());
    assert(irq_is_enabled());
    printd("Done. Should have been 200, 200\n");

    printd("Ints are off, disabling then enabling.\n");
    disable_irqsave(&state);
    printd("Interrupts are: %x\n", irq_is_enabled());
    assert(!irq_is_enabled());
    enable_irqsave(&state);
    printd("Interrupts are: %x\n", irq_is_enabled());
    assert(!irq_is_enabled());
    printd("Done. Should have been 0, 0\n");

    printd("Ints are on, disabling then enabling.\n");
    disable_irqsave(&state);
    printd("Interrupts are: %x\n", irq_is_enabled());
    assert(!irq_is_enabled());
    enable_irqsave(&state);
    printd("Interrupts are: %x\n", irq_is_enabled());
    assert(irq_is_enabled());
    printd("Done. Should have been 0, 200\n");

    cprintf("Passed enable_irqsave tests\n");
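/* Sketch, not part of the original tests: the pairing rule the asserts above
 * check is that enable_irqsave()/disable_irqsave() nest, and the outermost
 * call of a pair restores whatever the IRQ state was on entry.  Typical use,
 * assuming the int8_t state convention used with these calls: */
static void example_irqsave_critical_section(void)
{
    int8_t state = 0;

    disable_irqsave(&state);  /* IRQs off; prior state remembered */
    /* ... touch data shared with interrupt handlers here ... */
    enable_irqsave(&state);   /* IRQs restored to their prior state */
}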
void test_bitmasks(void)
    DECL_BITMASK(mask, masksize);
    printk("size of mask %d\n", sizeof(mask));
    CLR_BITMASK(mask, masksize);
    PRINT_BITMASK(mask, masksize);
    SET_BITMASK_BIT(mask, 0);
    SET_BITMASK_BIT(mask, 11);
    SET_BITMASK_BIT(mask, 17);
    SET_BITMASK_BIT(mask, masksize-1);
    printk("bits set\n");
    PRINT_BITMASK(mask, masksize);
    DECL_BITMASK(mask2, masksize);
    COPY_BITMASK(mask2, mask, masksize);
    printk("copy of original mask, should be the same as the prev\n");
    PRINT_BITMASK(mask2, masksize);
    CLR_BITMASK_BIT(mask, 11);
    printk("11 cleared\n");
    PRINT_BITMASK(mask, masksize);
    printk("bit 17 is %d (should be 1)\n", GET_BITMASK_BIT(mask, 17));
    printk("bit 11 is %d (should be 0)\n", GET_BITMASK_BIT(mask, 11));
    FILL_BITMASK(mask, masksize);
    PRINT_BITMASK(mask, masksize);
    printk("should be all 1's, except for a few at the end\n");
    printk("Is Clear?: %d (should be 0)\n", BITMASK_IS_CLEAR(mask,masksize));
    CLR_BITMASK(mask, masksize);
    PRINT_BITMASK(mask, masksize);
    printk("Is Clear?: %d (should be 1)\n", BITMASK_IS_CLEAR(mask,masksize));
    printk("should be cleared\n");
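/* Sketch, not part of the original tests: a popcount-style walk over a
 * bitmask built from the same macros exercised above; the helper name and
 * the size are illustrative. */
static size_t example_count_bitmask_bits(void)
{
    DECL_BITMASK(ex_mask, 67);
    size_t count = 0;

    CLR_BITMASK(ex_mask, 67);
    SET_BITMASK_BIT(ex_mask, 3);
    SET_BITMASK_BIT(ex_mask, 64);
    for (int i = 0; i < 67; i++)
        if (GET_BITMASK_BIT(ex_mask, i))
            count++;
    return count; /* 2 in this example */
}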
checklist_t *RO the_global_list;

void test_checklist_handler(trapframe_t *tf, void* data)
    cprintf("down_checklist(%x,%d)\n", the_global_list, core_id());
    down_checklist(the_global_list);

void test_checklists(void)
    INIT_CHECKLIST(a_list, MAX_NUM_CPUS);
    the_global_list = &a_list;
    printk("Checklist Build, mask size: %d\n", sizeof(a_list.mask.bits));
    PRINT_BITMASK(a_list.mask.bits, a_list.mask.size);
    SET_BITMASK_BIT(a_list.mask.bits, 11);
    printk("Set bit 11\n");
    PRINT_BITMASK(a_list.mask.bits, a_list.mask.size);

    CLR_BITMASK(a_list.mask.bits, a_list.mask.size);
    INIT_CHECKLIST_MASK(a_mask, MAX_NUM_CPUS);
    FILL_BITMASK(a_mask.bits, num_cpus);
    //CLR_BITMASK_BIT(a_mask.bits, core_id());
    //SET_BITMASK_BIT(a_mask.bits, 1);
    //printk("New mask (1, 17, 25):\n");
    printk("Created new mask, filled up to num_cpus\n");
    PRINT_BITMASK(a_mask.bits, a_mask.size);
    printk("committing new mask\n");
    commit_checklist_wait(&a_list, &a_mask);
    printk("Old mask (copied onto):\n");
    PRINT_BITMASK(a_list.mask.bits, a_list.mask.size);
    //smp_call_function_single(1, test_checklist_handler, 0, 0);
    smp_call_function_all(test_checklist_handler, NULL, 0);

    printk("Waiting on checklist\n");
    waiton_checklist(&a_list);
    printk("Done Waiting!\n");
void test_incrementer_handler(trapframe_t *tf, atomic_t *data)

void test_incrementer_handler(trapframe_t *tf, void *data)

void test_null_handler(trapframe_t *tf, void* data)
void test_smp_call_functions(void)
    handler_wrapper_t *waiter0 = 0, *waiter1 = 0, *waiter2 = 0, *waiter3 = 0,
                      *waiter4 = 0, *waiter5 = 0;
    uint8_t me = core_id();
    printk("\nCore %d: SMP Call Self (nowait):\n", me);
    printk("---------------------\n");
    smp_call_function_self(test_hello_world_handler, NULL, 0);
    printk("\nCore %d: SMP Call Self (wait):\n", me);
    printk("---------------------\n");
    smp_call_function_self(test_hello_world_handler, NULL, &waiter0);
    smp_call_wait(waiter0);
    printk("\nCore %d: SMP Call All (nowait):\n", me);
    printk("---------------------\n");
    smp_call_function_all(test_hello_world_handler, NULL, 0);
    printk("\nCore %d: SMP Call All (wait):\n", me);
    printk("---------------------\n");
    smp_call_function_all(test_hello_world_handler, NULL, &waiter0);
    smp_call_wait(waiter0);
    printk("\nCore %d: SMP Call All-Else Individually, in order (nowait):\n", me);
    printk("---------------------\n");
    for(i = 1; i < num_cpus; i++)
        smp_call_function_single(i, test_hello_world_handler, NULL, 0);
    printk("\nCore %d: SMP Call Self (wait):\n", me);
    printk("---------------------\n");
    smp_call_function_self(test_hello_world_handler, NULL, &waiter0);
    smp_call_wait(waiter0);
    printk("\nCore %d: SMP Call All-Else Individually, in order (wait):\n", me);
    printk("---------------------\n");
    for(i = 1; i < num_cpus; i++)
        smp_call_function_single(i, test_hello_world_handler, NULL, &waiter0);
        smp_call_wait(waiter0);
    printk("\nTesting to see if any IPI-functions are dropped when not waiting:\n");
    printk("A: %d, B: %d, C: %d (should be 0,0,0)\n", atomic_read(&a), atomic_read(&b), atomic_read(&c));
    smp_call_function_all(test_incrementer_handler, &a, 0);
    smp_call_function_all(test_incrementer_handler, &b, 0);
    smp_call_function_all(test_incrementer_handler, &c, 0);
    // if I can clobber a previous IPI, the interleaving might do it
    smp_call_function_single(1 % num_cpus, test_incrementer_handler, &a, 0);
    smp_call_function_single(2 % num_cpus, test_incrementer_handler, &b, 0);
    smp_call_function_single(3 % num_cpus, test_incrementer_handler, &c, 0);
    smp_call_function_single(4 % num_cpus, test_incrementer_handler, &a, 0);
    smp_call_function_single(5 % num_cpus, test_incrementer_handler, &b, 0);
    smp_call_function_single(6 % num_cpus, test_incrementer_handler, &c, 0);
    smp_call_function_all(test_incrementer_handler, &a, 0);
    smp_call_function_single(3 % num_cpus, test_incrementer_handler, &c, 0);
    smp_call_function_all(test_incrementer_handler, &b, 0);
    smp_call_function_single(1 % num_cpus, test_incrementer_handler, &a, 0);
    smp_call_function_all(test_incrementer_handler, &c, 0);
    smp_call_function_single(2 % num_cpus, test_incrementer_handler, &b, 0);
    // wait, so we're sure the others finish before printing.
    // without this, we could (and did) get 19,18,19, since the B_inc
    // handler didn't finish yet
    smp_call_function_self(test_null_handler, NULL, &waiter0);
    // need to grab all 5 handlers (max), since the code moves to the next free.
    smp_call_function_self(test_null_handler, NULL, &waiter1);
    smp_call_function_self(test_null_handler, NULL, &waiter2);
    smp_call_function_self(test_null_handler, NULL, &waiter3);
    smp_call_function_self(test_null_handler, NULL, &waiter4);
    smp_call_wait(waiter0);
    smp_call_wait(waiter1);
    smp_call_wait(waiter2);
    smp_call_wait(waiter3);
    smp_call_wait(waiter4);
    printk("A: %d, B: %d, C: %d (should be 19,19,19)\n", atomic_read(&a), atomic_read(&b), atomic_read(&c));
    printk("Attempting to deadlock by smp_calling with an outstanding wait:\n");
    smp_call_function_self(test_null_handler, NULL, &waiter0);
    printk("Sent one\n");
    smp_call_function_self(test_null_handler, NULL, &waiter1);
    printk("Sent two\n");
    smp_call_wait(waiter0);
    printk("Wait one\n");
    smp_call_wait(waiter1);
    printk("Wait two\n");
    printk("\tMade it through!\n");
    printk("Attempting to deadlock by smp_calling more than are available:\n");
    printk("\tShould see an Insufficient message and a kernel warning.\n");
    if (smp_call_function_self(test_null_handler, NULL, &waiter0))
        printk("\tInsufficient handlers to call function (0)\n");
    if (smp_call_function_self(test_null_handler, NULL, &waiter1))
        printk("\tInsufficient handlers to call function (1)\n");
    if (smp_call_function_self(test_null_handler, NULL, &waiter2))
        printk("\tInsufficient handlers to call function (2)\n");
    if (smp_call_function_self(test_null_handler, NULL, &waiter3))
        printk("\tInsufficient handlers to call function (3)\n");
    if (smp_call_function_self(test_null_handler, NULL, &waiter4))
        printk("\tInsufficient handlers to call function (4)\n");
    if (smp_call_function_self(test_null_handler, NULL, &waiter5))
        printk("\tInsufficient handlers to call function (5)\n");
    smp_call_wait(waiter0);
    smp_call_wait(waiter1);
    smp_call_wait(waiter2);
    smp_call_wait(waiter3);
    smp_call_wait(waiter4);
    smp_call_wait(waiter5);
    printk("\tMade it through!\n");
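/* Sketch, not part of the original tests: as the "Insufficient handlers"
 * checks above show, the smp_call_* routines return nonzero when the finite
 * pool of handler wrappers is exhausted.  One hedged way to get call-and-wait
 * semantics anyway is to retry until a wrapper frees up (cpu_relax() is
 * assumed here as the usual busy-wait hint; real callers may prefer to fail). */
static void example_smp_call_self_and_wait(void (*handler)(trapframe_t *, void *),
                                           void *data)
{
    handler_wrapper_t *waiter = 0;

    while (smp_call_function_self(handler, data, &waiter))
        cpu_relax(); /* no wrapper free yet; try again */
    smp_call_wait(waiter);
}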
void test_lapic_status_bit(void)
    register_interrupt_handler(interrupt_handlers, I_TESTING,
                               test_incrementer_handler, &a);
    #define NUM_IPI 100000
    printk("IPIs received (should be 0): %d\n", a);
    for(int i = 0; i < NUM_IPI; i++) {
        send_ipi(get_hw_coreid(7), I_TESTING);
        lapic_wait_to_send();
    // need to wait a bit to let those IPIs get there
    printk("IPIs received (should be %d): %d\n", NUM_IPI, a);
    // hopefully that handler never fires again. leaving it registered for now.
/************************************************************/
/* ISR Handler Functions */

void test_hello_world_handler(trapframe_t *tf, void* data)
    #if defined(__i386__)
    trapno = tf->tf_trapno;
    #elif defined(__sparc_v8__)
    trapno = (tf->tbr >> 4) & 0xFF;
    cprintf("Incoming IRQ, ISR: %d on core %d with tf at 0x%08x\n",
            trapno, core_id(), tf);
spinlock_t print_info_lock = SPINLOCK_INITIALIZER;

void test_print_info_handler(trapframe_t *tf, void* data)
    uint64_t tsc = read_tsc();

    spin_lock_irqsave(&print_info_lock);
    cprintf("----------------------------\n");
    cprintf("This is Core %d\n", core_id());
    cprintf("Timestamp = %lld\n", tsc);
    cprintf("Hardware core %d\n", hw_core_id());
    cprintf("MTRR_DEF_TYPE = 0x%08x\n", read_msr(IA32_MTRR_DEF_TYPE));
    cprintf("MTRR Phys0 Base = 0x%016llx, Mask = 0x%016llx\n",
            read_msr(0x200), read_msr(0x201));
    cprintf("MTRR Phys1 Base = 0x%016llx, Mask = 0x%016llx\n",
            read_msr(0x202), read_msr(0x203));
    cprintf("MTRR Phys2 Base = 0x%016llx, Mask = 0x%016llx\n",
            read_msr(0x204), read_msr(0x205));
    cprintf("MTRR Phys3 Base = 0x%016llx, Mask = 0x%016llx\n",
            read_msr(0x206), read_msr(0x207));
    cprintf("MTRR Phys4 Base = 0x%016llx, Mask = 0x%016llx\n",
            read_msr(0x208), read_msr(0x209));
    cprintf("MTRR Phys5 Base = 0x%016llx, Mask = 0x%016llx\n",
            read_msr(0x20a), read_msr(0x20b));
    cprintf("MTRR Phys6 Base = 0x%016llx, Mask = 0x%016llx\n",
            read_msr(0x20c), read_msr(0x20d));
    cprintf("MTRR Phys7 Base = 0x%016llx, Mask = 0x%016llx\n",
            read_msr(0x20e), read_msr(0x20f));
    cprintf("----------------------------\n");
    spin_unlock_irqsave(&print_info_lock);
void test_barrier_handler(trapframe_t *tf, void* data)
    cprintf("Round 1: Core %d\n", core_id());
    waiton_barrier(&test_cpu_array);
    waiton_barrier(&test_cpu_array);
    waiton_barrier(&test_cpu_array);
    waiton_barrier(&test_cpu_array);
    waiton_barrier(&test_cpu_array);
    waiton_barrier(&test_cpu_array);
    cprintf("Round 2: Core %d\n", core_id());
    waiton_barrier(&test_cpu_array);
    cprintf("Round 3: Core %d\n", core_id());
    // uncomment to see it fucked up
    //cprintf("Round 4: Core %d\n", core_id());
static void test_waiting_handler(trapframe_t *tf, atomic_t *data)

static void test_waiting_handler(trapframe_t *tf, void *data)

    cprintf("Starting test for PIT now (10s)\n");
    udelay_pit(10000000);
    cprintf("End now\n");
    cprintf("Starting test for TSC (if stable) now (10s)\n");
    cprintf("End now\n");
    cprintf("Starting test for LAPIC (if stable) now (10s)\n");
    lapic_set_timer(10000000, FALSE);
    atomic_init(&waiting, 1);
    register_interrupt_handler(interrupt_handlers, I_TESTING,
                               test_waiting_handler, &waiting);
    while(atomic_read(&waiting))
    cprintf("End now\n");
void test_circ_buffer(void)
    int arr[5] = {0, 1, 2, 3, 4};

    for (int i = 0; i < 5; i++) {
        FOR_CIRC_BUFFER(i, 5, j)
            printk("Starting with current = %d, each value = %d\n", i, j);
void test_km_handler(trapframe_t* tf, uint32_t srcid, void *a0, void *a1,
    printk("Received KM on core %d from core %d: arg0= 0x%08x, arg1 = "
           "0x%08x, arg2 = 0x%08x\n", core_id(), srcid, a0, a1, a2);

void test_kernel_messages(void)
    printk("Testing Kernel Messages\n");
    /* Testing sending multiples, sending different types, alternating, and
     * precedence (the immediates should trump the others) */
    printk("sending 5 IMMED to core 1, sending (#,deadbeef,0)\n");
    for (int i = 0; i < 5; i++)
        send_kernel_message(1, test_km_handler, (void*)i, (void*)0xdeadbeef,
                            (void*)0, KMSG_IMMEDIATE);
    printk("sending 5 routine to core 1, sending (#,cafebabe,0)\n");
    for (int i = 0; i < 5; i++)
        send_kernel_message(1, test_km_handler, (void*)i, (void*)0xcafebabe,
                            (void*)0, KMSG_ROUTINE);
    printk("sending 10 routine and 3 immediate to core 2\n");
    for (int i = 0; i < 10; i++)
        send_kernel_message(2, test_km_handler, (void*)i, (void*)0xcafebabe,
                            (void*)0, KMSG_ROUTINE);
    for (int i = 0; i < 3; i++)
        send_kernel_message(2, test_km_handler, (void*)i, (void*)0xdeadbeef,
                            (void*)0, KMSG_IMMEDIATE);
    printk("sending 5 ea alternating to core 2\n");
    for (int i = 0; i < 5; i++) {
        send_kernel_message(2, test_km_handler, (void*)i, (void*)0xdeadbeef,
                            (void*)0, KMSG_IMMEDIATE);
        send_kernel_message(2, test_km_handler, (void*)i, (void*)0xcafebabe,
                            (void*)0, KMSG_ROUTINE);
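/* Sketch, not part of the original tests: the "precedence" the comment above
 * refers to is that KMSG_IMMEDIATE messages are handled when the IPI arrives,
 * while KMSG_ROUTINE messages sit queued until the destination core next
 * processes routine messages, which is why immediates can overtake earlier
 * routines.  A single hedged send, reusing test_km_handler and the same
 * argument shapes as above: */
static void example_send_one_routine_kmsg(uint32_t dst_core)
{
    send_kernel_message(dst_core, test_km_handler, (void*)1, (void*)0xcafebabe,
                        (void*)0, KMSG_ROUTINE);
}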
static void test_single_cache(int iters, size_t size, int align, int flags,
                              void (*ctor)(void *, size_t),
                              void (*dtor)(void *, size_t))
    struct kmem_cache *test_cache;
    void *objects[iters];
    test_cache = kmem_cache_create("test_cache", size, align, flags, ctor, dtor);
    printk("Testing Kmem Cache:\n");
    print_kmem_cache(test_cache);
    for (int i = 0; i < iters; i++) {
        objects[i] = kmem_cache_alloc(test_cache, 0);
        printk("Buffer %d addr = %p\n", i, objects[i]);
    for (int i = 0; i < iters; i++) {
        kmem_cache_free(test_cache, objects[i]);
    kmem_cache_destroy(test_cache);

void a_ctor(void *buf, size_t size)
    printk("constructin tests\n");

void a_dtor(void *buf, size_t size)
    printk("destructin tests\n");

    test_single_cache(10, 128, 512, 0, 0, 0);
    test_single_cache(10, 128, 4, 0, a_ctor, a_dtor);
    test_single_cache(10, 1024, 16, 0, 0, 0);
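/* Sketch, not part of the original tests: the same create/alloc/free/destroy
 * lifecycle that test_single_cache() walks through, but for a concrete struct
 * so the pairing is explicit.  The struct and cache name are illustrative. */
struct example_obj {
    int id;
    void *payload;
};

static void example_kmem_cache_usage(void)
{
    struct kmem_cache *cache;
    struct example_obj *obj;

    cache = kmem_cache_create("example_obj", sizeof(struct example_obj),
                              __alignof__(struct example_obj), 0, 0, 0);
    obj = kmem_cache_alloc(cache, 0);
    obj->id = 1;
    kmem_cache_free(cache, obj);
    kmem_cache_destroy(cache);
}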
void test_kmalloc(void)
    printk("Testing Kmalloc\n");
    void *bufs[NUM_KMALLOC_CACHES + 1];

    for (int i = 0; i < NUM_KMALLOC_CACHES + 1; i++){
        size = (KMALLOC_SMALLEST << i) - KMALLOC_OFFSET;
        bufs[i] = kmalloc(size, 0);
        printk("Size %d, Addr = %p\n", size, bufs[i]);
    for (int i = 0; i < NUM_KMALLOC_CACHES; i++) {
        printk("Freeing buffer %d\n", i);
    printk("Testing a large kmalloc\n");
    size = (KMALLOC_LARGEST << 2);
    bufs[0] = kmalloc(size, 0);
    printk("Size %d, Addr = %p\n", size, bufs[0]);
static size_t test_hash_fn_col(void *k)
    return (size_t)k % 2; // collisions in slots 0 and 1
void test_hashtable(void)
    struct test {int x; int y;};
    struct test tstruct[10];
    struct test *v = &tstruct[0];

    h = create_hashtable(32, __generic_hash, __generic_eq);

    // test inserting one item, then finding it again
    printk("Testing one item, insert, search, and removal\n");
    if(!hashtable_insert(h, (void*)k, v))
        printk("Failed to insert to hashtable!\n");
    if (!(v = hashtable_search(h, (void*)k)))
        printk("Failed to find in hashtable!\n");
    if (v != &tstruct[0])
        printk("Got the wrong item! (got %p, wanted %p)\n", v, &tstruct[0]);
    if (!(v = hashtable_remove(h, (void*)k)))
        printk("Failed to remove from hashtable!\n");
    // shouldn't be able to find it again
    if ((v = hashtable_search(h, (void*)k)))
        printk("Should not have been able to find in hashtable!\n");

    printk("Testing a bunch of items, insert, search, and removal\n");
    for (int i = 0; i < 10; i++) {
        k = i; // vary the key, we don't do KEY collisions
        if(!hashtable_insert(h, (void*)k, &tstruct[i]))
            printk("Failed to insert iter %d to hashtable!\n", i);
    // read out the 10 items
    for (int i = 0; i < 10; i++) {
        if (!(v = hashtable_search(h, (void*)k)))
            printk("Failed to find in hashtable!\n");
        if (v != &tstruct[i])
            printk("Got the wrong item! (got %p, wanted %p)\n", v, &tstruct[i]);
    if (hashtable_count(h) != 10)
        printk("Wrong accounting of number of elements!\n");
    // remove the 10 items
    for (int i = 0; i < 10; i++) {
        if (!(v = hashtable_remove(h, (void*)k)))
            printk("Failed to remove from hashtable!\n");
    // make sure they are all gone
    for (int i = 0; i < 10; i++) {
        if ((v = hashtable_search(h, (void*)k)))
            printk("Should not have been able to find in hashtable!\n");
    if (hashtable_count(h))
        printk("Wrong accounting of number of elements!\n");
    hashtable_destroy(h);

    // same test of a bunch of items, but with collisions.
    printk("Testing a bunch of items with collisions, etc.\n");
    h = create_hashtable(32, test_hash_fn_col, __generic_eq);
    for (int i = 0; i < 10; i++) {
        k = i; // vary the key, we don't do KEY collisions
        if(!hashtable_insert(h, (void*)k, &tstruct[i]))
            printk("Failed to insert iter %d to hashtable!\n", i);
    // read out the 10 items
    for (int i = 0; i < 10; i++) {
        if (!(v = hashtable_search(h, (void*)k)))
            printk("Failed to find in hashtable!\n");
        if (v != &tstruct[i])
            printk("Got the wrong item! (got %p, wanted %p)\n", v, &tstruct[i]);
    if (hashtable_count(h) != 10)
        printk("Wrong accounting of number of elements!\n");
    // remove the 10 items
    for (int i = 0; i < 10; i++) {
        if (!(v = hashtable_remove(h, (void*)k)))
            printk("Failed to remove from hashtable!\n");
    // make sure they are all gone
    for (int i = 0; i < 10; i++) {
        if ((v = hashtable_search(h, (void*)k)))
            printk("Should not have been able to find in hashtable!\n");
    if (hashtable_count(h))
        printk("Wrong accounting of number of elements!\n");
    hashtable_destroy(h);
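/* Sketch, not part of the original tests: the collision run above works
 * because the hash function only selects a bucket; the equality function
 * (__generic_eq here) is what tells keys apart, so many keys per bucket still
 * resolve correctly.  A minimal round trip, assuming the struct hashtable
 * handle type returned by create_hashtable(): */
static void example_hashtable_round_trip(void)
{
    struct hashtable *h = create_hashtable(32, __generic_hash, __generic_eq);
    int value = 42;

    if (!hashtable_insert(h, (void*)0x1234, &value))
        printk("example insert failed\n");
    if (hashtable_search(h, (void*)0x1234) != &value)
        printk("example lookup failed\n");
    hashtable_remove(h, (void*)0x1234);
    hashtable_destroy(h);
}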
/* Ghetto test, only tests one prod or consumer at a time */
    struct my_struct in_struct, out_struct;

    DEFINE_BCQ_TYPES(test, struct my_struct, 16);
    struct test_bcq t_bcq;
    bcq_init(&t_bcq, struct my_struct, 16);

    bcq_enqueue(&t_bcq, &in_struct, 16, 5);
    bcq_dequeue(&t_bcq, &out_struct, 16);
    printk("out x %d. out y %d\n", out_struct.x, out_struct.y);

    DEFINE_BCQ_TYPES(my, int, 8);
    bcq_init(&a_bcq, int, 8);

    for (int i = 0; i < 15; i++) {
        retval[i] = bcq_enqueue(&a_bcq, &y, 8, 10);
        printk("enqueued: %d, had retval %d \n", y, retval[i]);
    for (int i = 0; i < 15; i++) {
        retval[i] = bcq_dequeue(&a_bcq, &output[i], 8);
        printk("dequeued: %d with retval %d\n", output[i], retval[i]);
    for (int i = 0; i < 3; i++) {
        retval[i] = bcq_enqueue(&a_bcq, &y, 8, 10);
        printk("enqueued: %d, had retval %d \n", y, retval[i]);
    for (int i = 0; i < 5; i++) {
        retval[i] = bcq_dequeue(&a_bcq, &output[i], 8);
        printk("dequeued: %d with retval %d\n", output[i], retval[i]);
    for (int i = 0; i < 5; i++) {
        retval[i] = bcq_enqueue(&a_bcq, &y, 8, 10);
        printk("enqueued: %d, had retval %d \n", y, retval[i]);
        retval[i] = bcq_dequeue(&a_bcq, &output[i], 8);
        printk("dequeued: %d with retval %d\n", output[i], retval[i]);
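/* Sketch, not part of the original tests: the intended BCQ usage is one
 * producer and one consumer, each checking the return value (nonzero when the
 * queue is full or empty, which is what the retval printouts above rely on)
 * instead of assuming success.  The type name follows the <name>_bcq pattern
 * from DEFINE_BCQ_TYPES; the function name and sizes are illustrative. */
static void example_bcq_single_prod_cons(void)
{
    DEFINE_BCQ_TYPES(example, int, 4);
    struct example_bcq q;
    int in = 42, out = 0;

    bcq_init(&q, int, 4);
    if (bcq_enqueue(&q, &in, 4, 1))
        printk("example bcq was full\n");
    if (bcq_dequeue(&q, &out, 4))
        printk("example bcq was empty\n");
    printk("example bcq got %d\n", out);
}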
/* rudimentary tests. does the basics, create, merge, split, etc. Feel free to
 * add more, esp for the error conditions and finding free slots. This is also
 * a bit lazy with setting the caller's fields (perm, flags, etc). */
void test_vm_regions(void)
    #define MAX_VMR_TESTS 10
    struct proc pr, *p = &pr; /* too lazy to even create one */
    TAILQ_INIT(&p->vm_regions);

    int check_vmrs(struct proc *p, struct vmr_summary *results, int len, int n)
        struct vm_region *vmr;
        TAILQ_FOREACH(vmr, &p->vm_regions, vm_link) {
                printk("More vm_regions than expected\n");
            if ((vmr->vm_base != results[count].base) ||
                (vmr->vm_end != results[count].end)) {
                printk("VM test case %d failed!\n", n);

    struct vm_region *vmrs[MAX_VMR_TESTS];
    struct vmr_summary results[MAX_VMR_TESTS];
    memset(results, 0, sizeof(results));

    vmrs[0] = create_vmr(p, 0x2000, 0x1000);
    results[0].base = 0x2000;
    results[0].end = 0x3000;
    check_vmrs(p, results, 1, n++);
    grow_vmr(vmrs[0], 0x4000);
    results[0].base = 0x2000;
    results[0].end = 0x4000;
    check_vmrs(p, results, 1, n++);
    if (-1 != grow_vmr(vmrs[0], 0x3000))
        printk("Bad grow test failed\n");
    check_vmrs(p, results, 1, n++);
    /* Make another right next to it */
    vmrs[1] = create_vmr(p, 0x4000, 0x1000);
    results[1].base = 0x4000;
    results[1].end = 0x5000;
    check_vmrs(p, results, 2, n++);
    /* try to grow through it */
    if (-1 != grow_vmr(vmrs[0], 0x5000))
        printk("Bad grow test failed\n");
    check_vmrs(p, results, 2, n++);
    merge_vmr(vmrs[0], vmrs[1]);
    results[0].end = 0x5000;
    results[1].base = 0;
    check_vmrs(p, results, 1, n++);
    vmrs[1] = create_vmr(p, 0x6000, 0x4000);
    results[1].base = 0x6000;
    results[1].end = 0xa000;
    check_vmrs(p, results, 2, n++);
    /* try to merge unmergables (just testing ranges) */
    if (-1 != merge_vmr(vmrs[0], vmrs[1]))
        printk("Bad merge test failed\n");
    check_vmrs(p, results, 2, n++);
    vmrs[2] = split_vmr(vmrs[1], 0x8000);
    results[1].end = 0x8000;
    results[2].base = 0x8000;
    results[2].end = 0xa000;
    check_vmrs(p, results, 3, n++);
    destroy_vmr(vmrs[1]);
    results[1].base = 0x8000;
    results[1].end = 0xa000;
    check_vmrs(p, results, 2, n++);
    shrink_vmr(vmrs[2], 0x9000);
    results[1].base = 0x8000;
    results[1].end = 0x9000;
    check_vmrs(p, results, 2, n++); /* 10 */
    if (vmrs[2] != find_vmr(p, 0x8500))
        printk("Failed to find the right vmr!\n");
    if (vmrs[2] != find_first_vmr(p, 0x8500))
        printk("Failed to find the right vmr!\n");
    if (vmrs[2] != find_first_vmr(p, 0x7500))
        printk("Failed to find the right vmr!\n");
    if (find_first_vmr(p, 0x9500))
        printk("Found a vmr when we shouldn't!\n");
    /* grow up to another */
    grow_vmr(vmrs[0], 0x8000);
    results[0].end = 0x8000;
    check_vmrs(p, results, 2, n++);
    vmrs[0]->vm_prot = 88;
    vmrs[2]->vm_prot = 77;
    /* should be unmergeable due to perms */
    if (-1 != merge_vmr(vmrs[0], vmrs[2]))
        printk("Bad merge test failed\n");
    check_vmrs(p, results, 2, n++);
    /* should merge now */
    vmrs[2]->vm_prot = 88;
    merge_vmr(vmrs[0], vmrs[2]);
    results[0].end = 0x9000;
    check_vmrs(p, results, 1, n++);
    destroy_vmr(vmrs[0]);
    check_vmrs(p, results, 0, n++);
    /* Check the automerge function */
    vmrs[0] = create_vmr(p, 0x2000, 0x1000);
    vmrs[1] = create_vmr(p, 0x3000, 0x1000);
    vmrs[2] = create_vmr(p, 0x4000, 0x1000);
    for (int i = 0; i < 3; i++) {
        vmrs[i]->vm_prot = PROT_READ;
        vmrs[i]->vm_flags = 0;
        vmrs[i]->vm_file = 0; /* would like to test this, it's a pain for now */
    vmrs[0] = merge_me(vmrs[1]);
    results[0].base = 0x2000;
    results[0].end = 0x5000;
    check_vmrs(p, results, 1, n++);
    destroy_vmr(vmrs[0]);
    check_vmrs(p, results, 0, n++);
    /* Check unfixed creation requests */
    vmrs[0] = create_vmr(p, 0x0000, 0x1000);
    vmrs[1] = create_vmr(p, 0x0000, 0x1000);
    vmrs[2] = create_vmr(p, 0x0000, 0x1000);
    results[0].base = 0x0000;
    results[0].end = 0x1000;
    results[1].base = 0x1000;
    results[1].end = 0x2000;
    results[2].base = 0x2000;
    results[2].end = 0x3000;
    check_vmrs(p, results, 3, n++);

    printk("Finished vm_regions test!\n");
void test_radix_tree(void)
    struct radix_tree real_tree = RADIX_INITIALIZER;
    struct radix_tree *tree = &real_tree;

    if (radix_insert(tree, 3, (void*)0xdeadbeef))
        printk("Failed to insert first!\n");
    radix_insert(tree, 4, (void*)0x04040404);
    assert((void*)0xdeadbeef == radix_lookup(tree, 3));
    if (radix_insert(tree, 65, (void*)0xcafebabe))
        printk("Failed to insert a two-tier!\n");
    if (!radix_insert(tree, 4, (void*)0x03030303))
        printk("Should not let us reinsert\n");
    if (radix_insert(tree, 4095, (void*)0x4095))
        printk("Failed to insert a two-tier boundary!\n");
    if (radix_insert(tree, 4096, (void*)0x4096))
        printk("Failed to insert a three-tier!\n");
    //print_radix_tree(tree);
    radix_delete(tree, 65);
    radix_delete(tree, 3);
    radix_delete(tree, 4);
    radix_delete(tree, 4095);
    radix_delete(tree, 4096);
    //print_radix_tree(tree);
    printk("Finished radix tree tests!\n");
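/* Sketch, not part of the original tests: the indices above are chosen to
 * cross node boundaries -- 65 forces a second tier and 4096 a third, which is
 * consistent with 64 slots per radix node (64 * 64 = 4096).  A hedged round
 * trip using the same calls: */
static void example_radix_round_trip(void)
{
    struct radix_tree rt = RADIX_INITIALIZER;

    if (radix_insert(&rt, 70, (void*)0x70))
        printk("example radix insert failed\n");
    assert((void*)0x70 == radix_lookup(&rt, 70));
    radix_delete(&rt, 70);
}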