akaros/kern/src/ktest/pb_ktests.c
/*
 * Postboot kernel tests: tests to be run after boot in kernel mode.
 * TODO: Some of the tests here may not necessarily be tests to be run after
 *       boot. If that is the case, move them to the appropriate suite.
 */

#include <arch/mmu.h>
#include <arch/arch.h>
#include <arch/uaccess.h>
#include <bitmask.h>
#include <smp.h>

#include <ros/memlayout.h>
#include <ros/common.h>
#include <ros/bcq.h>
#include <ros/ucq.h>

#include <atomic.h>
#include <stdio.h>
#include <assert.h>
#include <string.h>
#include <testing.h>
#include <trap.h>
#include <process.h>
#include <syscall.h>
#include <time.h>
#include <mm.h>
#include <multiboot.h>
#include <pmap.h>
#include <page_alloc.h>
#include <slab.h>
#include <kmalloc.h>
#include <hashtable.h>
#include <circular_buffer.h>
#include <monitor.h>
#include <kthread.h>
#include <schedule.h>
#include <umem.h>
#include <init.h>
#include <ucq.h>
#include <setjmp.h>
#include <sort.h>

#include <apipe.h>
#include <rwlock.h>
#include <rendez.h>
#include <ktest.h>
#include <smallidpool.h>
#include <linker_func.h>

KTEST_SUITE("POSTBOOT")

#ifdef CONFIG_X86

// TODO: Do the test inside this function if possible, and add assertions.
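/* Sanity check of each IPI flavor on x86: we register a hello-world handler
 * on vector I_TESTING, then exercise broadcast, all-but-self, self, physical
 * destination, and logical (group) destination sends.  The udelay() between
 * sends is only there so the printed output from each round stays readable. */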
bool test_ipi_sending(void)
{
        int8_t state = 0;

        register_irq(I_TESTING, test_hello_world_handler, NULL,
                     MKBUS(BusIPI, 0, 0, 0));
        enable_irqsave(&state);
        cprintf("\nCORE 0 sending broadcast\n");
        send_broadcast_ipi(I_TESTING);
        udelay(3000000);
        cprintf("\nCORE 0 sending all others\n");
        send_all_others_ipi(I_TESTING);
        udelay(3000000);
        cprintf("\nCORE 0 sending self\n");
        send_self_ipi(I_TESTING);
        udelay(3000000);
        cprintf("\nCORE 0 sending ipi to physical 1\n");
        send_ipi(0x01, I_TESTING);
        udelay(3000000);
        cprintf("\nCORE 0 sending ipi to physical 2\n");
        send_ipi(0x02, I_TESTING);
        udelay(3000000);
        cprintf("\nCORE 0 sending ipi to physical 3\n");
        send_ipi(0x03, I_TESTING);
        udelay(3000000);
        cprintf("\nCORE 0 sending ipi to physical 15\n");
        send_ipi(0x0f, I_TESTING);
        udelay(3000000);
        cprintf("\nCORE 0 sending ipi to logical 2\n");
        send_group_ipi(0x02, I_TESTING);
        udelay(3000000);
        cprintf("\nCORE 0 sending ipi to logical 1\n");
        send_group_ipi(0x01, I_TESTING);
        udelay(3000000);
        cprintf("\nDone!\n");
        disable_irqsave(&state);

        return true;
}

// TODO: Refactor to make it return, and add assertions.
// Note this never returns and will muck with any other timer work.
bool test_pic_reception(void)
{
        register_irq(IdtPIC + IrqCLOCK, test_hello_world_handler, NULL,
                     MKBUS(BusISA, 0, 0, 0));
        pit_set_timer(100, TIMER_RATEGEN); // totally arbitrary time
        pic_unmask_irq(0, 0);
        cprintf("PIC1 Mask = 0x%04x\n", inb(PIC1_DATA));
        cprintf("PIC2 Mask = 0x%04x\n", inb(PIC2_DATA));
        unmask_lapic_lvt(MSR_LAPIC_LVT_LINT0);
        printk("Core %d's LINT0: 0x%08x\n", core_id(),
               apicrget(MSR_LAPIC_LVT_LINT0));
        enable_irq();
        while (1);

        return true;
}

#endif // CONFIG_X86

barrier_t test_cpu_array;

// TODO: Add assertions, try to do everything from within this same function.
bool test_barrier(void)
{
        cprintf("Core 0 initializing barrier\n");
        init_barrier(&test_cpu_array, num_cores);
        printk("Core 0 asking all cores to print ids, barrier, etc\n");
        smp_call_function_all(test_barrier_handler, NULL, 0);

        return true;
}

// TODO: Maybe remove all the printing statements and instead use the
//       KT_ASSERT_M macro to include a message on assertions.
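/* The irqsave pair has save/restore semantics: a balanced sequence of
 * enable_irqsave()/disable_irqsave() calls always restores the IRQ state that
 * existed before the first call of the pair; the int8_t counter tracks
 * nesting depth and direction.  That contract is exactly what the assertion
 * sequences below walk through.  The "0 / 200" values in the printds are
 * irq_is_enabled() reporting the x86 EFLAGS IF bit (0x200). */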
bool test_interrupts_irqsave(void)
{
        int8_t state = 0;

        printd("Testing Nesting Enabling first, turning ints off:\n");
        disable_irq();
        printd("Interrupts are: %x\n", irq_is_enabled());
        KT_ASSERT(!irq_is_enabled());
        printd("Enabling IRQSave\n");
        enable_irqsave(&state);
        printd("Interrupts are: %x\n", irq_is_enabled());
        KT_ASSERT(irq_is_enabled());
        printd("Enabling IRQSave Again\n");
        enable_irqsave(&state);
        printd("Interrupts are: %x\n", irq_is_enabled());
        KT_ASSERT(irq_is_enabled());
        printd("Disabling IRQSave Once\n");
        disable_irqsave(&state);
        printd("Interrupts are: %x\n", irq_is_enabled());
        KT_ASSERT(irq_is_enabled());
        printd("Disabling IRQSave Again\n");
        disable_irqsave(&state);
        printd("Interrupts are: %x\n", irq_is_enabled());
        KT_ASSERT(!irq_is_enabled());
        printd("Done.  Should have been 0, 200, 200, 200, 0\n");

        printd("Testing Nesting Disabling first, turning ints on:\n");
        state = 0;
        enable_irq();
        printd("Interrupts are: %x\n", irq_is_enabled());
        KT_ASSERT(irq_is_enabled());
        printd("Disabling IRQSave Once\n");
        disable_irqsave(&state);
        printd("Interrupts are: %x\n", irq_is_enabled());
        KT_ASSERT(!irq_is_enabled());
        printd("Disabling IRQSave Again\n");
        disable_irqsave(&state);
        printd("Interrupts are: %x\n", irq_is_enabled());
        KT_ASSERT(!irq_is_enabled());
        printd("Enabling IRQSave Once\n");
        enable_irqsave(&state);
        printd("Interrupts are: %x\n", irq_is_enabled());
        KT_ASSERT(!irq_is_enabled());
        printd("Enabling IRQSave Again\n");
        enable_irqsave(&state);
        printd("Interrupts are: %x\n", irq_is_enabled());
        KT_ASSERT(irq_is_enabled());
        printd("Done.  Should have been 200, 0, 0, 0, 200\n");

        state = 0;
        disable_irq();
        printd("Ints are off, enabling then disabling.\n");
        enable_irqsave(&state);
        printd("Interrupts are: %x\n", irq_is_enabled());
        KT_ASSERT(irq_is_enabled());
        disable_irqsave(&state);
        printd("Interrupts are: %x\n", irq_is_enabled());
        KT_ASSERT(!irq_is_enabled());
        printd("Done.  Should have been 200, 0\n");

        state = 0;
        enable_irq();
        printd("Ints are on, enabling then disabling.\n");
        enable_irqsave(&state);
        printd("Interrupts are: %x\n", irq_is_enabled());
        KT_ASSERT(irq_is_enabled());
        disable_irqsave(&state);
        printd("Interrupts are: %x\n", irq_is_enabled());
        KT_ASSERT(irq_is_enabled());
        printd("Done.  Should have been 200, 200\n");

        state = 0;
        disable_irq();
        printd("Ints are off, disabling then enabling.\n");
        disable_irqsave(&state);
        printd("Interrupts are: %x\n", irq_is_enabled());
        KT_ASSERT(!irq_is_enabled());
        enable_irqsave(&state);
        printd("Interrupts are: %x\n", irq_is_enabled());
        KT_ASSERT(!irq_is_enabled());
        printd("Done.  Should have been 0, 0\n");

        state = 0;
        enable_irq();
        printd("Ints are on, disabling then enabling.\n");
        disable_irqsave(&state);
        printd("Interrupts are: %x\n", irq_is_enabled());
        KT_ASSERT(!irq_is_enabled());
        enable_irqsave(&state);
        printd("Interrupts are: %x\n", irq_is_enabled());
        KT_ASSERT(irq_is_enabled());
        printd("Done.  Should have been 0, 200\n");

        disable_irq();
        return true;
}

// TODO: Maybe remove PRINT_BITMASK statements and use KT_ASSERT_M instead
//       somehow.
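/* A masksize of 67 bits is not a multiple of the word size, so the mask's
 * last word is only partially used; SET/CLR/COPY/FILL/IS_CLEAR all get
 * exercised on that ragged tail as well as on whole words. */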
bool test_bitmasks(void)
{
#define masksize 67
        DECL_BITMASK(mask, masksize);
        CLR_BITMASK(mask, masksize);
//      PRINT_BITMASK(mask, masksize);
        SET_BITMASK_BIT(mask, 0);
        SET_BITMASK_BIT(mask, 11);
        SET_BITMASK_BIT(mask, 17);
        SET_BITMASK_BIT(mask, masksize - 1);
//      PRINT_BITMASK(mask, masksize);
        DECL_BITMASK(mask2, masksize);
        COPY_BITMASK(mask2, mask, masksize);
//      printk("copy of original mask, should be the same as the prev\n");
//      PRINT_BITMASK(mask2, masksize);
        CLR_BITMASK_BIT(mask, 11);
//      PRINT_BITMASK(mask, masksize);
        KT_ASSERT_M("Bit 17 should be 1", 1 == GET_BITMASK_BIT(mask, 17));
        KT_ASSERT_M("Bit 11 should be 0", 0 == GET_BITMASK_BIT(mask, 11));
        FILL_BITMASK(mask, masksize);
//      PRINT_BITMASK(mask, masksize);
        KT_ASSERT_M("Bitmask should not be clear after calling FILL_BITMASK",
                    0 == BITMASK_IS_CLEAR(mask, masksize));
        CLR_BITMASK(mask, masksize);
//      PRINT_BITMASK(mask, masksize);
        KT_ASSERT_M("Bitmask should be clear after calling CLR_BITMASK",
                    1 == BITMASK_IS_CLEAR(mask, masksize));
        return true;
}

checklist_t *the_global_list;

static void test_checklist_handler(struct hw_trapframe *hw_tf, void *data)
{
        udelay(1000000);
        cprintf("down_checklist(%x,%d)\n", the_global_list, core_id());
        down_checklist(the_global_list);
}

// TODO: Add assertions
bool test_checklists(void)
{
        INIT_CHECKLIST(a_list, MAX_NUM_CORES);
        the_global_list = &a_list;
        printk("Checklist Build, mask size: %d\n", sizeof(a_list.mask.bits));
        printk("mask\n");
        PRINT_BITMASK(a_list.mask.bits, a_list.mask.size);
        SET_BITMASK_BIT(a_list.mask.bits, 11);
        printk("Set bit 11\n");
        PRINT_BITMASK(a_list.mask.bits, a_list.mask.size);

        CLR_BITMASK(a_list.mask.bits, a_list.mask.size);
        INIT_CHECKLIST_MASK(a_mask, MAX_NUM_CORES);
        FILL_BITMASK(a_mask.bits, num_cores);
        //CLR_BITMASK_BIT(a_mask.bits, core_id());
        //SET_BITMASK_BIT(a_mask.bits, 1);
        //printk("New mask (1, 17, 25):\n");
        printk("Created new mask, filled up to num_cores\n");
        PRINT_BITMASK(a_mask.bits, a_mask.size);
        printk("committing new mask\n");
        commit_checklist_wait(&a_list, &a_mask);
        printk("Old mask (copied onto):\n");
        PRINT_BITMASK(a_list.mask.bits, a_list.mask.size);
        //smp_call_function_single(1, test_checklist_handler, 0, 0);

        smp_call_function_all(test_checklist_handler, NULL, 0);

        printk("Waiting on checklist\n");
        waiton_checklist(&a_list);
        printk("Done Waiting!\n");

        return true;
}

atomic_t a, b, c;

static void test_incrementer_handler(struct hw_trapframe *tf, void *data)
{
        assert(data);
        atomic_inc(data);
}

static void test_null_handler(struct hw_trapframe *tf, void *data)
{
        asm volatile("nop");
}

// TODO: Add assertions.
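/* Exercises the smp_call_function_* variants, both fire-and-forget (passing
 * 0 for the wrapper) and synchronous (passing a handler_wrapper_t** and later
 * smp_call_wait()ing on it).  Note the "should be 19,19,19" below assumes an
 * 8-core machine: each of a, b, and c gets two all-core broadcasts plus three
 * targeted calls, i.e. 2 * num_cores + 3 increments. */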
bool test_smp_call_functions(void)
{
        int i;

        atomic_init(&a, 0);
        atomic_init(&b, 0);
        atomic_init(&c, 0);
        handler_wrapper_t *waiter0 = 0, *waiter1 = 0, *waiter2 = 0,
                          *waiter3 = 0, *waiter4 = 0, *waiter5 = 0;
        uint8_t me = core_id();

        printk("\nCore %d: SMP Call Self (nowait):\n", me);
        printk("---------------------\n");
        smp_call_function_self(test_hello_world_handler, NULL, 0);
        printk("\nCore %d: SMP Call Self (wait):\n", me);
        printk("---------------------\n");
        smp_call_function_self(test_hello_world_handler, NULL, &waiter0);
        smp_call_wait(waiter0);
        printk("\nCore %d: SMP Call All (nowait):\n", me);
        printk("---------------------\n");
        smp_call_function_all(test_hello_world_handler, NULL, 0);
        printk("\nCore %d: SMP Call All (wait):\n", me);
        printk("---------------------\n");
        smp_call_function_all(test_hello_world_handler, NULL, &waiter0);
        smp_call_wait(waiter0);
        printk("\nCore %d: SMP Call All-Else Individually, in order (nowait):\n",
               me);
        printk("---------------------\n");
        for (i = 1; i < num_cores; i++)
                smp_call_function_single(i, test_hello_world_handler, NULL, 0);
        printk("\nCore %d: SMP Call Self (wait):\n", me);
        printk("---------------------\n");
        smp_call_function_self(test_hello_world_handler, NULL, &waiter0);
        smp_call_wait(waiter0);
        printk("\nCore %d: SMP Call All-Else Individually, in order (wait):\n",
               me);
        printk("---------------------\n");
        for (i = 1; i < num_cores; i++) {
                smp_call_function_single(i, test_hello_world_handler, NULL,
                                         &waiter0);
                smp_call_wait(waiter0);
        }
        printk("\nTesting to see if any IPI-functions are dropped when not waiting:\n");
        printk("A: %d, B: %d, C: %d (should be 0,0,0)\n", atomic_read(&a),
               atomic_read(&b), atomic_read(&c));
        smp_call_function_all(test_incrementer_handler, &a, 0);
        smp_call_function_all(test_incrementer_handler, &b, 0);
        smp_call_function_all(test_incrementer_handler, &c, 0);
        // If we can clobber a previous IPI, the interleaving might do it.
        smp_call_function_single(1 % num_cores, test_incrementer_handler, &a,
                                 0);
        smp_call_function_single(2 % num_cores, test_incrementer_handler, &b,
                                 0);
        smp_call_function_single(3 % num_cores, test_incrementer_handler, &c,
                                 0);
        smp_call_function_single(4 % num_cores, test_incrementer_handler, &a,
                                 0);
        smp_call_function_single(5 % num_cores, test_incrementer_handler, &b,
                                 0);
        smp_call_function_single(6 % num_cores, test_incrementer_handler, &c,
                                 0);
        smp_call_function_all(test_incrementer_handler, &a, 0);
        smp_call_function_single(3 % num_cores, test_incrementer_handler, &c,
                                 0);
        smp_call_function_all(test_incrementer_handler, &b, 0);
        smp_call_function_single(1 % num_cores, test_incrementer_handler, &a,
                                 0);
        smp_call_function_all(test_incrementer_handler, &c, 0);
        smp_call_function_single(2 % num_cores, test_incrementer_handler, &b,
                                 0);
        // Wait, so we're sure the others finish before printing.  Without
        // this, we could (and did) get 19,18,19, since the incrementer of b
        // hadn't finished yet.
        smp_call_function_self(test_null_handler, NULL, &waiter0);
        // Need to grab all 5 handlers (max), since the code moves to the next
        // free.
        smp_call_function_self(test_null_handler, NULL, &waiter1);
        smp_call_function_self(test_null_handler, NULL, &waiter2);
        smp_call_function_self(test_null_handler, NULL, &waiter3);
        smp_call_function_self(test_null_handler, NULL, &waiter4);
        smp_call_wait(waiter0);
        smp_call_wait(waiter1);
        smp_call_wait(waiter2);
        smp_call_wait(waiter3);
        smp_call_wait(waiter4);
        printk("A: %d, B: %d, C: %d (should be 19,19,19)\n", atomic_read(&a),
               atomic_read(&b), atomic_read(&c));
        printk("Attempting to deadlock by smp_calling with an outstanding wait:\n");
        smp_call_function_self(test_null_handler, NULL, &waiter0);
        printk("Sent one\n");
        smp_call_function_self(test_null_handler, NULL, &waiter1);
        printk("Sent two\n");
        smp_call_wait(waiter0);
        printk("Wait one\n");
        smp_call_wait(waiter1);
        printk("Wait two\n");
        printk("\tMade it through!\n");
        printk("Attempting to deadlock by smp_calling more than are available:\n");
        printk("\tShould see an Insufficient message and a kernel warning.\n");
        if (smp_call_function_self(test_null_handler, NULL, &waiter0))
                printk("\tInsufficient handlers to call function (0)\n");
        if (smp_call_function_self(test_null_handler, NULL, &waiter1))
                printk("\tInsufficient handlers to call function (1)\n");
        if (smp_call_function_self(test_null_handler, NULL, &waiter2))
                printk("\tInsufficient handlers to call function (2)\n");
        if (smp_call_function_self(test_null_handler, NULL, &waiter3))
                printk("\tInsufficient handlers to call function (3)\n");
        if (smp_call_function_self(test_null_handler, NULL, &waiter4))
                printk("\tInsufficient handlers to call function (4)\n");
        if (smp_call_function_self(test_null_handler, NULL, &waiter5))
                printk("\tInsufficient handlers to call function (5)\n");
        smp_call_wait(waiter0);
        smp_call_wait(waiter1);
        smp_call_wait(waiter2);
        smp_call_wait(waiter3);
        smp_call_wait(waiter4);
        smp_call_wait(waiter5);
        printk("\tMade it through!\n");

        printk("Done\n");

        return true;
}

#ifdef CONFIG_X86
// TODO: Fix the KT_ASSERTs
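/* Stresses IPI delivery by spamming a single target: the handler bumps the
 * atomic counter 'a', and if the LAPIC's delivery-status handshaking works,
 * none of the NUM_IPI sends should be lost.  Note the destination (7) is a
 * hard-coded physical APIC id, so this expects a machine with such a core. */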
bool test_lapic_status_bit(void)
{
        register_irq(I_TESTING, test_incrementer_handler, &a,
                     MKBUS(BusIPI, 0, 0, 0));
        #define NUM_IPI 100000
        atomic_set(&a, 0);
        printk("IPIs received (should be 0): %d\n", atomic_read(&a));
        // KT_ASSERT_M("IPIs received should be 0", (0 == atomic_read(&a)));
        for (int i = 0; i < NUM_IPI; i++) {
                send_ipi(7, I_TESTING);
        }
        // need to wait a bit to let those IPIs get there
        udelay(5000000);
        printk("IPIs received (should be %d): %d\n", NUM_IPI, atomic_read(&a));
        // KT_ASSERT_M("IPIs received should be 100000",
        //             (NUM_IPI == atomic_read(&a)));
        // hopefully that handler never fires again.  leaving it registered for
        // now.

        return true;
}
#endif // CONFIG_X86

/************************************************************/
/* ISR Handler Functions */

void test_hello_world_handler(struct hw_trapframe *hw_tf, void *data)
{
        int trapno;
        #if defined(CONFIG_X86)
        trapno = hw_tf->tf_trapno;
        #else
        trapno = 0;
        #endif

        cprintf("Incoming IRQ, ISR: %d on core %d with tf at %p\n",
                trapno, core_id(), hw_tf);
}

void test_barrier_handler(struct hw_trapframe *hw_tf, void *data)
{
        cprintf("Round 1: Core %d\n", core_id());
        waiton_barrier(&test_cpu_array);
        waiton_barrier(&test_cpu_array);
        waiton_barrier(&test_cpu_array);
        waiton_barrier(&test_cpu_array);
        waiton_barrier(&test_cpu_array);
        waiton_barrier(&test_cpu_array);
        cprintf("Round 2: Core %d\n", core_id());
        waiton_barrier(&test_cpu_array);
        cprintf("Round 3: Core %d\n", core_id());
        // uncomment to see it fucked up
        //cprintf("Round 4: Core %d\n", core_id());
}

static void test_waiting_handler(struct hw_trapframe *hw_tf, void *data)
{
        atomic_dec(data);
}

#ifdef CONFIG_X86
// TODO: Add assertions.
bool test_pit(void)
{
        cprintf("Starting test for PIT now (10s)\n");
        udelay_pit(10000000);
        cprintf("End now\n");
        cprintf("Starting test for TSC (if stable) now (10s)\n");
        udelay(10000000);
        cprintf("End now\n");

        cprintf("Starting test for LAPIC (if stable) now (10s)\n");
        enable_irq();
        lapic_set_timer(10000000, FALSE);

        atomic_t waiting;
        atomic_init(&waiting, 1);
        register_irq(I_TESTING, test_waiting_handler, &waiting,
                     MKBUS(BusIPI, 0, 0, 0));
        while (atomic_read(&waiting))
                cpu_relax();
        cprintf("End now\n");

        return true;
}

// TODO: Add assertions.
bool test_circ_buffer(void)
{
        int arr[5] = {0, 1, 2, 3, 4};

        for (int i = 0; i < 5; i++) {
                FOR_CIRC_BUFFER(i, 5, j)
                        printk("Starting with current = %d, each value = %d\n",
                               i, j);
        }

        return true;
}

static void test_km_handler(uint32_t srcid, long a0, long a1, long a2)
{
        printk("Received KM on core %d from core %d: arg0 = %p, arg1 = %p, "
               "arg2 = %p\n", core_id(), srcid, a0, a1, a2);
        return;
}
// TODO: Add assertions. Try to do everything inside this function.
bool test_kernel_messages(void)
{
        printk("Testing Kernel Messages\n");
        /* Testing sending multiples, sending different types, alternating,
         * and precedence (the immediates should trump the others) */
        printk("sending 5 IMMED to core 1, sending (#,deadbeef,0)\n");
        for (int i = 0; i < 5; i++)
                send_kernel_message(1, test_km_handler, (long)i, 0xdeadbeef, 0,
                                    KMSG_IMMEDIATE);
        udelay(5000000);
        printk("sending 5 routine to core 1, sending (#,cafebabe,0)\n");
        for (int i = 0; i < 5; i++)
                send_kernel_message(1, test_km_handler, (long)i, 0xcafebabe, 0,
                                    KMSG_ROUTINE);
        udelay(5000000);
        printk("sending 10 routine and 3 immediate to core 2\n");
        for (int i = 0; i < 10; i++)
                send_kernel_message(2, test_km_handler, (long)i, 0xcafebabe, 0,
                                    KMSG_ROUTINE);
        for (int i = 0; i < 3; i++)
                send_kernel_message(2, test_km_handler, (long)i, 0xdeadbeef, 0,
                                    KMSG_IMMEDIATE);
        udelay(5000000);
        printk("sending 5 ea alternating to core 2\n");
        for (int i = 0; i < 5; i++) {
                send_kernel_message(2, test_km_handler, (long)i, 0xdeadbeef, 0,
                                    KMSG_IMMEDIATE);
                send_kernel_message(2, test_km_handler, (long)i, 0xcafebabe, 0,
                                    KMSG_ROUTINE);
        }
        udelay(5000000);

        return true;
}
#endif // CONFIG_X86

static size_t test_hash_fn_col(void *k)
{
        return (size_t)k % 2; // collisions in slots 0 and 1
}

bool test_hashtable(void)
{
        struct test {int x; int y;};
        struct test tstruct[10];

        struct hashtable *h;
        uintptr_t k = 5;
        struct test *v = &tstruct[0];

        h = create_hashtable(32, __generic_hash, __generic_eq);

        // test inserting one item, then finding it again
        KT_ASSERT_M("It should be possible to insert items to a hashtable",
                    hashtable_insert(h, (void*)k, v));
        v = NULL;
        KT_ASSERT_M("It should be possible to find inserted stuff in a hashtable",
                    (v = hashtable_search(h, (void*)k)));

        KT_ASSERT_M("The extracted element should be the same we inserted",
                    (v == &tstruct[0]));

        v = NULL;

        KT_ASSERT_M("It should be possible to remove an existing element",
                    (v = hashtable_remove(h, (void*)k)));

        KT_ASSERT_M("An element should not remain in a hashtable after deletion",
                    !(v = hashtable_search(h, (void*)k)));

        /* Testing a bunch of items, insert, search, and removal */
        for (int i = 0; i < 10; i++) {
                k = i; // vary the key, we don't do KEY collisions
                KT_ASSERT_M("It should be possible to insert elements to a hashtable",
                            (hashtable_insert(h, (void*)k, &tstruct[i])));
        }
        // read out the 10 items
        for (int i = 0; i < 10; i++) {
                k = i;
                KT_ASSERT_M("It should be possible to find inserted stuff in a hashtable",
                            (v = hashtable_search(h, (void*)k)));
                KT_ASSERT_M("The extracted element should be the same we inserted",
                            (v == &tstruct[i]));
        }

        KT_ASSERT_M("The total count of number of elements should be 10",
                    (10 == hashtable_count(h)));

        // remove the 10 items
        for (int i = 0; i < 10; i++) {
                k = i;
                KT_ASSERT_M("It should be possible to remove an existing element",
                            (v = hashtable_remove(h, (void*)k)));
        }
        // make sure they are all gone
        for (int i = 0; i < 10; i++) {
                k = i;
                KT_ASSERT_M("An element should not remain in a hashtable after deletion",
                            !(v = hashtable_search(h, (void*)k)));
        }

        KT_ASSERT_M("The hashtable should be empty",
                    (0 == hashtable_count(h)));

        hashtable_destroy(h);

        /* Same test with a bunch of items, but now with collisions: the hash
         * fn forces everything into two buckets. */
        h = create_hashtable(32, test_hash_fn_col, __generic_eq);
        // insert 10 items
        for (int i = 0; i < 10; i++) {
                k = i; // vary the key, we don't do KEY collisions
                KT_ASSERT_M("It should be possible to insert elements to a hashtable",
                            (hashtable_insert(h, (void*)k, &tstruct[i])));
        }
        // read out the 10 items
        for (int i = 0; i < 10; i++) {
                k = i;
                KT_ASSERT_M("It should be possible to find inserted stuff in a hashtable",
                            (v = hashtable_search(h, (void*)k)));
                KT_ASSERT_M("The extracted element should be the same we inserted",
                            (v == &tstruct[i]));
        }

        KT_ASSERT_M("The total count of number of elements should be 10",
                    (10 == hashtable_count(h)));

        // remove the 10 items
        for (int i = 0; i < 10; i++) {
                k = i;
                KT_ASSERT_M("It should be possible to remove an existing element",
                            (v = hashtable_remove(h, (void*)k)));
        }
        // make sure they are all gone
        for (int i = 0; i < 10; i++) {
                k = i;
                KT_ASSERT_M("An element should not remain in a hashtable after deletion",
                            !(v = hashtable_search(h, (void*)k)));
        }

        KT_ASSERT_M("The hashtable should be empty",
                    (0 == hashtable_count(h)));

        hashtable_destroy(h);

        return true;
}

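/* Three phases: (1) write far more variable-length text records than fit so
 * the buffer wraps and drops old records, then verify the numbers read back
 * are still ascending; (2) the same with fixed-size 256-byte records,
 * verifying the sequence byte of each record; (3) a single write of
 * circular_buffer_max_write_size() bytes, read back and verified. */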
bool test_circular_buffer(void)
{
        static const size_t cbsize = 4096;
        struct circular_buffer cb;
        char *bigbuf;
        size_t csize, off, cnum, mxsize;
        char buf[256];

        KT_ASSERT_M("Failed to build the circular buffer",
                    circular_buffer_init(&cb, cbsize, NULL));

        for (size_t i = 0; i < 8 * cbsize; i++) {
                size_t len = snprintf(buf, sizeof(buf), "%lu\n", i);

                KT_ASSERT_M("Circular buffer write failed",
                            circular_buffer_write(&cb, buf, len) == len);
        }
        cnum = off = 0;
        while ((csize = circular_buffer_read(&cb, buf, sizeof(buf), off)) != 0)
        {
                char *top = buf + csize;
                char *ptr = buf;
                char *pnl;

                while ((pnl = memchr(ptr, '\n', top - ptr)) != NULL) {
                        size_t num;

                        *pnl = 0;
                        num = strtoul(ptr, NULL, 10);
                        KT_ASSERT_M("Numbers should be ascending", num >= cnum);
                        cnum = num;
                        ptr = pnl + 1;
                }

                off += ptr - buf;
        }

        for (size_t i = 0; i < (cbsize / sizeof(buf) + 1); i++) {
                memset(buf, (int) i, sizeof(buf));

                KT_ASSERT_M("Circular buffer write failed",
                            circular_buffer_write(&cb, buf, sizeof(buf)) ==
                            sizeof(buf));
        }
        cnum = off = 0;
        while ((csize = circular_buffer_read(&cb, buf, sizeof(buf), off)) != 0)
        {
                size_t num = buf[0];

                KT_ASSERT_M("Invalid record read size", csize == sizeof(buf));

                if (off != 0)
                        KT_ASSERT_M("Invalid record sequence number",
                                    num == ((cnum + 1) % 256));
                cnum = num;
                off += csize;
        }

        bigbuf = kzmalloc(cbsize, MEM_WAIT);
        KT_ASSERT(bigbuf != NULL);

        mxsize = circular_buffer_max_write_size(&cb);
        KT_ASSERT_M("Circular buffer max write failed",
                    circular_buffer_write(&cb, bigbuf, mxsize) == mxsize);

        memset(bigbuf, 17, cbsize);
        csize = circular_buffer_read(&cb, bigbuf, mxsize, 0);
        KT_ASSERT_M("Invalid max record read size", csize == mxsize);

        for (size_t i = 0; i < csize; i++)
                KT_ASSERT_M("Invalid max record value", bigbuf[i] == 0);

        kfree(bigbuf);

        circular_buffer_destroy(&cb);

        return TRUE;
}

/* Ghetto test, only tests one producer or consumer at a time */
// TODO: Un-ghetto the test, add assertions.
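/* BCQ = bounded concurrent queue (ros/bcq.h).  The last argument to
 * bcq_enqueue() appears to bound how many times it retries before giving up
 * (the calls here pass 5 or 10), so a full queue yields an error return
 * rather than blocking; likewise bcq_dequeue() fails on an empty queue.  The
 * loops below lean on that to probe overflow and underflow behavior. */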
bool test_bcq(void)
{
        /* Tests a basic struct */
        struct my_struct {
                int x;
                int y;
        };
        struct my_struct in_struct, out_struct;

        DEFINE_BCQ_TYPES(test, struct my_struct, 16);
        struct test_bcq t_bcq;
        bcq_init(&t_bcq, struct my_struct, 16);

        in_struct.x = 4;
        in_struct.y = 5;
        out_struct.x = 1;
        out_struct.y = 2;

        bcq_enqueue(&t_bcq, &in_struct, 16, 5);
        bcq_dequeue(&t_bcq, &out_struct, 16);
        printk("out x %d. out y %d\n", out_struct.x, out_struct.y);

        /* Tests the BCQ a bit more, esp with overflow */
        #define NR_ELEM_A_BCQ 8 /* NOTE: this must be a power of 2! */
        DEFINE_BCQ_TYPES(my, int, NR_ELEM_A_BCQ);
        struct my_bcq a_bcq;
        bcq_init(&a_bcq, int, NR_ELEM_A_BCQ);

        int y = 2;
        int output[100];
        int retval[100];

        /* Helpful debugger */
        void print_a_bcq(struct my_bcq *bcq)
        {
                printk("A BCQ (made of ints): %p\n", bcq);
                printk("\tprod_idx: %p\n", bcq->hdr.prod_idx);
                printk("\tcons_pub_idx: %p\n", bcq->hdr.cons_pub_idx);
                printk("\tcons_pvt_idx: %p\n", bcq->hdr.cons_pvt_idx);
                for (int i = 0; i < NR_ELEM_A_BCQ; i++) {
                        printk("Element %d, rdy_for_cons: %02p\n", i,
                               bcq->wraps[i].rdy_for_cons);
                }
        }

        /* Put in more than it can take */
        for (int i = 0; i < 15; i++) {
                y = i;
                retval[i] = bcq_enqueue(&a_bcq, &y, NR_ELEM_A_BCQ, 10);
                printk("enqueued: %d, had retval %d\n", y, retval[i]);
        }
        //print_a_bcq(&a_bcq);

        /* Try to dequeue more than we put in */
        for (int i = 0; i < 15; i++) {
                retval[i] = bcq_dequeue(&a_bcq, &output[i], NR_ELEM_A_BCQ);
                printk("dequeued: %d with retval %d\n", output[i], retval[i]);
        }
        //print_a_bcq(&a_bcq);

        /* Put in some it should be able to take */
        for (int i = 0; i < 3; i++) {
                y = i;
                retval[i] = bcq_enqueue(&a_bcq, &y, NR_ELEM_A_BCQ, 10);
                printk("enqueued: %d, had retval %d\n", y, retval[i]);
        }

        /* Take those, and then a couple extra */
        for (int i = 0; i < 5; i++) {
                retval[i] = bcq_dequeue(&a_bcq, &output[i], NR_ELEM_A_BCQ);
                printk("dequeued: %d with retval %d\n", output[i], retval[i]);
        }

        /* Try some one-for-one */
        for (int i = 0; i < 5; i++) {
                y = i;
                retval[i] = bcq_enqueue(&a_bcq, &y, NR_ELEM_A_BCQ, 10);
                printk("enqueued: %d, had retval %d\n", y, retval[i]);
                retval[i] = bcq_dequeue(&a_bcq, &output[i], NR_ELEM_A_BCQ);
                printk("dequeued: %d with retval %d\n", output[i], retval[i]);
        }

        return true;
}

/* Test a simple concurrent send and receive (one prod, one cons).  We spawn a
 * process that will go into _M mode on another core, and we'll do the test from
 * an alarm handler run on our core.  When we start up the process, we won't
 * return so we need to defer the work with an alarm. */
// TODO: Check if we can add more assertions.
bool test_ucq(void)
{
        struct timer_chain *tchain = &per_cpu_info[core_id()].tchain;
        struct alarm_waiter *waiter = kmalloc(sizeof(struct alarm_waiter), 0);

        /* Alarm handler: what we want to do after the process is up */
        void send_msgs(struct alarm_waiter *waiter)
        {
                struct timer_chain *tchain;
                struct proc *p = waiter->data;
                uintptr_t old_proc;
                struct ucq *ucq = (struct ucq*)USTACKTOP;
                struct event_msg msg;

                printk("Running the alarm handler!\n");
                printk("NR msg per page: %d\n", NR_MSG_PER_PAGE);
                /* might not be mmaped yet, if not, abort.  We used to
                 * user_mem_check, but now we just touch it and PF. */
                char touch = *(char*)ucq;
                asm volatile ("" : : "r"(touch));
                /* load their address space */
                old_proc = switch_to(p);
                /* So it's mmaped, see if it is ready (note that this is
                 * dangerous) */
                if (!ucq->ucq_ready) {
                        printk("Not ready yet\n");
                        switch_back(p, old_proc);
                        goto abort;
                }
                /* So it's ready, time to finally do the tests... */
                printk("[kernel] Finally starting the tests...\n");
                /* 1: Send a simple message */
                printk("[kernel] #1 Sending simple message (7, deadbeef)\n");
                msg.ev_type = 7;
                msg.ev_arg2 = 0xdeadbeef;
                send_ucq_msg(ucq, p, &msg);
                printk("nr_pages: %d\n", atomic_read(&ucq->nr_extra_pgs));
                /* 2: Send a bunch.  In a VM, this causes one swap, and then a
                 * bunch of mmaps. */
                printk("[kernel] #2\n");
                for (int i = 0; i < 5000; i++) {
                        msg.ev_type = i;
                        send_ucq_msg(ucq, p, &msg);
                }
                printk("nr_pages: %d\n", atomic_read(&ucq->nr_extra_pgs));
                printk("[kernel] #3\n");
                /* 3: make sure we chained pages (assuming 1k is enough) */
                for (int i = 0; i < 1000; i++) {
                        msg.ev_type = i;
                        send_ucq_msg(ucq, p, &msg);
                }
                printk("nr_pages: %d\n", atomic_read(&ucq->nr_extra_pgs));
                /* other things we could do:
                 *  - concurrent producers / consumers...  ugh.
                 *  - would require a kmsg to another core, instead of a local
                 *  alarm
                 */
                /* done, switch back and free things */
                switch_back(p, old_proc);
                proc_decref(p);
                kfree(waiter); /* since it was kmalloc()d */
                return;
        abort:
                tchain = &per_cpu_info[core_id()].tchain;
                /* Set to run again */
                set_awaiter_rel(waiter, 1000000);
                set_alarm(tchain, waiter);
        }
        /* Set up a handler to run the real part of the test */
        init_awaiter(waiter, send_msgs);
        set_awaiter_rel(waiter, 1000000);       /* 1s should be long enough */
        set_alarm(tchain, waiter);
        /* Just spawn the program */
        struct file_or_chan *program;

        program = foc_open("/bin/ucq", O_READ, 0);

        KT_ASSERT_M("We should be able to find /bin/ucq",
                    program);

        struct proc *p = proc_create(program, NULL, NULL);

        proc_wakeup(p);
        /* instead of getting rid of the reference created in proc_create,
         * we'll put it in the awaiter */
        waiter->data = p;
        foc_decref(program);
        /* Should never return from schedule (env_pop in there) also note you
         * may not get the process you created, in the event there are others
         * floating around that are runnable */
        run_scheduler();
        smp_idle();

        KT_ASSERT_M("We should never return from schedule",
                    false);

        return true;
}

/* Kernel message to restart our kthread */
static void __test_up_sem(uint32_t srcid, long a0, long a1, long a2)
{
        struct semaphore *sem = (struct semaphore*)a0;

        printk("[kmsg] Upping the sem to start the kthread, stacktop is %p\n",
               get_stack_top());
        if (!sem_up(sem)) {
                printk("[kmsg] Crap, the sem didn't have a kthread waiting!\n");
                return;
        }
        printk("Kthread will restart when we handle the __launch RKM\n");
}

/* simple test - start one, do something else, and resume it.  For lack of a
 * better infrastructure, we send ourselves a kmsg to run the kthread, which
 * we'll handle in smp_idle (which you may have to manually call).  Note this
 * doesn't test things like memory being leaked, or dealing with processes. */
// TODO: Add assertions.
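/* Flow of the test: the semaphore is initialized to 1, so the first
 * sem_down() succeeds without sleeping (the "unwind" case).  The second
 * sem_down() blocks this kthread for real; it resumes when the
 * __test_up_sem routine message runs (from smp_idle) and ups the sem.  The
 * stacktop printks let you eyeball that we came back, possibly on a
 * different stack. */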
bool test_kthreads(void)
{
        struct semaphore sem = SEMAPHORE_INITIALIZER(sem, 1);

        printk("We're a kthread!  Stacktop is %p.  Testing suspend, etc...\n",
               get_stack_top());
        /* So we have something that will wake us up.  Routine messages won't
         * get serviced in the kernel right away. */
        send_kernel_message(core_id(), __test_up_sem, (long)&sem, 0, 0,
                            KMSG_ROUTINE);
        /* Actually block (or try to) */
        /* This one shouldn't block - but will test the unwind (the 1 above) */
        printk("About to sleep, but should unwind (signal beat us)\n");
        sem_down(&sem);
        /* This one is for real, yo.  Run and tell that. */
        printk("About to sleep for real\n");
        sem_down(&sem);
        printk("Kthread restarted!  Stacktop is %p.\n", get_stack_top());

        return true;
}

/* Second player's kmsg */
static void __test_kref_2(uint32_t srcid, long a0, long a1, long a2)
{
        struct kref *kref = (struct kref*)a0;
        bool *done = (bool*)a1;

        enable_irq();
        for (int i = 0; i < 10000000; i++) {
                kref_get(kref, 1);
                set_core_timer(1, TRUE);
                udelay(2);
                kref_put(kref);
        }
        *done = TRUE;
}

/* Runs a simple test between core 0 (caller) and core 2 */
// TODO: I believe we need more assertions.
bool test_kref(void)
{
        struct kref local_kref;
        bool done = FALSE;

        kref_init(&local_kref, fake_release, 1);
        send_kernel_message(2, __test_kref_2, (long)&local_kref, (long)&done, 0,
                            KMSG_ROUTINE);
        for (int i = 0; i < 10000000; i++) {
                kref_get(&local_kref, 1);
                udelay(2);
                kref_put(&local_kref);
        }
        while (!done)
                cpu_relax();
        KT_ASSERT(kref_refcnt(&local_kref) == 1);
        printk("[TEST-KREF] Simple 2-core getting/putting passed.\n");

        return true;
}

// TODO: Add more descriptive assertion messages.
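/* Covers atomic_sub_and_test() (nonzero return only when the subtraction
 * lands exactly on zero) and atomic_cas().  The CAS helper below deliberately
 * feeds a stale 'old' value on its first attempt, so the test proves the
 * boolean return correctly reports failure before the retry succeeds. */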
bool test_atomics(void)
{
        /* subtract_and_test */
        atomic_t num;
        /* Test subtracting to 0 */
        atomic_init(&num, 1);
        KT_ASSERT(atomic_sub_and_test(&num, 1) == 1);
        atomic_init(&num, 2);
        KT_ASSERT(atomic_sub_and_test(&num, 2) == 1);
        /* Test not getting to 0 */
        atomic_init(&num, 1);
        KT_ASSERT(atomic_sub_and_test(&num, 0) == 0);
        atomic_init(&num, 2);
        KT_ASSERT(atomic_sub_and_test(&num, 1) == 0);
        /* Test negatives */
        atomic_init(&num, -1);
        KT_ASSERT(atomic_sub_and_test(&num, 1) == 0);
        atomic_init(&num, -1);
        KT_ASSERT(atomic_sub_and_test(&num, -1) == 1);
        /* Test larger nums */
        atomic_init(&num, 265);
        KT_ASSERT(atomic_sub_and_test(&num, 265) == 1);
        atomic_init(&num, 265);
        KT_ASSERT(atomic_sub_and_test(&num, 2) == 0);

        /* CAS */
        /* Simple test, make sure the bool retval of CAS handles failure */
        bool test_cas_val(long init_val)
        {
                atomic_t actual_num;
                long old_num;
                int attempt;

                atomic_init(&actual_num, init_val);
                attempt = 0;
                do {
                        old_num = atomic_read(&actual_num);
                        /* First time, try to fail */
                        if (attempt == 0)
                                old_num++;
                        attempt++;
                } while (!atomic_cas(&actual_num, old_num, old_num + 10));
                if (atomic_read(&actual_num) != init_val + 10) {
                        return false;
                } else {
                        return true;
                }
        }
        KT_ASSERT_M("CAS test for 257 should be successful.",
                    test_cas_val(257));
        KT_ASSERT_M("CAS test for 1 should be successful.",
                    test_cas_val(1));
        return true;
}

/* Helper KMSG for test_abort.  Core 1 does this, while core 0 sends an IRQ. */
static void __test_try_halt(uint32_t srcid, long a0, long a1, long a2)
{
        disable_irq();
        /* wait 10 sec.  should have a bunch of ints pending */
        udelay(10000000);
        printk("Core 1 is about to halt\n");
        cpu_halt();
        printk("Returned from halting on core 1\n");
}

/* x86 test, making sure our cpu_halt() and handle_irq() work.  If you want to
 * see it fail, you'll probably need to put a nop in the asm for cpu_halt(), and
 * comment out abort_halt() in handle_irq(). */
// TODO: Add assertions.
bool test_abort_halt(void)
{
#ifdef CONFIG_X86
        send_kernel_message(1, __test_try_halt, 0, 0, 0, KMSG_ROUTINE);
        /* wait 1 sec, enough time for core 1 to be in its KMSG */
        udelay(1000000);
        /* Send an IPI */
        send_ipi(0x01, I_TESTING);
        printk("Core 0 sent the IPI\n");
#endif /* CONFIG_X86 */
        return true;
}

/* Funcs and global vars for test_cv() */
static struct cond_var local_cv;
static atomic_t counter;
static struct cond_var *cv = &local_cv;
static volatile bool state = FALSE;             /* for test 3 */

void __test_cv_signal(uint32_t srcid, long a0, long a1, long a2)
{
        if (atomic_read(&counter) % 4)
                cv_signal(cv);
        else
                cv_broadcast(cv);
        atomic_dec(&counter);
}

void __test_cv_waiter(uint32_t srcid, long a0, long a1, long a2)
{
        cv_lock(cv);
        /* check state, etc */
        cv_wait_and_unlock(cv);
        atomic_dec(&counter);
}

void __test_cv_waiter_t3(uint32_t srcid, long a0, long a1, long a2)
{
        udelay(a0);
        /* if state == false, we haven't seen the signal yet */
        cv_lock(cv);
        while (!state) {
                cpu_relax();
                cv_wait(cv);    /* unlocks and relocks */
        }
        cv_unlock(cv);
        /* Make sure we are done, tell the controller we are done */
        cmb();
        assert(state);
        atomic_dec(&counter);
}

// TODO: Add more assertions.
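/* The waiter in test 3 above is the canonical condition-variable pattern:
 * lock, re-check the condition in a while loop (a wakeup can race with the
 * state change), cv_wait(), and only proceed once 'state' is observed true
 * under the lock.  Tests 0-2 mostly hammer the signal/broadcast bookkeeping
 * under load. */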
bool test_cv(void)
{
        int nr_msgs;

        cv_init(cv);
        /* Test 0: signal without waiting */
        cv_broadcast(cv);
        cv_signal(cv);
        kthread_yield();
        printk("test_cv: signal without waiting complete\n");

        /* Test 1: single / minimal shit */
        nr_msgs = num_cores - 1; /* not using cpu 0 */
        atomic_init(&counter, nr_msgs);
        for (int i = 1; i < num_cores; i++)
                send_kernel_message(i, __test_cv_waiter, 0, 0, 0, KMSG_ROUTINE);
        udelay(1000000);
        cv_signal(cv);
        kthread_yield();
        while (atomic_read(&counter) != nr_msgs - 1)
                cpu_relax();
        printk("test_cv: single signal complete\n");
        cv_broadcast(cv);
        /* broadcast probably woke up the waiters on our core.  since we want to
         * spin on their completion, we need to yield for a bit. */
        kthread_yield();
        while (atomic_read(&counter))
                cpu_relax();
        printk("test_cv: broadcast signal complete\n");

        /* Test 2: shitloads of waiters and signalers */
        nr_msgs = 0x500;        /* any more than 0x20000 could go OOM */
        atomic_init(&counter, nr_msgs);
        for (int i = 0; i < nr_msgs; i++) {
                int cpu = (i % (num_cores - 1)) + 1;
                if (atomic_read(&counter) % 5)
                        send_kernel_message(cpu, __test_cv_waiter, 0, 0, 0,
                                            KMSG_ROUTINE);
                else
                        send_kernel_message(cpu, __test_cv_signal, 0, 0, 0,
                                            KMSG_ROUTINE);
        }
        kthread_yield();        /* run whatever messages we sent to ourselves */
        while (atomic_read(&counter)) {
                cpu_relax();
                cv_broadcast(cv);
                udelay(1000000);
                kthread_yield();        /* run whatever messages we sent to
                                         * ourselves */
        }
        KT_ASSERT(!cv->nr_waiters);
        printk("test_cv: massive message storm complete\n");

        /* Test 3: basic one signaller, one receiver.  we want to vary the
         * amount of time the sender and receiver delays, starting with (1ms,
         * 0ms) and ending with (0ms, 1ms).  At each extreme, such as with the
         * sender waiting 1ms, the receiver/waiter should hit the "check and
         * wait" point well before the sender/signaller hits the "change state
         * and signal" point. */
        for (int i = 0; i < 1000; i++) {
                /* some extra chances at each point */
                for (int j = 0; j < 10; j++) {
                        state = FALSE;
                        /* signal that the client is done */
                        atomic_init(&counter, 1);
                        /* client waits for i usec */
                        send_kernel_message(2, __test_cv_waiter_t3, i, 0, 0,
                                            KMSG_ROUTINE);
                        cmb();
                        udelay(1000 - i);       /* sender's wait: 1000..0 us */
                        state = TRUE;
                        cv_signal(cv);
                        /* signal might have unblocked a kthread, let it run */
                        kthread_yield();
                        /* they might not have run at all yet (in which case
                         * they lost the race and don't need the signal).  but
                         * we need to wait til they're done */
                        while (atomic_read(&counter))
                                cpu_relax();
                        KT_ASSERT(!cv->nr_waiters);
                }
        }
        printk("test_cv: single sender/receiver complete\n");

        return true;
}

/* Based on a bug I noticed.  TODO: actual memset test... */
bool test_memset(void)
{
        #define ARR_SZ 256

        void print_array(char *c, size_t len)
        {
                for (int i = 0; i < len; i++)
                        printk("%04d: %02x\n", i, *c++);
        }

        bool check_array(char *c, char x, size_t len)
        {
                for (int i = 0; i < len; i++) {
                        #define ASSRT_SIZE 64
                        char *assrt_msg = (char*) kmalloc(ASSRT_SIZE, 0);
                        snprintf(assrt_msg, ASSRT_SIZE,
                                 "Char %d is %c (%02x), should be %c (%02x)", i,
                                 *c, *c, x, x);
                        KT_ASSERT_M(assrt_msg, (*c == x));
                        /* Only freed on the passing path; on failure, the
                         * framework still holds the message. */
                        kfree(assrt_msg);
                        c++;
                }
                return true;
        }

        bool run_check(char *arr, int ch, size_t len)
        {
                char *c = arr;
                for (int i = 0; i < ARR_SZ; i++)
                        *c++ = 0x0;
                memset(arr, ch, len - 4);
                if (check_array(arr, ch, len - 4) &&
                    check_array(arr + len - 4, 0x0, 4)) {
                        return true;
                } else {
                        return false;
                }
        }

        char bytes[ARR_SZ];

        if (!run_check(bytes, 0xfe, 20) || !run_check(bytes, 0xc0fe, 20)) {
                return false;
        }

        return true;
}

void noinline __longjmp_wrapper(struct jmpbuf *jb)
{
        asm ("");
        printk("Starting: %s\n", __FUNCTION__);
        longjmp(jb, 1);
        // Should never get here
        printk("Exiting: %s\n", __FUNCTION__);
}

// TODO: Add assertions.
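/* setjmp() returns 0 when called directly and nonzero when control comes
 * back via longjmp(), which is why the branch below runs both arms: first
 * the else (initial return), then the if (after __longjmp_wrapper jumps
 * back).  The wrapper is noinline so the jump really crosses a frame. */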
bool test_setjmp(void)
{
        struct jmpbuf jb;
        printk("Starting: %s\n", __FUNCTION__);
        if (setjmp(&jb)) {
                printk("After second setjmp return: %s\n", __FUNCTION__);
        } else {
                printk("After first setjmp return: %s\n", __FUNCTION__);
                __longjmp_wrapper(&jb);
        }
        printk("Exiting: %s\n", __FUNCTION__);

        return true;
}

// TODO: add assertions.
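/* apipe_read()/apipe_write() move up to N whole elements and return how many
 * they actually moved (0 apparently once the other side has closed and the
 * ring is drained), so both workers below loop until their per-iteration
 * quota is done.  The second round sends the writer to core 1 to get a real
 * cross-core producer/consumer pair. */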
1336bool test_apipe(void)
1337{
1338        static struct atomic_pipe test_pipe;
1339
1340        struct some_struct {
1341                long x;
1342                int y;
1343        };
1344        /* Don't go too big, or you'll run off the stack */
1345        #define MAX_BATCH 100
1346
1347        void __test_apipe_writer(uint32_t srcid, long a0, long a1, long a2)
1348        {
1349                int ret, count_todo;
1350                int total = 0;
1351                struct some_struct local_str[MAX_BATCH];
1352                for (int i = 0; i < MAX_BATCH; i++) {
1353                        local_str[i].x = 0xf00;
1354                        local_str[i].y = 0xba5;
1355                }
1356                /* testing 0, and max out at 50. [0, ... 50] */
1357                for (int i = 0; i < MAX_BATCH + 1; i++) {
1358                        count_todo = i;
1359                        while (count_todo) {
1360                                ret = apipe_write(&test_pipe, &local_str,
1361                                                  count_todo);
1362                                /* Shouldn't break, based on the loop counters
1363                                 */
1364                                if (!ret) {
1365                                        printk("Writer breaking with %d left\n",
1366                                               count_todo);
1367                                        break;
1368                                }
1369                                total += ret;
1370                                count_todo -= ret;
1371                        }
1372                }
1373                printk("Writer done, added %d elems\n", total);
1374                apipe_close_writer(&test_pipe);
1375        }
1376
1377        void __test_apipe_reader(uint32_t srcid, long a0, long a1, long a2)
1378        {
1379                int ret, count_todo;
1380                int total = 0;
1381                struct some_struct local_str[MAX_BATCH] = {{0}};
1382                /* reversed loop compared to the writer [50, ... 0] */
1383                for (int i = MAX_BATCH; i >= 0; i--) {
1384                        count_todo = i;
1385                        while (count_todo) {
1386                                ret = apipe_read(&test_pipe, &local_str,
1387                                                 count_todo);
1388                                if (!ret) {
1389                                        printk("Reader breaking with %d left\n",
1390                                               count_todo);
1391                                        break;
1392                                }
1393                                total += ret;
1394                                count_todo -= ret;
1395                        }
1396                }
1397                printk("Reader done, took %d elems\n", total);
1398                for (int i = 0; i < MAX_BATCH; i++) {
1399                        assert(local_str[i].x == 0xf00);
1400                        assert(local_str[i].y == 0xba5);
1401                }
1402                apipe_close_reader(&test_pipe);
1403        }
1404
1405        void *pipe_buf = kpage_alloc_addr();
1406        KT_ASSERT(pipe_buf);
1407        apipe_init(&test_pipe, pipe_buf, PGSIZE, sizeof(struct some_struct));
1408        printd("*ap_buf %p\n", test_pipe.ap_buf);
1409        printd("ap_ring_sz %p\n", test_pipe.ap_ring_sz);
1410        printd("ap_elem_sz %p\n", test_pipe.ap_elem_sz);
1411        printd("ap_rd_off %p\n", test_pipe.ap_rd_off);
1412        printd("ap_wr_off %p\n", test_pipe.ap_wr_off);
1413        printd("ap_nr_readers %p\n", test_pipe.ap_nr_readers);
1414        printd("ap_nr_writers %p\n", test_pipe.ap_nr_writers);
1415        send_kernel_message(0, __test_apipe_writer, 0, 0, 0, KMSG_ROUTINE);
1416        /* Once we start synchronizing with a kmsg / kthread that could be on a
1417         * different core, we run the chance of being migrated when we block. */
1418        __test_apipe_reader(0, 0, 0, 0);
1419        /* Wait til the first test is done */
1420        while (test_pipe.ap_nr_writers) {
1421                kthread_yield();
1422                cpu_relax();
1423        }
1424        /* Try cross core (though CV wake ups schedule on the waking core) */
1425        apipe_open_reader(&test_pipe);
1426        apipe_open_writer(&test_pipe);
1427        send_kernel_message(1, __test_apipe_writer, 0, 0, 0, KMSG_ROUTINE);
1428        __test_apipe_reader(0, 0, 0, 0);
1429        /* We could be on core 1 now.  If we were called from core 0, our
1430         * caller might expect us to return while being on core 0 (like if we
1431         * were kfunc'd from the monitor).  Be careful if you copy this code. */
1432
1433        return true;
1434}
1435
1436static struct rwlock rwlock, *rwl = &rwlock;
1437static atomic_t rwlock_counter;
1438// TODO: Add assertions.
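    /* Sanity-checks recursive read-locking, then hammers the lock from every
     * other core with a random mix of read, try-read, and write ops. */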
1439bool test_rwlock(void)
1440{
1441        bool ret;
1442        rwinit(rwl);
1443        /* Basic: can I lock twice, recursively? */
1444        rlock(rwl);
1445        ret = canrlock(rwl);
1446        KT_ASSERT(ret);
1447        runlock(rwl);
1448        runlock(rwl);
1449        /* Other simple tests */
1450        wlock(rwl);
1451        wunlock(rwl);
1452
1453        /* Just some half-assed different operations */
1454        void __test_rwlock(uint32_t srcid, long a0, long a1, long a2)
1455        {
1456                int rand = read_tsc() & 0xff;
1457                for (int i = 0; i < 10000; i++) {
1458                        switch ((rand * i) % 5) {
1459                        case 0:
1460                        case 1:
1461                                rlock(rwl);
1462                                runlock(rwl);
1463                                break;
1464                        case 2:
1465                        case 3:
1466                                if (canrlock(rwl))
1467                                        runlock(rwl);
1468                                break;
1469                        case 4:
1470                                wlock(rwl);
1471                                wunlock(rwl);
1472                                break;
1473                        }
1474                }
1475                /* signal to allow core 0 to finish */
1476                atomic_dec(&rwlock_counter);
1477        }
1478
1479        /* send 4 messages to each non core 0 */
1480        atomic_init(&rwlock_counter, (num_cores - 1) * 4);
1481        for (int i = 1; i < num_cores; i++)
1482                for (int j = 0; j < 4; j++)
1483                        send_kernel_message(i, __test_rwlock, 0, 0, 0,
1484                                            KMSG_ROUTINE);
1485        while (atomic_read(&rwlock_counter))
1486                cpu_relax();
1487        printk("rwlock test complete\n");
1488
1489        return true;
1490}
1491
1492/* Funcs and global vars for test_rv() */
1493static struct rendez local_rv;
1494static struct rendez *rv = &local_rv;
1495/* reusing state and counter from test_cv... */
1496
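    /* Condition for rendez_sleep*(): sleepers stay blocked until this returns
     * nonzero, i.e. until the waker sets 'state' to TRUE. */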
1497static int __rendez_cond(void *arg)
1498{
1499        return *(bool*)arg;
1500}
1501
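    /* Waker helper: signals the CV, or broadcasts on roughly every fourth
     * wakeup.  Note test_rv() below drives wakeups via rendez_wakeup()
     * directly rather than through this handler. */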
1502void __test_rv_wakeup(uint32_t srcid, long a0, long a1, long a2)
1503{
1504        if (atomic_read(&counter) % 4)
1505                cv_signal(cv);
1506        else
1507                cv_broadcast(cv);
1508        atomic_dec(&counter);
1509}
1510
1511void __test_rv_sleeper(uint32_t srcid, long a0, long a1, long a2)
1512{
1513        rendez_sleep(rv, __rendez_cond, (void*)&state);
1514        atomic_dec(&counter);
1515}
1516
1517void __test_rv_sleeper_timeout(uint32_t srcid, long a0, long a1, long a2)
1518{
1519        /* half-assed amount of time. */
1520        rendez_sleep_timeout(rv, __rendez_cond, (void*)&state, a0);
1521        atomic_dec(&counter);
1522}
1523
1524// TODO: Add more assertions.
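    /* Three phases: a wakeup with no sleepers, a bulk wakeup of one sleeper
     * per non-zero core, then a storm of plain and timeout sleepers woken by
     * repeated rendez_wakeup() calls. */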
1525bool test_rv(void)
1526{
1527        int nr_msgs;
1528
1529        rendez_init(rv);
1530        /* Test 0: signal without waiting */
1531        rendez_wakeup(rv);
1532        kthread_yield();
1533        printk("test_rv: wakeup without sleeping complete\n");
1534
1535        /* Test 1: a few sleepers */
1536        nr_msgs = num_cores - 1; /* not using cpu 0 */
1537        atomic_init(&counter, nr_msgs);
1538        state = FALSE;
1539        for (int i = 1; i < num_cores; i++)
1540                send_kernel_message(i, __test_rv_sleeper, 0, 0, 0,
1541                                    KMSG_ROUTINE);
1542        udelay(1000000);
1543        cmb();
1544        state = TRUE;
1545        rendez_wakeup(rv);
1546        /* broadcast probably woke up the waiters on our core.  since we want to
1547         * spin on their completion, we need to yield for a bit. */
1548        kthread_yield();
1549        while (atomic_read(&counter))
1550                cpu_relax();
1551        printk("test_rv: bulk wakeup complete\n");
1552
1553        /* Test 2: different types of sleepers / timeouts */
1554        state = FALSE;
1555        nr_msgs = 0x500;        /* any more than 0x20000 could go OOM */
1556        atomic_init(&counter, nr_msgs);
1557        for (int i = 0; i < nr_msgs; i++) {
1558                int cpu = (i % (num_cores - 1)) + 1;
1559
1560                /* timeouts from 0ms to 5000ms (long enough that they should
1561                 * wake via the condition, not the timeout) */
1562                if (atomic_read(&counter) % 5)
1563                        send_kernel_message(cpu, __test_rv_sleeper_timeout, i *
1564                                            4000, 0, 0, KMSG_ROUTINE);
1565                else
1566                        send_kernel_message(cpu, __test_rv_sleeper, 0, 0, 0,
1567                                            KMSG_ROUTINE);
1568        }
1569        kthread_yield();        /* run whatever messages we sent to ourselves */
1570        state = TRUE;
1571        while (atomic_read(&counter)) {
1572                cpu_relax();
1573                rendez_wakeup(rv);
1574                udelay(1000000);
1575                kthread_yield();/* run whatever messages we sent to ourselves */
1576        }
1577        KT_ASSERT(!rv->cv.nr_waiters);
1578        printk("test_rv: lots of sleepers/timeouts complete\n");
1579
1580        return true;
1581}
1582
1583/* Cheap test for the alarm internal management */
1584// TODO: Add assertions.
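    /* Phase 1 inserts, moves, and removes an alarm far in the future that
     * must never fire; phase 2 arms a near-term alarm that should fire twice
     * (once on set, once after the reset to an already-passed time). */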
1585bool test_alarm(void)
1586{
1587        uint64_t now = tsc2usec(read_tsc());
1588        struct alarm_waiter await1, await2;
1589        struct timer_chain *tchain = &per_cpu_info[0].tchain;
1590        void shouldnt_run(struct alarm_waiter *awaiter)
1591        {
1592                printk("Crap, %p ran!\n", awaiter);
1593        }
1594        void empty_run(struct alarm_waiter *awaiter)
1595        {
1596                printk("Yay, %p ran (hopefully twice)!\n", awaiter);
1597        }
1598        /* Test basic insert, move, remove */
1599        init_awaiter(&await1, shouldnt_run);
1600        set_awaiter_abs(&await1, now + 1000000000);
1601        set_alarm(tchain, &await1);
1602        reset_alarm_abs(tchain, &await1, now + 1000000000 - 50);
1603        reset_alarm_abs(tchain, &await1, now + 1000000000 + 50);
1604        unset_alarm(tchain, &await1);
1605        /* Test insert of one that fired already */
1606        init_awaiter(&await2, empty_run);
1607        set_awaiter_rel(&await2, 1);
1608        set_alarm(tchain, &await2);
1609        enable_irq();
1610        udelay(1000);
1611        reset_alarm_abs(tchain, &await2, now + 10);
1612        udelay(1000);
1613        unset_alarm(tchain, &await2);
1614
1615        printk("%s complete\n", __FUNCTION__);
1616
1617        return true;
1618}
1619
1620bool test_kmalloc_incref(void)
1621{
1622        /* this test is a bit invasive of the kmalloc internals */
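            /* Assumed layout: kmalloc stashes a struct kmalloc_tag (ending in
             * an int of flags) right below the returned buffer; for aligned
             * allocations, the flags also encode the offset back to the
             * original, unaligned buffer. */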
1623        void *__get_unaligned_orig_buf(void *buf)
1624        {
1625                int *tag_flags = (int*)(buf - sizeof(int));
1626                if ((*tag_flags & KMALLOC_FLAG_MASK) == KMALLOC_TAG_UNALIGN)
1627                        return (buf - (*tag_flags >> KMALLOC_ALIGN_SHIFT));
1628                else
1629                        return 0;
1630        }
1631
1632        bool test_buftag(void *b, struct kmalloc_tag *btag, char *str)
1633        {
1634                KT_ASSERT_M(str, kref_refcnt(&btag->kref) == 1);
1635                kmalloc_incref(b);
1636                KT_ASSERT_M(str, kref_refcnt(&btag->kref) == 2);
1637                kfree(b);
1638                KT_ASSERT_M(str, kref_refcnt(&btag->kref) == 1);
1639                kfree(b);
1640                /* dangerous read, it's been freed */
1641                KT_ASSERT_M(str, kref_refcnt(&btag->kref) == 0);
1642                return TRUE;
1643        }
1644
1645        void *b1, *b2, *b2o;
1646        struct kmalloc_tag *b1tag, *b2tag;
1647
1648        /* no realigned case */
1649        b1 = kmalloc(55, 0);
1650        KT_ASSERT(!__get_unaligned_orig_buf(b1));
1651        b1tag = (struct kmalloc_tag*)(b1 - sizeof(struct kmalloc_tag));
1652
1653        /* realigned case.  alloc'd before b1's test, so we know we get
1654         * different buffers. */
1655        b2 = kmalloc_align(55, 0, 64);
1656        b2o = __get_unaligned_orig_buf(b2);
1657        KT_ASSERT(b2o);
1658        b2tag = (struct kmalloc_tag*)(b2o - sizeof(struct kmalloc_tag));
1659
1660        if (!test_buftag(b1, b1tag, "b1, no realign"))
                    return FALSE;
1661        if (!test_buftag(b2, b2tag, "b2, realigned"))
                    return FALSE;
1662
1663        return TRUE;
1664}
1665
1666/* Some ghetto things:
1667 * - ASSERT_M only lets you have a string, not a format string.
1668 * - put doesn't return, so we have a "loud" test for that.  alternatively, we
1669 *   could have put panic, but then we couldn't test it at all.  and i don't
1670 *   particularly want it to have a return value.
1671 * - ASSERT_M just blindly returns.  we're leaking memory.
1672 */
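    /* Exercises the small-id pool: repeated full alloc/free cycles, an
     * over-allocation that must fail with -1, a deliberate bad put, and a
     * final sweep checking that every id pops exactly once. */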
1673bool test_u16pool(void)
1674{
1675        #define AMT 4096
1676        int *t;
1677        struct u16_pool *id = create_u16_pool(AMT);
1678        int i, x, y;
1679        int numalloc;
1680        KT_ASSERT(id);
1681
1682        t = kzmalloc(sizeof(int) * (AMT + 1), MEM_WAIT);
1683        for (x = 0; x < 1024; x++) {
1684                KT_ASSERT_M("Should be empty", id->tos == 0);
1685                for (i = 0; i < id->size; i++) {
1686                        int p = get_u16(id);
1687                        if (p < 0)
1688                                KT_ASSERT_M("Couldn't get enough", 0);
1689                        t[i] = p;
1690                }
1691                numalloc = i;
1692                // free them at random. With luck, we don't get too many
1693                // duplicate hits.
1694                for (y = i = 0; i < numalloc; y++) {
1695                        /* could read genrand, but that could be offline */
1696                        int f = (uint16_t)read_tsc() % numalloc;
1697                        if (!t[f])
1698                                continue;
1699                        put_u16(id, t[f]);
1700                        t[f] = 0;
1701                        i++;
1702                        /* that's long enough... */
1703                        if (y > 2 * id->size)
1704                                break;
1705                }
1706                /* grab the leftovers */
1707                for (i = 0; i < id->size; i++) {
1708                        if (!t[i])
1709                                continue;
1710                        put_u16(id, t[i]);
1711                        t[i] = 0;
1712                }
1713                /* the free loops above skip zero-valued slots, so id 0 was
                     * never put back; return it explicitly */
1714                put_u16(id, 0);
1715        }
1716
1717        // pop too many.
1718        bool we_broke = FALSE;
1719        for (i = 0; i < id->size * 2; i++) {
1720                x = get_u16(id);
1721                if (x == -1) {
1722                        we_broke = TRUE;
1723                        break;
1724                }
1725                t[i] = x;
1726        }
1727        KT_ASSERT_M("Should have failed to get too many", we_broke);
1728
1729        numalloc = i;
1730
1731        printd("Allocated %d items\n", numalloc);
1732        for (i = 0; i < numalloc; i++) {
1733                put_u16(id, t[i]);
1734                t[i] = 0;
1735        }
1736        KT_ASSERT_M("Should be empty", id->tos == 0);
1737
1738        printk("Ignore next BAD, testing bad alloc\n");
1739        put_u16(id, 25);        // should get an error.
1740        for (i = 0; i < id->size; i++) {
1741                int v = get_u16(id);
1742                if (t[v])
1743                        printd("BAD: %d pops twice!\n", v);
1744                KT_ASSERT_M("Popped twice!", t[v] == 0);
1745                t[v] = 1;
1746                //printk("%d,", v);
1747        }
1748
1749        for (i = 1; i < id->size; i++) {
1750                if (!t[i])
1751                        printd("BAD: %d was not set\n", i);
1752                KT_ASSERT_M("Wasn't set!", t[i]);
1753        }
1754
1755        kfree(t);
1756        return TRUE;
1757}
1758
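    /* Runs the user-copy primitives against a mapped user address.  Note that
     * buf is a char * parameter here, so the "(mem)" cases copy sizeof(char *)
     * (8 bytes on 64-bit), not the caller's full 128-byte buffer. */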
1759static bool uaccess_mapped(void *addr, char *buf, char *buf2)
1760{
1761        KT_ASSERT_M(
1762                "Copy to user (u8) to mapped address should not fail",
1763                copy_to_user(addr, buf, 1) == 0);
1764        KT_ASSERT_M(
1765                "Copy to user (u16) to mapped address should not fail",
1766                copy_to_user(addr, buf, 2) == 0);
1767        KT_ASSERT_M(
1768                "Copy to user (u32) to mapped address should not fail",
1769                copy_to_user(addr, buf, 4) == 0);
1770        KT_ASSERT_M(
1771                "Copy to user (u64) to mapped address should not fail",
1772                copy_to_user(addr, buf, 8) == 0);
1773        KT_ASSERT_M(
1774                "Copy to user (mem) to mapped address should not fail",
1775                copy_to_user(addr, buf, sizeof(buf)) == 0);
1776
1777        KT_ASSERT_M(
1778                "Copy from user (u8) to mapped address should not fail",
1779                copy_from_user(buf, addr, 1) == 0);
1780        KT_ASSERT_M(
1781                "Copy from user (u16) to mapped address should not fail",
1782                copy_from_user(buf, addr, 2) == 0);
1783        KT_ASSERT_M(
1784                "Copy from user (u32) to mapped address should not fail",
1785                copy_from_user(buf, addr, 4) == 0);
1786        KT_ASSERT_M(
1787                "Copy from user (u64) to mapped address should not fail",
1788                copy_from_user(buf, addr, 8) == 0);
1789        KT_ASSERT_M(
1790                "Copy from user (mem) to mapped address should not fail",
1791                copy_from_user(buf, addr, sizeof(buf)) == 0);
1792
1793        KT_ASSERT_M(
1794                "String copy to user to mapped address should not fail",
1795                strcpy_to_user(current, addr, "Akaros") == 0);
1796        KT_ASSERT_M(
1797                "String copy from user to mapped address should not fail",
1798                strcpy_from_user(current, buf, addr) == 0);
1799        KT_ASSERT_M("The copied string content should be matching",
1800                                memcmp(buf, "Akaros", 7) == 0);
1801
1802        return TRUE;
1803}
1804
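    /* Same battery against an unmapped user address: every copy must fail
     * with -EFAULT, including copies whose nominally-user pointer is really a
     * kernel address. */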
1805static bool uaccess_unmapped(void *addr, char *buf, char *buf2)
1806{
1807        KT_ASSERT_M("Copy to user (u8) to not mapped address should fail",
1808                    copy_to_user(addr, buf, 1) == -EFAULT);
1809        KT_ASSERT_M("Copy to user (u16) to not mapped address should fail",
1810                    copy_to_user(addr, buf, 2) == -EFAULT);
1811        KT_ASSERT_M("Copy to user (u32) to not mapped address should fail",
1812                    copy_to_user(addr, buf, 4) == -EFAULT);
1813        KT_ASSERT_M("Copy to user (u64) to not mapped address should fail",
1814                    copy_to_user(addr, buf, 8) == -EFAULT);
1815        KT_ASSERT_M("Copy to user (mem) to not mapped address should fail",
1816                    copy_to_user(addr, buf, sizeof(buf)) == -EFAULT);
1817
1818        KT_ASSERT_M("Copy from user (u8) to not mapped address should fail",
1819                    copy_from_user(buf, addr, 1) == -EFAULT);
1820        KT_ASSERT_M("Copy from user (u16) to not mapped address should fail",
1821                    copy_from_user(buf, addr, 2) == -EFAULT);
1822        KT_ASSERT_M("Copy from user (u32) to not mapped address should fail",
1823                    copy_from_user(buf, addr, 4) == -EFAULT);
1824        KT_ASSERT_M("Copy from user (u64) to not mapped address should fail",
1825                    copy_from_user(buf, addr, 8) == -EFAULT);
1826        KT_ASSERT_M("Copy from user (mem) to not mapped address should fail",
1827                    copy_from_user(buf, addr, sizeof(buf)) == -EFAULT);
1828
1829        KT_ASSERT_M("String copy to user to not mapped address should fail",
1830                    strcpy_to_user(NULL, addr, "Akaros") == -EFAULT);
1831        KT_ASSERT_M("String copy from user to not mapped address should fail",
1832                    strcpy_from_user(NULL, buf, addr) == -EFAULT);
1833
1834        KT_ASSERT_M("Copy from user with kernel side source pointer should fail",
1835                    copy_from_user(buf, buf2, sizeof(buf)) == -EFAULT);
1836        KT_ASSERT_M("Copy to user with kernel side destination pointer should fail",
1837                    copy_to_user(buf, buf2, sizeof(buf)) == -EFAULT);
1838
1839        return TRUE;
1840}
1841
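    /* Spins up a throwaway process, switches into its address space, mmaps a
     * page for the mapped-address checks, then unmaps it and reruns the
     * checks expecting -EFAULT. */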
1842bool test_uaccess(void)
1843{
1844        char buf[128] = { 0 };
1845        char buf2[128] = { 0 };
1846        struct proc *tmp;
1847        uintptr_t switch_tmp;
1848        int err;
1849        static const size_t mmap_size = 4096;
1850        void *addr;
1851        bool passed = FALSE;
1852
1853        err = proc_alloc(&tmp, 0, 0);
1854        KT_ASSERT_M("Failed to alloc a temp proc", err == 0);
1855        /* Tell everyone we're ready, in case some ops don't work on
1856         * PROC_CREATED */
1857        __proc_set_state(tmp, PROC_RUNNABLE_S);
1858        switch_tmp = switch_to(tmp);
1859        addr = mmap(tmp, 0, mmap_size, PROT_READ | PROT_WRITE,
1860                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
1861        if (addr == MAP_FAILED)
1862                goto out;
1863        passed = uaccess_mapped(addr, buf, buf2);
1864        munmap(tmp, (uintptr_t) addr, mmap_size);
1865        if (!passed)
1866                goto out;
1867        passed = uaccess_unmapped(addr, buf, buf2);
1868out:
1869        switch_back(tmp, switch_tmp);
1870        proc_decref(tmp);
1871        return passed;
1872}
1873
1874bool test_sort(void)
1875{
1876        int cmp_longs_asc(const void *p1, const void *p2)
1877        {
1878                const long v1 = *(const long *) p1;
1879                const long v2 = *(const long *) p2;
1880
1881                return v1 < v2 ? -1 : (v1 > v2 ? 1 : 0);
1882        }
1883
1884        int cmp_longs_desc(const void *p1, const void *p2)
1885        {
1886                const long v1 = *(const long *) p1;
1887                const long v2 = *(const long *) p2;
1888
1889                return v1 < v2 ? 1 : (v1 > v2 ? -1 : 0);
1890        }
1891
1892        size_t i;
1893        long long_set_1[] = {
1894                -9, 11, 0, 23, 123, -99, 3, 11, 23, -999, 872, 17, 21
1895        };
1896        long long_set_2[] = {
1897                31, 77, -1, 2, 0, 64, 11, 19, 69, 111, -89, 17, 21, 44, 77
1898        };
1899
1900        sort(long_set_1, ARRAY_SIZE(long_set_1), sizeof(long), cmp_longs_asc);
1901        for (i = 1; i < ARRAY_SIZE(long_set_1); i++)
1902                KT_ASSERT(long_set_1[i - 1] <= long_set_1[i]);
1903
1904        sort(long_set_2, ARRAY_SIZE(long_set_2), sizeof(long), cmp_longs_desc);
1905        for (i = 1; i < ARRAY_SIZE(long_set_2); i++)
1906                KT_ASSERT(long_set_2[i - 1] >= long_set_2[i]);
1907
1908        return TRUE;
1909}
1910
1911bool test_cmdline_parse(void)
1912{
1913        static const char *fake_cmdline =
1914                "kernel -root=/foo -simple -num=123 -quoted='abc \\'' -dup=311 "
1915                "-dup='akaros' -empty='' -inner=-outer -outer=-inner=xyz";
1916        const char *opt;
1917        char param[128];
1918
1919        /* Note that the get_boot_option() API should be passed NULL the first
1920         * time it is called, in normal cases, and should be passed the value
1921         * returned by the previous call to get_boot_option() when multiple
1922         * options with the same name have to be fetched. */
1923        opt = get_boot_option(fake_cmdline, "-root", param, sizeof(param));
1924        KT_ASSERT_M("Unable to parse -root option", opt);
1925        KT_ASSERT_M("Invalid -root option value", strcmp(param, "/foo") == 0);
1926
1927        opt = get_boot_option(fake_cmdline, "-root", NULL, 0);
1928        KT_ASSERT_M("Unable to parse -root option when param not provided",
1929                    opt);
1930
1931        opt = get_boot_option(fake_cmdline, "-simple", param, sizeof(param));
1932        KT_ASSERT_M("Unable to parse -simple option", opt);
1933        KT_ASSERT_M("Invalid -simple option value", strcmp(param, "") == 0);
1934
1935        opt = get_boot_option(fake_cmdline, "-num", param, sizeof(param));
1936        KT_ASSERT_M("Unable to parse -num option", opt);
1937        KT_ASSERT_M("Invalid -num option value", strcmp(param, "123") == 0);
1938
1939        opt = get_boot_option(fake_cmdline, "-quoted", param, sizeof(param));
1940        KT_ASSERT_M("Unable to parse -quoted option", opt);
1941        KT_ASSERT_M("Invalid -quoted option value", strcmp(param, "abc '") ==
1942                    0);
1943
1944        opt = get_boot_option(fake_cmdline, "-dup", param, sizeof(param));
1945        KT_ASSERT_M("Unable to parse -dup option", opt);
1946        KT_ASSERT_M("Invalid -dup option first value", strcmp(param, "311") ==
1947                    0);
1948
1949        opt = get_boot_option(opt, "-dup", param, sizeof(param));
1950        KT_ASSERT_M("Unable to parse -dup option", opt);
1951        KT_ASSERT_M("Invalid -dup option second value",
1952                                strcmp(param, "akaros") == 0);
1953
1954        opt = get_boot_option(fake_cmdline, "-inner", param, sizeof(param));
1955        KT_ASSERT_M("Unable to parse -inner option", opt);
1956        KT_ASSERT_M("Invalid -inner option value", strcmp(param, "-outer") ==
1957                    0);
1958
1959        opt = get_boot_option(opt, "-inner", param, sizeof(param));
1960        KT_ASSERT_M("Should not be parsing -inner as value", !opt);
1961
1962        opt = get_boot_option(fake_cmdline, "-outer", param, sizeof(param));
1963        KT_ASSERT_M("Unable to parse -outer option", opt);
1964        KT_ASSERT_M("Invalid -outer option value",
1965                                strcmp(param, "-inner=xyz") == 0);
1966
1967        opt = get_boot_option(fake_cmdline, "-missing", param, sizeof(param));
1968        KT_ASSERT_M("Should not be parsing -missing option", !opt);
1969
1970        opt = get_boot_option(fake_cmdline, "-inne", NULL, 0);
1971        KT_ASSERT_M("Should not be parsing -inne option", !opt);
1972
1973        opt = get_boot_option(fake_cmdline, "-outera", NULL, 0);
1974        KT_ASSERT_M("Should not be parsing -outera option", !opt);
1975
1976        opt = get_boot_option(fake_cmdline, "-empty", param, sizeof(param));
1977        KT_ASSERT_M("Unable to parse -empty option", opt);
1978        KT_ASSERT_M("Invalid -empty option value", strcmp(param, "") == 0);
1979
1980        return TRUE;
1981}
1982
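    /* True iff ptr lies in the dynamic per-cpu region, which the percpu
     * allocator presumably carves out just past the static per-cpu variables
     * (i.e., starting at PERCPU_STOP_VAR). */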
1983static bool __pcpu_ptr_is_dyn(void *ptr)
1984{
1985        char *p_c = ptr;
1986
1987        return (PERCPU_STOP_VAR <= p_c) &&
1988               (p_c < PERCPU_STOP_VAR + PERCPU_DYN_SIZE);
1989}
1990
1991static bool test_percpu_zalloc(void)
1992{
1993        uint8_t *u8 = percpu_zalloc(uint8_t, MEM_WAIT);
1994        uint64_t *u64 = percpu_zalloc(uint64_t, MEM_WAIT);
1995        uint32_t *u32 = percpu_zalloc(uint32_t, MEM_WAIT);
1996        uint64_t *old_u64;
1997
1998        KT_ASSERT(__pcpu_ptr_is_dyn(u8));
1999        KT_ASSERT(__pcpu_ptr_is_dyn(u64));
2000        KT_ASSERT(__pcpu_ptr_is_dyn(u32));
2001
2002        /* The order here is a bit hokey too - the first alloc is usually 16
2003         * byte aligned, so if we did a packed alloc, the u64 wouldn't be
2004         * aligned. */
2005        KT_ASSERT(ALIGNED(u8, __alignof__(*u8)));
2006        KT_ASSERT(ALIGNED(u64, __alignof__(*u64)));
2007        KT_ASSERT(ALIGNED(u32, __alignof__(*u32)));
2008
2009        /* Testing zalloc.  Though the first alloc ever is likely to be zero. */
2010        for_each_core(i)
2011                KT_ASSERT(_PERCPU_VAR(*u64, i) == 0);
2012        for_each_core(i)
2013                _PERCPU_VAR(*u64, i) = i;
2014        for_each_core(i)
2015                KT_ASSERT(_PERCPU_VAR(*u64, i) == i);
2016        /* If we free and realloc, we're likely to get the same one.  This is
2017         * due to the ARENA_BESTFIT policy with xalloc. */
2018        old_u64 = u64;
2019        percpu_free(u64);
2020        u64 = percpu_zalloc(uint64_t, MEM_WAIT);
2021        /* If this trips, then we didn't test this as well as we'd like. */
2022        warn_on(u64 != old_u64);
2023        for_each_core(i)
2024                KT_ASSERT(_PERCPU_VAR(*u64, i) == 0);
2025
2026        /* Yes, if an assert failed, we leak memory. */
2027        percpu_free(u8);
2028        percpu_free(u64);
2029        percpu_free(u32);
2030        return true;
2031}
2032
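    /* kmsg handler: bumps this core's copy of the dynamic per-cpu counter
     * core_id() + 1 times, then checks in so the sender can stop spinning. */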
2033static void __inc_foo(uint32_t srcid, long a0, long a1, long a2)
2034{
2035        uint64_t *foos = (uint64_t*)a0;
2036        atomic_t *check_in_p = (atomic_t*)a1;
2037
2038        for (int i = 0; i < core_id() + 1; i++)
2039                PERCPU_VAR(*foos)++;
2040        cmb();
2041        atomic_dec(check_in_p);
2042}
2043
2044static bool test_percpu_increment(void)
2045{
2046        uint64_t *foos = percpu_zalloc(uint64_t, MEM_WAIT);
2047        atomic_t check_in;
2048
2049        atomic_set(&check_in, num_cores);
2050        for_each_core(i)
2051                send_kernel_message(i, __inc_foo, (long)foos, (long)&check_in,
2052                                    0, KMSG_IMMEDIATE);
2053        while (atomic_read(&check_in))
2054                cpu_relax();
2055        for_each_core(i)
2056                KT_ASSERT(_PERCPU_VAR(*foos, i) == i + 1);
2057        /* Yes, if an assert failed, we leak memory. */
2058        percpu_free(foos);
2059        return true;
2060}
2061
2062static struct ktest ktests[] = {
2063#ifdef CONFIG_X86
2064        KTEST_REG(ipi_sending,        CONFIG_TEST_ipi_sending),
2065        KTEST_REG(pic_reception,      CONFIG_TEST_pic_reception),
2066        KTEST_REG(lapic_status_bit,   CONFIG_TEST_lapic_status_bit),
2067        KTEST_REG(pit,                CONFIG_TEST_pit),
2068        KTEST_REG(circ_buffer,        CONFIG_TEST_circ_buffer),
2069        KTEST_REG(kernel_messages,    CONFIG_TEST_kernel_messages),
2070#endif // CONFIG_X86
2071        KTEST_REG(barrier,            CONFIG_TEST_barrier),
2072        KTEST_REG(interrupts_irqsave, CONFIG_TEST_interrupts_irqsave),
2073        KTEST_REG(bitmasks,           CONFIG_TEST_bitmasks),
2074        KTEST_REG(checklists,         CONFIG_TEST_checklists),
2075        KTEST_REG(smp_call_functions, CONFIG_TEST_smp_call_functions),
2076        KTEST_REG(hashtable,          CONFIG_TEST_hashtable),
2077        KTEST_REG(circular_buffer,    CONFIG_TEST_circular_buffer),
2078        KTEST_REG(bcq,                CONFIG_TEST_bcq),
2079        KTEST_REG(ucq,                CONFIG_TEST_ucq),
2080        KTEST_REG(kthreads,           CONFIG_TEST_kthreads),
2081        KTEST_REG(kref,               CONFIG_TEST_kref),
2082        KTEST_REG(atomics,            CONFIG_TEST_atomics),
2083        KTEST_REG(abort_halt,         CONFIG_TEST_abort_halt),
2084        KTEST_REG(cv,                 CONFIG_TEST_cv),
2085        KTEST_REG(memset,             CONFIG_TEST_memset),
2086        KTEST_REG(setjmp,             CONFIG_TEST_setjmp),
2087        KTEST_REG(apipe,              CONFIG_TEST_apipe),
2088        KTEST_REG(rwlock,             CONFIG_TEST_rwlock),
2089        KTEST_REG(rv,                 CONFIG_TEST_rv),
2090        KTEST_REG(alarm,              CONFIG_TEST_alarm),
2091        KTEST_REG(kmalloc_incref,     CONFIG_TEST_kmalloc_incref),
2092        KTEST_REG(u16pool,            CONFIG_TEST_u16pool),
2093        KTEST_REG(uaccess,            CONFIG_TEST_uaccess),
2094        KTEST_REG(sort,               CONFIG_TEST_sort),
2095        KTEST_REG(cmdline_parse,      CONFIG_TEST_cmdline_parse),
2096        KTEST_REG(percpu_zalloc,      CONFIG_TEST_percpu_zalloc),
2097        KTEST_REG(percpu_increment,   CONFIG_TEST_percpu_increment),
2098};
2099static int num_ktests = ARRAY_SIZE(ktests);
2100linker_func_1(register_pb_ktests)
2101{
2102        REGISTER_KTESTS(ktests, num_ktests);
2103}
2104
2105/* Linker function tests.  Keep them commented, etc. */
2106#if 0
2107linker_func_1(xme11)
2108{
2109        printk("xme11\n");
2110}
2111
2112linker_func_1(xme12)
2113{
2114        printk("xme12\n");
2115}
2116
2117linker_func_1(xme13)
2118{
2119        printk("xme13\n");
2120}
2121
2122linker_func_1(xme14)
2123{
2124        printk("xme14\n");
2125}
2126
2127linker_func_1(xme15)
2128{
2129        printk("xme15\n");
2130}
2131
2132linker_func_2(xme21)
2133{
2134        printk("xme21\n");
2135}
2136
2137linker_func_2(xme22)
2138{
2139        printk("xme22\n");
2140}
2141
2142linker_func_2(xme23)
2143{
2144        printk("xme23\n");
2145}
2146
2147linker_func_2(xme24)
2148{
2149        printk("xme24\n");
2150}
2151
2152linker_func_2(xme25)
2153{
2154        printk("xme25\n");
2155}
2156
2157linker_func_3(xme31)
2158{
2159        printk("xme31\n");
2160}
2161
2162linker_func_3(xme32)
2163{
2164        printk("xme32\n");
2165}
2166
2167linker_func_3(xme33)
2168{
2169        printk("xme33\n");
2170}
2171
2172linker_func_3(xme34)
2173{
2174        printk("xme34\n");
2175}
2176
2177linker_func_3(xme35)
2178{
2179        printk("xme35\n");
2180}
2181
2182linker_func_4(xme41)
2183{
2184        printk("xme41\n");
2185}
2186
2187linker_func_4(xme42)
2188{
2189        printk("xme42\n");
2190}
2191
2192linker_func_4(xme43)
2193{
2194        printk("xme43\n");
2195}
2196
2197linker_func_4(xme44)
2198{
2199        printk("xme44\n");
2200}
2201
2202linker_func_4(xme45)
2203{
2204        printk("xme45\n");
2205}
2206#endif /* linker func tests */
2207