#include <multiboot.h>
#include <ros/memlayout.h>
#include <arch/mmu.h>
#include <string.h>

#define MAX_KERNBASE_SIZE (KERN_VMAP_TOP - KERNBASE)

uint32_t num_cpus = 1; // initialized nonzero so it lands in .data, not BSS
static uint64_t
mem_size(uint64_t sz_mb)
{
	uint64_t sz = (uint64_t)sz_mb * 1024 * 1024;
	return MIN(sz, MIN(MAX_KERNBASE_SIZE, (uint64_t)L1PGSIZE * NPTENTRIES));
}
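
/*
 * A worked reading of the clamp in mem_size() above (the numbers are
 * hypothetical, not this port's real constants): the function returns the
 * smallest of the reported RAM size, the KERNBASE window
 * (KERN_VMAP_TOP - KERNBASE), and what a single L1 page table of superpages
 * can map (L1PGSIZE * NPTENTRIES).  For example, if a platform reported
 * 8192 MB but the KERNBASE window were 2 GB and one L1 PT could map 4 GB,
 * mem_size() would return 2 GB.
 */
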
void pagetable_init(uint32_t memsize_mb, pte_t* l1pt, pte_t* l1pt_boot,
                    pte_t* l2pt)
{
	static_assert(KERNBASE % L1PGSIZE == 0);
	// The boot L1 PT retains the identity mapping [0,memsize-1],
	// whereas the post-boot L1 PT does not.
	uint64_t memsize = mem_size(memsize_mb);
	for(uint64_t pa = 0; pa < memsize+L1PGSIZE-1; pa += L1PGSIZE)
	{
		pte_t pte = PTE(LA2PPN(pa), PTE_KERN_RW | PTE_E);

		l1pt_boot[L1X(pa)] = pte;          // identity mapping
		l1pt_boot[L1X(KERNBASE+pa)] = pte; // KERNBASE mapping
		l1pt[L1X(KERNBASE+pa)] = pte;      // KERNBASE mapping
	}
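	/*
	 * A reading of the loop above, not stated in the original comments:
	 * early boot code still executes at physical addresses when paging is
	 * first enabled, so the boot L1 PT presumably needs the identity window
	 * to keep the PC valid until the kernel jumps to its KERNBASE-linked
	 * addresses.  After that jump the kernel can switch to l1pt, which drops
	 * the identity window and keeps only the high mappings.
	 */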

#ifdef __riscv64
	// The kernel code and static data are usually not accessed via the
	// KERNBASE mapping, but rather through an aliased "load" mapping in
	// the upper 2GB (0xFFFFFFFF80000000 and up).  This simplifies the
	// linking model by making all static addresses representable in 32 bits.
	static_assert(L1X(KERN_LOAD_ADDR) > L1X(KERNBASE));
	static_assert(KERN_LOAD_ADDR % L2PGSIZE == 0);
	static_assert((uintptr_t)(-KERN_LOAD_ADDR) <= L1PGSIZE);
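	/*
	 * Note on the arithmetic (the concrete address comes from the comment
	 * above, not from this port's headers): (uintptr_t)(-KERN_LOAD_ADDR) is
	 * the distance from KERN_LOAD_ADDR to the top of the address space.
	 * If KERN_LOAD_ADDR is 0xFFFFFFFF80000000, that distance is 0x80000000
	 * bytes (2 GB).  The assert above guarantees this whole load region fits
	 * under a single L1 entry, which appears to be why one l2pt is enough to
	 * map it.
	 */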

	l1pt[L1X(KERN_LOAD_ADDR)] = PTD(l2pt);
	l1pt_boot[L1X(KERN_LOAD_ADDR)] = PTD(l2pt);

	for (uintptr_t pa = 0; pa < (uintptr_t)(-KERN_LOAD_ADDR); pa += L2PGSIZE)
		l2pt[L2X(KERN_LOAD_ADDR+pa)] = PTE(LA2PPN(pa), PTE_KERN_RW | PTE_E);
#else
	(void) l2pt; // the L2 table is not needed for rv32
#endif
}

void
cmain(uint32_t memsize_mb, uint32_t num_cores)
{
	multiboot_info_t mbi;
	memset(&mbi, 0, sizeof(mbi));
	mbi.flags = 0x00000001;
	mbi.mem_lower = mem_size(memsize_mb) / 1024;
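	/*
	 * A note on the multiboot fields above (this is standard multiboot
	 * semantics, not something stated in this file): flags bit 0 marks the
	 * mem_lower/mem_upper fields as valid, and mem_lower is expressed in
	 * kilobytes, hence the division of the byte count by 1024.
	 */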

	num_cpus = num_cores;

	extern void kernel_init(multiboot_info_t *mboot_info);
	// kernel_init expects a pre-relocation (i.e. physical) mbi address
	kernel_init((multiboot_info_t*)PADDR(&mbi));
}