akaros/kern/include/pmap.h
/* See COPYRIGHT for copyright information.
 * Inlines, macros, and most function prototypes (c) the JOS project.
 *
 * Actual implementation:
 * Copyright (c) 2009 The Regents of the University of California
 * Barret Rhoden <brho@cs.berkeley.edu>
 * See LICENSE for details.
 *
 * Physical memory management, low-level virtual address space initialization
 * and management, and other things related to virtual->physical mappings.
 */

#pragma once

#include <ros/memlayout.h>
#include <sys/queue.h>
#include <multiboot.h>
#include <atomic.h>
#include <process.h>
#include <assert.h>
#include <page_alloc.h>
#include <arch/pmap.h>

/* This macro takes a kernel virtual address -- an address that points above
 * KERNBASE, where the machine's maximum 256MB of physical memory is mapped --
 * and returns the corresponding physical address.  It panics if you pass it a
 * non-kernel virtual address.
 */
#define PADDR(kva)                                              \
({                                                              \
        physaddr_t __m_pa, __m_kva = (physaddr_t) (kva);        \
        if (__m_kva < KERNBASE)                                 \
                panic("PADDR called with invalid kva %p", __m_kva);\
        if (__m_kva >= KERN_LOAD_ADDR)                          \
                __m_pa = __m_kva - KERN_LOAD_ADDR;              \
        else                                                    \
                __m_pa = __m_kva - KERNBASE;                    \
        __m_pa;                                                 \
})

#define paddr_low32(p) ((uint32_t)(uintptr_t)PADDR(p))
#define paddr_high32(p) ((uint32_t)((uint64_t)PADDR(p) >> 32))
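
/* A minimal usage sketch (not part of the original header): PADDR() converts
 * a kernel virtual address to its physical address, and the split macros
 * above hand it to hardware that wants 32-bit halves.  'buf' is a
 * hypothetical pointer that lives above KERNBASE.
 *
 *      physaddr_t pa = PADDR(buf);
 *      uint32_t pa_lo = paddr_low32(buf);
 *      uint32_t pa_hi = paddr_high32(buf);
 */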

/* This macro takes a physical address and returns the corresponding kernel
 * virtual address.  It warns if you pass an invalid physical address. */
#define KADDR(pa)                                               \
({                                                              \
        physaddr_t __m_pa = (pa);                               \
        size_t __m_ppn = LA2PPN(__m_pa);                        \
        if (__m_ppn > max_nr_pages)                             \
                warn("KADDR called with invalid pa %p", __m_pa);\
        (void*) (__m_pa + KERNBASE);                            \
})

#define KADDR_NOCHECK(pa) ((void*)((pa) + KERNBASE))
#define KBASEADDR(kla) KADDR(PADDR(kla))
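
/* A hedged round-trip sketch (illustration, not from the original header):
 * for a valid, managed physical address 'pa', KADDR() and PADDR() invert
 * each other.
 *
 *      void *kva = KADDR(pa);
 *      assert(PADDR(kva) == pa);
 */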

extern physaddr_t max_pmem;     /* Total amount of physical memory */
extern size_t max_nr_pages;     /* Total number of physical memory pages */
extern physaddr_t max_paddr;    /* Maximum addressable physical address */
extern size_t nr_free_pages;
extern struct multiboot_info *multiboot_kaddr;
extern uintptr_t boot_freemem;
extern uintptr_t boot_freelimit;

/* Pages are stored in an array, including for pages that we can never touch
 * (like reserved memory from the BIOS, fake regions, etc).  Pages are reference
 * counted, and free pages are kept on a linked list. */
extern struct page *pages;
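
/* An illustrative note (derived from the helpers below, not a separate API):
 * a page's index in pages[] is its physical page number, so for a valid ppn
 * the conversions below compose as:
 *
 *      struct page *pg = ppn2page(ppn);        // &pages[ppn]
 *      assert(page2ppn(pg) == ppn);
 *      assert(page2pa(pg) == (physaddr_t)ppn << PGSHIFT);
 */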

extern physaddr_t boot_cr3;
extern pgdir_t boot_pgdir;

bool enable_pse(void);
void vm_init(void);

void pmem_init(struct multiboot_info *mbi);
void *boot_alloc(size_t amt, size_t align);
void *boot_zalloc(size_t amt, size_t align);
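
/* A hedged example (an assumption for illustration, not taken from this
 * header): the boot-time allocator can carve out zeroed, page-aligned memory
 * before the page allocator is up, e.g. to size the pages array:
 *
 *      pages = boot_zalloc(max_nr_pages * sizeof(struct page), PGSIZE);
 */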

void page_check(void);
int      page_insert(pgdir_t pgdir, struct page *page, void *va,
                        int perm);
void page_remove(pgdir_t pgdir, void *va);
page_t* page_lookup(pgdir_t pgdir, void *va, pte_t *pte_store);
error_t pagetable_remove(pgdir_t pgdir, void *va);
void    page_decref(page_t *pp);

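/* A hedged usage sketch (names and flags are assumptions, not from this
 * header): map a page at 'va' in 'pgdir', then tear the mapping down.  'pg'
 * is assumed to come from the page allocator (page_alloc.h) and 'perm' holds
 * arch-specific PTE permission bits.
 *
 *      if (page_insert(pgdir, pg, va, perm) < 0)
 *              warn("couldn't insert page; out of memory for page tables?");
 *      ...
 *      page_remove(pgdir, va);
 *      tlb_invalidate(pgdir, va);
 */
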
void    tlb_invalidate(pgdir_t pgdir, void *ga);
void tlb_flush_global(void);
void tlb_shootdown_global(void);
bool regions_collide_unsafe(uintptr_t start1, uintptr_t end1,
                            uintptr_t start2, uintptr_t end2);

/* Arch-specific implementations of these */
void map_segment(pgdir_t pgdir, uintptr_t va, size_t size, physaddr_t pa,
                 int perm, int pml_shift);
int unmap_segment(pgdir_t pgdir, uintptr_t va, size_t size);
pte_t pgdir_walk(pgdir_t pgdir, const void *va, int create);
int get_va_perms(pgdir_t pgdir, const void *va);
int arch_pgdir_setup(pgdir_t boot_copy, pgdir_t *new_pd);
physaddr_t arch_pgdir_get_cr3(pgdir_t pd);
void arch_pgdir_clear(pgdir_t *pd);
int arch_max_jumbo_page_shift(void);
void arch_add_intermediate_pts(pgdir_t pgdir, uintptr_t va, size_t len);
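
/* A hedged sketch (assumptions for illustration, not from this header):
 * map a physical region into the boot page tables with base-sized pages
 * (pml_shift == PGSHIFT), then unmap it.  'va', 'pa', 'len', and 'perm' are
 * hypothetical.
 *
 *      map_segment(boot_pgdir, va, len, pa, perm, PGSHIFT);
 *      ...
 *      unmap_segment(boot_pgdir, va, len);
 *      tlb_flush_global();
 */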

static inline page_t *ppn2page(size_t ppn)
{
        if (ppn >= max_nr_pages)
                warn("%s called with ppn (%p) larger than max_nr_pages (%p)",
                     __func__, ppn, max_nr_pages);
        return &(pages[ppn]);
}

static inline ppn_t page2ppn(page_t *pp)
{
        return pp - pages;
}

static inline physaddr_t page2pa(page_t *pp)
{
        return page2ppn(pp) << PGSHIFT;
}

static inline page_t *pa2page(physaddr_t pa)
{
        if (LA2PPN(pa) >= max_nr_pages)
                warn("%s called with pa (%p) larger than max_nr_pages (%p)",
                     __func__, pa, max_nr_pages);
        return &pages[LA2PPN(pa)];
}

static inline ppn_t pa2ppn(physaddr_t pa)
{
        return pa >> PGSHIFT;
}

static inline void *page2kva(page_t *pp)
{
        return KADDR(page2pa(pp));
}

static inline void *ppn2kva(size_t pp)
{
        return page2kva(ppn2page(pp));
}

static inline page_t *kva2page(void *addr)
{
        return pa2page(PADDR(addr));
}

static inline ppn_t kva2ppn(void *addr)
{
        return page2ppn(kva2page(addr));
}

static inline bool is_kaddr(void *addr)
{
        return (uintptr_t)addr >= KERNBASE;
}

static inline unsigned long nr_pages(size_t nr_bytes)
{
        return (nr_bytes >> PGSHIFT) + (PGOFF(nr_bytes) ? 1 : 0);
}
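
/* For example: nr_pages(1) == 1, nr_pages(PGSIZE) == 1, and
 * nr_pages(PGSIZE + 1) == 2 -- any partial page rounds up (and
 * nr_pages(0) == 0). */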

/* Included here, since these ops often rely on pmap.h helpers, which in turn
 * rely on the generic arch/pmap.h.  It's likely that many of these ops will be
 * inlined for speed in pmap_ops. */
#include <arch/pmap_ops.h>