-/* See COPYRIGHT for copyright information. */
+/* See COPYRIGHT for copyright information.
+ * Inlines, macros, and most function prototypes (c) the JOS project.
+ *
+ * Actual implementation:
+ * Copyright (c) 2009 The Regents of the University of California
+ * Barret Rhoden <brho@cs.berkeley.edu>
+ * See LICENSE for details.
+ *
+ * Physical memory management, low-level virtual address space initialization and
+ * management, and other things related to virtual->physical mappings.
+ */
-#ifndef ROS_KERN_PMAP_H
-#define ROS_KERN_PMAP_H
-#ifndef ROS_KERNEL
-# error "This is a ROS kernel header; user programs should not #include it"
-#endif
+#pragma once
#include <ros/memlayout.h>
+#include <sys/queue.h>
#include <multiboot.h>
#include <atomic.h>
#include <process.h>
#include <assert.h>
-#include <sys/queue.h>
+#include <page_alloc.h>
+#include <arch/pmap.h>
-/* This macro takes a kernel virtual address -- an address that points above
- * KERNBASE, where the machine's maximum 256MB of physical memory is mapped --
- */
+/* This macro takes a kernel virtual address -- an address in either the
+ * KERNBASE mapping or the kernel's load mapping above KERN_LOAD_ADDR -- and
+ * returns the corresponding physical address.  It panics if you pass it an
+ * invalid kernel virtual address. */
#define PADDR(kva) \
({ \
- physaddr_t __m_kva = (physaddr_t) (kva); \
+ physaddr_t __m_pa, __m_kva = (physaddr_t) (kva); \
if (__m_kva < KERNBASE) \
- panic("PADDR called with invalid kva %08lx", __m_kva);\
- __m_kva - KERNBASE; \
+ panic("PADDR called with invalid kva %p", __m_kva);\
+	if (__m_kva >= KERN_LOAD_ADDR)					\
+ __m_pa = __m_kva - KERN_LOAD_ADDR; \
+ else \
+ __m_pa = __m_kva - KERNBASE; \
+ __m_pa; \
})
+#define paddr_low32(p) ((uint32_t)(uintptr_t)PADDR(p))
+#define paddr_high32(p) ((uint32_t)((uint64_t)PADDR(p) >> 32))
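+
+/* Example: PADDR() works for addresses in either kernel mapping.  The names
+ * below are illustrative:
+ *
+ *	physaddr_t pa1 = PADDR(some_text_addr);	// above KERN_LOAD_ADDR
+ *	physaddr_t pa2 = PADDR(some_heap_buf);	// above KERNBASE
+ *
+ * paddr_low32()/paddr_high32() split a physical address for hardware or boot
+ * code that takes a 64 bit address as two 32 bit halves. */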
+
/* This macro takes a physical address and returns the corresponding kernel
* virtual address. It warns if you pass an invalid physical address. */
#define KADDR(pa) \
({ \
physaddr_t __m_pa = (pa); \
- size_t __m_ppn = PPN(__m_pa); \
- if (__m_ppn >= npage) \
- warn("KADDR called with invalid pa %08lx", __m_pa);\
- (void*TRUSTED) (__m_pa + KERNBASE); \
+ size_t __m_ppn = LA2PPN(__m_pa); \
+ if (__m_ppn > max_nr_pages) \
+ warn("KADDR called with invalid pa %p", __m_pa);\
+ (void*) (__m_pa + KERNBASE); \
})
-/*
- * Page descriptor structures, mapped at UPAGES.
- * Read/write to the kernel, read-only to user programs.
- *
- * Each Page describes one physical page.
- * You can map a Page * to the corresponding physical address
- * with page2pa() in kern/pmap.h.
- */
-
-struct Page;
-typedef struct Page page_t;
-
-LIST_HEAD(page_list, Page);
-typedef struct page_list page_list_t;
-typedef LIST_ENTRY(Page) page_list_entry_t;
+#define KADDR_NOCHECK(pa) ((void*)((pa) + KERNBASE))
+#define KBASEADDR(kla) KADDR(PADDR(kla))
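+
+/* Example: round-tripping a physical address through the KERNBASE mapping:
+ *
+ *	void *kva = KADDR(0x100000);	// pa 1MB -> KERNBASE + 1MB
+ *	assert(PADDR(kva) == 0x100000);
+ *
+ * KBASEADDR() canonicalizes an address from the kernel's load mapping into
+ * its KERNBASE alias.  KADDR_NOCHECK() skips the max_nr_pages check, e.g. for
+ * physical addresses beyond the end of RAM. */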
-struct Page {
- page_list_entry_t pp_link; /* free list link */
- size_t num_cons_links;
+extern char bootstacktop[], bootstack[];
- // pp_ref is the count of pointers (usually in page table entries)
- // to this page, for pages allocated using page_alloc.
- // Pages allocated at boot time using pmap.c's
- // boot_alloc do not have valid reference count fields.
+extern physaddr_t max_pmem; /* Total amount of physical memory */
+extern size_t max_nr_pages; /* Total number of physical memory pages */
+extern physaddr_t max_paddr; /* Maximum addressable physical address */
+extern size_t nr_free_pages;
+extern struct multiboot_info *multiboot_kaddr;
+extern uintptr_t boot_freemem;
+extern uintptr_t boot_freelimit;
- uint16_t pp_ref;
-};
+/* Pages are stored in an array, including for pages that we can never touch
+ * (like reserved memory from the BIOS, fake regions, etc). Pages are reference
+ * counted, and free pages are kept on a linked list. */
+extern struct page *pages;
+extern physaddr_t boot_cr3;
+extern pgdir_t boot_pgdir;
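+
+/* Example: pages[] is indexed by physical page number, so the struct page for
+ * physical address pa is &pages[LA2PPN(pa)] (wrapped by pa2page() below), and
+ * a reference is dropped with page_decref(), declared below. */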
-extern char (SNT bootstacktop)[], (SNT bootstack)[];
+bool enable_pse(void);
+void vm_init(void);
-extern page_t *COUNT(npage) pages;
-extern size_t npage;
+void pmem_init(struct multiboot_info *mbi);
+void *boot_alloc(size_t amt, size_t align);
+void *boot_zalloc(size_t amt, size_t align);
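+
+/* Example: the boot_*alloc()s carve never-freed memory from boot_freemem
+ * during early boot, e.g. (illustratively) for the pages array itself:
+ *
+ *	pages = boot_zalloc(max_nr_pages * sizeof(struct page), PGSIZE);
+ */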
-extern physaddr_t boot_cr3;
-extern pde_t *COUNT(NPDENTRIES) boot_pgdir;
-
-extern char* boot_freemem;
-extern page_list_t page_free_list;
-
-void* (DALLOC(n) boot_alloc)(uint32_t n, uint32_t align);
-void* (DALLOC(_n*sz) boot_calloc)(uint32_t _n, size_t sz, uint32_t align);
-
-void multiboot_detect_memory(multiboot_info_t *mbi);
-void multiboot_print_memory_map(multiboot_info_t *mbi);
-bool enable_pse(void);
-void vm_init(void);
-
-void page_init(void);
-void page_check(void);
-int page_alloc(page_t **pp_store);
-int page_alloc_specific(page_t **pp_store, size_t ppn);
-void page_free(page_t *pp);
-int page_is_free(size_t ppn);
-int page_insert(pde_t *COUNT(NPDENTRIES) pgdir, page_t *pp, void *SNT va, int perm);
-void* page_insert_in_range(pde_t *COUNT(NPDENTRIES) pgdir, page_t *pp,
- void *SNT vab, void *SNT vae, int perm);
-void page_remove(pde_t *COUNT(NPDENTRIES) pgdir, void *SNT va);
-page_t* page_lookup(pde_t *COUNT(NPDENTRIES) pgdir, void *SNT va, pte_t **pte_store);
-error_t pagetable_remove(pde_t *COUNT(NPDENTRIES) pgdir, void *va);
+void page_check(void);
+int page_insert(pgdir_t pgdir, struct page *page, void *va, int perm);
+void page_remove(pgdir_t pgdir, void *va);
+page_t *page_lookup(pgdir_t pgdir, void *va, pte_t *pte_store);
+error_t pagetable_remove(pgdir_t pgdir, void *va);
void page_decref(page_t *pp);
-void setup_default_mtrrs(barrier_t* smp_barrier);
-void tlb_invalidate(pde_t *COUNT(NPDENTRIES) pgdir, void *va);
+void tlb_invalidate(pgdir_t pgdir, void *va);
void tlb_flush_global(void);
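+
+/* Sketch of a typical mapping life cycle: page_insert() takes a reference on
+ * the page and installs a PTE; page_remove() undoes both and invalidates the
+ * TLB entry.  The perm bits and error handling are illustrative:
+ *
+ *	if (page_insert(pgdir, page, va, PTE_USER_RW))
+ *		return -ENOMEM;		// couldn't alloc a page table
+ *	assert(page_lookup(pgdir, va, 0) == page);
+ *	page_remove(pgdir, va);
+ */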
-
-void *COUNT(len)
-user_mem_check(env_t *env, const void *DANGEROUS va, size_t len, int perm);
-
-void *COUNT(len)
-user_mem_assert(env_t *env, const void *DANGEROUS va, size_t len, int perm);
-
-error_t
-memcpy_from_user(env_t* env, void* COUNT(len) dest,
- const void *DANGEROUS va, size_t len);
-
-static inline page_t* ppn2page(size_t ppn)
+void tlb_shootdown_global(void);
+bool regions_collide_unsafe(uintptr_t start1, uintptr_t end1,
+ uintptr_t start2, uintptr_t end2);
+
+/* Arch specific implementations for these */
+void map_segment(pgdir_t pgdir, uintptr_t va, size_t size, physaddr_t pa,
+ int perm, int pml_shift);
+int unmap_segment(pgdir_t pgdir, uintptr_t va, size_t size);
+pte_t pgdir_walk(pgdir_t pgdir, const void *va, int create);
+int get_va_perms(pgdir_t pgdir, const void *va);
+int arch_pgdir_setup(pgdir_t boot_copy, pgdir_t *new_pd);
+physaddr_t arch_pgdir_get_cr3(pgdir_t pd);
+void arch_pgdir_clear(pgdir_t *pd);
+int arch_max_jumbo_page_shift(void);
+void arch_add_intermediate_pts(pgdir_t pgdir, uintptr_t va, size_t len);
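+
+/* Example: statically mapping a device region with 4K pages (pml_shift ==
+ * PGSHIFT), then tearing it down.  mmio_va, dev_pa, and the perm bits are
+ * illustrative; shifts up to arch_max_jumbo_page_shift() request jumbo
+ * mappings:
+ *
+ *	map_segment(boot_pgdir, mmio_va, PGSIZE, dev_pa, PTE_KERN_RW, PGSHIFT);
+ *	unmap_segment(boot_pgdir, mmio_va, PGSIZE);
+ */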
+
+static inline page_t *ppn2page(size_t ppn)
{
- if( ppn >= npage )
- warn("ppn2page called with ppn (%08u) larger than npage", ppn);
+ if (ppn >= max_nr_pages)
+ warn("ppn2page called with ppn (%08lu) larger than max_nr_pages", ppn);
return &(pages[ppn]);
}

static inline ppn_t page2ppn(page_t *pp)
{
	return pp - pages;
}

static inline physaddr_t page2pa(page_t *pp)
{
	return page2ppn(pp) << PGSHIFT;
}
-static inline page_t* pa2page(physaddr_t pa)
+static inline page_t *pa2page(physaddr_t pa)
{
- if (PPN(pa) >= npage)
- warn("pa2page called with pa (0x%08x) larger than npage", pa);
- return &pages[PPN(pa)];
+ if (LA2PPN(pa) >= max_nr_pages)
+ warn("pa2page called with pa (%p) larger than max_nr_pages", pa);
+ return &pages[LA2PPN(pa)];
}
-static inline void*COUNT(PGSIZE) page2kva(page_t *pp)
+static inline ppn_t pa2ppn(physaddr_t pa)
+{
+ return pa >> PGSHIFT;
+}
+
+static inline void *page2kva(page_t *pp)
{
return KADDR(page2pa(pp));
}
-static inline void*COUNT(PGSIZE) ppn2kva(size_t pp)
+static inline void *ppn2kva(size_t pp)
{
return page2kva(ppn2page(pp));
}
-static inline page_t* kva2page(void* addr)
+static inline page_t *kva2page(void *addr)
{
return pa2page(PADDR(addr));
}
-pte_t *pgdir_walk(pde_t *COUNT(NPDENTRIES) pgdir, const void *SNT va, int create);
+static inline ppn_t kva2ppn(void *addr)
+{
+ return page2ppn(kva2page(addr));
+}
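+
+/* These conversion helpers compose.  For a page-aligned pa backed by RAM and
+ * pp = pa2page(pa), the following identities hold:
+ *
+ *	page2pa(pa2page(pa)) == pa
+ *	kva2page(page2kva(pp)) == pp
+ *	kva2ppn(page2kva(pp)) == page2ppn(pp)
+ */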
+
+static inline bool is_kaddr(void *addr)
+{
+ return (uintptr_t)addr >= KERNBASE;
+}
+
+static inline unsigned long nr_pages(size_t nr_bytes)
+{
+ return (nr_bytes >> PGSHIFT) + (PGOFF(nr_bytes) ? 1 : 0);
+}
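+
+/* nr_pages() is a ceiling divide by PGSIZE: nr_pages(1) == 1,
+ * nr_pages(PGSIZE) == 1, and nr_pages(PGSIZE + 1) == 2. */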
-#endif /* !ROS_KERN_PMAP_H */
+/* Included here, since these ops often rely on pmap.h helpers, which in turn
+ * rely on the generic arch/pmap.h.  It's likely that many of these ops will
+ * be inlined for speed in pmap_ops. */
+#include <arch/pmap_ops.h>