/* Copyright (c) 2009,13 The Regents of the University of California
* Barret Rhoden <brho@cs.berkeley.edu>
- * See LICENSE for details.
+ * See LICENSE for details.
*
* Arch independent physical memory and page table management.
*
#include <string.h>
#include <assert.h>
#include <pmap.h>
-#include <kclock.h>
#include <process.h>
#include <stdio.h>
#include <mm.h>
#include <multiboot.h>
+#include <arena.h>
+#include <init.h>
physaddr_t max_pmem = 0; /* Total amount of physical memory (bytes) */
physaddr_t max_paddr = 0; /* Maximum addressable physical address */
size_t max_nr_pages = 0; /* Number of addressable physical memory pages */
-size_t nr_free_pages = 0; /* TODO: actually track this, after init */
struct page *pages = 0;
struct multiboot_info *multiboot_kaddr = 0;
uintptr_t boot_freemem = 0;
max_pmem = MAX(max_pmem, (size_t)(entry->addr + entry->len));
}
+static void kpages_arena_init(void)
+{
+ void *kpages_pg;
+
+ kpages_pg = arena_alloc(base_arena, PGSIZE, MEM_WAIT);
+ kpages_arena = arena_builder(kpages_pg, "kpages", PGSIZE, arena_alloc,
+ arena_free, base_arena, 8 * PGSIZE);
+}
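+
+/* Illustrative only, not part of this change: once built, kpages_arena hands
+ * out kernel pages backed by base_arena, using the same arena calls as above,
+ * e.g.
+ *
+ *	void *pg = arena_alloc(kpages_arena, PGSIZE, MEM_WAIT);
+ *	...
+ *	arena_free(kpages_arena, pg, PGSIZE);
+ */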
+
/**
* @brief Initializes physical memory. Determines the pmem layout, sets up the
- * array of physical pages and memory free list, and turns on virtual
- * memory/page tables.
+ * base and kpages arenas, and turns on virtual memory/page tables.
*
* Regarding max_pmem vs max_paddr and max_nr_pages: max_pmem is the largest
* physical address that is in a FREE region. It includes RESERVED regions that
printk("Max physical RAM (appx, bytes): %lu\n", max_pmem);
printk("Max addressable physical RAM (appx): %lu\n", max_paddr);
printk("Highest page number (including reserved): %lu\n", max_nr_pages);
+ /* We should init the page structs, but zeroing happens to work, except for
+ * the sems. Those are init'd by the page cache before they are used. */
pages = (struct page*)boot_zalloc(max_nr_pages * sizeof(struct page),
PGSIZE);
- page_alloc_init(mbi);
+ base_arena_init(mbi);
+	/* kpages will use some of the basic slab caches. kmem_cache_init must not
+	 * do any memory allocations itself (it doesn't; anything it needs can come
+	 * from base_alloc()). */
+ kmem_cache_init();
+ kpages_arena_init();
+ printk("Base arena total mem: %lu\n", arena_amt_total(base_arena));
vm_init();
static_assert(PROCINFO_NUM_PAGES*PGSIZE <= PTSIZE);
* "end" is a symbol marking the end of the kernel. This covers anything linked
* in with the kernel (KFS, etc). However, 'end' is a kernel load address,
* which differs from kernbase addrs in 64 bit. We need to use the kernbase
- * mapping for anything dynamic (because it could go beyond 1 GB).
+ * mapping for anything dynamic (because it could go beyond 1 GB).
*
* Ideally, we'll use the largest mmap zone, as reported by multiboot. If we
* don't have one (riscv), we'll just use the memory after the kernel.
return v;
}
-/**
+/**
* @brief Map the physical page 'pp' into the virtual address 'va' in page
* directory 'pgdir'
*
* Map the physical page 'pp' at virtual address 'va'.
* The permissions (the low 12 bits) of the page table
* entry should be set to 'perm|PTE_P'.
- *
+ *
* Details:
* - If there is already a page mapped at 'va', it is page_remove()d.
- * - If necessary, on demand, allocates a page table and inserts it into
+ * - If necessary, on demand, allocates a page table and inserts it into
* 'pgdir'.
- * - page_incref() should be called if the insertion succeeds.
+ * - On success, your refcnt is stored in the pgdir (refcnts going away soon).
* - The TLB must be invalidated if a page was formerly present at 'va'.
* (this is handled in page_remove)
*
* No support for jumbos here. We will need to be careful when trying to
* insert regular pages into something that was already jumbo. We will
- * also need to be careful with our overloading of the PTE_PS and
+ * also need to be careful with our overloading of the PTE_PS and
* PTE_PAT flags...
*
* @param[in] pgdir the page directory to insert the page into
* physical page that should be inserted.
* @param[in] va the virtual address where the page should be
* inserted.
- * @param[in] perm the permition bits with which to set up the
+ * @param[in] perm the permission bits with which to set up the
* virtual mapping.
*
* @return ESUCCESS on success
* into which the page should be inserted
*
*/
-int page_insert(pde_t *pgdir, struct page *page, void *va, int perm)
+int page_insert(pgdir_t pgdir, struct page *page, void *va, int perm)
{
- pte_t* pte = pgdir_walk(pgdir, va, 1);
- if (!pte)
+ pte_t pte = pgdir_walk(pgdir, va, 1);
+ if (!pte_walk_okay(pte))
return -ENOMEM;
- /* Two things here: First, we need to up the ref count of the page we want
- * to insert in case it is already mapped at va. In that case we don't want
- * page_remove to ultimately free it, and then for us to continue as if pp
- * wasn't freed. (moral = up the ref asap) */
- kref_get(&page->pg_kref, 1);
- /* Careful, page remove handles the cases where the page is PAGED_OUT. */
- if (!PAGE_UNMAPPED(*pte))
- page_remove(pgdir, va);
- *pte = PTE(page2ppn(page), PTE_P | perm);
+	/* Leftover from older times, but we no longer support this: */
+ assert(!pte_is_mapped(pte));
+ pte_write(pte, page2pa(page), perm);
return 0;
}
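
/* Illustrative caller sketch, not part of this change. kpage_alloc() is an
 * assumed helper from the surrounding page API; everything else is from this
 * file. Per the new semantics above, a successful insert stores the caller's
 * refcnt in the pgdir, so the ref is dropped only on failure:
 *
 *	struct page *pg;
 *
 *	if (!kpage_alloc(&pg)) {		// assumed allocator, 0 == success
 *		if (page_insert(pgdir, pg, va, perm))
 *			page_decref(pg);
 *	}
 */
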
/**
- * @brief Return the page mapped at virtual address 'va' in
+ * @brief Return the page mapped at virtual address 'va' in
* page directory 'pgdir'.
*
* If pte_store is not NULL, then we store in it the address
* @return PAGE the page mapped at virtual address 'va'
* @return NULL No mapping exists at virtual address 'va', or it's paged out
*/
-page_t *page_lookup(pde_t *pgdir, void *va, pte_t **pte_store)
+page_t *page_lookup(pgdir_t pgdir, void *va, pte_t *pte_store)
{
- pte_t* pte = pgdir_walk(pgdir, va, 0);
- if (!pte || !PAGE_PRESENT(*pte))
+ pte_t pte = pgdir_walk(pgdir, va, 0);
+ if (!pte_walk_okay(pte) || !pte_is_mapped(pte))
return 0;
if (pte_store)
*pte_store = pte;
- return pa2page(PTE_ADDR(*pte));
+ return pa2page(pte_get_paddr(pte));
}
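
/* Illustrative caller sketch (only uses helpers visible in this file): given
 * some pgdir and va, look up the mapping and inspect its PTE without
 * modifying it.
 *
 *	pte_t pte;
 *	struct page *pg = page_lookup(pgdir, va, &pte);
 *
 *	if (pg)
 *		printk("va %p maps paddr 0x%lx\n", va, pte_get_paddr(pte));
 */
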
/**
* @brief Unmaps the physical page at virtual address 'va' in page directory
* 'pgdir'.
*
- * If there is no physical page at that address, this function silently
+ * If there is no physical page at that address, this function silently
* does nothing.
*
* Details:
* (if such a PTE exists)
 * - The TLB is invalidated if an entry is removed from the pg dir/pg table.
*
- * This may be wonky wrt Jumbo pages and decref.
+ * This may be wonky wrt Jumbo pages and decref.
*
 * @param pgdir the page directory from which the page should be removed
- * @param va the virtual address at which the page we are trying to
+ * @param va the virtual address at which the page we are trying to
* remove is mapped
* TODO: consider deprecating this, or at least changing how it works with TLBs.
* Might want to have the caller need to manage the TLB. Also note it is used
* in env_user_mem_free, minus the walk. */
-void page_remove(pde_t *pgdir, void *va)
+void page_remove(pgdir_t pgdir, void *va)
{
- pte_t *pte;
+ pte_t pte;
page_t *page;
pte = pgdir_walk(pgdir,va,0);
- if (!pte || PAGE_UNMAPPED(*pte))
+ if (!pte_walk_okay(pte) || pte_is_unmapped(pte))
return;
- if (PAGE_PRESENT(*pte)) {
+ if (pte_is_mapped(pte)) {
/* TODO: (TLB) need to do a shootdown, inval sucks. And might want to
* manage the TLB / free pages differently. (like by the caller).
* Careful about the proc/memory lock here. */
- page = ppn2page(PTE2PPN(*pte));
- *pte = 0;
+ page = pa2page(pte_get_paddr(pte));
+ pte_clear(pte);
tlb_invalidate(pgdir, va);
page_decref(page);
- } else if (PAGE_PAGED_OUT(*pte)) {
+ } else if (pte_is_paged_out(pte)) {
/* TODO: (SWAP) need to free this from the swap */
panic("Swapping not supported!");
- *pte = 0;
+ pte_clear(pte);
}
}
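
/* Illustrative caller sketch: page_remove() does the PTE clear, the local TLB
 * invalidate, and the page_decref() itself, so a caller only needs:
 *
 *	page_remove(pgdir, va);
 *
 * Cross-core TLB shootdown is still a TODO (see above, and
 * tlb_shootdown_global() below). */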
*
* TODO: (TLB) Need to sort this for cross core lovin'
*
- * @param pgdir the page directory assocaited with the tlb entry
+ * @param pgdir the page directory associated with the tlb entry
* we are trying to invalidate
* @param va the virtual address associated with the tlb entry
* we are trying to invalidate
*/
-void tlb_invalidate(pde_t *pgdir, void *va)
+void tlb_invalidate(pgdir_t pgdir, void *va)
{
// Flush the entry only if we're modifying the current address space.
// For now, there is only one address space, so always invalidate.
invlpg(va);
}
+static void __tlb_global(uint32_t srcid, long a0, long a1, long a2)
+{
+ tlb_flush_global();
+}
+
+/* Does a global TLB flush on all cores. */
+void tlb_shootdown_global(void)
+{
+ tlb_flush_global();
+ if (booting)
+ return;
+ /* TODO: consider a helper for broadcast messages, though note that we're
+ * doing our flush immediately, which our caller expects from us before it
+ * returns. */
+ for (int i = 0; i < num_cores; i++) {
+ if (i == core_id())
+ continue;
+ send_kernel_message(i, __tlb_global, 0, 0, 0, KMSG_IMMEDIATE);
+ }
+}
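+
+/* Illustrative use, not part of this change: after changing a kernel mapping
+ * that other cores may have cached in their TLBs, e.g.
+ *
+ *	pte_clear(pte);
+ *	tlb_shootdown_global();
+ */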
+
/* Helper, returns true if any part of (start1, end1) is within (start2, end2).
* Equality of endpoints (like end1 == start2) is okay.
* Assumes no wrap-around. */
-bool regions_collide_unsafe(uintptr_t start1, uintptr_t end1,
+bool regions_collide_unsafe(uintptr_t start1, uintptr_t end1,
uintptr_t start2, uintptr_t end2)
{
if (start1 <= start2) {