-/* See COPYRIGHT for copyright information. */
-#ifdef __DEPUTY__
-#pragma nodeputy
-#endif
+/* Copyright (c) 2009,13 The Regents of the University of California
+ * Barret Rhoden <brho@cs.berkeley.edu>
+ * See LICENSE for details.
+ *
+ * Arch independent physical memory and page table management.
+ *
+ * For page allocation, check out the family of page_alloc files. */
#include <arch/arch.h>
#include <arch/mmu.h>
-#include <ros/error.h>
+#include <error.h>
+#include <kmalloc.h>
#include <atomic.h>
#include <string.h>
#include <assert.h>
#include <pmap.h>
-#include <kclock.h>
#include <process.h>
#include <stdio.h>
-
-//
-// Allocate n bytes of physical memory aligned on an
-// align-byte boundary. Align must be a power of two.
-// Return kernel virtual address. Returned memory is uninitialized.
-//
-// If we're out of memory, boot_alloc should panic.
-// This function may ONLY be used during initialization,
-// before the page_free_list has been set up.
-//
-void*
-boot_alloc(uint32_t n, uint32_t align)
+#include <mm.h>
+#include <multiboot.h>
+#include <arena.h>
+#include <init.h>
+
+physaddr_t max_pmem = 0; /* Total amount of physical memory (bytes) */
+physaddr_t max_paddr = 0; /* Maximum addressable physical address */
+size_t max_nr_pages = 0; /* Number of addressable physical memory pages */
+struct page *pages = 0;
+struct multiboot_info *multiboot_kaddr = 0;
+uintptr_t boot_freemem = 0;
+uintptr_t boot_freelimit = 0;
+
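+/* Helper: size (in bytes) of the memory region described by a multiboot mmap
+ * entry. */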
+static size_t sizeof_mboot_mmentry(struct multiboot_mmap_entry *entry)
{
- extern char end[];
- void *v;
-
- // Initialize boot_freemem if this is the first time.
- // 'end' is a magic symbol automatically generated by the linker,
- // which points to the end of the kernel's bss segment -
- // i.e., the first virtual address that the linker
- // did _not_ assign to any kernel code or global variables.
- if (boot_freemem == 0)
- boot_freemem = end;
-
- // Step 1: round boot_freemem up to be aligned properly
- boot_freemem = ROUNDUP(boot_freemem, align);
-
- // Step 2: save current value of boot_freemem as allocated chunk
- v = boot_freemem;
- // Step 2.5: check if we can alloc
- if (PADDR(boot_freemem + n) > maxaddrpa)
- panic("Out of memory in boot alloc, you fool!\n");
- // Step 3: increase boot_freemem to record allocation
- boot_freemem += n;
- // Step 4: return allocated chunk
- return v;
+ /* Careful - len is a uint64 (need to cast down for 32 bit) */
+ return (size_t)(entry->len);
}
-void*
-boot_calloc(uint32_t _n, size_t sz, uint32_t align)
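+/* mboot_foreach_mmap callback: extend max_pmem to cover the end of each free
+ * (MULTIBOOT_MEMORY_AVAILABLE) region. */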
+static void adjust_max_pmem(struct multiboot_mmap_entry *entry, void *data)
{
- extern char end[];
- uint32_t n = _n *sz;
- void *v;
-
- // Initialize boot_freemem if this is the first time.
- // 'end' is a magic symbol automatically generated by the linker,
- // which points to the end of the kernel's bss segment -
- // i.e., the first virtual address that the linker
- // did _not_ assign to any kernel code or global variables.
- if (boot_freemem == 0)
- boot_freemem = end;
+ if (entry->type != MULTIBOOT_MEMORY_AVAILABLE)
+ return;
+ /* Careful - addr + len is a uint64 (need to cast down for 32 bit) */
+ max_pmem = MAX(max_pmem, (size_t)(entry->addr + entry->len));
+}
- // Step 1: round boot_freemem up to be aligned properly
- boot_freemem = ROUNDUP(boot_freemem, align);
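+/* Bootstrap the kpages arena: take one page from the base arena to hold the
+ * arena's metadata, then build kpages on top of it, importing from
+ * base_arena. */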
+static void kpages_arena_init(void)
+{
+ void *kpages_pg;
- // Step 2: save current value of boot_freemem as allocated chunk
- v = boot_freemem;
- // Step 2.5: check if we can alloc
- if (PADDR(boot_freemem + n) > maxaddrpa)
- panic("Out of memory in boot alloc, you fool!\n");
- // Step 3: increase boot_freemem to record allocation
- boot_freemem += n;
- // Step 4: zero allocated chunk
- memset(v,0,n);
- // Step 5: return allocated chunk
- return v;
+ kpages_pg = arena_alloc(base_arena, PGSIZE, MEM_WAIT);
+ kpages_arena = arena_builder(kpages_pg, "kpages", PGSIZE, arena_alloc,
+ arena_free, base_arena, 8 * PGSIZE);
}
-//
-// Initialize a Page structure.
-// The result has null links and 0 refcount.
-// Note that the corresponding physical page is NOT initialized!
-//
-static void
-page_initpp(page_t *pp)
+/**
+ * @brief Initializes physical memory. Determines the pmem layout, sets up the
+ * base and kpages arenas, and turns on virtual memory/page tables.
+ *
+ * Regarding max_pmem vs max_paddr and max_nr_pages: max_pmem is the largest
+ * physical address that is in a FREE region. It includes RESERVED regions that
+ * are below this point. max_paddr is the largest physical address, <=
+ * max_pmem, that the KERNBASE mapping can map. It too may include reserved
+ * ranges. The 'pages' array will track all physical pages up to max_paddr.
+ * There are max_nr_pages of them. On 64 bit systems, max_pmem == max_paddr. */
+void pmem_init(struct multiboot_info *mbi)
{
- memset(pp, 0, sizeof(*pp));
+ mboot_detect_memory(mbi);
+ mboot_print_mmap(mbi);
+ /* adjust the max memory based on the mmaps, since the old detection doesn't
+ * help much on 64 bit systems */
+ mboot_foreach_mmap(mbi, adjust_max_pmem, 0);
+ /* KERN_VMAP_TOP - KERNBASE is the max amount of virtual address space we
+ * can use for the physical memory mapping (aka - the KERNBASE mapping).
+ * Shouldn't be an issue on 64 bit, but it usually is for 32 bit. */
+ max_paddr = MIN(max_pmem, KERN_VMAP_TOP - KERNBASE);
+ /* Note not all of this memory is free. */
+ max_nr_pages = max_paddr / PGSIZE;
+ printk("Max physical RAM (appx, bytes): %lu\n", max_pmem);
+ printk("Max addressable physical RAM (appx): %lu\n", max_paddr);
+ printk("Highest page number (including reserved): %lu\n", max_nr_pages);
+ /* We should init the page structs, but zeroing happens to work, except for
+ * the sems. Those are init'd by the page cache before they are used. */
+ pages = (struct page*)boot_zalloc(max_nr_pages * sizeof(struct page),
+ PGSIZE);
+ base_arena_init(mbi);
+ /* kpages will use some of the basic slab caches. kmem_cache_init must not
+ * allocate memory itself (which it doesn't; it can use base_alloc()). */
+ kmem_cache_init();
+ kpages_arena_init();
+ printk("Base arena total mem: %lu\n", arena_amt_total(base_arena));
+ vm_init();
+
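+ /* Both procinfo and procdata must fit within a single PTSIZE region. */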
+ static_assert(PROCINFO_NUM_PAGES*PGSIZE <= PTSIZE);
+ static_assert(PROCDATA_NUM_PAGES*PGSIZE <= PTSIZE);
}
-/*
- * Allocates a physical page.
- * Does NOT set the contents of the physical page to zero -
- * the caller must do that if necessary.
- *
- * *pp_store -- is set to point to the Page struct
- * of the newly allocated page
- *
- * RETURNS
- * 0 -- on success
- * -ENOMEM -- otherwise
- */
-int page_alloc(page_t **pp_store)
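+/* mboot_foreach_mmap callback: remember the largest free (AVAILABLE) mmap
+ * entry via the boot_zone pointer passed in 'data'. */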
+static void set_largest_freezone(struct multiboot_mmap_entry *entry, void *data)
{
- if (LIST_EMPTY(&page_free_list))
- return -ENOMEM;
- *pp_store = LIST_FIRST(&page_free_list);
- LIST_REMOVE(*pp_store, pp_link);
- page_initpp(*pp_store);
- return 0;
+ struct multiboot_mmap_entry **boot_zone =
+ (struct multiboot_mmap_entry**)data;
+
+ if (entry->type != MULTIBOOT_MEMORY_AVAILABLE)
+ return;
+ if (!*boot_zone || (sizeof_mboot_mmentry(entry) >
+ sizeof_mboot_mmentry(*boot_zone)))
+ *boot_zone = entry;
}
-/*
- * Allocates a specific physical page.
- * Does NOT set the contents of the physical page to zero -
- * the caller must do that if necessary.
+/* Initialize boot freemem and its limit.
*
- * *pp_store -- is set to point to the Page struct
- * of the newly allocated page
+ * "end" is a symbol marking the end of the kernel. This covers anything linked
+ * in with the kernel (KFS, etc). However, 'end' is a kernel load address,
+ * which differs from kernbase addrs in 64 bit. We need to use the kernbase
+ * mapping for anything dynamic (because it could go beyond 1 GB).
*
- * RETURNS
- * 0 -- on success
- * -ENOMEM -- otherwise
- */
-int page_alloc_specific(page_t **pp_store, size_t ppn)
+ * Ideally, we'll use the largest mmap zone, as reported by multiboot. If we
+ * don't have one (riscv), we'll just use the memory after the kernel.
+ *
+ * If we do have a zone, there is a chance we've already used some of it (for
+ * the kernel, etc). We'll use the lowest address in the zone that is
+ * greater than "end" (and adjust the limit accordingly). */
+static void boot_alloc_init(void)
{
- page_t* page = ppn2page(ppn);
- if( page->pp_ref != 0 )
- return -ENOMEM;
- *pp_store = page;
- LIST_REMOVE(*pp_store, pp_link);
- page_initpp(*pp_store);
- return 0;
-}
-
-int page_is_free(size_t ppn) {
- page_t* page = ppn2page(ppn);
- if( page->pp_ref == 0 )
- return TRUE;
- return FALSE;
+ extern char end[];
+ uintptr_t boot_zone_start, boot_zone_end;
+ uintptr_t end_kva = (uintptr_t)KBASEADDR(end);
+ struct multiboot_mmap_entry *boot_zone = 0;
+
+ /* Find our largest mmap_entry; that will set bootzone */
+ mboot_foreach_mmap(multiboot_kaddr, set_largest_freezone, &boot_zone);
+ if (boot_zone) {
+ boot_zone_start = (uintptr_t)KADDR(boot_zone->addr);
+ /* one issue for 32b is that the boot_zone_end could be beyond max_paddr
+ * and even wrap-around. Do the min check as a uint64_t. The result
+ * should be a safe, unwrapped 32/64b when cast to physaddr_t. */
+ boot_zone_end = (uintptr_t)KADDR(MIN(boot_zone->addr + boot_zone->len,
+ (uint64_t)max_paddr));
+ /* using KERNBASE (kva, btw) which covers the kernel and anything before
+ * it (like the stuff below EXTPHYSMEM on x86) */
+ if (regions_collide_unsafe(KERNBASE, end_kva,
+ boot_zone_start, boot_zone_end))
+ boot_freemem = end_kva;
+ else
+ boot_freemem = boot_zone_start;
+ boot_freelimit = boot_zone_end;
+ } else {
+ boot_freemem = end_kva;
+ boot_freelimit = max_paddr + KERNBASE;
+ }
+ printd("boot_zone: %p, paddr base: 0x%llx, paddr len: 0x%llx\n", boot_zone,
+ boot_zone ? boot_zone->addr : 0,
+ boot_zone ? boot_zone->len : 0);
+ printd("boot_freemem: %p, boot_freelimit %p\n", boot_freemem,
+ boot_freelimit);
}
-//
-// Return a page to the free list.
-// (This function should only be called when pp->pp_ref reaches 0.)
-//
-void page_free(page_t *pp)
+/* Low-level allocator, used before page_alloc is on. Returns size bytes,
+ * aligned to align (should be a power of 2). Retval is a kernbase addr. Will
+ * panic on failure. */
+void *boot_alloc(size_t amt, size_t align)
{
- // this check allows us to call this on null ptrs, which helps when
- // allocating and checking for errors on several pages at once
- if (pp) {
- if (pp->pp_ref)
- panic("Attempting to free page with non-zero reference count!");
- LIST_INSERT_HEAD(&page_free_list, pp, pp_link);
+ uintptr_t retval;
+
+ if (!boot_freemem)
+ boot_alloc_init();
+ boot_freemem = ROUNDUP(boot_freemem, align);
+ retval = boot_freemem;
+ if (boot_freemem + amt > boot_freelimit) {
+ printk("boot_alloc: boot_freemem is %p\n", boot_freemem);
+ printk("boot_alloc: amt is %lu\n", amt);
+ printk("boot_freelimit is %p\n", boot_freelimit);
+ printk("boot_freemem + amt is > boot_freelimit\n");
+ panic("Out of memory in boot alloc, you fool!\n");
}
+ boot_freemem += amt;
+ printd("boot alloc from %p to %p\n", retval, boot_freemem);
+ /* multiboot info probably won't ever conflict with our boot alloc */
+ if (mboot_region_collides(multiboot_kaddr, retval, boot_freemem))
+ panic("boot allocation could clobber multiboot info! Get help!");
+ return (void*)retval;
}
-//
-// Decrement the reference count on a page,
-// freeing it if there are no more refs.
-//
-void
-page_decref(page_t *pp)
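+/* Boot-time allocator that also zeroes the returned memory. Same semantics
+ * as boot_alloc. */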
+void *boot_zalloc(size_t amt, size_t align)
{
- if (--pp->pp_ref == 0)
- page_free(pp);
+ /* boot_alloc panics on failure */
+ void *v = boot_alloc(amt, align);
+ memset(v, 0, amt);
+ return v;
}
-//
-// Map the physical page 'pp' at virtual address 'va'.
-// The permissions (the low 12 bits) of the page table
-// entry should be set to 'perm|PTE_P'.
-//
-// Details
-// - If there is already a page mapped at 'va', it is page_remove()d.
-// - If necessary, on demand, allocates a page table and inserts it into
-// 'pgdir'.
-// - pp->pp_ref should be incremented if the insertion succeeds.
-// - The TLB must be invalidated if a page was formerly present at 'va'.
-// (this is handled in page_remove)
-//
-// RETURNS:
-// 0 on success
-// -ENOMEM, if page table couldn't be allocated
-//
-// Hint: The TA solution is implemented using pgdir_walk, page_remove,
-// and page2pa.
-//
-// No support for jumbos here. will need to be careful of trying to insert
-// regular pages into something that was already jumbo, and the overloading
-// of the PTE_PS and PTE_PAT flags...
-int
-page_insert(pde_t *pgdir, page_t *pp, void *va, int perm)
+/**
+ * @brief Map the physical page 'pp' into the virtual address 'va' in page
+ * directory 'pgdir'
+ *
+ * Map the physical page 'pp' at virtual address 'va'.
+ * The permissions (the low 12 bits) of the page table
+ * entry should be set to 'perm|PTE_P'.
+ *
+ * Details:
+ * - If there is already a page mapped at 'va', it is page_remove()d.
+ * - If necessary, on demand, allocates a page table and inserts it into
+ * 'pgdir'.
+ * - This saves your refcnt in the pgdir (refcnts going away soon).
+ * - The TLB must be invalidated if a page was formerly present at 'va'.
+ * (this is handled in page_remove)
+ *
+ * No support for jumbos here. We will need to be careful when trying to
+ * insert regular pages into something that was already jumbo. We will
+ * also need to be careful with our overloading of the PTE_PS and
+ * PTE_PAT flags...
+ *
+ * @param[in] pgdir the page directory to insert the page into
+ * @param[in] pp a pointer to the page struct representing the
+ * physical page that should be inserted.
+ * @param[in] va the virtual address where the page should be
+ * inserted.
+ * @param[in] perm the permission bits with which to set up the
+ * virtual mapping.
+ *
+ * @return ESUCCESS on success
+ * @return -ENOMEM if a page table could not be allocated
+ * into which the page should be inserted
+ *
+ */
+int page_insert(pgdir_t pgdir, struct page *page, void *va, int perm)
{
- pte_t* pte = pgdir_walk(pgdir, va, 1);
- if (!pte)
+ pte_t pte = pgdir_walk(pgdir, va, 1);
+ if (!pte_walk_okay(pte))
return -ENOMEM;
- // need to up the ref count in case pp is already mapped at va
- // and we don't want to page_remove (which could free pp) and then
- // continue as if pp wasn't freed. moral = up the ref asap
- pp->pp_ref++;
- if (*pte & PTE_P) {
- page_remove(pgdir, va);
- }
- *pte = PTE(page2ppn(pp), PTE_P | perm);
+ /* Leftover from older times, but we no longer support this: */
+ assert(!pte_is_mapped(pte));
+ pte_write(pte, page2pa(page), perm);
return 0;
}
-//
-// Map the physical page 'pp' at the first virtual address that is free
-// in the range 'vab' to 'vae'.
-// The permissions (the low 12 bits) of the page table entry get set to
-// 'perm|PTE_P'.
-//
-// Details
-// - If there is no free entry in the range 'vab' to 'vae' this
-// function returns -ENOMEM.
-// - If necessary, on demand, this function will allocate a page table
-// and inserts it into 'pgdir'.
-// - pp->pp_ref should be incremented if the insertion succeeds.
-//
-// RETURNS:
-// NULL, if no free va in the range (vab, vae) could be found
-// va, the virtual address where pp has been mapped in the
-// range (vab, vae)
-//
-void* page_insert_in_range(pde_t *pgdir, page_t *pp,
- void *vab, void *vae, int perm)
-{
- pte_t* pte = NULL;
- void* new_va;
-
- for(new_va = vab; new_va <= vae; new_va+= PGSIZE) {
- pte = pgdir_walk(pgdir, new_va, 1);
- if(pte != NULL && !(*pte & PTE_P)) break;
- else pte = NULL;
- }
- if (!pte) return NULL;
- *pte = page2pa(pp) | PTE_P | perm;
- return new_va;
-}
-
-//
-// Return the page mapped at virtual address 'va'.
-// If pte_store is not zero, then we store in it the address
-// of the pte for this page. This is used by page_remove
-// but should not be used by other callers.
-//
-// Return 0 if there is no page mapped at va.
-//
-// Hint: the TA solution uses pgdir_walk and pa2page.
-//
-// For jumbos, right now this returns the first Page* in the 4MB
-page_t *page_lookup(pde_t *pgdir, void *va, pte_t **pte_store)
+/**
+ * @brief Return the page mapped at virtual address 'va' in
+ * page directory 'pgdir'.
+ *
+ * If pte_store is not NULL, then we store in it the address
+ * of the pte for this page. This is used by page_remove
+ * but should not be used by other callers.
+ *
+ * For jumbos, right now this returns the first Page* in the 4MB range
+ *
+ * @param[in] pgdir the page directory from which we should do the lookup
+ * @param[in] va the virtual address of the page we are looking up
+ * @param[out] pte_store the address of the page table entry for the returned page
+ *
+ * @return PAGE the page mapped at virtual address 'va'
+ * @return NULL No mapping exists at virtual address 'va', or it's paged out
+ */
+page_t *page_lookup(pgdir_t pgdir, void *va, pte_t *pte_store)
{
- pte_t* pte = pgdir_walk(pgdir, va, 0);
- if (!pte || !(*pte & PTE_P))
+ pte_t pte = pgdir_walk(pgdir, va, 0);
+ if (!pte_walk_okay(pte) || !pte_is_mapped(pte))
return 0;
if (pte_store)
*pte_store = pte;
- return pa2page(PTE_ADDR(*pte));
+ return pa2page(pte_get_paddr(pte));
}
-//
-// Unmaps the physical page at virtual address 'va'.
-// If there is no physical page at that address, silently does nothing.
-//
-// Details:
-// - The ref count on the physical page should decrement.
-// - The physical page should be freed if the refcount reaches 0.
-// - The pg table entry corresponding to 'va' should be set to 0.
-// (if such a PTE exists)
-// - The TLB must be invalidated if you remove an entry from
-// the pg dir/pg table.
-//
-// Hint: The TA solution is implemented using page_lookup,
-// tlb_invalidate, and page_decref.
-//
-// This may be wonky wrt Jumbo pages and decref.
-void
-page_remove(pde_t *pgdir, void *va)
+/**
+ * @brief Unmaps the physical page at virtual address 'va' in page directory
+ * 'pgdir'.
+ *
+ * If there is no physical page at that address, this function silently
+ * does nothing.
+ *
+ * Details:
+ * - The ref count on the physical page is decremented when the page is removed
+ * - The physical page is freed if the refcount reaches 0.
+ * - The pg table entry corresponding to 'va' is set to 0.
+ * (if such a PTE exists)
+ * - The TLB is invalidated if an entry is removed from the pg dir/pg table.
+ *
+ * This may be wonky wrt Jumbo pages and decref.
+ *
+ * @param pgdir the page directory from which the page should be removed
+ * @param va the virtual address at which the page we are trying to
+ * remove is mapped
+ * TODO: consider deprecating this, or at least changing how it works with TLBs.
+ * Might want to have the caller manage the TLB. Also note it is used
+ * in env_user_mem_free, minus the walk. */
+void page_remove(pgdir_t pgdir, void *va)
{
- pte_t* pte;
+ pte_t pte;
page_t *page;
- page = page_lookup(pgdir, va, &pte);
- if (!page)
+
+ pte = pgdir_walk(pgdir, va, 0);
+ if (!pte_walk_okay(pte) || pte_is_unmapped(pte))
return;
- *pte = 0;
- tlb_invalidate(pgdir, va);
- page_decref(page);
+
+ if (pte_is_mapped(pte)) {
+ /* TODO: (TLB) need to do a shootdown, inval sucks. And might want to
+ * manage the TLB / free pages differently. (like by the caller).
+ * Careful about the proc/memory lock here. */
+ page = pa2page(pte_get_paddr(pte));
+ pte_clear(pte);
+ tlb_invalidate(pgdir, va);
+ page_decref(page);
+ } else if (pte_is_paged_out(pte)) {
+ /* TODO: (SWAP) need to free this from the swap */
+ panic("Swapping not supported!");
+ pte_clear(pte);
+ }
}
-//
-// Invalidate a TLB entry, but only if the page tables being
-// edited are the ones currently in use by the processor.
-//
-// Need to sort this for cross core lovin' TODO
-void
-tlb_invalidate(pde_t *pgdir, void *va)
+/**
+ * @brief Invalidate a TLB entry, but only if the page tables being
+ * edited are the ones currently in use by the processor.
+ *
+ * TODO: (TLB) Need to sort this for cross core lovin'
+ *
+ * @param pgdir the page directory associated with the tlb entry
+ * we are trying to invalidate
+ * @param va the virtual address associated with the tlb entry
+ * we are trying to invalidate
+ */
+void tlb_invalidate(pgdir_t pgdir, void *va)
{
// Flush the entry only if we're modifying the current address space.
// For now, there is only one address space, so always invalidate.
invlpg(va);
}
-static void *DANGEROUS user_mem_check_addr;
-
-//
-// Check that an environment is allowed to access the range of memory
-// [va, va+len) with permissions 'perm | PTE_P'.
-// Normally 'perm' will contain PTE_U at least, but this is not required.
-// 'va' and 'len' need not be page-aligned; you must test every page that
-// contains any of that range. You will test either 'len/PGSIZE',
-// 'len/PGSIZE + 1', or 'len/PGSIZE + 2' pages.
-//
-// A user program can access a virtual address if (1) the address is below
-// ULIM, and (2) the page table gives it permission. These are exactly
-// the tests you should implement here.
-//
-// If there is an error, set the 'user_mem_check_addr' variable to the first
-// erroneous virtual address.
-//
-// Returns 0 if the user program can access this range of addresses,
-// and -EFAULT otherwise.
-//
-// Hint: The TA solution uses pgdir_walk.
-//
-
-// zra: I've modified the interface to these two functions so that Ivy can
-// check that user pointers aren't dereferenced. User pointers get the
-// DANGEROUS qualifier. After validation, these functions return a
-// COUNT(len) pointer. user_mem_check now returns NULL on error instead of
-// -EFAULT.
-
-void *COUNT(len)
-user_mem_check(env_t *env, const void *DANGEROUS va, size_t len, int perm)
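+/* Kernel message handler: flush this core's entire TLB (run on remote cores
+ * by tlb_shootdown_global()). */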
+static void __tlb_global(uint32_t srcid, long a0, long a1, long a2)
{
- // TODO - will need to sort this out wrt page faulting / PTE_P
- // also could be issues with sleeping and waking up to find pages
- // are unmapped, though i think the lab ignores this since the
- // kernel is uninterruptible
- void *DANGEROUS start, *DANGEROUS end;
- size_t num_pages, i;
- pte_t *pte;
-
- perm |= PTE_P;
- start = ROUNDDOWN((void*)va, PGSIZE);
- end = ROUNDUP((void*)va + len, PGSIZE);
- if (start >= end) {
- warn("Blimey! Wrap around in VM range calculation!");
- return NULL;
- }
- num_pages = PPN(end - start);
- for (i = 0; i < num_pages; i++, start += PGSIZE) {
- pte = pgdir_walk(env->env_pgdir, start, 0);
- // ensures the bits we want on are turned on. if not, error out
- if ( !pte || ((*pte & perm) != perm) ) {
- if (i = 0)
- user_mem_check_addr = (void*)va;
- else
- user_mem_check_addr = start;
- return NULL;
- }
- }
- // this should never be needed, since the perms should catch it
- if ((uintptr_t)end > ULIM) {
- warn ("I suck - Bug in user permission mappings!");
- return NULL;
- }
- return (void *COUNT(len))TC(va);
+ tlb_flush_global();
}
-//
-// Checks that environment 'env' is allowed to access the range
-// of memory [va, va+len) with permissions 'perm | PTE_U'.
-// If it can, then the function simply returns.
-// If it cannot, 'env' is destroyed.
-//
-void *COUNT(len)
-user_mem_assert(env_t *env, const void *DANGEROUS va, size_t len, int perm)
+/* Does a global TLB flush on all cores. */
+void tlb_shootdown_global(void)
{
- void *COUNT(len) res = user_mem_check(env,va,len,perm | PTE_USER_RO);
- if (!res) {
- cprintf("[%08x] user_mem_check assertion failure for "
- "va %08x\n", env->env_id, user_mem_check_addr);
- proc_destroy(env); // may not return
- return NULL;
+ tlb_flush_global();
+ if (booting)
+ return;
+ /* TODO: consider a helper for broadcast messages, though note that we're
+ * doing our flush immediately, which our caller expects from us before it
+ * returns. */
+ for (int i = 0; i < num_cores; i++) {
+ if (i == core_id())
+ continue;
+ send_kernel_message(i, __tlb_global, 0, 0, 0, KMSG_IMMEDIATE);
}
- return res;
}
-// copies data from a user buffer to a kernel buffer.
-// EFAULT if page not present, user lacks perms, or invalid addr.
-error_t
-memcpy_from_user(env_t* env, void* COUNT(len) dest,
- const void *DANGEROUS va, size_t len)
+/* Helper, returns true if any part of (start1, end1) is within (start2, end2).
+ * Equality of endpoints (like end1 == start2) is okay.
+ * Assumes no wrap-around. */
+bool regions_collide_unsafe(uintptr_t start1, uintptr_t end1,
+ uintptr_t start2, uintptr_t end2)
{
- const void *DANGEROUS start, *DANGEROUS end;
- size_t num_pages, i;
- pte_t *pte;
- uintptr_t perm = PTE_P | PTE_USER_RO;
- size_t bytes_copied = 0;
-
- static_assert(ULIM % PGSIZE == 0 && ULIM != 0); // prevent wrap-around
-
- start = ROUNDDOWN(va, PGSIZE);
- end = ROUNDUP(va + len, PGSIZE);
-
- if(start >= (void*SNT)ULIM || end >= (void*SNT)ULIM)
- return -EFAULT;
-
- num_pages = PPN(end - start);
- for(i = 0; i < num_pages; i++)
- {
- pte = pgdir_walk(env->env_pgdir, start+i*PGSIZE, 0);
- if(!pte || (*pte & perm) != perm)
- return -EFAULT;
-
- void*COUNT(PGSIZE) kpage = KADDR(PTE_ADDR(pte));
- void* src_start = i > 0 ? kpage : kpage+(va-start);
- void* dst_start = dest+bytes_copied;
- size_t copy_len = PGSIZE;
- if(i == 0)
- copy_len -= va-start;
- if(i == num_pages-1)
- copy_len -= end-(start+len);
-
- memcpy(dst_start,src_start,copy_len);
- bytes_copied += copy_len;
+ if (start1 <= start2) {
+ if (end1 <= start2)
+ return FALSE;
+ return TRUE;
+ } else {
+ if (end2 <= start1)
+ return FALSE;
+ return TRUE;
}
-
- assert(bytes_copied == len);
-
- return ESUCCESS;
}