mm: Allow dev.mmap() to block
diff --git a/kern/src/pagemap.c b/kern/src/pagemap.c
index 6cf169c..ae69a84 100644
--- a/kern/src/pagemap.c
+++ b/kern/src/pagemap.c
@@ -70,11 +70,17 @@ static int pm_slot_check_refcnt(void *slot_val)
 
 static void *pm_slot_inc_refcnt(void *slot_val)
 {
-       return (void*)((unsigned long)slot_val + (1UL << PM_REFCNT_SHIFT));
+       void *ret;
+
+       ret = (void*)((unsigned long)slot_val + (1UL << PM_REFCNT_SHIFT));
+       /* Catches previously negative refcnts */
+       assert(pm_slot_check_refcnt(ret) > 0);
+       return ret;
 }
 
 static void *pm_slot_dec_refcnt(void *slot_val)
 {
+       assert(pm_slot_check_refcnt(slot_val) > 0);
        return (void*)((unsigned long)slot_val - (1UL << PM_REFCNT_SHIFT));
 }
 
@@ -181,7 +187,10 @@ static int pm_insert_page(struct page_map *pm, unsigned long index,
 void pm_put_page(struct page *page)
 {
        void **tree_slot = page->pg_tree_slot;
+
        assert(tree_slot);
+       assert(pm_slot_get_page(*tree_slot) == page);
+       assert(pm_slot_check_refcnt(*tree_slot) > 0);
        /* decref, don't care about CASing */
        atomic_add((atomic_t*)tree_slot, -(1UL << PM_REFCNT_SHIFT));
 }
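
These asserts turn pm_put_page() into a self-check of the pairing it undoes: the usual pattern is a lookup that took a slot reference, followed by this put. A hedged usage sketch (pm_find_page() is the same lookup used in pm_load_page() below):

	struct page *page = pm_find_page(pm, index);	/* takes a slot reference */

	if (page) {
		/* the slot refcnt pins the slot<->page binding while we use it */
		pm_put_page(page);	/* drops it, and now asserts the binding held */
	}

Since the decref is a raw atomic_add with no CAS loop, these asserts are what catch a put against a slot that no longer points at the page.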
@@ -202,7 +211,10 @@ int pm_load_page(struct page_map *pm, unsigned long index, struct page **pp)
                /* important that UP_TO_DATE is not set.  once we put it in the PM,
                 * others can find it, and we still need to fill it. */
                atomic_set(&page->pg_flags, PG_LOCKED | PG_PAGEMAP);
-               page->pg_sem.nr_signals = 0;    /* preemptively locking */
+               /* The sem needs to be initted before anyone can try to lock it, meaning
+                * before it is in the page cache.  We also want it locked preemptively,
+                * by setting signals = 0. */
+               sem_init(&page->pg_sem, 0);
                error = pm_insert_page(pm, index, page);
                switch (error) {
                        case 0:
@@ -211,15 +223,19 @@ int pm_load_page(struct page_map *pm, unsigned long index, struct page **pp)
                        case -EEXIST:
                                /* the page was mapped already (benign race), just get rid of
                                 * our page and try again (the only case that uses the while) */
+                               atomic_set(&page->pg_flags, 0);
                                page_decref(page);
                                page = pm_find_page(pm, index);
                                break;
                        default:
+                               atomic_set(&page->pg_flags, 0);
                                page_decref(page);
                                return error;
                }
        }
-       assert(page && pm_slot_check_refcnt(*page->pg_tree_slot));
+       assert(page);
+       assert(pm_slot_check_refcnt(*page->pg_tree_slot));
+       assert(pm_slot_get_page(*page->pg_tree_slot) == page);
        if (atomic_read(&page->pg_flags) & PG_UPTODATE) {
                *pp = page;
                printd("pm %p FOUND page %p, addr %p, idx %d\n", pm, page,
@@ -266,6 +282,9 @@ static bool vmr_has_page_idx(struct vm_region *vmr, unsigned long pg_idx)
 {
        unsigned long nr_pgs = (vmr->vm_end - vmr->vm_base) >> PGSHIFT;
        unsigned long start_pg = vmr->vm_foff >> PGSHIFT;
+
+       if (!vmr->vm_ready)
+               return false;
        return ((start_pg <= pg_idx) && (pg_idx < start_pg + nr_pgs));
 }
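
A quick worked example of the containment math: with PGSHIFT = 12, a VMR spanning 0x8000 bytes and backed at vm_foff = 0x3000 has start_pg = 3 and nr_pgs = 8, so vmr_has_page_idx() is true exactly for page indices 3 through 10. The new vm_ready guard keeps a VMR that is still being constructed, presumably while a device's now-blocking mmap() runs (per the commit title), from claiming any page index until its fields are stable.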
 
@@ -602,7 +621,19 @@ void print_page_map_info(struct page_map *pm)
        TAILQ_FOREACH(vmr_i, &pm->pm_vmrs, vm_pm_link) {
                printk("\tVMR proc %d: (%p - %p): 0x%08x, 0x%08x, %p, %p\n",
                       vmr_i->vm_proc->pid, vmr_i->vm_base, vmr_i->vm_end,
-                      vmr_i->vm_prot, vmr_i->vm_flags, vmr_i->vm_file, vmr_i->vm_foff);
+                      vmr_i->vm_prot, vmr_i->vm_flags, foc_pointer(vmr_i->__vm_foc),
+                      vmr_i->vm_foff);
        }
        spin_unlock(&pm->pm_lock);
 }
+
+void pm_page_asserter(struct page *page, char *str)
+{
+       void **tree_slot = page->pg_tree_slot;
+
+       if (!page_is_pagemap(page))
+               return;
+       assert(tree_slot);
+       assert(pm_slot_get_page(*tree_slot) == page);
+       assert(pm_slot_check_refcnt(*tree_slot) > 0);
+}
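
The printk change reflects the VMR's backing object now being a file-or-chan (__vm_foc) rather than a bare struct file, so the pointer for display comes from an accessor. A hypothetical sketch of such an accessor, with the union tag and member names assumed rather than taken from the Akaros headers:

	/* Hypothetical: return whichever backing pointer the foc holds */
	static inline void *foc_pointer(struct file_or_chan *foc)
	{
		if (!foc)
			return NULL;
		return foc->is_chan ? (void*)foc->chan : (void*)foc->file;
	}

pm_page_asserter() itself bails early for non-page-cache pages, so it is safe to call at any page hand-off point; str looks like a caller-supplied context string, unused in this body but presumably reserved for future debug output. A hedged usage sketch (the call site and string are illustrative):

	/* e.g. in a free path, naming the caller for future diagnostics */
	pm_page_asserter(page, "page_decref");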