mm: Use struct file_or_chan for VMR files
diff --git a/kern/src/pagemap.c b/kern/src/pagemap.c
index 8c7e5a2..46b0f03 100644
@@ -70,11 +70,17 @@ static int pm_slot_check_refcnt(void *slot_val)
 
 static void *pm_slot_inc_refcnt(void *slot_val)
 {
-       return (void*)((unsigned long)slot_val + (1UL << PM_REFCNT_SHIFT));
+       void *ret;
+
+       ret = (void*)((unsigned long)slot_val + (1UL << PM_REFCNT_SHIFT));
+       /* Catches previously negative refcnts */
+       assert(pm_slot_check_refcnt(ret) > 0);
+       return ret;
 }
 
 static void *pm_slot_dec_refcnt(void *slot_val)
 {
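+       /* Catches underflow (refcnt going negative) */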
+       assert(pm_slot_check_refcnt(slot_val) > 0);
        return (void*)((unsigned long)slot_val - (1UL << PM_REFCNT_SHIFT));
 }
 
@@ -181,7 +187,10 @@ static int pm_insert_page(struct page_map *pm, unsigned long index,
 void pm_put_page(struct page *page)
 {
        void **tree_slot = page->pg_tree_slot;
+
        assert(tree_slot);
+       assert(pm_slot_get_page(*tree_slot) == page);
+       assert(pm_slot_check_refcnt(*tree_slot) > 0);
        /* decref, don't care about CASing */
        atomic_add((atomic_t*)tree_slot, -(1UL << PM_REFCNT_SHIFT));
 }
@@ -202,7 +211,10 @@ int pm_load_page(struct page_map *pm, unsigned long index, struct page **pp)
                /* important that UP_TO_DATE is not set.  once we put it in the PM,
                 * others can find it, and we still need to fill it. */
                atomic_set(&page->pg_flags, PG_LOCKED | PG_PAGEMAP);
-               page->pg_sem.nr_signals = 0;    /* preemptively locking */
+               /* The sem needs to be initialized before anyone can try to lock it, i.e.
+                * before the page is in the page cache.  We also want it locked
+                * preemptively, which initializing with signals = 0 gives us. */
+               sem_init(&page->pg_sem, 0);
                error = pm_insert_page(pm, index, page);
                switch (error) {
                        case 0:
@@ -211,15 +223,19 @@ int pm_load_page(struct page_map *pm, unsigned long index, struct page **pp)
                        case -EEXIST:
                                /* the page was mapped already (benign race), just get rid of
                                 * our page and try again (the only case that uses the while) */
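+                               /* Never made it into the PM: clear the
+                                * preemptively-set flags before decreffing */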
+                               atomic_set(&page->pg_flags, 0);
                                page_decref(page);
                                page = pm_find_page(pm, index);
                                break;
                        default:
+                               atomic_set(&page->pg_flags, 0);
                                page_decref(page);
                                return error;
                }
        }
-       assert(page && pm_slot_check_refcnt(*page->pg_tree_slot));
+       assert(page);
+       assert(pm_slot_check_refcnt(*page->pg_tree_slot));
+       assert(pm_slot_get_page(*page->pg_tree_slot) == page);
        if (atomic_read(&page->pg_flags) & PG_UPTODATE) {
                *pp = page;
                printd("pm %p FOUND page %p, addr %p, idx %d\n", pm, page,
@@ -306,7 +322,11 @@ static void vmr_for_each(struct vm_region *vmr, unsigned long pg_idx,
 static int __pm_mark_not_present(struct proc *p, pte_t pte, void *va, void *arg)
 {
        struct page *page;
-       if (!pte_is_present(pte))
+       /* mapped includes present.  Any PTE pointing to a page (mapped) will get
+        * flagged for removal and have its access prots revoked.  We need to deal
+        * with mapped-but-maybe-not-present in case of a dirtied file that was
+        * mprotected to PROT_NONE (which is not present) */
+       if (pte_is_unmapped(pte))
                return 0;
        page = pa2page(pte_get_paddr(pte));
        if (atomic_read(&page->pg_flags) & PG_REMOVAL)
@@ -598,7 +618,19 @@ void print_page_map_info(struct page_map *pm)
        TAILQ_FOREACH(vmr_i, &pm->pm_vmrs, vm_pm_link) {
                printk("\tVMR proc %d: (%p - %p): 0x%08x, 0x%08x, %p, %p\n",
                       vmr_i->vm_proc->pid, vmr_i->vm_base, vmr_i->vm_end,
-                      vmr_i->vm_prot, vmr_i->vm_flags, vmr_i->vm_file, vmr_i->vm_foff);
+                      vmr_i->vm_prot, vmr_i->vm_flags, foc_pointer(vmr_i->__vm_foc),
+                      vmr_i->vm_foff);
        }
        spin_unlock(&pm->pm_lock);
 }
+
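+/* Sanity check for a page that might belong to a page map: if it does, assert
+ * that its radix tree slot still points back at the page and holds a positive
+ * refcount. */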
+void pm_page_asserter(struct page *page, char *str)
+{
+       void **tree_slot = page->pg_tree_slot;
+
+       if (!page_is_pagemap(page))
+               return;
+       assert(tree_slot);
+       assert(pm_slot_get_page(*tree_slot) == page);
+       assert(pm_slot_check_refcnt(*tree_slot) > 0);
+}