Page flags are now atomic
author     Barret Rhoden <brho@cs.berkeley.edu>
           Fri, 10 Jan 2014 20:58:14 +0000 (12:58 -0800)
committer  Barret Rhoden <brho@cs.berkeley.edu>
           Fri, 17 Jan 2014 22:57:12 +0000 (14:57 -0800)
I don't want to have to lock a page just to mark it dirty or something.
This will also help with the PM removal.
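
For context, a minimal, self-contained sketch of the pattern this moves to:
flag bits are set and cleared with atomic read-modify-write operations rather
than under the page lock.  The atomic_t type, the atomic_* helpers (backed
here by GCC __atomic builtins), the PG_* values, and struct demo_page below
are illustrative stand-ins rather than the kernel's actual definitions; only
the call pattern (atomic_read/atomic_or/atomic_and on pg_flags) mirrors the
diff.

#include <stdio.h>

/* Illustrative stand-ins for the kernel's atomic helpers, backed by GCC
 * __atomic builtins; the real definitions live elsewhere in the tree. */
typedef struct { unsigned long val; } atomic_t;

static inline unsigned long atomic_read(atomic_t *a)
{
	return __atomic_load_n(&a->val, __ATOMIC_SEQ_CST);
}

static inline void atomic_set(atomic_t *a, unsigned long v)
{
	__atomic_store_n(&a->val, v, __ATOMIC_SEQ_CST);
}

static inline void atomic_or(atomic_t *a, unsigned long mask)
{
	__atomic_fetch_or(&a->val, mask, __ATOMIC_SEQ_CST);
}

static inline void atomic_and(atomic_t *a, unsigned long mask)
{
	__atomic_fetch_and(&a->val, mask, __ATOMIC_SEQ_CST);
}

/* Hypothetical flag bits and a cut-down page struct, for illustration only */
#define PG_DIRTY	0x004
#define PG_UPTODATE	0x008

struct demo_page {
	atomic_t pg_flags;
};

int main(void)
{
	struct demo_page page;

	atomic_set(&page.pg_flags, 0);
	/* Mark the page up to date and dirty without taking any page lock;
	 * each update is a single atomic read-modify-write. */
	atomic_or(&page.pg_flags, PG_UPTODATE | PG_DIRTY);
	/* Clear one bit the same way. */
	atomic_and(&page.pg_flags, ~PG_DIRTY);
	printf("flags: 0x%lx\n", atomic_read(&page.pg_flags));
	return 0;
}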

kern/include/page_alloc.h
kern/src/blockdev.c
kern/src/ext2fs.c
kern/src/kfs.c
kern/src/page_alloc.c
kern/src/pagemap.c

diff --git a/kern/include/page_alloc.h b/kern/include/page_alloc.h
index 0f7f2b3..b0a7e4c 100644
--- a/kern/include/page_alloc.h
+++ b/kern/include/page_alloc.h
@@ -41,7 +41,7 @@ typedef LIST_ENTRY(page) page_list_entry_t;
 struct page {
        LIST_ENTRY(page)                        pg_link;        /* membership in various lists */
        struct kref                                     pg_kref;
-       unsigned int                            pg_flags;
+       atomic_t                                        pg_flags;
        struct page_map                         *pg_mapping;
        unsigned long                           pg_index;
        void                                            *pg_private;    /* type depends on page usage */
diff --git a/kern/src/blockdev.c b/kern/src/blockdev.c
index 9028110..8e4f3b5 100644
--- a/kern/src/blockdev.c
+++ b/kern/src/blockdev.c
@@ -69,7 +69,7 @@ struct block_device *get_bdev(char *path)
 void free_bhs(struct page *page)
 {
        struct buffer_head *bh, *next;
-       assert(page->pg_flags & PG_BUFFER);
+       assert(atomic_read(&page->pg_flags) & PG_BUFFER);
        bh = (struct buffer_head*)page->pg_private;
        while (bh) {
                next = bh->bh_next;
@@ -156,7 +156,7 @@ void sleep_on_breq(struct block_request *breq)
  * readpage, we read them in when a specific block is there */
 int block_readpage(struct page_map *pm, struct page *page)
 {
-       page->pg_flags |= PG_UPTODATE;
+       atomic_or(&page->pg_flags, PG_UPTODATE);
        return 0;
 }
 
@@ -191,7 +191,7 @@ struct buffer_head *bdev_get_buffer(struct block_device *bdev,
        if (error)
                panic("Failed to load page! (%d)", error);
        my_buf = page2kva(page) + blk_offset;
-       assert(page->pg_flags & PG_BUFFER);             /* Should be part of a page map */
+       assert(atomic_read(&page->pg_flags) & PG_BUFFER);
 retry:
        bh = (struct buffer_head*)page->pg_private;
        prev = 0;
@@ -268,7 +268,7 @@ void bdev_dirty_buffer(struct buffer_head *bh)
        struct page *page = bh->bh_page;
        /* TODO: race on flag modification */
        bh->bh_flags |= BH_DIRTY;
-       page->pg_flags |= PG_DIRTY;
+       atomic_or(&page->pg_flags, PG_DIRTY);
 }
 
 /* Decrefs the buffer from bdev_get_buffer().  Call this when you no longer
diff --git a/kern/src/ext2fs.c b/kern/src/ext2fs.c
index 02ea162..a9ddb51 100644
--- a/kern/src/ext2fs.c
+++ b/kern/src/ext2fs.c
@@ -704,7 +704,7 @@ int ext2_readpage(struct page_map *pm, struct page *page)
        struct block_request *breq;
        void *eobh;
 
-       assert(page->pg_flags & PG_BUFFER);
+       assert(atomic_read(&page->pg_flags) & PG_BUFFER);
        retval = ext2_mappage(pm, page);
        if (retval)
                return retval;
@@ -731,7 +731,7 @@ int ext2_readpage(struct page_map *pm, struct page *page)
                } else {
                        memset(bh->bh_buffer, 0, pm->pm_host->i_sb->s_blocksize);
                        bh->bh_flags |= BH_DIRTY;
-                       bh->bh_page->pg_flags |= PG_DIRTY;
+                       atomic_or(&bh->bh_page->pg_flags, PG_DIRTY);
                }
        }
        retval = bdev_submit_request(bdev, breq);
@@ -748,7 +748,7 @@ int ext2_readpage(struct page_map *pm, struct page *page)
        if (eof_off)
                memset(eof_off + page2kva(page), 0, PGSIZE - eof_off);
        /* Now the page is up to date */
-       page->pg_flags |= PG_UPTODATE;
+       atomic_or(&page->pg_flags, PG_UPTODATE);
        /* Useful debugging.  Put one higher up if the page is not getting mapped */
        //print_pageinfo(page);
        return 0;
diff --git a/kern/src/kfs.c b/kern/src/kfs.c
index 8324323..1875d47 100644
--- a/kern/src/kfs.c
+++ b/kern/src/kfs.c
@@ -151,7 +151,7 @@ int kfs_readpage(struct page_map *pm, struct page *page)
        /* This is supposed to be done in the IO system when the operation is
         * complete.  Since we aren't doing a real IO request, and it is already
         * done, we can do it here. */
-       page->pg_flags |= PG_UPTODATE;
+       atomic_or(&page->pg_flags, PG_UPTODATE);
        return 0;
 }
 
diff --git a/kern/src/page_alloc.c b/kern/src/page_alloc.c
index 96fd2e6..686ef1d 100644
--- a/kern/src/page_alloc.c
+++ b/kern/src/page_alloc.c
@@ -309,7 +309,7 @@ static void page_release(struct kref *kref)
 {
        struct page *page = container_of(kref, struct page, pg_kref);
 
-       if (page->pg_flags & PG_BUFFER)
+       if (atomic_read(&page->pg_flags) & PG_BUFFER)
                free_bhs(page);
        /* Give our page back to the free list.  The protections for this are that
         * the list lock is grabbed by page_decref. */
@@ -336,14 +336,14 @@ void lock_page(struct page *page)
 {
        /* when this returns, we are the ones to have locked the page */
        sem_down(&page->pg_sem);
-       assert(!(page->pg_flags & PG_LOCKED));
-       page->pg_flags |= PG_LOCKED;
+       assert(!(atomic_read(&page->pg_flags) & PG_LOCKED));
+       atomic_or(&page->pg_flags, PG_LOCKED);
 }
 
 /* Unlocks the page, and wakes up whoever is waiting on the lock */
 void unlock_page(struct page *page)
 {
-       page->pg_flags &= ~PG_LOCKED;
+       atomic_and(&page->pg_flags, ~PG_LOCKED);
        if (sem_up(&page->pg_sem)) {
                printk("Unexpected sleeper on a page!");        /* til we test this */
        }
@@ -357,12 +357,13 @@ void print_pageinfo(struct page *page)
                return;
        }
        printk("Page %d (%p), Flags: 0x%08x Refcnt: %d\n", page2ppn(page),
-              page2kva(page), page->pg_flags, kref_refcnt(&page->pg_kref));
+              page2kva(page), atomic_read(&page->pg_flags),
+              kref_refcnt(&page->pg_kref));
        if (page->pg_mapping) {
                printk("\tMapped into object %p at index %d\n",
                       page->pg_mapping->pm_host, page->pg_index);
        }
-       if (page->pg_flags & PG_BUFFER) {
+       if (atomic_read(&page->pg_flags) & PG_BUFFER) {
                struct buffer_head *bh = (struct buffer_head*)page->pg_private;
                i = 0;
                while (bh) {
@@ -372,8 +373,10 @@ void print_pageinfo(struct page *page)
                        bh = bh->bh_next;
                }
                printk("\tPage is %sup to date\n",
-                      page->pg_flags & PG_UPTODATE ? "" : "not ");
+                      atomic_read(&page->pg_flags) & PG_UPTODATE ? "" : "not ");
        }
-       printk("\tPage is %slocked\n", page->pg_flags & PG_LOCKED ? "" : "un");
-       printk("\tPage is %s\n", page->pg_flags & PG_DIRTY ? "dirty" : "clean");
+       printk("\tPage is %slocked\n",
+              atomic_read(&page->pg_flags) & PG_LOCKED ? "" : "un");
+       printk("\tPage is %s\n",
+              atomic_read(&page->pg_flags) & PG_DIRTY ? "dirty" : "clean");
 }
diff --git a/kern/src/pagemap.c b/kern/src/pagemap.c
index 84fab40..7db0905 100644
--- a/kern/src/pagemap.c
+++ b/kern/src/pagemap.c
@@ -47,7 +47,7 @@ int pm_insert_page(struct page_map *pm, unsigned long index, struct page *page)
        error = radix_insert(&pm->pm_tree, index, page);
        if (!error) {
                page_incref(page);
-               page->pg_flags |= PG_LOCKED | PG_BUFFER;
+               atomic_or(&page->pg_flags, PG_LOCKED | PG_BUFFER);
                page->pg_sem.nr_signals = 0;            /* ensure others will block */
                page->pg_mapping = pm;
                page->pg_index = index;
@@ -94,7 +94,7 @@ int pm_load_page(struct page_map *pm, unsigned long index, struct page **pp)
                if (kpage_alloc(&page))
                        return -ENOMEM;
                /* might want to initialize other things, perhaps in page_alloc() */
-               page->pg_flags = 0;
+               atomic_set(&page->pg_flags, 0);
                error = pm_insert_page(pm, index, page);
                switch (error) {
                        case 0:
@@ -122,7 +122,7 @@ int pm_load_page(struct page_map *pm, unsigned long index, struct page **pp)
         * us, we skip this since we are the one doing the readpage(). */
        if (page_was_mapped) {
                /* is it already here and up to date?  if so, we're done */
-               if (page->pg_flags & PG_UPTODATE)
+               if (atomic_read(&page->pg_flags) & PG_UPTODATE)
                        return 0;
                /* if not, try to lock the page (could BLOCK) */
                lock_page(page);
@@ -131,7 +131,7 @@ int pm_load_page(struct page_map *pm, unsigned long index, struct page **pp)
                if (!page->pg_mapping)
                        panic("Page is not in the mapping!  Haven't implemented this!");
                /* double check, are we up to date?  if so, we're done */
-               if (page->pg_flags & PG_UPTODATE) {
+               if (atomic_read(&page->pg_flags) & PG_UPTODATE) {
                        unlock_page(page);
                        return 0;
                }