/* akaros/kern/include/page_alloc.h */
/* Copyright (c) 2009, 2010 The Regents of the University of California.
 * See the COPYRIGHT files at the top of this source tree for full
 * license information.
 *
 * Kevin Klues <klueska@cs.berkeley.edu>
 * Barret Rhoden <brho@cs.berkeley.edu> */

#pragma once

#include <atomic.h>
#include <sys/queue.h>
#include <error.h>
#include <arch/mmu.h>
#include <process.h>
#include <kref.h>
#include <kthread.h>
#include <multiboot.h>

struct page_map;                /* preprocessor games */

/****************** Page Structures *********************/
struct page;
typedef size_t ppn_t;
typedef struct page page_t;
typedef BSD_LIST_HEAD(PageList, page) page_list_t;
typedef BSD_LIST_ENTRY(page) page_list_entry_t;

/* Per-page flag bits related to their state in the page cache */
#define PG_LOCKED               0x001   /* involved in an IO op */
#define PG_UPTODATE             0x002   /* page map, filled with file data */
#define PG_DIRTY                0x004   /* page map, data is dirty */
#define PG_BUFFER               0x008   /* is a buffer page, has BHs */
#define PG_PAGEMAP              0x010   /* belongs to a page map */
#define PG_REMOVAL              0x020   /* Working flag for page map removal */

/* Per-physical-page bookkeeping structure.
 *
 * TODO: this struct is not protected from concurrent operations in some
 * functions.  If you want to lock on it, use the spinlock in the semaphore.
 * This structure is getting pretty big (and we're wasting RAM).  If it becomes
 * an issue, we can dynamically allocate some of these things when we're a
 * buffer page (in a page mapping) */
struct page {
        BSD_LIST_ENTRY(page)            pg_link;        /* linkage for page lists (e.g. page_free_list) */
        atomic_t                        pg_flags;       /* PG_* bits above; read/written atomically */
        struct page_map                 *pg_mapping;    /* for debugging... */
        unsigned long                   pg_index;       /* NOTE(review): presumably the page's index within pg_mapping — confirm */
        void                            **pg_tree_slot; /* NOTE(review): slot pointer into the page map's tree — confirm */
        void                            *pg_private;    /* NOTE(review): per-user payload, e.g. BHs for PG_BUFFER pages — confirm */
        struct semaphore                pg_sem;         /* also supplies the spinlock mentioned in the TODO above */
        uint64_t                        gpa;    /* physical address in guest */

        bool                            pg_is_free;     /* TODO: will remove */
};
  53
  54/******** Externally visible global variables ************/
  55extern spinlock_t page_list_lock;
  56extern page_list_t page_free_list;
  57
  58/*************** Functional Interface *******************/
  59void base_arena_init(struct multiboot_info *mbi);
  60
  61error_t upage_alloc(struct proc *p, page_t **page, bool zero);
  62error_t kpage_alloc(page_t **page);
  63void *kpage_alloc_addr(void);
  64void *kpage_zalloc_addr(void);
  65
  66/* Direct allocation from the kpages arena (instead of kmalloc).  These will
  67 * give you PGSIZE quantum. */
  68void *kpages_alloc(size_t size, int flags);
  69void *kpages_zalloc(size_t size, int flags);
  70void kpages_free(void *addr, size_t size);
  71
  72void *get_cont_pages(size_t order, int flags);
  73void free_cont_pages(void *buf, size_t order);
  74
  75void page_decref(page_t *page);
  76
  77int page_is_free(size_t ppn);
  78void lock_page(struct page *page);
  79void unlock_page(struct page *page);
  80void print_pageinfo(struct page *page);
  81static inline bool page_is_pagemap(struct page *page);
  82
  83static inline bool page_is_pagemap(struct page *page)
  84{
  85        return atomic_read(&page->pg_flags) & PG_PAGEMAP ? true : false;
  86}
  87