akaros/kern/src/page_alloc.c
/* Copyright (c) 2009, 2010 The Regents of the University of California.
 * Copyright (c) 2016 Google Inc
 * See LICENSE for details.
 *
 * Barret Rhoden <brho@cs.berkeley.edu>
 * Kevin Klues <klueska@cs.berkeley.edu> */

#include <page_alloc.h>
#include <pmap.h>
#include <kmalloc.h>
#include <arena.h>

/* Helper, allocates a free page. */
static struct page *get_a_free_page(void)
{
        void *addr;

        addr = kpages_alloc(PGSIZE, MEM_ATOMIC);
        if (!addr)
                return NULL;
        return kva2page(addr);
}

/**
 * @brief Allocates a physical page from a pool of unused physical memory.
 *
 * Zeroes the page if 'zero' is set.
 *
 * @param[in]  p     the process the page is allocated for (currently unused)
 * @param[out] page  set to point to the Page struct
 *                   of the newly allocated page
 * @param[in]  zero  whether to zero the page before returning
 *
 * @return ESUCCESS on success
 * @return -ENOMEM  otherwise
 */
error_t upage_alloc(struct proc *p, page_t **page, bool zero)
{
        struct page *pg = get_a_free_page();

        if (!pg)
                return -ENOMEM;
        *page = pg;
        if (zero)
                memset(page2kva(*page), 0, PGSIZE);
        return 0;
}

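/* Example (sketch): allocating a zeroed page on behalf of a process.  'p' is
 * assumed to be a valid struct proc; mapping and cleanup are the caller's job.
 *
 *        page_t *pg;
 *
 *        if (upage_alloc(p, &pg, true))
 *                return -ENOMEM;
 *        ... map pg into p's address space; page_decref(pg) when done ...
 */
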
error_t kpage_alloc(page_t **page)
{
        struct page *pg = get_a_free_page();

        if (!pg)
                return -ENOMEM;
        *page = pg;
        return 0;
}

/* Helper: allocates a refcounted page of memory for the kernel's use and
 * returns the kernel address (kernbase), or 0 on error. */
void *kpage_alloc_addr(void)
{
        struct page *pg = get_a_free_page();

        if (!pg)
                return 0;
        return page2kva(pg);
}

void *kpage_zalloc_addr(void)
{
        void *retval = kpage_alloc_addr();

        if (retval)
                memset(retval, 0, PGSIZE);
        return retval;
}

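/* Example (sketch): grabbing a zeroed kernel page by address.  Pages from
 * these helpers go back via kpages_free, matching what page_decref does below.
 *
 *        void *kva = kpage_zalloc_addr();
 *
 *        if (!kva)
 *                return -ENOMEM;
 *        ... use the page via its kernbase address ...
 *        kpages_free(kva, PGSIZE);
 */
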
/* Helper function for allocating from the kpages_arena.  This may be useful
 * later since we might send the caller to a different NUMA domain. */
void *kpages_alloc(size_t size, int flags)
{
        return arena_alloc(kpages_arena, size, flags);
}

void *kpages_zalloc(size_t size, int flags)
{
        void *ret = arena_alloc(kpages_arena, size, flags);

        if (!ret)
                return NULL;
        memset(ret, 0, size);
        return ret;
}

void kpages_free(void *addr, size_t size)
{
        arena_free(kpages_arena, addr, size);
}

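/* Example (sketch): these take arbitrary sizes, not just PGSIZE; per the
 * usual arena contract, the freed size must match the allocated size.
 *
 *        void *buf = kpages_zalloc(4 * PGSIZE, MEM_WAIT);
 *
 *        if (!buf)
 *                return -ENOMEM;
 *        ...
 *        kpages_free(buf, 4 * PGSIZE);
 */
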
/* Returns naturally aligned, contiguous pages of amount PGSIZE << order.
 * Linux code might assume its allocations are aligned (see dma_alloc_coherent
 * and bnx2x). */
void *get_cont_pages(size_t order, int flags)
{
        return arena_xalloc(kpages_arena, PGSIZE << order, PGSIZE << order,
                            0, 0, NULL, NULL, flags);
}

void free_cont_pages(void *buf, size_t order)
{
        arena_xfree(kpages_arena, buf, PGSIZE << order);
}

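/* Example (sketch): 'order' is log2 of the page count, so order 2 yields four
 * contiguous pages (16 KiB with 4 KiB pages) aligned on a 16 KiB boundary,
 * e.g. for a device needing aligned, physically contiguous memory.
 *
 *        void *ring = get_cont_pages(2, MEM_WAIT);
 *
 *        if (!ring)
 *                return -ENOMEM;
 *        ...
 *        free_cont_pages(ring, 2);
 */
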
/* Frees the page */
void page_decref(page_t *page)
{
        assert(!page_is_pagemap(page));
        kpages_free(page2kva(page), PGSIZE);
}

/* Attempts to get a lock on the page for IO operations.  If it is already
 * locked, it will block the kthread until it is unlocked.  Note that this is
 * really a "sleep on some event", not necessarily the IO, but it is "the page
 * is ready". */
void lock_page(struct page *page)
{
        /* when this returns, we are the ones who locked the page */
        sem_down(&page->pg_sem);
        assert(!(atomic_read(&page->pg_flags) & PG_LOCKED));
        atomic_or(&page->pg_flags, PG_LOCKED);
}

/* Unlocks the page, and wakes up whoever is waiting on the lock */
void unlock_page(struct page *page)
{
        atomic_and(&page->pg_flags, ~PG_LOCKED);
        sem_up(&page->pg_sem);
}

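/* Example (sketch): the usual pattern around page IO.  A PG_UPTODATE-style
 * "page is ready" flag is assumed here; any other locker blocks in sem_down
 * until the holder calls unlock_page.
 *
 *        lock_page(pg);
 *        if (!(atomic_read(&pg->pg_flags) & PG_UPTODATE)) {
 *                ... read the page in from backing store ...
 *        }
 *        unlock_page(pg);
 */
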
static void *__jumbo_pml2_alloc(struct arena *a, size_t size, int flags)
{
        return arena_xalloc(a, size, PML2_PTE_REACH, 0, 0, NULL, NULL, flags);
}

static struct arena *jumbo_pml2_arena;

/* Just for example; we could add qcaches too.  Do this after kmalloc_init(). */
void jumbo_arena_init(void)
{
        jumbo_pml2_arena = arena_create("jumbo_pml2", NULL, 0, PML2_PTE_REACH,
                                        __jumbo_pml2_alloc, arena_xfree,
                                        base_arena, 0, MEM_WAIT);
        assert(jumbo_pml2_arena);
}

void *jumbo_page_alloc(size_t nr, int flags)
{
        return arena_alloc(jumbo_pml2_arena, nr * PML2_PTE_REACH, flags);
}

void jumbo_page_free(void *buf, size_t nr)
{
        arena_free(jumbo_pml2_arena, buf, nr * PML2_PTE_REACH);
}

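/* Example (sketch): after jumbo_arena_init(), callers get PML2-reach chunks
 * (e.g. 2 MiB on x86_64), aligned suitably for jumbo-page mappings.
 *
 *        void *jp = jumbo_page_alloc(1, MEM_WAIT);
 *
 *        if (!jp)
 *                return -ENOMEM;
 *        ... map jp with a PML2 PTE ...
 *        jumbo_page_free(jp, 1);
 */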