akaros/kern/arch/x86/pmap_ops.h
/* Copyright (c) 2015 Google Inc.
 * Barret Rhoden <brho@cs.berkeley.edu>
 * See LICENSE for details.
 *
 * Arch-specific operations for page tables and PTEs.
 *
 * Unfortunately, many of these ops are called from within a memwalk callback,
 * which expects a full pte.  But doing walks for a KPT and an EPT at the same
 * time is a pain, and for now we'll do the walks serially.  Because of that, a
 * given pte_t may have a KPTE and/or an EPTE.  Ideally, it'd be *and*. */

#pragma once

#include <arch/vmm/ept.h>
#include <arch/kpt.h>

/* TODO: (EPT)  build a CONFIG mode where we assert the EPT agrees with the KPT
 * for all of the read ops */

static inline bool pte_walk_okay(pte_t pte)
{
        return pte ? TRUE : FALSE;
}
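
/* Example (sketch): checking the result of a page table walk before using any
 * of the ops below.  pgdir_walk() and its exact signature are assumed here
 * for illustration; pgdir and addr are hypothetical locals.
 *
 *      pte_t pte = pgdir_walk(pgdir, addr, FALSE);
 *
 *      if (!pte_walk_okay(pte))
 *              return FALSE;   // no PTE installed for addr
 *      return pte_is_present(pte);
 */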

/* PTE states:
 * - present: the PTE is involved in a valid page table walk, can be used for
 *   some form of hardware access (read, write, user, etc), and with the
 *   physaddr part pointing to a physical page.
 *
 * - mapped: the PTE is involved in some sort of mapping, e.g. a VMR.  We're
 *   storing something in the PTE, but it isn't necessarily present.
 *   Currently, all mapped pages should point to an actual physical page.  All
 *   present are mapped, but not vice versa.  Mapped pages can point to a real
 *   page, but with no access permissions, which is the main distinction between
 *   present and mapped.
 *
 * - paged_out: we don't actually use this yet.  Since mapped vs present is
 *   based on the PTE present bits, we'd need to use reserved bits in the PTE to
 *   differentiate between other states.  Right now, paged_out == mapped, as far
 *   as the code is concerned.
 *
 * - unmapped: completely unused. (0 value) */
static inline bool pte_is_present(pte_t pte)
{
        return kpte_is_present(pte);
}

static inline bool pte_is_unmapped(pte_t pte)
{
        return kpte_is_unmapped(pte);
}

static inline bool pte_is_mapped(pte_t pte)
{
        return kpte_is_mapped(pte);
}

static inline bool pte_is_paged_out(pte_t pte)
{
        return kpte_is_paged_out(pte);
}
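
/* Example (sketch): a memwalk-style callback dispatching on the states above.
 * The callback signature and struct pte_counts are illustrative assumptions;
 * see the memwalk code for the real prototypes.
 *
 *      static int count_ptes(struct proc *p, pte_t pte, void *va, void *arg)
 *      {
 *              struct pte_counts *c = arg;     // hypothetical bookkeeping
 *
 *              if (pte_is_unmapped(pte))
 *                      return 0;
 *              if (pte_is_present(pte))
 *                      c->present++;
 *              else
 *                      c->mapped_not_present++;
 *              return 0;
 *      }
 */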

static inline bool pte_is_dirty(pte_t pte)
{
        return kpte_is_dirty(pte) ||
               epte_is_dirty(kpte_to_epte(pte));
}

static inline bool pte_is_accessed(pte_t pte)
{
        return kpte_is_accessed(pte) ||
               epte_is_accessed(kpte_to_epte(pte));
}
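
/* Example (sketch): a page counts as dirty/accessed if either the KPT or the
 * EPT says so, since accesses can happen through either table.  A walk
 * callback (signature assumed, as above, with arg pointing at a size_t) could
 * estimate the working set like this:
 *
 *      if (pte_is_present(pte) && pte_is_accessed(pte))
 *              (*(size_t*)arg)++;
 */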

/* Used in debugging code - want something better involving the walk */
static inline bool pte_is_jumbo(pte_t pte)
{
        return kpte_is_jumbo(pte);
}

static inline physaddr_t pte_get_paddr(pte_t pte)
{
        return kpte_get_paddr(pte);
}

/* Returns the PTE in an unsigned long, for debugging mostly. */
static inline unsigned long pte_print(pte_t pte)
{
        return kpte_print(pte);
}

static inline void pte_write(pte_t pte, physaddr_t pa, int settings)
{
        kpte_write(pte, pa, settings);
        epte_write(kpte_to_epte(pte), pa, settings);
}
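
/* Example (sketch): installing a new mapping, which updates both the KPTE and
 * the EPTE.  pgdir_walk(), page2pa(), tlb_invalidate(), and the PTE_USER_RW
 * settings value are assumed names; p, va, and pg are hypothetical locals.
 *
 *      pte_t pte = pgdir_walk(p->env_pgdir, va, TRUE);
 *
 *      if (!pte_walk_okay(pte))
 *              return -ENOMEM;
 *      pte_write(pte, page2pa(pg), PTE_USER_RW);
 *      tlb_invalidate(p->env_pgdir, va);
 */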

static inline void pte_clear_present(pte_t pte)
{
        kpte_clear_present(pte);
        epte_clear_present(kpte_to_epte(pte));
}

static inline void pte_clear_dirty(pte_t pte)
{
        kpte_clear_dirty(pte);
        epte_clear_dirty(kpte_to_epte(pte));
}

static inline void pte_clear(pte_t pte)
{
        kpte_clear(pte);
        epte_clear(kpte_to_epte(pte));
}
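
/* Example (sketch): tearing down a mapping, propagating the dirty bit to the
 * backing page before the PTE is wiped.  pa2page(), page_decref(), and
 * mark_page_dirty() are assumed/hypothetical helper names here.
 *
 *      if (pte_is_present(pte)) {
 *              struct page *pg = pa2page(pte_get_paddr(pte));
 *
 *              if (pte_is_dirty(pte))
 *                      mark_page_dirty(pg);    // hypothetical bookkeeping
 *              pte_clear(pte);
 *              page_decref(pg);
 *      }
 */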

/* These are used by memcpy_*_user, but are very dangerous (and possibly used
 * incorrectly there).  These aren't the overall perms for a VA.  For U and W,
 * we need the intersection of the PTEs along the walk and not just the last
 * one.  It just so happens that the W is only cleared on the last PTE, so the
 * check works for that.  But if there was a page under ULIM that wasn't U due
 * to an intermediate PTE, we'd miss that. */
static inline bool pte_has_perm_ur(pte_t pte)
{
        return kpte_has_perm_ur(pte);
}

static inline bool pte_has_perm_urw(pte_t pte)
{
        return kpte_has_perm_urw(pte);
}
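
/* Example (sketch): a per-page check in the style of memcpy_*_user, subject to
 * the caveat above (only the last-level PTE is consulted).  pgdir_walk() is
 * assumed for illustration; p and user_va are hypothetical.
 *
 *      pte_t pte = pgdir_walk(p->env_pgdir, user_va, FALSE);
 *
 *      if (!pte_walk_okay(pte) || !pte_has_perm_urw(pte))
 *              return -EFAULT;
 */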

/* Settings include protection (maskable via PTE_PROT) and other bits, such as
 * jumbo, dirty, accessed, etc.  Whatever this returns can get fed back to
 * pte_write.
 *
 * Arch-indep settings include: PTE_PERM (U, W, P, etc), PTE_D, PTE_A, PTE_PS.
 * Other arches (x86 included) may include others. */
static inline int pte_get_settings(pte_t pte)
{
        return kpte_get_settings(pte);
}
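
/* Example (sketch): duplicating a mapping.  Per the comment above, the
 * settings read from one PTE can be fed straight back into pte_write() for
 * another; src_pte and dst_pte are hypothetical names.
 *
 *      pte_write(dst_pte, pte_get_paddr(src_pte), pte_get_settings(src_pte));
 */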

static inline void pte_replace_perm(pte_t pte, int perm)
{
        kpte_replace_perm(pte, perm);
        epte_replace_perm(kpte_to_epte(pte), perm);
}
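
/* Example (sketch): demoting a page to read-only, e.g. for copy-on-write,
 * without touching the physaddr or the non-protection settings bits.
 * PTE_USER_RO and tlb_invalidate() are assumed names for illustration.
 *
 *      if (pte_is_present(pte)) {
 *              pte_replace_perm(pte, PTE_USER_RO);
 *              tlb_invalidate(p->env_pgdir, va);
 *      }
 */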