akaros/kern/arch/x86/pmap.c
/* Copyright (c) 2009 The Regents of the University of California
 * Barret Rhoden <brho@cs.berkeley.edu>
 * See LICENSE for details.
 *
 * Physical memory management, common to 32 and 64 bit */

#include <arch/x86.h>
#include <arch/arch.h>
#include <arch/mmu.h>
#include <arch/apic.h>

#include <error.h>
#include <sys/queue.h>

#include <atomic.h>
#include <string.h>
#include <assert.h>
#include <pmap.h>
#include <env.h>
#include <stdio.h>
#include <kmalloc.h>
#include <page_alloc.h>

/* Enables page size extensions (jumbo pages) if the CPU supports them.
 * Returns whether PSE was enabled. */
bool enable_pse(void)
{
        uint32_t edx, cr4;

        cpuid(0x1, 0x0, 0, 0, 0, &edx);
        if (edx & CPUID_PSE_SUPPORT) {
                cr4 = rcr4();
                cr4 |= CR4_PSE;
                lcr4(cr4);
                return 1;
        } else
                return 0;
}
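
/* Illustrative sketch, not part of the original file: with CR4.PSE set, a
 * page-directory entry that has the PS bit maps a jumbo page directly.  This
 * hypothetical helper assumes the PTE_ADDR and PTE_PS macros from arch/mmu.h,
 * and that 'perms' carries the usual permission bits (present, writable,
 * etc.). */
static inline uintptr_t mk_jumbo_pde(uintptr_t jumbo_pa, uintptr_t perms)
{
        /* PTE_PS doubles as the PAT bit on 4KB PTEs; see pat_init() below. */
        return PTE_ADDR(jumbo_pa) | perms | PTE_PS;
}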

/* PAT memory-type encodings, from the Intel SDM */
#define PAT_UC                          0x00    /* uncacheable */
#define PAT_WC                          0x01    /* write-combining */
#define PAT_WT                          0x04    /* write-through */
#define PAT_WP                          0x05    /* write-protected */
#define PAT_WB                          0x06    /* write-back */
#define PAT_UCm                         0x07    /* UC-, overridable via MTRR */

/* PAT entry pat_idx occupies bits [8 * pat_idx + 7 : 8 * pat_idx] of the
 * IA32_PAT MSR. */
static inline uint64_t mk_pat(int pat_idx, int type)
{
        return (uint64_t)type << (8 * pat_idx);
}

static void pat_init(void)
{
        uint64_t pat = 0;

        /* Default PAT at boot:
         *   0: WB, 1: WT, 2: UC-, 3: UC, 4: WB, 5: WT, 6: UC-, 7: UC
         *
         * We won't use PATs 4-7, but we'll at least enforce that they are set
         * up the way we think they are.  I'd like to avoid using the PAT flag,
         * since that is also the PTE_PS (jumbo) flag.  That means we can't use
         * __PTE_PAT on jumbo pages, and we'd need to be careful whenever using
         * any unorthodox types.  We're better off just not using it.
         *
         * We want WB, WT, WC, and either UC or UC- for our memory types.  (WT
         * is actually optional at this point.)  We'll use UC- instead of UC,
         * since Linux uses that for their pgprot_noncached.  The UC- type is UC
         * with the ability to override to WC via MTRR.  We don't use the MTRRs
         * much yet, and hopefully won't.  The UC- will only matter if we do.
         *
         * No one should be using the __PTE_{PAT,PCD,PWT} bits directly;
         * everyone should use things like PTE_NOCACHE.  (See the illustrative
         * index sketch after this function.) */
        pat |= mk_pat(0, PAT_WB);       /*           |           |           */
        pat |= mk_pat(1, PAT_WT);       /*           |           | __PTE_PWT */
        pat |= mk_pat(2, PAT_WC);       /*           | __PTE_PCD |           */
        pat |= mk_pat(3, PAT_UCm);      /*           | __PTE_PCD | __PTE_PWT */
        pat |= mk_pat(4, PAT_WB);       /* __PTE_PAT |           |           */
        pat |= mk_pat(5, PAT_WT);       /* __PTE_PAT |           | __PTE_PWT */
        pat |= mk_pat(6, PAT_UCm);      /* __PTE_PAT | __PTE_PCD |           */
        pat |= mk_pat(7, PAT_UC);       /* __PTE_PAT | __PTE_PCD | __PTE_PWT */
        write_msr(MSR_IA32_CR_PAT, pat);
}
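
/* Illustrative sketch, not kernel API: how hardware picks the PAT entry for a
 * 4KB mapping from the three cache-control PTE bits, per the table above
 * (bit 0 = __PTE_PWT, bit 1 = __PTE_PCD, bit 2 = __PTE_PAT).  With the layout
 * programmed above, __PTE_PCD | __PTE_PWT selects entry 3, UC-, which is what
 * something like PTE_NOCACHE should resolve to. */
static inline int pat_idx_of_pte(uint64_t pte)
{
        return (pte & __PTE_PWT ? 1 : 0) |
               (pte & __PTE_PCD ? 2 : 0) |
               (pte & __PTE_PAT ? 4 : 0);
}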

// could consider having an API to allow these to dynamically change
// MTRRs apply to physical, static ranges.  PAT entries are selected per
// mapping (linear addresses), so they are more granular and more dynamic.
void setup_default_mtrrs(barrier_t *smp_barrier)
{
        // disable interrupts
        int8_t state = 0;
        disable_irqsave(&state);
        // barrier - if we're meant to do this for all cores, we'll be
        // passed a pointer to an initialized barrier
        if (smp_barrier)
                waiton_barrier(smp_barrier);
        // disable caching: in cr0, set CD and clear NW
        lcr0((rcr0() | CR0_CD) & ~CR0_NW);
        // flush caches
        cache_flush();
        // flush the tlb
        tlb_flush_global();
        // disable MTRRs and set the default type to WB (0x06)
#ifndef CONFIG_NOMTRRS
        write_msr(IA32_MTRR_DEF_TYPE, 0x00000006);

        // Now we can actually safely adjust the MTRRs
        // MTRR for IO holes (note these are 64-bit values we are writing)
        // 0x000a0000 - 0x000c0000 : VGA - WC 0x01
        write_msr(IA32_MTRR_PHYSBASE0, PTE_ADDR(VGAPHYSMEM) | 0x01);
        // if we need to have a full 64-bit val, use the UINT64 macro
        // (see the mask-derivation sketch after this function)
        write_msr(IA32_MTRR_PHYSMASK0, 0x0000000ffffe0800);
        // 0x000c0000 - 0x00100000 : IO devices (and ROM BIOS) - UC 0x00
        write_msr(IA32_MTRR_PHYSBASE1, PTE_ADDR(DEVPHYSMEM) | 0x00);
        write_msr(IA32_MTRR_PHYSMASK1, 0x0000000ffffc0800);
        // APIC/IOAPIC holes: skipping them, since we set their mode using PAT
        // when we map them in
        // make sure all other MTRR ranges are disabled (should be unnecessary)
        write_msr(IA32_MTRR_PHYSMASK2, 0);
        write_msr(IA32_MTRR_PHYSMASK3, 0);
        write_msr(IA32_MTRR_PHYSMASK4, 0);
        write_msr(IA32_MTRR_PHYSMASK5, 0);
        write_msr(IA32_MTRR_PHYSMASK6, 0);
        write_msr(IA32_MTRR_PHYSMASK7, 0);

        // keep the default type WB (0x06), turn MTRRs on, and turn off fixed
        // ranges
        write_msr(IA32_MTRR_DEF_TYPE, 0x00000806);
#endif
        pat_init();
        // reflush caches and the TLB
        cache_flush();
        tlb_flush_global();
        // turn caching back on
        lcr0(rcr0() & ~(CR0_CD | CR0_NW));
        // barrier
        if (smp_barrier)
                waiton_barrier(smp_barrier);
        // enable interrupts
        enable_irqsave(&state);
}
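
/* Illustrative sketch, not part of the original file: how the PHYSMASK values
 * above can be derived.  For a power-of-two, size-aligned region, the mask
 * keeps every implemented physical-address bit at or above the region size,
 * plus the Valid bit (bit 11).  Assuming 36 physical address bits, the 128KB
 * VGA hole yields 0x0000000ffffe0800 and the 256KB device hole yields
 * 0x0000000ffffc0800, matching the constants written above. */
static inline uint64_t mtrr_mask(uint64_t region_size, int paddr_bits)
{
        uint64_t mask = ((1ULL << paddr_bits) - 1) & ~(region_size - 1);

        /* bit 11 is the Valid bit for a variable-range MTRR pair */
        return mask | (1ULL << 11);
}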

/* Invalidates the TLB entry for a single page, plus the EPT mapping if VMX is
 * enabled on this core. */
void invlpg(void *addr)
{
        asm volatile("invlpg (%0)" : : "r" (addr) : "memory");
        if (per_cpu_info[core_id()].vmx_enabled)
                ept_inval_addr((uintptr_t)addr);
}

/* Flushes non-global TLB entries by reloading CR3, plus the EPT context if
 * VMX is enabled on this core. */
void tlbflush(void)
{
        unsigned long cr3;

        asm volatile("mov %%cr3,%0" : "=r" (cr3));
        asm volatile("mov %0,%%cr3" : : "r" (cr3));
        if (per_cpu_info[core_id()].vmx_enabled)
                ept_inval_context();
}

/* Flushes the TLB, including global pages.  We should always have the CR4_PGE
 * flag set, but just in case, we'll check.  Toggling this bit flushes the
 * TLB. */
void tlb_flush_global(void)
{
        uint32_t cr4 = rcr4();

        if (cr4 & CR4_PGE) {
                lcr4(cr4 & ~CR4_PGE);
                lcr4(cr4);
        } else {
                lcr3(rcr3());
        }
        if (per_cpu_info[core_id_early()].vmx_enabled)
                ept_inval_global();
}