akaros/kern/arch/riscv/arch.h
#pragma once

#include <ros/arch/arch.h>
#include <arch/mmu.h>
#include <ros/common.h>
#include <arch/membar.h>
#include <arch/riscv.h>
#include <arch/time.h>

/* Arch Constants */
#define ARCH_CL_SIZE 64

void print_cpuinfo(void);
void show_mapping(pgdir_t pgdir, uintptr_t start, size_t size);
void backtrace(void);

static __inline void breakpoint(void)
{
	/* "break" is the trap-to-debugger mnemonic in the old, pre-
	 * privileged-spec ISA this port targets; modern assemblers spell it
	 * "ebreak". */
	asm volatile ("break");
}

static __inline void tlbflush(void)
{
	/* Reloading the page table base flushes the entire TLB. */
	lcr3(rcr3());
}

static __inline void invlpg(void *addr)
{
	/* No per-page invalidation here; just flush the whole TLB. */
	tlbflush();
}

static __inline void icache_flush_page(void* va, void* kva)
{
	/* fence.i synchronizes this hart's instruction fetches with prior
	 * stores. */
	asm volatile ("fence.i");
}

static __inline uint64_t read_tsc(void)
{
	unsigned long t;

	/* rdtime reads the real-time counter, this port's stand-in for the
	 * x86 TSC. */
	asm volatile ("rdtime %0" : "=r"(t));
	return t;
}

/* Continuing the poor tradition of x86 opcode functions... */
static __inline uint64_t read_tscp(void)
{
	return read_tsc();
}

static __inline uint64_t read_tsc_serialized(void)
{
	/* Order the counter read behind earlier memory operations. */
	mb();
	return read_tsc();
}

static __inline uintptr_t enable_fp(void)
{
	/* setpcr()/clearpcr() hand back the old status register, which
	 * callers can later pass to restore_irq(). */
	return setpcr(PCR_SR, SR_EF);
}

static __inline uintptr_t enable_irq(void)
{
	return setpcr(PCR_SR, SR_ET);
}

static __inline uintptr_t disable_irq(void)
{
	return clearpcr(PCR_SR, SR_ET);
}

static __inline void restore_irq(uintptr_t val)
{
	mtpcr(PCR_SR, val);
}

static __inline int irq_is_enabled(void)
{
	return mfpcr(PCR_SR) & SR_ET;
}
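
/* A hedged usage sketch, not from the original source: since
 * disable_irq() returns the prior status register and restore_irq()
 * writes it back verbatim, a critical section can save and restore
 * whatever IRQ state its caller had.  Guarded out of the build;
 * example_irq_critical_section() is a hypothetical name. */
#if 0
static inline void example_irq_critical_section(void)
{
	uintptr_t sr = disable_irq();	/* save the previous PCR_SR */

	/* ... code that must not take interrupts ... */

	restore_irq(sr);		/* back to the caller's IRQ state */
}
#endif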

static __inline void enable_irqsave(int8_t* state)
{
	// *state tracks the number of nested enables and disables
	// initial value of state: 0 = first run / no favorite
	// > 0 means more enable calls have been made
	// < 0 means more disable calls have been made
	// Mostly doing this so we can call disable_irqsave first if we want

	// one side or the other "gets a point" if interrupts were already
	// the way it wanted them.  otherwise, state stays at 0.  if state is
	// nonzero, actually enabling/disabling isn't an option: just
	// increment/decrement

	// if enabling is winning or tied, make sure it's enabled
	if ((*state == 0) && !irq_is_enabled())
		enable_irq();
	else
		(*state)++;
}

static __inline void disable_irqsave(int8_t* state)
{
	if ((*state == 0) && irq_is_enabled())
		disable_irq();
	else
		(*state)--;
}
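
/* A hedged sketch of the nesting protocol described above; not from the
 * original source, and example_irqsave_nesting() is a hypothetical
 * name.  The counter starts at 0, the calls may nest, and once the
 * enables and disables balance, the IRQ state from before the first
 * call is back in effect. */
#if 0
static inline void example_irqsave_nesting(void)
{
	int8_t irq_state = 0;

	disable_irqsave(&irq_state);	/* disables IRQs if they were on */
	disable_irqsave(&irq_state);	/* nested: only bumps the counter */

	/* ... critical section ... */

	enable_irqsave(&irq_state);	/* unwinds the nested disable */
	enable_irqsave(&irq_state);	/* restores the original IRQ state */
}
#endif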

static __inline void cpu_relax(void)
{
	// compute and use 0/0, which stalls Rocket for dozens of cycles; the
	// dependent move keeps the divide's result live so the stall can't
	// be optimized away
	long scratch;
	asm volatile ("div %0, zero, zero; move %0, %0" : "=r"(scratch));
}
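
/* Typical use is a spin-wait loop like this hedged sketch (not from the
 * original source; example_spin_until() is a hypothetical name): the
 * slow divide inside cpu_relax() throttles the polling. */
#if 0
static inline void example_spin_until(volatile int *flag)
{
	while (!*flag)
		cpu_relax();
}
#endif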

static __inline void clflush(uintptr_t* addr)
{
	/* No cache-line flush instruction on this port; no-op. */
}

/* os_coreid -> hw_coreid */
static __inline int get_hw_coreid(int coreid)
{
	return coreid;
}

static __inline int hw_core_id(void)
{
	return mfpcr(PCR_COREID);
}

/* hw_coreid -> os_coreid */
static __inline int get_os_coreid(int hw_coreid)
{
	return hw_coreid;
}

/* core_id() returns the OS core number, not to be confused with the
 * hardware-specific core identifier (such as the lapic id) returned by
 * hw_core_id() */
static __inline int core_id(void)
{
	return get_os_coreid(hw_core_id());
}

static __inline int core_id_early(void)
{
	return core_id();
}

static __inline void cache_flush(void)
{
	/* No-op: this port has no cache-flush primitive. */
}

static __inline void reboot(void)
{
	/* No architectural reset here; just poke the console. */
	extern void cputchar(int ch);
	cputchar(0);
}

extern void cpu_halt(void);

struct preempt_data;
static inline void cpu_halt_notif_pending(struct preempt_data *vcpd)
{
	/* This port halts unconditionally; vcpd's pending-notification state
	 * is not consulted. */
	cpu_halt();
}

static inline void prefetch(void *addr)
{
	/* No prefetch hint on this port; no-op. */
}

static inline void prefetchw(void *addr)
{
	/* No prefetch-for-write hint on this port; no-op. */
}

/* Resets a stack pointer to sp, then calls f(arg) */
static inline void __attribute__((noreturn))
__reset_stack_pointer(void *arg, uintptr_t sp, void (*f)(void *))
{
	#error "implement me"
}
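
/* A hedged sketch of what an implementation could look like under the
 * standard RISC-V calling convention (first argument in a0, stack in
 * sp): switch to the new stack, load the argument, and tail-call f.
 * "move" is this port's spelling of the mv pseudo-op.  Untested and
 * guarded out of the build; the #error above still marks the real work
 * to be done. */
#if 0
static inline void __attribute__((noreturn))
__reset_stack_pointer(void *arg, uintptr_t sp, void (*f)(void *))
{
	asm volatile ("move sp, %0; move a0, %1; jr %2"
	              : : "r"(sp), "r"(arg), "r"(f) : "memory");
	while (1)
		;	/* f() must not return */
}
#endif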