Initial commit of RISC-V architecture port
author Andrew Waterman <waterman@s141.Millennium.Berkeley.EDU>
Mon, 23 May 2011 09:19:57 +0000 (02:19 -0700)
committer Kevin Klues <klueska@cs.berkeley.edu>
Thu, 3 Nov 2011 00:36:02 +0000 (17:36 -0700)
It builds but doesn't link and is missing key functionality, like
env/process/pmap stuff.

31 files changed:
kern/arch/riscv/Makefrag [new file with mode: 0644]
kern/arch/riscv/arch.h [new file with mode: 0644]
kern/arch/riscv/atomic.c [new file with mode: 0644]
kern/arch/riscv/atomic.h [new file with mode: 0644]
kern/arch/riscv/bitmask.h [new file with mode: 0644]
kern/arch/riscv/boot/Makefrag [new file with mode: 0644]
kern/arch/riscv/boot/boot [new symlink]
kern/arch/riscv/boot/boot.S [new file with mode: 0644]
kern/arch/riscv/boot/main.c [new file with mode: 0644]
kern/arch/riscv/colored_page_alloc.h [new file with mode: 0644]
kern/arch/riscv/console.c [new file with mode: 0644]
kern/arch/riscv/console.h [new file with mode: 0644]
kern/arch/riscv/cpuinfo.c [new file with mode: 0644]
kern/arch/riscv/endian.h [new file with mode: 0644]
kern/arch/riscv/entry.S [new file with mode: 0644]
kern/arch/riscv/i686 [new symlink]
kern/arch/riscv/init.h [new file with mode: 0644]
kern/arch/riscv/kernel.ld [new file with mode: 0644]
kern/arch/riscv/mmu.h [new file with mode: 0644]
kern/arch/riscv/pcr.h [new file with mode: 0644]
kern/arch/riscv/riscv.h [new file with mode: 0644]
kern/arch/riscv/ros/arch.h [new file with mode: 0644]
kern/arch/riscv/ros/membar.h [new file with mode: 0644]
kern/arch/riscv/ros/mmu.h [new file with mode: 0644]
kern/arch/riscv/ros/syscall.h [new file with mode: 0644]
kern/arch/riscv/ros/trapframe.h [new file with mode: 0644]
kern/arch/riscv/smp.h [new file with mode: 0644]
kern/arch/riscv/timer.c [new file with mode: 0644]
kern/arch/riscv/timer.h [new file with mode: 0644]
kern/arch/riscv/trap.h [new file with mode: 0644]
kern/arch/riscv/types.h [new file with mode: 0644]

diff --git a/kern/arch/riscv/Makefrag b/kern/arch/riscv/Makefrag
new file mode 100644 (file)
index 0000000..9b636a9
--- /dev/null
@@ -0,0 +1,16 @@
+# Makefile fragment for ROS kernel.
+# This is NOT a complete makefile;
+# you must run GNU make in the top-level directory
+# where the GNUmakefile is located.
+#
+
+KERN_ARCH_SRC_DIR = $(KERN_DIR)/arch/$(TARGET_ARCH)
+OBJDIRS += $(KERN_ARCH_SRC_DIR)
+
+# entry.S must be first, so that it's the first code in the text segment!!!
+#
+# We also snatch the use of a couple handy source files
+# from the lib directory, to avoid gratuitous code duplication.
+KERN_ARCH_SRCFILES := $(KERN_ARCH_SRC_DIR)/entry.S \
+                      $(KERN_ARCH_SRC_DIR)/cpuinfo.c \
+                      $(KERN_ARCH_SRC_DIR)/console.c \
diff --git a/kern/arch/riscv/arch.h b/kern/arch/riscv/arch.h
new file mode 100644 (file)
index 0000000..3f0f721
--- /dev/null
@@ -0,0 +1,168 @@
+#ifndef ROS_INC_ARCH_H
+#define ROS_INC_ARCH_H
+
+#include <ros/arch/arch.h>
+#include <ros/common.h>
+#include <ros/arch/membar.h>
+#include <arch/riscv.h>
+#include <arch/trap.h>
+#include <arch/timer.h>
+
+/* Arch Constants */
+#define HW_CACHE_ALIGN 64
+#define IOAPIC_BASE    0xFFFFFFFF80000000 // upper 2GB reserved (see mmu_init)
+
+void print_cpuinfo(void);
+void show_mapping(uintptr_t start, size_t size);
+void backtrace(void);
+
+static __inline void
+breakpoint(void)
+{
+       asm volatile ("break");
+}
+
+static __inline void
+tlbflush(void)
+{
+       lcr3(rcr3());
+}
+
+static __inline void 
+invlpg(void *addr)
+{ 
+       tlbflush();
+}
+
+static __inline void
+icache_flush_page(void* va, void* kva)
+{
+       asm volatile ("fence.i");
+}
+
+static __inline uint64_t
+read_tsc(void)
+{
+       unsigned long t;
+       asm volatile ("rdtime %0" : "=r"(t));
+       return t;
+}
+
+static __inline uint64_t 
+read_tsc_serialized(void)
+{
+       uint64_t tsc;
+  mb();
+       tsc = read_tsc();
+       mb();
+       return tsc;
+}
+
+static __inline void
+enable_irq(void)
+{
+  asm volatile("ei");
+}
+
+static __inline void
+disable_irq(void)
+{
+  asm volatile("di");
+}
+
+static __inline int
+irq_is_enabled(void)
+{
+  return mfpcr(PCR_SR) & SR_ET;
+}
+
+static __inline void
+enable_irqsave(int8_t* state)
+{
+       // *state tracks the number of nested enables and disables
+       // initial value of state: 0 = first run / no favorite
+       // > 0 means more enabled calls have been made
+       // < 0 means more disabled calls have been made
+       // Mostly doing this so we can call disable_irqsave first if we want
+
+       // one side or another "gets a point" if interrupts were already the
+       // way it wanted to go.  o/w, state stays at 0.  if the state was not 0
+       // then, enabling/disabling isn't even an option.  just increment/decrement
+
+       // if enabling is winning or tied, make sure it's enabled
+       if ((*state == 0) && !irq_is_enabled())
+               enable_irq();
+       else
+               (*state)++;
+}
+
+static __inline void
+disable_irqsave(int8_t* state)
+{
+       if ((*state == 0) && irq_is_enabled())
+               disable_irq();
+       else 
+               (*state)--;
+}
+
+static __inline void
+cpu_relax(void)
+{
+  for(int i = 0; i < 100; i++)
+         asm ("nop");
+}
+
+static __inline void
+cpu_halt(void)
+{
+  while(1);
+}
+
+static __inline void
+clflush(uintptr_t* addr)
+{
+}
+
+/* os_coreid -> hw_coreid */
+static __inline int
+get_hw_coreid(int coreid)
+{
+  return coreid;
+}
+
+static __inline int
+hw_core_id(void)
+{
+  return 0;
+}
+
+/* hw_coreid -> os_coreid */
+static __inline int
+get_os_coreid(int hw_coreid)
+{
+       return hw_coreid;
+}
+
+/* core_id() returns the OS core number, not to be confused with the
+ * hardware-specific core identifier (such as the lapic id) returned by
+ * hw_core_id() */
+static __inline int
+core_id(void)
+{
+       return get_os_coreid(hw_core_id());
+}
+
+static __inline void
+cache_flush(void)
+{
+}
+
+static __inline void
+reboot(void)
+{
+  extern void fesvr_die();
+       fesvr_die();
+       while(1);
+}
+
+#endif /* !ROS_INC_ARCH_H */
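
A minimal usage sketch of the enable_irqsave()/disable_irqsave() nesting counter above, from a caller's point of view; the function and the state it protects are illustrative, not part of this commit:

#include <arch/arch.h>

/* Must run with interrupts off, and may be called whether or not the caller
 * already disabled them; the int8_t counter lets the pairs nest without
 * re-enabling interrupts too early. */
static void touch_percpu_state(void)
{
	int8_t irq_state = 0;        /* 0: no preference recorded yet */

	disable_irqsave(&irq_state); /* disables IRQs, or just counts if already off */
	/* ... modify state that must not be interrupted (illustrative) ... */
	enable_irqsave(&irq_state);  /* re-enables only if this pair disabled them */
}
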
diff --git a/kern/arch/riscv/atomic.c b/kern/arch/riscv/atomic.c
new file mode 100644 (file)
index 0000000..66a5fd6
--- /dev/null
@@ -0,0 +1,24 @@
+#include <atomic.h>
+
+// This emulates compare and swap by hashing the address into one of
+// K buckets, acquiring the lock for that bucket, then performing the
+// operation during the critical section.  :-(
+bool atomic_comp_swap(uintptr_t *addr, uintptr_t exp_val, uintptr_t new_val)
+{
+       if (*addr != exp_val)
+               return 0;
+
+       #define K 17
+       static spinlock_t cas_locks[K*HW_CACHE_ALIGN/sizeof(spinlock_t)];
+
+       uintptr_t bucket = (uintptr_t)addr / sizeof(uintptr_t) % K;
+       spinlock_t* lock = &cas_locks[bucket*HW_CACHE_ALIGN/sizeof(spinlock_t)];
+       bool retval = 0;
+       spin_lock_irqsave(lock);
+       if (*addr == exp_val) {
+               atomic_swap(addr, new_val);
+               retval = 1;
+       }
+       spin_unlock_irqsave(lock);
+       return retval;
+}
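
Callers treat the emulated compare-and-swap like a native one; a minimal retry-loop sketch, assuming atomic.h is on the include path (the counter is illustrative):

#include <atomic.h>

/* Lock-free increment: re-read the current value and retry until no other
 * core changed *counter between the read and the swap. */
static void counter_inc(uintptr_t *counter)
{
	uintptr_t old;
	do {
		old = *counter;
	} while (!atomic_comp_swap(counter, old, old + 1));
}
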
diff --git a/kern/arch/riscv/atomic.h b/kern/arch/riscv/atomic.h
new file mode 100644 (file)
index 0000000..bbfee4b
--- /dev/null
@@ -0,0 +1,133 @@
+#ifndef ROS_INCLUDE_ATOMIC_H
+#define ROS_INCLUDE_ATOMIC_H
+
+#include <ros/common.h>
+#include <arch/arch.h>
+
+typedef void* atomic_t;
+struct spinlock {
+       volatile uint32_t rlock;
+};
+typedef struct spinlock spinlock_t;
+#define SPINLOCK_INITIALIZER {0}
+
+bool atomic_comp_swap(uintptr_t* addr, uintptr_t exp_val, uintptr_t new_val);
+
+static inline void atomic_init(atomic_t* number, uintptr_t val)
+{
+  *(uintptr_t*)number = val;
+}
+
+static inline uintptr_t atomic_read(atomic_t* number)
+{
+  return *(uintptr_t*)number;
+}
+
+static inline void atomic_set(atomic_t* number, uintptr_t val)
+{
+  *(uintptr_t*)number = val;
+}
+
+/* Adds val to number, returning number's original value */
+static inline uintptr_t atomic_fetch_and_add(atomic_t* number, uintptr_t val)
+{
+       return __sync_fetch_and_add((uintptr_t*)number, val);
+}
+
+static inline void atomic_add(atomic_t* number, uintptr_t val)
+{
+       atomic_fetch_and_add(number, val);
+}
+
+static inline void atomic_inc(atomic_t* number)
+{
+       atomic_add(number, 1);
+}
+
+static inline void atomic_dec(atomic_t* number)
+{
+       atomic_add(number, -1);
+}
+
+/* Adds val to number, so long as number was not zero. Returns TRUE if the
+ * operation succeeded (added, not zero), returns FALSE if number is zero. */
+static inline bool atomic_add_not_zero(atomic_t* number, uintptr_t val)
+{
+       uintptr_t old_num, new_num;
+       do {
+               old_num = atomic_read(number);
+               if (!old_num)
+                       return FALSE;
+               new_num = old_num + val;
+       } while (!atomic_comp_swap((uintptr_t*)number, old_num, new_num));
+       return TRUE;
+}
+
+/* Subtracts val from number, returning TRUE if the new value is 0. */
+static inline bool atomic_sub_and_test(atomic_t* number, uintptr_t val)
+{
+       return __sync_fetch_and_sub((uintptr_t*)number, val) == val;
+}
+
+static inline void atomic_or(atomic_t* number, uintptr_t mask)
+{
+       __sync_fetch_and_or(number, mask);
+}
+
+static inline uintptr_t atomic_swap(uintptr_t* addr, uintptr_t val)
+{
+       return __sync_lock_test_and_set(addr, val); // yes, really
+}
+
+// RISC-V has atomic word ops, not byte ops, so we must manipulate addresses
+static inline void atomic_andb(volatile uint8_t* number, uint8_t mask)
+{
+       uintptr_t offset = (uintptr_t)number & 3;
+       uint32_t wmask = (uint32_t)0xff << (8*offset);
+       wmask = ~wmask | ((uint32_t)mask << (8*offset));
+
+       __sync_fetch_and_and((uint32_t*)((uintptr_t)number & ~3), wmask);
+}
+
+static inline void atomic_orb(volatile uint8_t* number, uint8_t mask)
+{
+       uintptr_t offset = (uintptr_t)number & 3;
+       uint32_t wmask = (uint32_t)mask << (8*offset);
+
+       __sync_fetch_and_or((uint32_t*)((uintptr_t)number & ~3), wmask);
+}
+
+static inline uint32_t spin_locked(spinlock_t* lock)
+{
+       return lock->rlock;
+}
+
+static inline void __spin_lock(volatile uint32_t* rlock)
+{
+       while(__sync_fetch_and_or(rlock, 1))
+               while(*rlock);
+}
+
+static inline void spin_lock(spinlock_t *lock)
+{
+       __spin_lock(&lock->rlock);
+}
+
+static inline void spin_unlock(spinlock_t *lock)
+{
+       /* Need to prevent the compiler (and some arches) from reordering older
+        * stores */
+       wmb();
+       lock->rlock = 0;
+}
+
+static inline void spinlock_init(spinlock_t *lock)
+{
+       lock->rlock = 0;
+}
+
+static inline void spinlock_debug(spinlock_t* lock)
+{
+}
+
+#endif /* !ROS_INCLUDE_ATOMIC_H */
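
A short usage sketch of the atomic and spinlock primitives declared above; the ready counter and queue lock are illustrative names, not part of this commit:

#include <atomic.h>

static atomic_t nr_ready;
static spinlock_t queue_lock = SPINLOCK_INITIALIZER;

/* Count ready items with the lock-free helpers, and hold the spinlock only
 * around the part an atomic op can't cover. */
static void mark_ready(void)
{
	atomic_inc(&nr_ready);

	spin_lock(&queue_lock);
	/* ... link the item onto a queue (illustrative) ... */
	spin_unlock(&queue_lock);
}
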
diff --git a/kern/arch/riscv/bitmask.h b/kern/arch/riscv/bitmask.h
new file mode 100644 (file)
index 0000000..abb8a94
--- /dev/null
@@ -0,0 +1,113 @@
+#ifndef ROS_ARCH_BITMASK_H
+#define ROS_ARCH_BITMASK_H
+
+#ifndef __IVY__
+#include <ros/noivy.h>
+#endif
+
+#include <string.h>
+#include <sys/types.h>
+#include <atomic.h>
+#include <stdio.h>
+
+#define DECL_BITMASK(name, size) \
+       uint8_t (name)[BYTES_FOR_BITMASK((size))]
+
+#define BYTES_FOR_BITMASK(size) \
+       (((size) - 1) / 8 + 1)
+
+#define BYTES_FOR_BITMASK_WITH_CHECK(size) \
+       ((size) ? ((size) - (1)) / (8) + (1) : (0))
+
+static bool GET_BITMASK_BIT(uint8_t* name, size_t bit) 
+{
+       return (((name)[(bit)/8] & (1 << ((bit) % 8))) ? 1 : 0);
+}
+
+#define SET_BITMASK_BIT(name, bit) \
+       ((name)[(bit)/8] |= (1 << ((bit) % 8)));
+/*
+static void SET_BITMASK_BIT(uint8_t* name, size_t bit)
+{
+       ((name)[(bit)/8] |= (1 << ((bit) % 8)));
+}
+*/
+
+#define CLR_BITMASK_BIT(name, bit) \
+       ((name)[(bit)/8] &= ~(1 << ((bit) % 8)));
+/*
+static void CLR_BITMASK_BIT(uint8_t* name, size_t bit) 
+{
+       ((name)[(bit)/8] &= ~(1 << ((bit) % 8)));
+}
+*/
+
+static void SET_BITMASK_BIT_ATOMIC(uint8_t* name, size_t bit) 
+{
+       (atomic_orb(&(name)[(bit)/8], (1 << ((bit) % 8))));
+}
+
+#define CLR_BITMASK_BIT_ATOMIC(name, bit) \
+       (atomic_andb(&(name)[(bit)/8], ~(1 << ((bit) % 8))))
+
+#define CLR_BITMASK(name, size) \
+({ \
+       {TRUSTEDBLOCK \
+       memset((void*)((uintptr_t)(name)), 0, BYTES_FOR_BITMASK((size))); \
+       } \
+})
+
+#define FILL_BITMASK(name, size) \
+({ \
+       {TRUSTEDBLOCK \
+       memset((void*)((uintptr_t)(name)), 255, BYTES_FOR_BITMASK((size))); \
+       } \
+       (name)[BYTES_FOR_BITMASK((size))-1] >>= (((size) % 8) ? (8 - ((size) % 8)) : 0 ); \
+}) 
+
+#define COPY_BITMASK(newmask, oldmask, size) \
+({ \
+       {TRUSTEDBLOCK \
+       memcpy((void*)((uintptr_t)(newmask)), \
+           (void*)((uintptr_t)(oldmask)), \
+           BYTES_FOR_BITMASK((size))); \
+       } \
+})
+
+// this checks the entire last byte, so keep it 0 in the other macros
+#define BITMASK_IS_CLEAR(name, size) ({ \
+       uint32_t __n = BYTES_FOR_BITMASK((size)); \
+       bool clear = 1; \
+       while (__n-- > 0) { \
+               if ((name)[__n]) { \
+                       clear = 0; \
+                       break;\
+               }\
+       } \
+       clear; })
+
+static inline bool BITMASK_IS_FULL(uint8_t* map, size_t size)
+{
+       int _size = size;
+       for (int i = 0; i < BYTES_FOR_BITMASK(size); i++) {
+               for (int j = 0; j < MIN(8,_size); j++)
+                       if(!((map[i] >> j) &1))
+                               return FALSE;
+               _size -= 8;
+       }
+       return TRUE;
+}
+
+#define PRINT_BITMASK(name, size) { \
+       int i;  \
+       int _size = size; \
+       for (i = 0; i < BYTES_FOR_BITMASK(size); i++) { \
+               int j;  \
+               for (j = 0; j < MIN(8,_size); j++) \
+                       printk("%x", ((name)[i] >> j) & 1);     \
+               _size -= 8; \
+       } \
+       printk("\n"); \
+}
+
+#endif /* ROS_ARCH_BITMASK_H */
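
The macros above are meant to be used as a set; a minimal sketch with a hypothetical 64-entry mask of cores, assuming this header is reachable as <arch/bitmask.h>:

#include <arch/bitmask.h>

static DECL_BITMASK(core_mask, 64);

static void core_mask_demo(void)
{
	CLR_BITMASK(core_mask, 64);             /* start with every bit clear */
	SET_BITMASK_BIT(core_mask, 3);          /* plain, non-atomic set */
	SET_BITMASK_BIT_ATOMIC(core_mask, 5);   /* safe against other cores */

	if (GET_BITMASK_BIT(core_mask, 5) && !BITMASK_IS_CLEAR(core_mask, 64))
		CLR_BITMASK_BIT_ATOMIC(core_mask, 5);
}
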
diff --git a/kern/arch/riscv/boot/Makefrag b/kern/arch/riscv/boot/Makefrag
new file mode 100644 (file)
index 0000000..648bb7a
--- /dev/null
@@ -0,0 +1,31 @@
+#
+# Makefile fragment for the ROS kernel.
+# This is NOT a complete makefile;
+# you must run GNU make in the top-level directory
+# where the GNUmakefile is located.
+#
+
+KERN_BOOT_DIR := $(KERN_DIR)/boot
+OBJDIRS += $(KERN_BOOT_DIR)
+
+KERN_BOOT_CFLAGS  += $(KERN_CFLAGS) -Os
+KERN_BOOT_LDFLAGS := $(KERN_LDFLAGS)
+KERN_BOOT_OBJS    := $(OBJDIR)/$(KERN_BOOT_DIR)/boot.o \
+                     $(OBJDIR)/$(KERN_BOOT_DIR)/main.o
+
+$(OBJDIR)/$(KERN_BOOT_DIR)/%.o: $(KERN_BOOT_DIR)/%.c
+	@echo + cc [BOOT] $<
+	@mkdir -p $(@D)
+	$(V)$(CC) $(KERN_BOOT_CFLAGS) -c -o $@ $<
+
+$(OBJDIR)/$(KERN_BOOT_DIR)/%.o: $(KERN_BOOT_DIR)/%.S
+	@echo + as [BOOT] $<
+	@mkdir -p $(@D)
+	$(V)$(CC) $(KERN_BOOT_CFLAGS) -c -o $@ $<
+
+$(OBJDIR)/$(KERN_DIR)/boot: $(KERN_BOOT_OBJS)
+	@echo + ld [BOOT] $<
+	$(V)$(LD) $(KERN_BOOT_LDFLAGS) -o $@.out $^
+	$(V)$(OBJDUMP) -S $@.out >$@.asm
+	$(V)$(OBJCOPY) -S -O binary $@.out $@
+	$(V)perl $(KERN_BOOT_DIR)/sign.pl $(OBJDIR)/$(KERN_DIR)/boot
diff --git a/kern/arch/riscv/boot/boot b/kern/arch/riscv/boot/boot
new file mode 120000 (symlink)
index 0000000..cf41078
--- /dev/null
@@ -0,0 +1 @@
+arch/i686/boot
\ No newline at end of file
diff --git a/kern/arch/riscv/boot/boot.S b/kern/arch/riscv/boot/boot.S
new file mode 100644 (file)
index 0000000..04200f6
--- /dev/null
@@ -0,0 +1,68 @@
+/* See COPYRIGHT for copyright information. */
+
+#include <arch/arch.h>
+#include <ros/memlayout.h>
+
+///////////////////////////////////////////////////////////////////
+// The kernel (this code) is linked at address (KERNBASE + 0x00000000),
+// but we tell the bootloader to load it at physical address
+// 0x00000000, the base of physical memory.
+// (See kernel.ld)
+///////////////////////////////////////////////////////////////////
+
+
+///////////////////////////////////////////////////////////////////
+// RELOC(x) maps a symbol x from its link address to its actual
+// location in physical memory (its load address).
+///////////////////////////////////////////////////////////////////
+#define RELOC(x) ((x) - KERNBASE)
+
+///////////////////////////////////////////////////////////////////
+// entry point
+///////////////////////////////////////////////////////////////////
+
+.text
+
+.global _start
+.ent    _start
+_start:
+  // This is the first kernel code that executes; it is run only by core 0.
+
+  // set up stack and terminate frame pointer for backtracing
+  li     $fp, 0
+  la     $sp, RELOC(bootstacktop)
+
+  // set up trap entry point
+  la     $t0, RELOC(trap_entry)
+  mtpcr  $t0, ASM_CR(PCR_EVEC)
+
+  // enable traps
+  li     $t0, SR_S | SR_ET | SR_SX
+  mtpcr  $t0, ASM_CR(PCR_SR)
+
+       // turn on paging (no RELOC() since jal is pc-relative)
+       jal    mmu_init
+
+  // relocate stack and call into C code using absolute jump, not pc-relative
+  la     $sp, bootstacktop
+       lui    $t0, %hi(cmain)
+       jalr.j $t0, %lo(cmain)
+
+.end    _start
+
+///////////////////////////////////////////////////////////////////
+// boot stack and regular stacks.
+// (boot stack cannot be in .bss, as .bss is later zeroed by the kernel.)
+///////////////////////////////////////////////////////////////////
+
+.data
+  .align  PGSIZE
+  .space  KSTKSIZE
+  .global bootstacktop
+bootstacktop:
+
+.bss
+  .align  PGSIZE
+  .global percore_stacks
+percore_stacks:
+  .space  KSTKSIZE*MAX_NUM_CPUS
diff --git a/kern/arch/riscv/boot/main.c b/kern/arch/riscv/boot/main.c
new file mode 100644 (file)
index 0000000..ab47b20
--- /dev/null
@@ -0,0 +1,105 @@
+#include <multiboot.h>
+#include <ros/memlayout.h>
+
+static void
+build_multiboot_info(multiboot_info_t* mbi)
+{
+       long memsize_kb = mfpcr(PCR_MEMSIZE)*(PGSIZE/1024);
+       long basemem_kb = EXTPHYSMEM/1024;
+
+       memset(mbi, 0, sizeof(*mbi));
+
+       mbi->flags = 0x00000001;
+       mbi->mem_lower = basemem_kb;
+       mbi->mem_upper = memsize_kb - basemem_kb;
+}
+
+#ifdef __riscv64
+#define NL3PT ((KERNSIZE+L2PGSIZE-1)/L2PGSIZE)
+static pte_t l1pt[NL1ENTRIES], l2pt[NL2ENTRIES], l3pts[NL3PT][NL3ENTRIES]
+      __attribute__((section("data"))) __attribute__((aligned(PGSIZE)));
+#else
+static pte_t l1pt[NL1ENTRIES];
+#endif
+
+void
+mmu_init()
+{
+       pte_t* l1pt_phys = (pte_t*)((uint8_t*)l1pt - KERNBASE);
+
+       // Retain the identity mapping [0,KERNSIZE]
+       for(uintptr_t i = 0; i < (KERNSIZE+L1PGSIZE-1)/L1PGSIZE; i++)
+               l1pt_phys[i] = PTE(LA2PPN(i*L1PGSIZE), PTE_KERN_RW|PTE_E);
+
+       #ifdef __riscv64
+       // for rv64, we need to create an L1 and an L2 PT, and many L3 PTs.
+       pte_t* l2pt_phys = (pte_t*)((uint8_t*)l2pt - KERNBASE);
+
+       // kernel can be mapped by a single L1 page
+       static_assert(KERNSIZE <= L1PGSIZE);
+       static_assert(KERNBASE % L3PGSIZE == 0);
+
+       // highest L1 page contains KERNBASE mapping
+       uintptr_t l1x = L1X(KERNBASE);
+       l1pt_phys[l1x] = PTD(l2pt);
+
+       for(uintptr_t i = 0; i < NL3PT; i++)
+       {
+               uintptr_t l2x = L2X(KERNBASE + i*L2PGSIZE);
+               l2pt_phys[l2x] = PTD(l3pts[l2x]);
+               for(uintptr_t l3x = 0; l3x < NPTENTRIES; l3x++)
+               {
+                       uintptr_t addr = PGADDR(l1x, l2x, l3x, 0, 0);
+                       if(addr >= KERNBASE)
+                               l3pts[l2x][l3x] = PTE(LA2PPN(addr), PTE_KERN_RW | PTE_E);
+               }
+       }
+
+       // KERNBASE mapping
+       l1pt_phys[NPTENTRIES-1] = PTD(l2pt_phys);
+       for(uintptr_t i = 0; i < (KERNSIZE+L2PGSIZE-1)/L2PGSIZE; i++)
+               l2pt_phys[i] = PTD(l1pt_phys + i*NPTENTRIES);
+       
+  // Map the upper 2GB (0xFFFFFFFF80000000 and up) to alias the KERNBASE
+       // mapping.  We'll use this region to reference static/global variables
+       // more efficiently with a LUI/ADD pair, which can only reach addresses
+       // 0x00000000->0x7FFFF7FF and 0xFFFFFFFF80000000->0xFFFFFFFFFFFFF7FF.
+       // The alternative requires an 8-instruction sequence in the general case.
+  uintptr_t start = 0xFFFFFFFF80000000;
+       static_assert(start % L2PGSIZE == 0);
+       for(uintptr_t i = 0; i < ((uintptr_t)-start)/L2PGSIZE; i++)
+         l2pt[i+start/L2PGSIZE] = PTE(LA2PPN(i*L2PGSIZE), PTE_KERN_RW|PTE_E);
+       #else
+       // for rv32, just create the L1 page table.
+       static_assert(KERNBASE % L1PGSIZE == 0);
+
+       // KERNBASE mapping
+       for(uintptr_t i = 0; i < KERNSIZE/L1PGSIZE; i++)
+               l1pt_phys[i+KERNBASE/L1PGSIZE] = PTE(LA2PPN(i*L1PGSIZE), PTE_KERN_RW|PTE_E);
+       #endif
+
+       lcr3(l1pt_phys);
+       mtpcr(PCR_SR, mfpcr(PCR_SR) | SR_VM);
+}
+
+static void
+mmu_init_cleanup()
+{
+       // after relocation, we no longer rely on the identity mapping
+       for(uintptr_t i = 0; i < (KERNSIZE+L1PGSIZE-1)/L1PGSIZE; i++)
+               l1pt[i] = 0;
+       tlbflush();
+}
+
+void
+cmain()
+{
+       mmu_init_cleanup();
+
+       multiboot_info_t mbi;
+       build_multiboot_info(&mbi);
+
+       extern void kernel_init(multiboot_info_t *mboot_info);
+       // kernel_init expects a pre-relocation mbi address
+       kernel_init((multiboot_info_t*)((uint8_t*)&mbi - KERNBASE));
+}
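
The upper-2GB alias built in mmu_init() exists because a LUI/ADD pair produces a sign-extended 32-bit value, so only the lowest and highest 2GB of the 64-bit address space are reachable in two instructions. A standalone sketch of that arithmetic (plain host-side C, only to show the reachable ranges):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* A positive 32-bit immediate stays in the low 2GB... */
	int32_t low  = 0x7FFFF7FF;
	/* ...while a negative one sign-extends into the top of the address
	 * space, which is exactly where the alias mapping is installed. */
	int32_t high = (int32_t)0x80000000;

	printf("%016llx\n", (unsigned long long)(int64_t)low);   /* 000000007ffff7ff */
	printf("%016llx\n", (unsigned long long)(int64_t)high);  /* ffffffff80000000 */
	return 0;
}
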
diff --git a/kern/arch/riscv/colored_page_alloc.h b/kern/arch/riscv/colored_page_alloc.h
new file mode 100644 (file)
index 0000000..4aa5813
--- /dev/null
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2009 The Regents of the University  of California.  
+ * See the COPYRIGHT files at the top of this source tree for full 
+ * license information.
+ */
+/**
+ * @author Kevin Klues <klueska@cs.berkeley.edu>
+ */
+#ifndef ROS_KERN_ARCH_COLORED_PAGE_ALLOC_H
+#define ROS_KERN_ARCH_COLORED_PAGE_ALLOC_H
+
+/********** Page Coloring Related Macros ************/
+// Define these to make sure that each level of the cache
+// is initialized and managed properly
+#define DECLARE_CACHE_COLORED_PAGE_LINKS()                    \
+       DECLARE_CACHE_COLORED_PAGE_LINK(l1)                       \
+       DECLARE_CACHE_COLORED_PAGE_LINK(l2)                       \
+       DECLARE_CACHE_COLORED_PAGE_LINK(l3)
+
+#define DECLARE_CACHE_COLORED_PAGE_FREE_LISTS()               \
+       DECLARE_CACHE_COLORED_PAGE_FREE_LIST(l1)                  \
+       DECLARE_CACHE_COLORED_PAGE_FREE_LIST(l2)                  \
+       DECLARE_CACHE_COLORED_PAGE_FREE_LIST(l3)
+       
+#define DECLARE_EXTERN_CACHE_COLORED_PAGE_FREE_LISTS()        \
+       DECLARE_EXTERN_CACHE_COLORED_PAGE_FREE_LIST(l1)           \
+       DECLARE_EXTERN_CACHE_COLORED_PAGE_FREE_LIST(l2)           \
+       DECLARE_EXTERN_CACHE_COLORED_PAGE_FREE_LIST(l3)
+       
+#define DECLARE_CACHE_COLORED_PAGE_ALLOC_FUNCTIONS()          \
+       DECLARE_CACHE_COLORED_PAGE_ALLOC_FUNCTION(l1)             \
+       DECLARE_CACHE_COLORED_PAGE_ALLOC_FUNCTION(l2)             \
+       DECLARE_CACHE_COLORED_PAGE_ALLOC_FUNCTION(l3)
+
+#define INIT_CACHE_COLORED_PAGE_FREE_LISTS()                  \
+       INIT_CACHE_COLORED_PAGE_FREE_LIST(l1)                     \
+       INIT_CACHE_COLORED_PAGE_FREE_LIST(l2)                     \
+       INIT_CACHE_COLORED_PAGE_FREE_LIST(l3)
+
+#define REMOVE_CACHE_COLORING_PAGE_FROM_FREE_LISTS(page)      \
+       REMOVE_CACHE_COLORING_PAGE_FROM_FREE_LIST(page, l1)       \
+       REMOVE_CACHE_COLORING_PAGE_FROM_FREE_LIST(page, l2)       \
+       REMOVE_CACHE_COLORING_PAGE_FROM_FREE_LIST(page, l3)
+       
+#define INSERT_CACHE_COLORING_PAGE_ONTO_FREE_LISTS(page)      \
+       INSERT_CACHE_COLORING_PAGE_ONTO_FREE_LIST(page, l1)       \
+       INSERT_CACHE_COLORING_PAGE_ONTO_FREE_LIST(page, l2)       \
+       INSERT_CACHE_COLORING_PAGE_ONTO_FREE_LIST(page, l3)
+
+#endif // ROS_KERN_ARCH_COLORED_PAGE_ALLOC_H
diff --git a/kern/arch/riscv/console.c b/kern/arch/riscv/console.c
new file mode 100644 (file)
index 0000000..4dfc2cb
--- /dev/null
@@ -0,0 +1,99 @@
+#include <arch/console.h>
+#include <pmap.h>
+#include <atomic.h>
+
+long
+fesvr_syscall(long n, long a0, long a1, long a2, long a3)
+{
+  static volatile uint64_t magic_mem[8];
+
+  static spinlock_t lock = SPINLOCK_INITIALIZER;
+  spin_lock_irqsave(&lock);
+
+  magic_mem[0] = n;
+  magic_mem[1] = a0;
+  magic_mem[2] = a1;
+  magic_mem[3] = a2;
+  magic_mem[4] = a3;
+
+  asm volatile ("cflush; fence");
+
+  mtpcr(PCR_TOHOST, magic_mem);
+  while(mfpcr(PCR_FROMHOST) == 0);
+
+  long ret = magic_mem[0];
+
+  spin_unlock_irqsave(&lock);
+  return ret;
+}
+
+void
+fesvr_die()
+{
+       fesvr_syscall(FESVR_SYS_exit, 0, 0, 0, 0);
+}
+
+void
+cons_init(void)
+{
+}
+
+// `High'-level console I/O.  Used by readline and cprintf.
+
+void
+cputbuf(const char* buf, int len)
+{
+       fesvr_syscall(FESVR_SYS_write, 1, PADDR((uintptr_t)buf), len, 0);
+}
+
+// Low-level console I/O
+
+void
+cons_putc(int c)
+{
+       if(c == '\b' || c == 0x7F)
+       {
+               char buf[3] = {'\b', ' ', '\b'};
+               cputbuf(buf,3);
+       }
+       else
+       {
+               char ch = c;
+               cputbuf(&ch,1);
+       }
+}
+
+void
+cputchar(int c)
+{
+       char ch = c;
+       cputbuf(&ch,1);
+}
+
+int
+cons_getc()
+{
+       char ch;
+       uintptr_t paddr = PADDR((uintptr_t)&ch);
+       long ret = fesvr_syscall(FESVR_SYS_read, 0, paddr, 1, 0);
+       if(ch == 0x7F)
+               ch = '\b';
+       return ret <= 0 ? 0 : ch;
+}
+
+int
+getchar(void)
+{
+       int c;
+
+       while ((c = cons_getc()) == 0)
+               /* do nothing */;
+       return c;
+}
+
+int
+iscons(int fdnum)
+{
+       // used by readline
+       return 1;
+}
diff --git a/kern/arch/riscv/console.h b/kern/arch/riscv/console.h
new file mode 100644 (file)
index 0000000..3d54d5b
--- /dev/null
@@ -0,0 +1,41 @@
+/* See COPYRIGHT for copyright information. */
+
+#ifndef _CONSOLE_H_
+#define _CONSOLE_H_
+#ifndef ROS_KERNEL
+# error "This is a ROS kernel header; user programs should not #include it"
+#endif
+
+#include <ros/common.h>
+
+#define CRT_ROWS       25
+#define CRT_COLS       80
+#define CRT_SIZE       (CRT_ROWS * CRT_COLS)
+
+void cons_init(void);
+void cons_putc(int c);
+int cons_getc(void);
+
+#define        FESVR_SYS_exit  1
+#define        FESVR_SYS_getpid        20
+#define        FESVR_SYS_kill  37
+#define        FESVR_SYS_read  3
+#define        FESVR_SYS_write 4
+#define        FESVR_SYS_open  5
+#define        FESVR_SYS_close 6
+#define        FESVR_SYS_lseek 19
+#define        FESVR_SYS_brk           17
+#define        FESVR_SYS_link  9
+#define        FESVR_SYS_unlink        10
+#define        FESVR_SYS_chdir 12
+#define FESVR_SYS_stat 18
+#define FESVR_SYS_fstat        28
+#define        FESVR_SYS_lstat 84
+#define        FESVR_SYS_pread 180
+#define        FESVR_SYS_pwrite 181
+#define FESVR_SYS_getmainvars 201
+
+long fesvr_syscall(long n, long a0, long a1, long a2, long a3);
+void fesvr_die();
+
+#endif /* _CONSOLE_H_ */
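
A minimal sketch of driving the front-end server with these numbers; it mirrors what cputbuf() in console.c does, and assumes pmap.h for PADDR():

#include <arch/console.h>
#include <pmap.h>

/* Write a kernel buffer to the host's stdout (fd 1).  The host reads the
 * buffer by physical address, hence the PADDR() translation. */
static void host_write(const char *buf, size_t len)
{
	fesvr_syscall(FESVR_SYS_write, 1, PADDR((uintptr_t)buf), len, 0);
}
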
diff --git a/kern/arch/riscv/cpuinfo.c b/kern/arch/riscv/cpuinfo.c
new file mode 100644 (file)
index 0000000..cfe56b9
--- /dev/null
@@ -0,0 +1,51 @@
+#include <arch/arch.h>
+#include <arch/mmu.h>
+#include <stdio.h>
+#include <assert.h>
+#include <smp.h>
+#include <umem.h>
+#include <pmap.h>
+
+static void
+static_asserts_can_go_here()
+{
+       static_assert(SIZEOF_TRAPFRAME_T == sizeof(trapframe_t));
+}
+
+void
+print_cpuinfo(void)
+{
+       cprintf("CPU Info: Not Just Any Other RISC-V Core (TM)\n");
+}
+
+void show_mapping(uintptr_t start, size_t size)
+{
+  pde_t* pt = (pde_t*)KADDR(rcr3());
+       pte_t* pte;
+       uintptr_t i;
+       page_t* page;
+
+       cprintf("      Virtual            Physical      SR SW SX UR UW UX D R\n");
+       cprintf("------------------------------------------------------------\n");
+       for(i = 0; i < size; i += PGSIZE, start += PGSIZE)
+       {
+               page = page_lookup(pt, (void*)start, &pte);
+               cprintf("%016p  ",start);
+               if(page)
+               {
+                       cprintf("%016p  %1d  %1d  %1d  %1d  %1d  %1d %1d %1d\n",
+                               page2pa(page),
+                               !!(*pte & PTE_SR), !!(*pte & PTE_SW), !!(*pte & PTE_SX),
+                               !!(*pte & PTE_UR), !!(*pte & PTE_UW), !!(*pte & PTE_UX),
+                               !!(*pte & PTE_D), !!(*pte & PTE_R));
+               }
+               else
+                       cprintf("%016p\n",0);
+       }
+}
+
+void
+backtrace(void)
+{
+  panic("No backtrace yet!");
+}
diff --git a/kern/arch/riscv/endian.h b/kern/arch/riscv/endian.h
new file mode 100644 (file)
index 0000000..e4ed150
--- /dev/null
@@ -0,0 +1,30 @@
+/* Copyright (c) 2010 The Regents of the University of California
+ * Barret Rhoden <brho@cs.berkeley.edu>
+ * See LICENSE for details.
+ *
+ * Endian #def and byte_swapping functions */
+
+#ifndef ROS_KERN_ARCH_ENDIAN_H
+#define ROS_KERN_ARCH_ENDIAN_H
+
+#include <ros/common.h>
+#include <arch/types.h>
+
+static inline uint16_t byte_swap16(uint16_t x)
+{
+       return (uint16_t)(x << 8 | x >> 8);
+}
+
+static inline uint32_t byte_swap32(uint32_t x)
+{
+       return (uint32_t)(((uint32_t)byte_swap16(x & 0xffff) << 16) |
+                         (byte_swap16(x >> 16)));
+}
+
+static inline uint64_t byte_swap64(uint64_t x)
+{
+       return (uint64_t)(((uint64_t)byte_swap32(x & 0xffffffff) << 32) |
+                         (byte_swap32(x >> 32)));
+}
+
+#endif /* ROS_KERN_ARCH_ENDIAN_H */
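
These helpers behave like the usual bswap primitives; a few expected values, assuming the header above is in scope:

#include <assert.h>

static void byte_swap_demo(void)
{
	assert(byte_swap16(0x1234) == 0x3412);
	assert(byte_swap32(0x12345678) == 0x78563412);
	assert(byte_swap64(0x0102030405060708ULL) == 0x0807060504030201ULL);
}
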
diff --git a/kern/arch/riscv/entry.S b/kern/arch/riscv/entry.S
new file mode 100644 (file)
index 0000000..844cded
--- /dev/null
@@ -0,0 +1,167 @@
+#include <arch/pcr.h>
+#include <arch/trap.h>
+
+#ifdef __riscv64
+# define STORE    sd
+# define LOAD     ld
+# define REGBYTES 8
+#else
+# define STORE    sw
+# define LOAD     lw
+# define REGBYTES 4
+#endif
+
+  .text
+  .ent    save_kernel_tf_asm
+  .global save_kernel_tf_asm
+save_kernel_tf_asm:
+  STORE  $s0,20*REGBYTES($a0)
+  STORE  $s1,21*REGBYTES($a0)
+  STORE  $s2,22*REGBYTES($a0)
+  STORE  $s3,23*REGBYTES($a0)
+  STORE  $s4,24*REGBYTES($a0)
+  STORE  $s5,25*REGBYTES($a0)
+  STORE  $s6,26*REGBYTES($a0)
+  STORE  $s7,27*REGBYTES($a0)
+  STORE  $s8,28*REGBYTES($a0)
+  STORE  $s9,29*REGBYTES($a0)
+  STORE  $sp,30*REGBYTES($a0)
+
+  mfpcr  $t0,ASM_CR(PCR_SR)
+  STORE  $t0,32*REGBYTES($a0)
+
+  # set EPC to this function's return address
+  STORE  $ra,33*REGBYTES($a0)
+
+  .end  save_kernel_tf_asm
+
+  .ent  save_tf
+save_tf:  # write the trap frame onto the stack
+
+  # save gprs
+  STORE  $x3,3*REGBYTES($x2)
+  STORE  $x4,4*REGBYTES($x2)
+  STORE  $x5,5*REGBYTES($x2)
+  STORE  $x6,6*REGBYTES($x2)
+  STORE  $x7,7*REGBYTES($x2)
+  STORE  $x8,8*REGBYTES($x2)
+  STORE  $x9,9*REGBYTES($x2)
+  STORE  $x10,10*REGBYTES($x2)
+  STORE  $x11,11*REGBYTES($x2)
+  STORE  $x12,12*REGBYTES($x2)
+  STORE  $x13,13*REGBYTES($x2)
+  STORE  $x14,14*REGBYTES($x2)
+  STORE  $x15,15*REGBYTES($x2)
+  STORE  $x16,16*REGBYTES($x2)
+  STORE  $x17,17*REGBYTES($x2)
+  STORE  $x18,18*REGBYTES($x2)
+  STORE  $x19,19*REGBYTES($x2)
+  STORE  $x20,20*REGBYTES($x2)
+  STORE  $x21,21*REGBYTES($x2)
+  STORE  $x22,22*REGBYTES($x2)
+  STORE  $x23,23*REGBYTES($x2)
+  STORE  $x24,24*REGBYTES($x2)
+  STORE  $x25,25*REGBYTES($x2)
+  STORE  $x26,26*REGBYTES($x2)
+  STORE  $x27,27*REGBYTES($x2)
+  STORE  $x28,28*REGBYTES($x2)
+  STORE  $x29,29*REGBYTES($x2)
+  STORE  $x30,30*REGBYTES($x2)
+  STORE  $x31,31*REGBYTES($x2)
+
+  mfpcr  $x3,ASM_CR(PCR_K0)
+  STORE  $x3,1*REGBYTES($x2)          # $x1 is in $PCR_K0
+  mfpcr  $x3,ASM_CR(PCR_K1)
+  STORE  $x3,2*REGBYTES($x2)          # $x2 is in $PCR_K1
+
+  # get sr, epc, badvaddr, cause
+  mfpcr  $x3,ASM_CR(PCR_SR)          # sr
+  STORE  $x3,32*REGBYTES($x2)
+  mfpcr  $x4,ASM_CR(PCR_EPC)          # epc
+  STORE  $x4,33*REGBYTES($x2)
+  mfpcr  $x3,ASM_CR(PCR_BADVADDR)      # badvaddr
+  STORE  $x3,34*REGBYTES($x2)
+  mfpcr  $x3,ASM_CR(PCR_CAUSE)        # cause
+  STORE  $x3,35*REGBYTES($x2)
+
+  # get faulting insn, if it wasn't a fetch-related trap
+  li    $x5, CAUSE_MISALIGNED_FETCH
+  li    $x6, CAUSE_FAULT_FETCH
+  beq   $x3, $x5, 1f
+  beq   $x3, $x6, 1f
+  lh    $x3,0($x4)
+  lh    $x4,2($x4)
+  sh    $x3,  36*REGBYTES($x2)
+  sh    $x4,2+36*REGBYTES($x2)
+1:
+  ret
+  .end  save_tf
+
+  .globl  pop_tf
+  .ent  pop_tf
+pop_tf:  # restore the trap frame and return from the trap
+  # restore gprs
+  LOAD  $t0,32*REGBYTES($a0)  # restore sr (should disable interrupts)
+  mtpcr  $t0,ASM_CR(PCR_SR)
+
+  LOAD  $x1,1*REGBYTES($a0)
+  mtpcr  $x1,ASM_CR(PCR_K0)
+  LOAD  $x1,2*REGBYTES($a0)
+  mtpcr $x1,ASM_CR(PCR_K1)
+  move  $x1,$a0
+  LOAD  $x3,3*REGBYTES($x1)
+  LOAD  $x4,4*REGBYTES($x1)
+  LOAD  $x5,5*REGBYTES($x1)
+  LOAD  $x6,6*REGBYTES($x1)
+  LOAD  $x7,7*REGBYTES($x1)
+  LOAD  $x8,8*REGBYTES($x1)
+  LOAD  $x9,9*REGBYTES($x1)
+  LOAD  $x10,10*REGBYTES($x1)
+  LOAD  $x11,11*REGBYTES($x1)
+  LOAD  $x12,12*REGBYTES($x1)
+  LOAD  $x13,13*REGBYTES($x1)
+  LOAD  $x14,14*REGBYTES($x1)
+  LOAD  $x15,15*REGBYTES($x1)
+  LOAD  $x16,16*REGBYTES($x1)
+  LOAD  $x17,17*REGBYTES($x1)
+  LOAD  $x18,18*REGBYTES($x1)
+  LOAD  $x19,19*REGBYTES($x1)
+  LOAD  $x20,20*REGBYTES($x1)
+  LOAD  $x21,21*REGBYTES($x1)
+  LOAD  $x22,22*REGBYTES($x1)
+  LOAD  $x23,23*REGBYTES($x1)
+  LOAD  $x24,24*REGBYTES($x1)
+  LOAD  $x25,25*REGBYTES($x1)
+  LOAD  $x26,26*REGBYTES($x1)
+  LOAD  $x27,27*REGBYTES($x1)
+  LOAD  $x28,28*REGBYTES($x1)
+  LOAD  $x29,29*REGBYTES($x1)
+  LOAD  $x30,30*REGBYTES($x1)
+  LOAD  $x31,31*REGBYTES($x1)
+
+  # gtfo!
+  LOAD  $x2,33*REGBYTES($x1)
+  mtpcr $x2,ASM_CR(PCR_EPC)
+  mfpcr $x1,ASM_CR(PCR_K0)
+  mfpcr $x2,ASM_CR(PCR_K1)
+  eret
+  .end  pop_tf
+
+  .global  trap_entry
+  .ent  trap_entry
+trap_entry:
+  mtpcr $ra,ASM_CR(PCR_K0)
+  mtpcr $x2,ASM_CR(PCR_K1)
+
+  # when coming from kernel, continue below its stack
+  mfpcr $ra,ASM_CR(PCR_SR)
+  and   $ra,$ra,SR_PS
+  add   $x2, $sp, -SIZEOF_TRAPFRAME_T
+  bnez  $ra, 1f
+  la    $x2,bootstacktop-SIZEOF_TRAPFRAME_T
+
+1:jal   save_tf
+  move  $sp,$x2
+  move  $a0,$x2
+  jal    handle_trap
+  .end  trap_entry
diff --git a/kern/arch/riscv/i686 b/kern/arch/riscv/i686
new file mode 120000 (symlink)
index 0000000..7f061e5
--- /dev/null
@@ -0,0 +1 @@
+../arch/i686
\ No newline at end of file
diff --git a/kern/arch/riscv/init.h b/kern/arch/riscv/init.h
new file mode 100644 (file)
index 0000000..f412030
--- /dev/null
@@ -0,0 +1,11 @@
+/* See COPYRIGHT for copyright information. */
+
+#ifndef ROS_ARCH_INIT_H
+#define ROS_ARCH_INIT_H
+
+extern size_t argc;
+extern char** argv;
+
+void arch_init();
+
+#endif // !ROS_ARCH_INIT_H
diff --git a/kern/arch/riscv/kernel.ld b/kern/arch/riscv/kernel.ld
new file mode 100644 (file)
index 0000000..dce3cf4
--- /dev/null
@@ -0,0 +1,61 @@
+/* Simple linker script for the ROS kernel.
+   See the GNU ld 'info' manual ("info ld") to learn the syntax. */
+
+OUTPUT_ARCH("mips:riscv")
+ENTRY(_start)
+
+SECTIONS
+{
+       /* Link the kernel at 0xFFFFFF8000000000, but load it at physical address 0 */
+
+       .text 0xFFFFFF8000000000 : AT(0) {
+               *(.text .stub .text.* .gnu.linkonce.t.*)
+       }
+
+       PROVIDE(etext = .);     /* Define the 'etext' symbol to this value */
+
+       .rodata : {
+               *(.rodata .rodata.* .gnu.linkonce.r.*)
+       }
+
+       /* Include debugging information in kernel memory */
+       .stab : {
+               PROVIDE(stab = .);
+               PROVIDE(__STAB_BEGIN__ = .);
+               *(.stab);
+               PROVIDE(estab = .);
+               PROVIDE(__STAB_END__ = .);
+               BYTE(0)         /* Force the linker to allocate space
+                                  for this section */
+       }
+
+       .stabstr : {
+               PROVIDE(stabstr = .);
+               PROVIDE(__STABSTR_BEGIN__ = .);
+               *(.stabstr);
+               PROVIDE(estabstr = .);
+               PROVIDE(__STABSTR_END__ = .);
+               BYTE(0)         /* Force the linker to allocate space
+                                  for this section */
+       }
+
+       /* Adjust the address for the data segment to the next page */
+       . = ALIGN(0x1000);
+
+       /* The data segment */
+       .data : {
+               *(.data)
+       }
+
+       PROVIDE(edata = .);
+
+       .bss : {
+               *(.bss)
+       }
+
+       PROVIDE(end = .);
+
+       /DISCARD/ : {
+               *(.eh_frame .note.GNU-stack)
+       }
+}
diff --git a/kern/arch/riscv/mmu.h b/kern/arch/riscv/mmu.h
new file mode 100644 (file)
index 0000000..bc58e22
--- /dev/null
@@ -0,0 +1,118 @@
+#ifndef ROS_ARCH_MMU_H
+#define ROS_ARCH_MMU_H
+
+/*
+ * This file contains definitions for the RISC-V MMU.
+ */
+
+#include <ros/arch/mmu.h>
+
+/*
+ *
+ *     Part 1.  Paging data structures and constants.
+ *
+ */
+
+// RV64 virtual addresses are 48 bits, sign-extended out to 64 bits,
+// creating a hole between 0x0000 7FFF FFFF FFFF and 0xFFFF 8000 0000 0000.
+// Bits 11-0 are the page offset; L1/L2/L3/L4 page table indices are given
+// by bits 47-39, 38-30, 29-21, and 20-12, respectively.
+//
+// In RV32, virtual addresses are 32 bits; bits 11-0 are the page offset;
+// and L1/L2 page table indices are given by bits 31-22 and 21-12,
+// respectively.
+//
+// In both cases, the last-level page size is 4KB, as is the page table size.
+
+// page number field of address
+#define LA2PPN(la)     (((uintptr_t) (la)) >> PGSHIFT)
+
+// page number field of PPN
+#define PTE2PPN(pte)   (((uintptr_t) (pte)) >> PTE_PPN_SHIFT)
+
+// index into L1 PT
+#define L1X(la)                ((((uintptr_t) (la)) >> L1PGSHIFT) & (NPTENTRIES-1))
+
+// index into L2 PT
+#define L2X(la)                ((((uintptr_t) (la)) >> L2PGSHIFT) & (NPTENTRIES-1))
+
+#ifdef __riscv64
+// index into L3 PT
+#define L3X(la)                ((((uintptr_t) (la)) >> L3PGSHIFT) & (NPTENTRIES-1))
+
+// index into L4 PT
+#define L4X(la)                ((((uintptr_t) (la)) >> L4PGSHIFT) & (NPTENTRIES-1))
+
+// construct linear address from indexes and offset
+#define PGADDR(l1, l2, l3, l4, o) ((void*) ((l1) << L1PGSHIFT | (l2) << L2PGSHIFT | (l3) << L3PGSHIFT | (l4) << L4PGSHIFT | (o)))
+#else
+// construct linear address from indexes and offset
+#define PGADDR(l1, l2, o) ((void*) ((l1) << L1PGSHIFT | (l2) << L2PGSHIFT | (o)))
+#endif
+
+// offset in page
+#define PGOFF(la)      (((uintptr_t) (la)) & (PGSIZE-1))
+
+// construct PTE from PPN and flags
+#define PTE(ppn, flags) ((ppn) << PTE_PPN_SHIFT | (flags))
+
+// construct PTD from physical address
+#define PTD(pa) ((uintptr_t)(pa) | PTE_T)
+
+// Page directory and page table constants
+#define NPTENTRIES (PTSIZE/sizeof(pte_t))
+
+// Page table/directory entry flags.
+#define PTE_T    0x001 // Entry is a page Table descriptor
+#define PTE_E    0x002 // Entry is a page table Entry
+#define PTE_R    0x004 // Referenced
+#define PTE_D    0x008 // Dirty
+#define PTE_UX   0x010 // User eXecute permission
+#define PTE_UW   0x020 // User Write permission
+#define PTE_UR   0x040 // User Read permission
+#define PTE_SX   0x080 // Supervisor eXecute permission
+#define PTE_SW   0x100 // Supervisor Write permission
+#define PTE_SR   0x200 // Supervisor Read permission
+#define PTE_PERM (PTE_SR | PTE_SW | PTE_SX | PTE_UR | PTE_UW | PTE_UX)
+#define PTE_PPN_SHIFT 12
+
+// commonly used access modes
+#define PTE_KERN_RW    (PTE_SR | PTE_SW | PTE_SX)
+#define PTE_KERN_RO    (PTE_SR | PTE_SX)
+#define PTE_USER_RW    (PTE_SR | PTE_SW | PTE_UR | PTE_UW | PTE_UX)
+#define PTE_USER_RO    (PTE_SR | PTE_UR | PTE_UX)
+
+// x86 equivalencies
+#define PTE_P      PTE_E
+#define NPDENTRIES NPTENTRIES
+#define PDX(la)    L1X(la)                     // for env stuff
+
+// address in page table entry
+#define PTE_ADDR(pte)  ((physaddr_t) (pte) & ~(PGSIZE-1))
+
+// address in page table descriptor
+#define PTD_ADDR(ptd)  PTE_ADDR(ptd)
+
+// MMU Control Register flags
+#define MMU_CR_E       0x00000001      // Protection Enable
+#define MMU_CR_NF      0x00000002      // No Fault mode
+#define MMU_CR_PSO     0x00000080      // Partial Store Order (TSO disabled)
+
+// MMU Fault Status Register flags
+#define MMU_FSR_USER   0x00000020      // Fault caused by user-space access
+#define MMU_FSR_EX     0x00000040      // Fault occurred in instruction-space
+#define MMU_FSR_WR     0x00000080      // Fault caused by a store
+
+// MMU Register Addresses
+#define MMU_REG_CTRL   0x00000000      // MMU Control Register
+#define MMU_REG_CTXTBL 0x00000100      // MMU Context Table Pointer Register
+#define MMU_REG_CTX    0x00000200      // MMU Context Register
+#define MMU_REG_FSR    0x00000300      // MMU Fault Status Register
+#define MMU_REG_FAR    0x00000400      // MMU Fault Address Register
+
+// we must guarantee that for any PTE, exactly one of the following is true
+#define PAGE_PRESENT(pte) ((pte) & PTE_P)
+#define PAGE_UNMAPPED(pte) ((pte) == 0)
+#define PAGE_PAGED_OUT(pte) (!PAGE_PRESENT(pte) && !PAGE_UNMAPPED(pte))
+
+#endif /* !ROS_INC_MMU_H */
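
A short sketch of how the RV64 index macros decompose a kernel virtual address and how PGADDR() reassembles its 48 translated bits; the sample address is arbitrary, and assert.h is assumed to be available:

#include <assert.h>

static void va_split_demo(void)
{
	uintptr_t va = 0xFFFFFF8000123456;   /* some KERNBASE-region address */

	uintptr_t l1  = L1X(va);     /* bits 47-39 */
	uintptr_t l2  = L2X(va);     /* bits 38-30 */
	uintptr_t l3  = L3X(va);     /* bits 29-21 */
	uintptr_t l4  = L4X(va);     /* bits 20-12 */
	uintptr_t off = PGOFF(va);   /* bits 11-0  */

	/* PGADDR() rebuilds the low 48 bits; hardware sign-extends the rest. */
	assert((uintptr_t)PGADDR(l1, l2, l3, l4, off) == (va & 0x0000FFFFFFFFFFFF));
}
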
diff --git a/kern/arch/riscv/pcr.h b/kern/arch/riscv/pcr.h
new file mode 100644 (file)
index 0000000..a4defef
--- /dev/null
@@ -0,0 +1,82 @@
+#ifndef _RISCV_COP0_H
+#define _RISCV_COP0_H
+
+#define SR_ET    0x0000000000000001
+#define SR_EF    0x0000000000000002
+#define SR_EV    0x0000000000000004
+#define SR_EC    0x0000000000000008
+#define SR_PS    0x0000000000000010
+#define SR_S     0x0000000000000020
+#define SR_UX    0x0000000000000040
+#define SR_SX    0x0000000000000080
+#define SR_IM    0x000000000000FF00
+#define SR_IM7   0x0000000000008000
+
+#define PCR_SR       0
+#define PCR_EPC      1
+#define PCR_BADVADDR 2
+#define PCR_EVEC     3
+#define PCR_COUNT    4
+#define PCR_COMPARE  5
+#define PCR_CAUSE    6
+#define PCR_MEMSIZE  8
+#define PCR_PTBR     9
+#define PCR_TOHOST   16
+#define PCR_FROMHOST 17
+#define PCR_CONSOLE  18
+#define PCR_K0       24
+#define PCR_K1       25
+
+#define CR_FSR       0
+#define CR_TID       29
+
+#define MEMSIZE_SHIFT 12
+
+#define TIMER_PERIOD 0x1000
+#define TIMER_IRQ 7
+
+#define CAUSE_EXCCODE 0x000000FF
+#define CAUSE_IP      0x0000FF00
+#define CAUSE_EXCCODE_SHIFT 0
+#define CAUSE_IP_SHIFT      8
+
+#define CAUSE_MISALIGNED_FETCH 0
+#define CAUSE_FAULT_FETCH 1
+#define CAUSE_ILLEGAL_INSTRUCTION 2
+#define CAUSE_PRIVILEGED_INSTRUCTION 3
+#define CAUSE_FP_DISABLED 4
+#define CAUSE_INTERRUPT 5
+#define CAUSE_SYSCALL 6
+#define CAUSE_BREAKPOINT 7
+#define CAUSE_MISALIGNED_LOAD 8
+#define CAUSE_MISALIGNED_STORE 9
+#define CAUSE_FAULT_LOAD 10
+#define CAUSE_FAULT_STORE 11
+#define CAUSE_VECTOR_DISABLED 12
+#define NUM_CAUSES 13
+
+#define ASM_CR(r)   _ASM_CR(r)
+#define _ASM_CR(r)  $cr##r
+
+#ifndef __ASSEMBLER__
+
+#define mtpcr(reg,val) ({ long __tmp = (long)(val); \
+          asm volatile ("mtpcr %0,$cr%1"::"r"(__tmp),"i"(reg)); })
+
+#define mfpcr(reg) ({ long __tmp; \
+          asm volatile ("mfpcr %0,$cr%1" : "=r"(__tmp) : "i"(reg)); \
+          __tmp; })
+
+#define mtcr(reg,val) ({ long __tmp = (long)(val); \
+          asm volatile ("mtcr %0,$cr%1"::"r"(__tmp),"i"(reg)); })
+
+#define mfcr(reg) ({ long __tmp; \
+          asm volatile ("mfcr %0,$cr%1" : "=r"(__tmp) : "i"(reg)); \
+          __tmp; })
+
+#define irq_disable() asm volatile("di")
+#define irq_enable() asm volatile("ei")
+
+#endif
+
+#endif
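
A small sketch of the control-register accessors in use; this is the same pattern arch.h's irq_is_enabled() and enable_irq() rely on:

#include <arch/pcr.h>

/* Read the status register and set the trap-enable bit without disturbing
 * the other SR fields. */
static void ensure_traps_enabled(void)
{
	long sr = mfpcr(PCR_SR);

	if (!(sr & SR_ET))
		mtpcr(PCR_SR, sr | SR_ET);
}
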
diff --git a/kern/arch/riscv/riscv.h b/kern/arch/riscv/riscv.h
new file mode 100644 (file)
index 0000000..7f8e7d4
--- /dev/null
@@ -0,0 +1,26 @@
+#ifndef ROS_INC_RISCV_H
+#define ROS_INC_RISCV_H
+
+#include <arch/pcr.h>
+
+static __inline void
+lcr3(uintptr_t val)
+{
+  mtpcr(PCR_PTBR, val);
+}
+
+static __inline uintptr_t
+rcr3(void)
+{
+  return mfpcr(PCR_PTBR);
+}
+
+static __inline uintptr_t
+read_pc(void)
+{
+  uintptr_t pc;
+       asm ("rdnpc %0" : "=r"(pc));
+  return pc;
+}
+
+#endif
diff --git a/kern/arch/riscv/ros/arch.h b/kern/arch/riscv/ros/arch.h
new file mode 100644 (file)
index 0000000..6c32ea5
--- /dev/null
@@ -0,0 +1,6 @@
+#ifndef _ROS_ARCH_ARCH_H
+#define _ROS_ARCH_ARCH_H
+
+#define MAX_NUM_CPUS                           64
+
+#endif
diff --git a/kern/arch/riscv/ros/membar.h b/kern/arch/riscv/ros/membar.h
new file mode 100644 (file)
index 0000000..aa5faf8
--- /dev/null
@@ -0,0 +1,13 @@
+#ifndef _ARCH_MEMBAR_H
+#define _ARCH_MEMBAR_H
+
+#define mb() __sync_synchronize()
+#define rmb() mb()
+#define wmb() mb()
+/* Compiler memory barrier */
+#define cmb() ({ asm volatile("" ::: "memory"); })
+/* Force a wmb, used in cases where an IPI could beat a write, even though
+ * write-orderings are respected. */
+#define wmb_f() mb()
+
+#endif
diff --git a/kern/arch/riscv/ros/mmu.h b/kern/arch/riscv/ros/mmu.h
new file mode 100644 (file)
index 0000000..7360c57
--- /dev/null
@@ -0,0 +1,38 @@
+#ifndef _ROS_ARCH_MMU_H
+#define _ROS_ARCH_MMU_H
+
+// All physical memory mapped at this address
+#ifdef __riscv64
+# define KERNBASE  0xFFFFFF8000000000
+# define NLEVELS                    4
+# define L1PGSHIFT         (12+9+9+9)
+# define L1PGSIZE    (1 << L1PGSHIFT)
+# define L2PGSHIFT           (12+9+9)
+# define L2PGSIZE    (1 << L2PGSHIFT)
+# define L3PGSHIFT             (12+9)
+# define L3PGSIZE    (1 << L3PGSHIFT)
+# define L4PGSHIFT               (12)
+# define L4PGSIZE    (1 << L4PGSHIFT)
+# define PGSHIFT            L4PGSHIFT
+# define KPGSHIFT           L3PGSHIFT
+#else
+# define KERNBASE          0x80000000
+# define NLEVELS                    2
+# define L1PGSHIFT            (12+10)
+# define L1PGSIZE    (1 << L1PGSHIFT)
+# define L2PGSHIFT                 12
+# define L2PGSIZE    (1 << L2PGSHIFT)
+# define PGSHIFT            L2PGSHIFT
+# define KPGSHIFT           L1PGSHIFT
+#endif
+
+#define PGSIZE (1 << PGSHIFT)
+#define KPGSIZE (1 << KPGSHIFT)
+#define PTSIZE PGSIZE
+
+#ifndef __ASSEMBLER__
+typedef unsigned long pte_t;
+typedef unsigned long pde_t;
+#endif
+
+#endif
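
With these shifts, each level of the RV64 page table maps a successively smaller region: 512 GB, 1 GB, 2 MB, and 4 KB per entry. A standalone sketch of the arithmetic:

#include <stdio.h>

int main(void)
{
	/* Same values as L1PGSHIFT..L4PGSHIFT above. */
	unsigned long long l1 = 1ULL << (12 + 9 + 9 + 9);   /* 512 GB */
	unsigned long long l2 = 1ULL << (12 + 9 + 9);       /* 1 GB   */
	unsigned long long l3 = 1ULL << (12 + 9);           /* 2 MB   */
	unsigned long long l4 = 1ULL << 12;                 /* 4 KB   */

	printf("%llu GB  %llu GB  %llu MB  %llu KB\n",
	       l1 >> 30, l2 >> 30, l3 >> 20, l4 >> 10);
	return 0;
}
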
diff --git a/kern/arch/riscv/ros/syscall.h b/kern/arch/riscv/ros/syscall.h
new file mode 100644 (file)
index 0000000..797c478
--- /dev/null
@@ -0,0 +1,19 @@
+#ifndef _ROS_ARCH_SYSCALL_H
+#define _ROS_ARCH_SYSCALL_H
+
+#ifndef ROS_KERNEL
+
+static inline long __attribute__((always_inline))
+__ros_arch_syscall(long _a0, long _a1)
+{
+       register long a0 asm("a0") = _a0;
+       register long a1 asm("a1") = _a1;
+
+       asm volatile("syscall" : "=r"(a0) : "0"(a0),"r"(a1) : "memory");
+
+       return a0;
+}
+
+#endif /* ifndef ROS_KERNEL */
+
+#endif
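
A user-side sketch of calling through the stub; the syscall number here is purely illustrative (the real numbers live in the generic ros/ headers, not in this file):

#include <ros/arch/syscall.h>

static long syscall_demo(void)
{
	long hypothetical_syscall_nr = 0;   /* illustrative only */
	long arg = 0;

	/* The number goes in a0, the single argument in a1, and the result
	 * comes back in a0. */
	return __ros_arch_syscall(hypothetical_syscall_nr, arg);
}
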
diff --git a/kern/arch/riscv/ros/trapframe.h b/kern/arch/riscv/ros/trapframe.h
new file mode 100644 (file)
index 0000000..b41b7c3
--- /dev/null
@@ -0,0 +1,27 @@
+#ifndef ROS_INCLUDE_ARCH_TRAPFRAME_H
+#define ROS_INCLUDE_ARCH_TRAPFRAME_H
+
+#include <ros/common.h>
+#include <stdint.h>
+
+typedef struct trapframe
+{
+  uintptr_t gpr[32];
+  uintptr_t sr;
+  uintptr_t epc;
+  uintptr_t badvaddr;
+  uintptr_t cause;
+  uintptr_t insn;
+} trapframe_t;
+
+/* TODO: consider using a user-space specific trapframe, since they don't need
+ * all of this information.  Will do that eventually, but til then: */
+#define user_trapframe trapframe
+
+typedef struct ancillary_state
+{
+       uint64_t fpr[32];
+       uint32_t fsr;
+} ancillary_state_t;
+
+#endif /* !ROS_INCLUDE_ARCH_TRAPFRAME_H */
diff --git a/kern/arch/riscv/smp.h b/kern/arch/riscv/smp.h
new file mode 100644 (file)
index 0000000..62da4eb
--- /dev/null
@@ -0,0 +1,22 @@
+/*
+ * Copyright (c) 2009 The Regents of the University of California
+ * Barret Rhoden <brho@cs.berkeley.edu>
+ * See LICENSE for details.
+ */
+
+#ifndef ROS_ARCH_SMP_H
+#define ROS_ARCH_SMP_H
+
+#include <atomic.h>
+
+// be careful changing this, esp if you go over 16
+#define NUM_HANDLER_WRAPPERS           5
+
+struct HandlerWrapper {
+       checklist_t* cpu_list;
+       uint8_t vector;
+};
+
+typedef struct HandlerWrapper LCKD(&cpu_list->lock) handler_wrapper_t;
+
+#endif /* !ROS_ARCH_SMP_H */
diff --git a/kern/arch/riscv/timer.c b/kern/arch/riscv/timer.c
new file mode 100644 (file)
index 0000000..0e26ea5
--- /dev/null
@@ -0,0 +1,54 @@
+#include <arch/timer.h>
+#include <ros/common.h>
+#include <arch/trap.h>
+#include <arch/arch.h>
+#include <stdio.h>
+#include <assert.h>
+
+system_timing_t system_timing = {0};
+
+void
+timer_init(void)
+{      
+  mtpcr(PCR_COUNT, 0);
+  mtpcr(PCR_COMPARE, 0);
+       mtpcr(PCR_SR, mfpcr(PCR_SR) | SR_IM7);
+
+       system_timing.tsc_freq = TSC_HZ;
+       cprintf("TSC Frequency: %llu\n", system_timing.tsc_freq);
+}
+
+/* Warning: one-shot timers are unsupported; all timers are periodic.
+ * Perhaps this support could be added with a per_cpu boolean, set
+ * by set_core_timer, and interpreted by the interrupt handler. */
+void
+set_core_timer(uint32_t usec, bool periodic)
+{
+       uint32_t clocks = (uint64_t)usec*TSC_HZ/1000000;
+
+       int8_t irq_state = 0;
+       disable_irqsave(&irq_state);
+
+       mtpcr(PCR_COMPARE, mfpcr(PCR_COUNT) + clocks);
+
+       enable_irqsave(&irq_state);
+}
+
+void
+udelay(uint64_t usec)
+{
+       if (system_timing.tsc_freq != 0)
+       {
+               uint64_t start, end, now;
+        
+               start = read_tsc();
+               end = start + (system_timing.tsc_freq * usec) / 1000000;
+
+               do
+               {
+                       cpu_relax();
+                       now = read_tsc();
+               } while (now < end || (now > start && end < start));
+       }
+       else panic("udelay() was called before timer_init(), moron!");
+}
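
The microsecond-to-cycle conversion used by set_core_timer() and udelay() is plain proportional arithmetic against TSC_HZ; a standalone sketch:

#include <stdint.h>
#include <stdio.h>

#define TSC_HZ 1000000000ULL   /* mirrors arch/timer.h: 1 GHz core clock */

int main(void)
{
	uint32_t usec = 1500;
	/* Multiply in 64 bits before dividing so sub-second values don't truncate. */
	uint64_t clocks = (uint64_t)usec * TSC_HZ / 1000000;

	printf("%u us -> %llu cycles\n", usec, (unsigned long long)clocks);  /* 1500000 */
	return 0;
}
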
diff --git a/kern/arch/riscv/timer.h b/kern/arch/riscv/timer.h
new file mode 100644 (file)
index 0000000..4ba459e
--- /dev/null
@@ -0,0 +1,17 @@
+#ifndef ROS_ARCH_TIMER_H
+#define ROS_ARCH_TIMER_H
+
+#define TSC_HZ 1000000000 // really, this is the core clock frequency
+
+#include <ros/common.h>
+
+typedef struct system_timing {
+       uint64_t tsc_freq;
+} system_timing_t;
+
+extern system_timing_t system_timing;
+
+void timer_init(void);
+void set_core_timer(uint32_t usec, bool periodic);
+
+#endif /* !ROS_ARCH_TIMER_H */
diff --git a/kern/arch/riscv/trap.h b/kern/arch/riscv/trap.h
new file mode 100644 (file)
index 0000000..4dca119
--- /dev/null
@@ -0,0 +1,37 @@
+#ifndef ROS_ARCH_TRAP_H
+#define ROS_ARCH_TRAP_H
+
+#ifdef __riscv64
+# define SIZEOF_TRAPFRAME_T (37*8)
+#else
+# define SIZEOF_TRAPFRAME_T (37*4)
+#endif
+
+#ifndef __ASSEMBLER__
+
+#include <ros/arch/trapframe.h>
+#include <arch/arch.h>
+
+static inline bool in_kernel(struct trapframe *tf)
+{
+       return tf->sr & SR_PS;
+}
+
+static inline void __attribute__((always_inline))
+set_stack_pointer(uintptr_t sp)
+{
+       asm volatile("move $sp,%0" : : "r"(sp) : "memory");
+}
+
+/* Save's the current kernel context into tf, setting the PC to the end of this
+ * function.  Note the kernel doesn't need to save a lot.
+ * Implemented with extern function to cause compiler to clobber most regs. */
+static inline void save_kernel_tf(struct trapframe *tf)
+{
+  extern void save_kernel_tf_asm(struct trapframe*);
+       save_kernel_tf_asm(tf);
+}
+
+#endif
+
+#endif
diff --git a/kern/arch/riscv/types.h b/kern/arch/riscv/types.h
new file mode 100644 (file)
index 0000000..305210d
--- /dev/null
@@ -0,0 +1,38 @@
+#ifndef ROS_INC_TYPES_H
+#define ROS_INC_TYPES_H
+
+#include <stddef.h>
+
+#define LITTLE_ENDIAN
+
+#ifdef __riscv64
+# define NUM_ADDR_BITS 64
+#else
+# define NUM_ADDR_BITS 32
+#endif
+
+#define MAX_VADDR     ((uint64_t)(~0) >> (64-NUM_ADDR_BITS))
+
+//Constants for byte sizes
+#define ONE_KILOBYTE  (1L<<10)
+#define ONE_MEGABYTE  (1L<<20)
+#define ONE_GIGABYTE  (1L<<30)
+
+// Explicitly-sized versions of integer types
+typedef __signed char int8_t;
+typedef unsigned char uint8_t;
+typedef short int16_t;
+typedef unsigned short uint16_t;
+typedef int int32_t;
+typedef unsigned int uint32_t;
+typedef long long int64_t;
+typedef unsigned long long uint64_t;
+
+typedef long ssize_t;
+typedef long pid_t;
+typedef long uid_t;
+typedef long gid_t;
+
+typedef unsigned long uintptr_t;
+
+#endif /* !ROS_INC_TYPES_H */