Build system to map MMIO into our virtual address space.
authorPaul Pearce <pearce@eecs.berkeley.edu>
Thu, 22 Apr 2010 21:32:59 +0000 (14:32 -0700)
committerKevin Klues <klueska@cs.berkeley.edu>
Thu, 3 Nov 2011 00:35:58 +0000 (17:35 -0700)
cherry-pick paul commit #1
Added a new function, mmio_alloc, which pulls pages from the space
above LAPIC_BASE + PGSIZE, and maps them to physical pages that
are used for MMIO by devices.

This removes all the hacky MMIO code from the driver and arch/pmap.c

kern/arch/i686/e1000.c
kern/arch/i686/pmap.c
kern/arch/sparc/pmap.c
kern/include/pmap.h

index 34eab27..0a01850 100644 (file)
@@ -195,15 +195,12 @@ int e1000_scan_pci() {
                                        e1000_addr_size = result;
                     e1000_debug("-->MMIO Size %x\n", e1000_addr_size);
                                        outl(PCI_CONFIG_DATA, e1000_mmio_base_addr);
+                                       e1000_mmio_base_addr = (uint32_t) mmio_alloc(e1000_mmio_base_addr,
+                                                                                                                               e1000_addr_size);
+                                       if (e1000_mmio_base_addr == 0x00) {
+                                               panic("Could not map in E1000 MMIO space\n");
+                                       }
                
-                                       #ifdef __CONFIG_E1000_MMIO_HACK__
-                                       /* TODO: WARNING - EXTREMELY GHETTO */
-                                       // Map the page in.
-                                       e1000_debug("HACK FOR BROKEN MMIO\n");
-                                       e1000_mmio_base_addr = E1000_MMIO_ADDR;
-                                       outl(PCI_CONFIG_DATA, e1000_mmio_base_addr);
-                                       e1000_mmio_base_addr = 0xfee00000 + 0x1000;
-                                       #endif
                                }
                        }                                               
                }
@@ -592,7 +589,7 @@ void e1000_interrupt_handler(trapframe_t *tf, void* data) {
                //printk("Interrupt status: %x\n", interrupt_status);
 
                if ((interrupt_status & E1000_ICR_INT_ASSERTED) && (interrupt_status & E1000_ICR_RXT0)) {
-                       e1000_debug("---->Packet Received\n");
+                       e1000_interrupt_debug("---->Packet Received\n");
                        e1000_handle_rx_packet();
                }       
                // Clear interrupts     
index f64065c..0e4814e 100644 (file)
@@ -29,6 +29,9 @@ physaddr_t RO boot_cr3;               // Physical address of boot time page directory
 // Global variables
 page_t *RO pages = NULL;          // Virtual address of physical page array
 
+// Base of unallocated MMIO space
+void* mmio_base = (void*)LAPIC_BASE + PGSIZE;
+
 // Global descriptor table.
 //
 // The kernel and user segments are identical (except for the DPL).
@@ -366,12 +369,6 @@ vm_init(void)
        boot_map_segment(pgdir, (uintptr_t)LAPIC_BASE, PGSIZE, LAPIC_BASE,
                         PTE_PCD | PTE_PWT | PTE_W | PTE_G);
 
-#ifdef __CONFIG_E1000_MMIO_HACK__
-       // MMIO HACK
-       boot_map_segment(pgdir, (uintptr_t)LAPIC_BASE + PGSIZE, 0x20000, 
-                        E1000_MMIO_ADDR, PTE_PCD | PTE_PWT | PTE_W | PTE_G);
-#endif
-
        // Check that the initial page directory has been set up correctly.
        check_boot_pgdir(pse);
 
@@ -802,6 +799,67 @@ page_check(void)
        cprintf("page_check() succeeded!\n");
 }
 
+// Allocate size bytes from the MMIO region of virtual memory,
+//  then map it in. Note: We allocate size bytes starting
+//  from a size-aligned address. This may introduce holes,
+//  which we aren't worrying about.
+void* mmio_alloc(physaddr_t pa, size_t size) {
+
+       printd("MMIO_ALLOC: Asking for %x bytes for pa %x\n", size, pa);
+
+       extern int booting;
+
+       // Ensure the PA is on a page boundary
+       if (ROUNDUP(pa, PGSIZE) != pa) {
+               warn("MMIO_ALLOC: PA is not page aligned!\n");
+               return NULL;
+       }
+
+       if (booting == 0) {
+               warn("MMIO_ALLOC: Can only request MMIO space while booting.\n");
+               return NULL;
+       }
+
+       printd("MMIO_ALLOC: mmio_base was: %x\n", mmio_base);
+
+       void* curr_addr = mmio_base;
+       void* old_mmio_base = mmio_base;
+
+       printd("MMIO_ALLOC: Starting allocation at %x\n", curr_addr);
+
+       // Check if we are out of (32bit) virtual memory.
+       if ((mmio_base + size) < mmio_base) {
+               // Crap...
+               warn("MMIO_ALLOC: No more MMIO space\n");
+               return NULL;
+       }
+
+       for ( ; curr_addr < (mmio_base + size); pa = pa + PGSIZE, curr_addr = curr_addr + PGSIZE) {
+
+               printd("MMIO_ALLOC: Mapping PA %x @ VA %x\n", pa, curr_addr);
+
+               pte_t* pte = pgdir_walk(boot_pgdir, curr_addr, 1);
+               
+               // Check for a mapping error
+               if (!pte || (*pte != 0)) {
+                       // We couldn't map the page. Adjust the mmio_base to exclude the 
+                       //  erroneously inserted pages, and fail.
+                       warn("MMIO_ALLOC: Bad pgdir walk. Some memory may be lost.\n");
+                       mmio_base = curr_addr;
+                       return NULL;
+               }               
+
+               *pte = PTE(pa >> PGSHIFT, PTE_P | PTE_KERN_RW);
+       }       
+
+       mmio_base = curr_addr;
+
+       printd("MMIO_ALLOC: New mmio_base: %x\n", mmio_base);
+       printd("MMIO_ALLOC: Returning VA %x\n", old_mmio_base);
+
+       return old_mmio_base;
+}
+
 /* 
 
     // testing code for boot_pgdir_walk 
index bf7d22a..45910eb 100644 (file)
@@ -125,3 +125,9 @@ void
 page_check(void)
 {
 }
+
+void* mmio_alloc(physaddr_t pa, size_t size) {
+
+       return NULL;
+
+}
index 3fa652b..379624f 100644 (file)
@@ -78,6 +78,7 @@ void tlb_flush_global(void);
 /* Arch specific implementations for these */
 pte_t *pgdir_walk(pde_t *COUNT(NPDENTRIES) pgdir, const void *SNT va, int create);
 int get_va_perms(pde_t *COUNT(NPDENTRIES) pgdir, const void *SNT va);
+void* mmio_alloc(physaddr_t pa, size_t size);
 
 static inline page_t *SAFE ppn2page(size_t ppn)
 {