X-Git-Url: http://akaros.cs.berkeley.edu/gitweb/?p=akaros.git;a=blobdiff_plain;f=kern%2Finclude%2Fmm.h;h=e0133fe577e1c49dae63301fbdedfb683ba33516;hp=7b557f9cc3ad22b85de40fb368da6ae524457db0;hb=6a99017900d4e3daae31dafe166eb5358692262f;hpb=17337ff9859d55df73244f4a46bdaa65b6f7fc30

diff --git a/kern/include/mm.h b/kern/include/mm.h
index 7b557f9..e0133fe 100644
--- a/kern/include/mm.h
+++ b/kern/include/mm.h
@@ -1,83 +1,86 @@
-/*
- * Copyright (c) 2009 The Regents of the University of California
+/* Copyright (c) 2009, 2010 The Regents of the University of California
  * Barret Rhoden
  * See LICENSE for details.
  *
  * Memory management for processes: syscall related functions, virtual memory
- * regions, etc.
- */
+ * regions, etc. */
 #ifndef ROS_KERN_MM_H
 #define ROS_KERN_MM_H
 #include
-#include
+#include
 #include
 #include
+#include
-/* Memory region for a process, consisting of linear(virtual) addresses. This
- * is what the kernel allocates a process, and the physical mapping can be done
- * lazily (or not). This way, if a page is swapped out, and the PTE says it
- * isn't present, we still have a way to account for how the whole region ought
- * to be dealt with.
- * Some things are per-region:
- * - probably something with shared memory
- * - mmaping files: we can have a logical connection to something other than
- *   anonymous memory
- * - on a fault, was this memory supposed to be there? (swap, lazy, etc), or is
- *   the region free?
- * Others are per-page:
- * - was this page supposed to be protected somehow(guard)? could be per-region
- * - where is this page in the swap?
- * If we try to store this info in the PTE, we only have 31 bits, and it's more
- * arch dependent. Handling jumbos is a pain. And it's replicated across all
- * pages for a coarse granularity things. And we can't add things easily.
- *
- * so a process has a (sorted) list of these for it's VA space, hanging off it's
- * struct proc. or off it's mm?
- * - we don't share an mm between processes anymore (tasks/threads)
- *   - though we share most everything with vpm.
- *   - want to be able to do all the same things with vpm as with regular mem
- *     (file back mmap, etc)
- *   - contexts or whatever share lots of mem things, like accounting, limits,
- *     overall process stuff, the rest of the page tables.
- *     - so there should be some overall mm, and probably directly in the
- *       struct proc (or just one other struct directly embedded, not a pointer
- *       to one where a bunch of processes use it)
- *     - if we embed, mm.h doesn't need to know about process.h
- * so an mm can have a bunch of "address spaces" - or at least different
- * contexts
- *
- * how does this change or where does this belong with virtual private memory?
- * will also affect get_free_va_range
- * - also, do we want a separate brk per? or just support mmap on private mem?
- */
+struct file;
+struct proc;	/* preprocessor games */
+
+/* Basic structure defining a region of a process's virtual memory. Note we
+ * don't refcnt these. Either they are in the TAILQ/tree, or they should be
+ * freed. There should be no other references floating around. We still need
+ * to sort out how we share memory and how we'll do private memory with these
+ * VMRs. */
 struct vm_region {
-	TAILQ_ENTRY(vm_region) link;	// actually, i'd like a sorted tree of these
-	uintptr_t base;
-	size_t len;
-	int perm;
+	TAILQ_ENTRY(vm_region) vm_link;
+	TAILQ_ENTRY(vm_region) vm_pm_link;
+	struct proc *vm_proc;	/* owning process, for now */
+	uintptr_t vm_base;
+	uintptr_t vm_end;
+	int vm_prot;
+	int vm_flags;
+	struct file *vm_file;
+	size_t vm_foff;
 };
-TAILQ_HEAD(vm_region_list, vm_region); // Declares 'struct memregion_list'
+TAILQ_HEAD(vmr_tailq, vm_region);	/* Declares 'struct vmr_tailq' */
-struct mm {
-	spinlock_t mm_lock;
-	// per-process memory management stuff
-	// cr3(s), accounting, possibly handler methods for certain types of faults
-	// lists of vm_regions for all contexts
-	// base cr3 for all contexts
-	// previous brk, last checked vm_region
+/* VM Region Management Functions. For now, these just maintain themselves -
+ * anything related to mapping needs to be done by the caller. */
+void vmr_init(void);
+struct vm_region *create_vmr(struct proc *p, uintptr_t va, size_t len);
+struct vm_region *split_vmr(struct vm_region *vmr, uintptr_t va);
+int merge_vmr(struct vm_region *first, struct vm_region *second);
+struct vm_region *merge_me(struct vm_region *vmr);
+int grow_vmr(struct vm_region *vmr, uintptr_t va);
+int shrink_vmr(struct vm_region *vmr, uintptr_t va);
+void destroy_vmr(struct vm_region *vmr);
+struct vm_region *find_vmr(struct proc *p, uintptr_t va);
+struct vm_region *find_first_vmr(struct proc *p, uintptr_t va);
+void isolate_vmrs(struct proc *p, uintptr_t va, size_t len);
+void unmap_and_destroy_vmrs(struct proc *p);
+int duplicate_vmrs(struct proc *p, struct proc *new_p);
+void print_vmrs(struct proc *p);
-};
-// would rather this be a mm struct
+/* mmap() related functions. These manipulate VMRs and change the hardware page
+ * tables. Any requests below the LOWEST_VA will silently be upped. This may
+ * be a dynamic proc-specific variable later. */
+#define MMAP_LOWEST_VA PGSIZE
 void *mmap(struct proc *p, uintptr_t addr, size_t len, int prot, int flags,
            int fd, size_t offset);
-struct file;
 void *do_mmap(struct proc *p, uintptr_t addr, size_t len, int prot, int flags,
-              struct file* f, size_t offset);
+              struct file *f, size_t offset);
-int mprotect(struct proc* p, void* addr, size_t len, int prot);
-int munmap(struct proc* p, void* addr, size_t len);
+int mprotect(struct proc *p, uintptr_t addr, size_t len, int prot);
+int munmap(struct proc *p, uintptr_t addr, size_t len);
+int handle_page_fault(struct proc *p, uintptr_t va, int prot);
+
+/* These assume the mm_lock is held already */
+void *__do_mmap(struct proc *p, uintptr_t addr, size_t len, int prot, int flags,
+                struct file *f, size_t offset);
+int __do_mprotect(struct proc *p, uintptr_t addr, size_t len, int prot);
+int __do_munmap(struct proc *p, uintptr_t addr, size_t len);
+int __handle_page_fault(struct proc* p, uintptr_t va, int prot);
-int handle_page_fault(struct proc* p, uintptr_t va, int prot);
+/* Kernel Dynamic Memory Mappings */
+/* These two are just about reserving VA space */
+uintptr_t get_vmap_segment(unsigned long num_pages);
+uintptr_t put_vmap_segment(uintptr_t vaddr, unsigned long num_pages);
+/* These two are about actually mapping stuff in some reserved space */
+int map_vmap_segment(uintptr_t vaddr, uintptr_t paddr, unsigned long num_pages,
+                     int perm);
+int unmap_vmap_segment(uintptr_t vaddr, unsigned long num_pages);
+/* Helper wrappers, since no one will probably call the *_segment funcs */
+uintptr_t vmap_pmem(uintptr_t paddr, size_t nr_bytes);
+int vunmap_vmem(uintptr_t vaddr, size_t nr_bytes);
-#endif // !ROS_KERN_MM_H
+#endif /* !ROS_KERN_MM_H */
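
For orientation, below is a minimal usage sketch written against the declarations added by this diff; it is not part of the commit. It assumes the usual prot/flag constants (PROT_READ, PROT_WRITE, MAP_ANONYMOUS, MAP_PRIVATE, MAP_FAILED) are available from the mman headers, that do_mmap() reports failure with MAP_FAILED and vmap_pmem() with a zero VA, and the helper names (give_proc_anon_mem, poke_device_regs) and the MMIO address are invented for illustration.

/* Hypothetical sketch only -- not part of kern/include/mm.h or this commit. */
#include <mm.h>        /* assumed include path for this header */
#include <ros/mman.h>  /* assumed home of the PROT_ and MAP_ constants */

/* Give a process an anonymous, private, read-write region of 'len' bytes.
 * Passing addr == 0 (below MMAP_LOWEST_VA) lets the kernel pick the VA,
 * since low requests are silently raised. */
static uintptr_t give_proc_anon_mem(struct proc *p, size_t len)
{
	void *addr = do_mmap(p, 0, len, PROT_READ | PROT_WRITE,
	                     MAP_ANONYMOUS | MAP_PRIVATE, NULL, 0);

	if (addr == MAP_FAILED)		/* assuming MAP_FAILED signals failure */
		return 0;
	return (uintptr_t)addr;
}

/* Temporarily map a physical MMIO window into the kernel's dynamic vmap
 * area via the wrapper helpers, then tear the mapping down. */
static void poke_device_regs(uintptr_t mmio_paddr, size_t mmio_len)
{
	uintptr_t kva = vmap_pmem(mmio_paddr, mmio_len);

	if (!kva)			/* assuming 0 means the reservation failed */
		return;
	/* ... access device registers through kva here ... */
	vunmap_vmem(kva, mmio_len);
}

Per the comment in the header, the __-prefixed variants (__do_mmap, __do_mprotect, __do_munmap, __handle_page_fault) assume the mm_lock is already held, so a caller like the sketch above, which does not hold it, sticks to the non-underscored entry points.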