Fixes double-free when destroying a process
[akaros.git] / kern / src / page_alloc.c
/* Copyright (c) 2009 The Regents of the University of California.
 * See the COPYRIGHT files at the top of this source tree for full
 * license information.
 *
 * Kevin Klues <klueska@cs.berkeley.edu>
 */

#ifdef __SHARC__
#pragma nosharc
#endif

#include <sys/queue.h>
#include <arch/bitmask.h>
#include <page_alloc.h>
#include <pmap.h>
#include <string.h>
#include <kmalloc.h>

#define l1 (available_caches.l1)
#define l2 (available_caches.l2)
#define l3 (available_caches.l3)

static void __page_decref(page_t *CT(1) page);
static void __page_incref(page_t *CT(1) page);
static error_t __page_alloc_specific(page_t** page, size_t ppn);
static error_t __page_free(page_t *CT(1) page);

// Global list of colors allocated to the general purpose memory allocator
uint8_t* global_cache_colors_map;

void colored_page_alloc_init()
{
        global_cache_colors_map =
                kmalloc(BYTES_FOR_BITMASK(llc_cache->num_colors), 0);
        CLR_BITMASK(global_cache_colors_map, llc_cache->num_colors);
        /* Claim half of the LLC's colors for the general-purpose allocator. */
        for (int i = 0; i < llc_cache->num_colors / 2; i++)
                cache_color_alloc(llc_cache, global_cache_colors_map);
}
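
/* Illustration (a sketch, not part of the original file): a subsystem that
 * wants its own cache color could carve one out of the same LLC the loop
 * above draws from.  'my_map' below is hypothetical:
 *
 *      uint8_t *my_map = kmalloc(BYTES_FOR_BITMASK(llc_cache->num_colors), 0);
 *      CLR_BITMASK(my_map, llc_cache->num_colors);
 *      cache_color_alloc(llc_cache, my_map);   // claim one free color
 */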

/**
 * @brief Clear a Page structure.
 *
 * The result has null links and 0 refcount.
 * Note that the corresponding physical page is NOT initialized!
 */
static void __page_clear(page_t *SAFE page)
{
        memset(page, 0, sizeof(page_t));
}

/* Find the first color in [base_color, base_color + range) that satisfies
 * 'predicate', pull the first page off that color's free list, and return
 * the color.  On failure, returns -ENOMEM from the *enclosing* function. */
#define __PAGE_ALLOC_FROM_RANGE_GENERIC(page, base_color, range, predicate) \
        int i = base_color;                                                 \
        for (; i < (base_color + range); i++) {                             \
                if ((predicate))                                            \
                        break;                                              \
        }                                                                   \
        /* Allocate a page from that color */                               \
        if (i < (base_color + range)) {                                     \
                *page = LIST_FIRST(&colored_page_free_list[i]);             \
                LIST_REMOVE(*page, page_link);                              \
                __page_clear(*page);                                        \
                return i;                                                   \
        }                                                                   \
        return -ENOMEM;
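
/* As a rough sketch of what the macro above does, its expansion inside
 * __page_alloc_from_color_range() below behaves like:
 *
 *      int i = base_color;
 *      for (; i < base_color + range; i++)
 *              if (!LIST_EMPTY(&colored_page_free_list[i]))
 *                      break;
 *      if (i < base_color + range) {
 *              *page = LIST_FIRST(&colored_page_free_list[i]);
 *              LIST_REMOVE(*page, page_link);
 *              __page_clear(*page);
 *              return i;               // the color we allocated from
 *      }
 *      return -ENOMEM;
 */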
67
68 static ssize_t __page_alloc_from_color_range(page_t** page,  
69                                            uint16_t base_color,
70                                            uint16_t range) 
71 {
72         __PAGE_ALLOC_FROM_RANGE_GENERIC(page, base_color, range, 
73                          !LIST_EMPTY(&colored_page_free_list[i]));
74 }
75
76 static ssize_t __page_alloc_from_color_map_range(page_t** page, uint8_t* map, 
77                                               size_t base_color, size_t range)
78 {  
79         __PAGE_ALLOC_FROM_RANGE_GENERIC(page, base_color, range, 
80                     GET_BITMASK_BIT(map, i) && !LIST_EMPTY(&colored_page_free_list[i]))
81 }
82
83 static ssize_t __colored_page_alloc(uint8_t* map, page_t** page, 
84                                                size_t next_color)
85 {
86         ssize_t ret;
87         if((ret = __page_alloc_from_color_map_range(page, map, 
88                                    next_color, llc_cache->num_colors - next_color)) < 0)
89                 ret = __page_alloc_from_color_map_range(page, map, 0, next_color);
90         return ret;
91 }
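
/* Worked example (illustrative numbers): with llc_cache->num_colors == 8 and
 * next_color == 5, the first call searches colors 5..7; only if all of those
 * are empty (or masked off in 'map') does the second call wrap around and
 * search colors 0..4. */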

/* Internal version of page_alloc_specific.  Grab the lock first.  Assumes
 * the page, if free, is sitting on a free list (refcount 0 implies that). */
static error_t __page_alloc_specific(page_t** page, size_t ppn)
{
        page_t* sp_page = ppn2page(ppn);
        if (sp_page->page_ref != 0)
                return -ENOMEM;
        *page = sp_page;
        LIST_REMOVE(*page, page_link);

        __page_clear(*page);
        return 0;
}

/**
 * @brief Allocates a physical page from a pool of unused physical memory,
 * drawing from the cache colors available to the given process.
 *
 * Zeroes the page if 'zero' is set.
 *
 * @param[out] page  set to point to the Page struct
 *                   of the newly allocated page
 *
 * @return ESUCCESS on success
 * @return -ENOMEM  otherwise
 */
error_t upage_alloc(struct proc* p, page_t** page, int zero)
{
        spin_lock_irqsave(&colored_page_free_list_lock);
        ssize_t ret = __colored_page_alloc(p->cache_colors_map,
                                           page, p->next_cache_color);
        spin_unlock_irqsave(&colored_page_free_list_lock);

        if (ret >= 0) {
                if (zero)
                        memset(page2kva(*page), 0, PGSIZE);
                /* Round-robin the starting color; this assumes num_colors
                 * is a power of two. */
                p->next_cache_color = (ret + 1) & (llc_cache->num_colors - 1);
                return 0;
        }
        return ret;
}
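
/* Usage sketch (hypothetical caller, assuming a valid struct proc *p):
 *
 *      page_t *page;
 *      if (!upage_alloc(p, &page, TRUE)) {
 *              // page is zeroed and came from a color set in
 *              // p->cache_colors_map; now map it into p's address space
 *      }
 */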

error_t kpage_alloc(page_t** page)
{
        static size_t next_color = 0;
        ssize_t ret;
        spin_lock_irqsave(&colored_page_free_list_lock);
        /* Try from next_color to the top, then wrap to the bottom. */
        if ((ret = __page_alloc_from_color_range(page, next_color,
                        llc_cache->num_colors - next_color)) < 0)
                ret = __page_alloc_from_color_range(page, 0, next_color);

        if (ret >= 0) {
                next_color = ret;
                page_incref(*page);
                ret = ESUCCESS;
        }
        spin_unlock_irqsave(&colored_page_free_list_lock);

        return ret;
}
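
/* Usage sketch: kernel code that needs a single backing page.  Note that,
 * unlike upage_alloc() with 'zero' set, kpage_alloc() does not zero the page:
 *
 *      page_t *page;
 *      if (kpage_alloc(&page) == ESUCCESS) {
 *              memset(page2kva(page), 0, PGSIZE);      // zero it ourselves
 *              // ... use the page ...
 *              page_decref(page);      // drop the ref kpage_alloc took
 *      }
 */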

/**
 * @brief Allocates 2^order contiguous physical pages.  Will increment the
 * reference count for the pages.
 *
 * @param[in] order order of the allocation
 * @param[in] flags memory allocation flags
 *
 * @return The KVA of the first page, NULL otherwise.
 */
void *get_cont_pages(size_t order, int flags)
{
        size_t npages = 1 << order;

        /* Find 'npages' free consecutive pages: 'i' is the top ppn of the
         * candidate window, which we scan downward. */
        int first = -1;
        spin_lock_irqsave(&colored_page_free_list_lock);
        for (int i = naddrpages - 1; i >= (int)npages - 1; i--) {
                int bottom = i - ((int)npages - 1);
                int j;
                for (j = i; j >= bottom; j--) {
                        if (!page_is_free(j)) {
                                /* Page j is busy, so every window containing
                                 * it fails; the outer i-- restarts the scan
                                 * with the window top just below j. */
                                i = j;
                                break;
                        }
                }
                if (j == bottom - 1) {
                        /* The inner loop ran off the bottom of the window:
                         * all 'npages' pages are free. */
                        first = bottom;
                        break;
                }
        }
        /* If we couldn't find them, return NULL */
        if (first == -1) {
                spin_unlock_irqsave(&colored_page_free_list_lock);
                return NULL;
        }

        for (int i = 0; i < npages; i++) {
                page_t* page;
                __page_alloc_specific(&page, first + i);
                page_incref(page);
        }
        spin_unlock_irqsave(&colored_page_free_list_lock);
        return ppn2kva(first);
}

void free_cont_pages(void *buf, size_t order)
{
        size_t npages = 1 << order;
        spin_lock_irqsave(&colored_page_free_list_lock);
        for (int i = kva2ppn(buf); i < kva2ppn(buf) + npages; i++) {
                __page_decref(ppn2page(i));
                assert(page_is_free(i));
        }
        spin_unlock_irqsave(&colored_page_free_list_lock);
        return;
}
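
/* Usage sketch: a physically contiguous four-page (order 2) buffer.  Note
 * that the 'flags' argument is currently unused by get_cont_pages():
 *
 *      void *buf = get_cont_pages(2, 0);
 *      if (buf) {
 *              // ... use the 4 * PGSIZE bytes at buf ...
 *              free_cont_pages(buf, 2);
 *      }
 */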

/*
 * Allocates a specific physical page.
 * Does NOT set the contents of the physical page to zero -
 * the caller must do that if necessary.
 *
 * ppn         -- the page number to allocate
 * *page       -- is set to point to the Page struct
 *                of the newly allocated page
 *
 * RETURNS
 *   ESUCCESS  -- on success
 *   -ENOMEM   -- otherwise
 */
error_t upage_alloc_specific(struct proc* p, page_t** page, size_t ppn)
{
        error_t ret;
        spin_lock_irqsave(&colored_page_free_list_lock);
        /* Propagate failure (the page was already taken) to the caller. */
        ret = __page_alloc_specific(page, ppn);
        spin_unlock_irqsave(&colored_page_free_list_lock);
        return ret;
}

error_t kpage_alloc_specific(page_t** page, size_t ppn)
{
        error_t ret;
        spin_lock_irqsave(&colored_page_free_list_lock);
        ret = __page_alloc_specific(page, ppn);
        /* Only take a reference if the allocation actually succeeded. */
        if (!ret)
                page_incref(*page);
        spin_unlock_irqsave(&colored_page_free_list_lock);
        return ret;
}
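
/* Usage sketch (hypothetical): pinning a specific physical frame, e.g. one
 * a device expects at a fixed address.  'ppn' is whatever frame the caller
 * needs:
 *
 *      page_t *page;
 *      if (!kpage_alloc_specific(&page, ppn)) {
 *              // ... use page2kva(page) ...
 *              page_decref(page);
 *      }
 */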

/*
 * Return a page to the free list.
 * (This function should only be called when pp->page_ref reaches 0.)
 * You must hold the page_free list lock before calling this.
 */
static error_t __page_free(page_t* page)
{
        __page_clear(page);

        LIST_INSERT_HEAD(
           &(colored_page_free_list[get_page_color(page2ppn(page), llc_cache)]),
           page,
           page_link
        );

        return ESUCCESS;
}

error_t page_free(page_t *SAFE page)
{
        error_t retval;
        spin_lock_irqsave(&colored_page_free_list_lock);
        retval = __page_free(page);
        spin_unlock_irqsave(&colored_page_free_list_lock);
        return retval;
}

/*
 * Check if a page with the given physical page # is free
 * (i.e., its refcount is zero).
 */
int page_is_free(size_t ppn) {
        page_t* page = ppn2page(ppn);
        if (page->page_ref == 0)
                return TRUE;
        return FALSE;
}

/*
 * Increment the reference count on a page
 */
void page_incref(page_t *page)
{
        __page_incref(page);
}

void __page_incref(page_t *page)
{
        page->page_ref++;
}

/*
 * Decrement the reference count on a page,
 * freeing it if there are no more refs.
 */
void page_decref(page_t *page)
{
        spin_lock_irqsave(&colored_page_free_list_lock);
        __page_decref(page);
        spin_unlock_irqsave(&colored_page_free_list_lock);
}
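
/* Reference-count lifecycle, in sketch form: each long-lived user of a page
 * takes its own reference and drops it when done; the page goes back on the
 * free list only when the last reference disappears:
 *
 *      page_incref(page);      // e.g. when inserting into a page table
 *      // ...
 *      page_decref(page);      // frees the page iff this was the last ref
 */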

/*
 * Decrement the reference count on a page,
 * freeing it if there are no more refs.
 * You must hold the page_free list lock before calling this.
 */
static void __page_decref(page_t *page)
{
        if (page->page_ref == 0) {
                panic("Trying to free an already freed page: %d...\n",
                      page2ppn(page));
                return;
        }
        if (--page->page_ref == 0)
                __page_free(page);
}

/*
 * Set the reference count on a page to a specific value
 */
void page_setref(page_t *page, size_t val)
{
        page->page_ref = val;
}

/*
 * Get the reference count on a page
 */
size_t page_getref(page_t *page)
{
        return page->page_ref;
}