Fixed upage_alloc bug with page-coloring disabled
[akaros.git] / kern / src / page_alloc.c
/* Copyright (c) 2009 The Regents of the University of California.
 * See the COPYRIGHT files at the top of this source tree for full
 * license information.
 *
 * Kevin Klues <klueska@cs.berkeley.edu>
 */

#ifdef __SHARC__
#pragma nosharc
#endif

#include <sys/queue.h>
#include <arch/bitmask.h>
#include <page_alloc.h>
#include <pmap.h>
#include <string.h>
#include <kmalloc.h>

#define l1 (available_caches.l1)
#define l2 (available_caches.l2)
#define l3 (available_caches.l3)

static void __page_decref(page_t *CT(1) page);
static void __page_incref(page_t *CT(1) page);
static error_t __page_alloc_specific(page_t** page, size_t ppn);
static error_t __page_free(page_t *CT(1) page);

// Global bitmask of the cache colors allocated to the general purpose memory allocator
uint8_t* global_cache_colors_map;
size_t global_next_color = 0;

void colored_page_alloc_init()
{
        global_cache_colors_map =
               kmalloc(BYTES_FOR_BITMASK(llc_cache->num_colors), 0);
        CLR_BITMASK(global_cache_colors_map, llc_cache->num_colors);
        /* Reserve 1/8 of the LLC's colors for general purpose allocations */
        for(int i = 0; i < llc_cache->num_colors/8; i++)
                cache_color_alloc(llc_cache, global_cache_colors_map);
}

/**
 * @brief Clear a Page structure.
 *
 * The result has null links and 0 refcount.
 * Note that the corresponding physical page is NOT initialized!
 */
static void __page_clear(page_t *SAFE page)
{
        memset(page, 0, sizeof(page_t));
}

#define __PAGE_ALLOC_FROM_RANGE_GENERIC(page, base_color, range, predicate) \
        /* Find the first color in the given range with pages available */  \
        int i = base_color;                                                  \
        for (; i < (base_color+range); i++) {                                \
                if ((predicate))                                             \
                        break;                                               \
        }                                                                    \
        /* Allocate a page from that color */                                \
        if (i < (base_color+range)) {                                        \
                *page = LIST_FIRST(&colored_page_free_list[i]);              \
                LIST_REMOVE(*page, page_link);                               \
                __page_clear(*page);                                         \
                return i;                                                    \
        }                                                                    \
        return -ENOMEM;

static ssize_t __page_alloc_from_color_range(page_t** page,
                                             uint16_t base_color,
                                             uint16_t range)
{
        __PAGE_ALLOC_FROM_RANGE_GENERIC(page, base_color, range,
                         !LIST_EMPTY(&colored_page_free_list[i]));
}

static ssize_t __page_alloc_from_color_map_range(page_t** page, uint8_t* map,
                                                 size_t base_color, size_t range)
{
        __PAGE_ALLOC_FROM_RANGE_GENERIC(page, base_color, range,
            GET_BITMASK_BIT(map, i) && !LIST_EMPTY(&colored_page_free_list[i]));
}

static ssize_t __colored_page_alloc(uint8_t* map, page_t** page,
                                    size_t next_color)
{
        ssize_t ret;
        if ((ret = __page_alloc_from_color_map_range(page, map,
                        next_color, llc_cache->num_colors - next_color)) < 0)
                ret = __page_alloc_from_color_map_range(page, map, 0, next_color);
        return ret;
}

/* Internal version of page_alloc_specific.  Grab the lock first. */
static error_t __page_alloc_specific(page_t** page, size_t ppn)
{
        page_t* sp_page = ppn2page(ppn);
        if (sp_page->page_ref != 0)
                return -ENOMEM;        /* the requested page is already in use */
        *page = sp_page;
        LIST_REMOVE(*page, page_link);

        __page_clear(*page);
        return 0;
}

/**
 * @brief Allocates a physical page from a pool of unused physical memory.
 *
 * Zeroes the page if 'zero' is set.
 *
 * @param[out] page  set to point to the Page struct
 *                   of the newly allocated page
 *
 * @return ESUCCESS on success
 * @return -ENOMEM  otherwise
 */
#ifdef __CONFIG_PAGE_COLORING__
error_t upage_alloc(struct proc* p, page_t** page, int zero)
{
        spin_lock_irqsave(&colored_page_free_list_lock);
        ssize_t ret = __colored_page_alloc(p->cache_colors_map,
                                           page, p->next_cache_color);
        spin_unlock_irqsave(&colored_page_free_list_lock);

        if (ret >= 0) {
                if (zero)
                        memset(page2kva(*page), 0, PGSIZE);
                p->next_cache_color = (ret + 1) & (llc_cache->num_colors-1);
                return 0;
        }
        return ret;
}
#else
error_t upage_alloc(struct proc* p, page_t** page, int zero)
{
        ssize_t ret;
        spin_lock_irqsave(&colored_page_free_list_lock);
        if ((ret = __page_alloc_from_color_range(page, global_next_color,
                        llc_cache->num_colors - global_next_color)) < 0)
                ret = __page_alloc_from_color_range(page, 0, global_next_color);

        if (ret >= 0) {
                if (zero)
                        memset(page2kva(*page), 0, PGSIZE);
                /* hold the lock until global_next_color has been updated */
                global_next_color = ret;
                ret = ESUCCESS;
        }
        spin_unlock_irqsave(&colored_page_free_list_lock);

        return ret;
}
#endif

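/* Usage sketch (not part of the original file): a caller asking for a zeroed
 * user page.  Note that upage_alloc() hands the page back with a refcount of
 * 0; whatever maps it into the process (e.g. page_insert() in pmap.c) is
 * expected to take the reference.  example_upage_use() is hypothetical. */
#if 0
static error_t example_upage_use(struct proc *p)
{
        page_t *page;
        error_t ret = upage_alloc(p, &page, 1);   /* 1 => memset the page to 0 */
        if (ret < 0)
                return ret;                       /* typically -ENOMEM */
        /* map 'page' into p's address space here; the mapping takes the ref */
        return ESUCCESS;
}
#endif
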
error_t kpage_alloc(page_t** page)
{
        ssize_t ret;
        spin_lock_irqsave(&colored_page_free_list_lock);
        if ((ret = __page_alloc_from_color_range(page, global_next_color,
                        llc_cache->num_colors - global_next_color)) < 0)
                ret = __page_alloc_from_color_range(page, 0, global_next_color);

        if (ret >= 0) {
                global_next_color = ret;
                page_incref(*page);     /* caller receives the page with one reference held */
                ret = ESUCCESS;
        }
        spin_unlock_irqsave(&colored_page_free_list_lock);

        return ret;
}

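/* Usage sketch (not part of the original file): kpage_alloc() hands the page
 * back with one reference already held (it calls page_incref()), so the
 * matching release is page_decref().  example_kpage_use() is hypothetical. */
#if 0
static void example_kpage_use(void)
{
        page_t *page;
        if (kpage_alloc(&page) != ESUCCESS)
                return;
        memset(page2kva(page), 0, PGSIZE);    /* kpage_alloc() does not zero */
        /* ... use the page via its kernel virtual address ... */
        page_decref(page);                    /* last ref: back on the free list */
}
#endif
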
/**
 * @brief Allocates 2^order contiguous physical pages.  Will increment the
 * reference count for the pages.
 *
 * @param[in] order order of the allocation
 * @param[in] flags memory allocation flags
 *
 * @return The KVA of the first page, NULL otherwise.
 */
void *get_cont_pages(size_t order, int flags)
{
        size_t npages = 1 << order;

        // Find 'npages' free consecutive pages, scanning downward from the top
        int first = -1;
        spin_lock_irqsave(&colored_page_free_list_lock);
        for(int i=(naddrpages-1); i>=(npages-1); i--) {
                int j;
                for(j=i; j>=(i-(npages-1)); j--) {
                        if( !page_is_free(j) ) {
                                i = j - 1;
                                break;
                        }
                }
                // If the inner loop ran to completion, pages [j+1, j+npages] are all free
                if( j == (i-(npages-1)-1)) {
                        first = j+1;
                        break;
                }
        }
        //If we couldn't find them, return NULL
        if( first == -1 ) {
                spin_unlock_irqsave(&colored_page_free_list_lock);
                return NULL;
        }

        for(int i=0; i<npages; i++) {
                page_t* page;
                __page_alloc_specific(&page, first+i);
                page_incref(page);
        }
        spin_unlock_irqsave(&colored_page_free_list_lock);
        return ppn2kva(first);
}

void free_cont_pages(void *buf, size_t order)
{
        size_t npages = 1 << order;
        spin_lock_irqsave(&colored_page_free_list_lock);
        for (int i = kva2ppn(buf); i < kva2ppn(buf) + npages; i++) {
                __page_decref(ppn2page(i));
                assert(page_is_free(i));
        }
        spin_unlock_irqsave(&colored_page_free_list_lock);
        return;
}

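/* Usage sketch (not part of the original file): get_cont_pages() and
 * free_cont_pages() both take an order, so a physically contiguous buffer of
 * 2^order pages must be freed with the same order it was allocated with.
 * The 'flags' argument is currently unused above.  example_cont_pages() is
 * hypothetical. */
#if 0
static void example_cont_pages(void)
{
        void *buf = get_cont_pages(2, 0);     /* 2^2 = 4 contiguous pages, as a KVA */
        if (!buf)
                return;
        /* ... use buf as a 4 * PGSIZE contiguous region (e.g. a DMA buffer) ... */
        free_cont_pages(buf, 2);              /* same order as the allocation */
}
#endif
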
/*
 * Allocates a specific physical page.
 * Does NOT set the contents of the physical page to zero -
 * the caller must do that if necessary.
 *
 * ppn         -- the page number to allocate
 * *page       -- is set to point to the Page struct
 *                of the newly allocated page
 *
 * RETURNS
 *   ESUCCESS  -- on success
 *   -ENOMEM   -- otherwise
 */
error_t upage_alloc_specific(struct proc* p, page_t** page, size_t ppn)
{
        spin_lock_irqsave(&colored_page_free_list_lock);
        error_t ret = __page_alloc_specific(page, ppn);
        spin_unlock_irqsave(&colored_page_free_list_lock);
        return ret;
}

error_t kpage_alloc_specific(page_t** page, size_t ppn)
{
        spin_lock_irqsave(&colored_page_free_list_lock);
        error_t ret = __page_alloc_specific(page, ppn);
        if (ret == ESUCCESS)
                page_incref(*page);
        spin_unlock_irqsave(&colored_page_free_list_lock);
        return ret;
}

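/* Usage sketch (not part of the original file): grabbing a specific physical
 * frame by page number.  The ppn value below is purely illustrative (the PPN
 * of physical address 0xb8000), and example_alloc_specific() is hypothetical. */
#if 0
static void example_alloc_specific(void)
{
        page_t *page;
        size_t ppn = 0xb8;                    /* hypothetical: PPN of 0xb8000 */
        if (kpage_alloc_specific(&page, ppn) != ESUCCESS)
                return;
        /* kpage_alloc_specific() took a reference for us; drop it when done */
        page_decref(page);
}
#endif
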
/*
 * Return a page to the free list.
 * (This function should only be called when pp->page_ref reaches 0.)
 * You must hold the page_free list lock before calling this.
 */
static error_t __page_free(page_t* page)
{
        __page_clear(page);

        LIST_INSERT_HEAD(
           &(colored_page_free_list[get_page_color(page2ppn(page), llc_cache)]),
           page,
           page_link
        );

        return ESUCCESS;
}

error_t page_free(page_t *SAFE page)
{
        error_t retval;
        spin_lock_irqsave(&colored_page_free_list_lock);
        retval = __page_free(page);
        spin_unlock_irqsave(&colored_page_free_list_lock);
        return retval;
}

/*
 * Check if a page with the given physical page # is free
 */
int page_is_free(size_t ppn) {
        page_t* page = ppn2page(ppn);
        if (page->page_ref == 0)
                return TRUE;
        return FALSE;
}

/*
 * Increment the reference count on a page
 */
void page_incref(page_t *page)
{
        __page_incref(page);
}

static void __page_incref(page_t *page)
{
        page->page_ref++;
}

/*
 * Decrement the reference count on a page,
 * freeing it if there are no more refs.
 */
void page_decref(page_t *page)
{
        spin_lock_irqsave(&colored_page_free_list_lock);
        __page_decref(page);
        spin_unlock_irqsave(&colored_page_free_list_lock);
}

/*
 * Decrement the reference count on a page,
 * freeing it if there are no more refs.
 */
static void __page_decref(page_t *page)
{
        if (page->page_ref == 0) {
                panic("Trying to free an already freed page: %d...\n", page2ppn(page));
                return;
        }
        if (--page->page_ref == 0)
                __page_free(page);
}

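/* Refcount lifecycle sketch (not part of the original file): a page returns
 * to its color's free list when its last reference is dropped, and
 * __page_decref() panics on a page whose count is already 0, so increfs and
 * decrefs must balance exactly.  example_refcount() is hypothetical and
 * assumes 'page' starts with exactly one reference. */
#if 0
static void example_refcount(page_t *page)
{
        page_incref(page);            /* e.g. a second mapping takes a reference */
        assert(page_getref(page) == 2);
        page_decref(page);            /* drop it again; the page stays allocated */
        page_decref(page);            /* last reference: __page_free() runs */
}
#endif
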
/*
 * Set the reference count on a page to a specific value
 */
void page_setref(page_t *page, size_t val)
{
        page->page_ref = val;
}

/*
 * Get the reference count on a page
 */
size_t page_getref(page_t *page)
{
        return page->page_ref;
}