/* Copyright (c) 2009 The Regents of the University of California.
 * See the COPYRIGHT files at the top of this source tree for full
 * license information.
 *
 * Kevin Klues <klueska@cs.berkeley.edu>
 */

#ifdef __SHARC__
#pragma nosharc
#endif

#include <sys/queue.h>
#include <arch/bitmask.h>
#include <page_alloc.h>
#include <pmap.h>
#include <string.h>
#include <kmalloc.h>

#define l1 (available_caches.l1)
#define l2 (available_caches.l2)
#define l3 (available_caches.l3)

static void __page_decref(page_t *page);
static void __page_incref(page_t *page);
static error_t __page_alloc_specific(page_t** page, size_t ppn);
static error_t __page_free(page_t* page);

// Global bitmask of cache colors reserved for the general purpose memory
// allocator
uint8_t* global_cache_colors_map;

void colored_page_alloc_init()
{
	global_cache_colors_map =
	       kmalloc(BYTES_FOR_BITMASK(llc_cache->num_colors), 0);
	CLR_BITMASK(global_cache_colors_map, llc_cache->num_colors);
	/* Reserve five last-level-cache colors for the kernel's general purpose
	 * allocator; each call claims one more free color and marks it in
	 * global_cache_colors_map. */
	cache_color_alloc(llc_cache, global_cache_colors_map);
	cache_color_alloc(llc_cache, global_cache_colors_map);
	cache_color_alloc(llc_cache, global_cache_colors_map);
	cache_color_alloc(llc_cache, global_cache_colors_map);
	cache_color_alloc(llc_cache, global_cache_colors_map);
}
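
/* For intuition, a sketch of what page coloring buys us (illustrative only,
 * not part of this file): with a physically indexed LLC, a page's color is
 * essentially its page number modulo the number of colors, so pages of
 * different colors can never evict each other from the cache.  Assuming a
 * hypothetical num_colors, the idea is:
 *
 *	size_t page_to_color(size_t ppn, size_t num_colors)
 *	{
 *		return ppn % num_colors;  // same-color pages contend in the LLC
 *	}
 *
 * The real mapping used below is get_page_color() from the cache code; the
 * modulo form above is only an approximation for illustration. */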

/**
 * @brief Clear a Page structure.
 *
 * The result has null links and a refcount of 0.
 * Note that the corresponding physical page is NOT initialized!
 */
static void __page_clear(page_t *SAFE page)
{
	memset(page, 0, sizeof(page_t));
}

#define __PAGE_ALLOC_FROM_RANGE_GENERIC(page, base_color, range, predicate) \
	/* Find the first color in the given range with pages available */     \
	/* that also satisfies the predicate */                                 \
	int i = base_color;                                                     \
	for (; i < (base_color+range); i++) {                                   \
		if((predicate))                                                     \
			break;                                                          \
	}                                                                       \
	/* Allocate a page from that color */                                   \
	if(i < (base_color+range)) {                                            \
		*page = LIST_FIRST(&colored_page_free_list[i]);                     \
		LIST_REMOVE(*page, page_link);                                      \
		__page_clear(*page);                                                \
		return i;                                                           \
	}                                                                       \
	return -ENOMEM;

static ssize_t __page_alloc_from_color_range(page_t** page,
                                             uint16_t base_color,
                                             uint16_t range)
{
	__PAGE_ALLOC_FROM_RANGE_GENERIC(page, base_color, range,
	                                !LIST_EMPTY(&colored_page_free_list[i]));
}

static ssize_t __page_alloc_from_color_map_range(page_t** page, uint8_t* map,
                                                 size_t base_color, size_t range)
{
	__PAGE_ALLOC_FROM_RANGE_GENERIC(page, base_color, range,
	                                GET_BITMASK_BIT(map, i) &&
	                                !LIST_EMPTY(&colored_page_free_list[i]));
}

static ssize_t __colored_page_alloc(uint8_t* map, page_t** page,
                                    size_t next_color)
{
	ssize_t ret;
	/* Scan from next_color to the last color, then wrap around to color 0 */
	if((ret = __page_alloc_from_color_map_range(page, map, next_color,
	                           llc_cache->num_colors - next_color)) < 0)
		ret = __page_alloc_from_color_map_range(page, map, 0, next_color);
	return ret;
}

/* Internal version of page_alloc_specific.  Grab the lock first. */
static error_t __page_alloc_specific(page_t** page, size_t ppn)
{
	page_t* sp_page = ppn2page(ppn);
	/* A nonzero refcount means the page is already in use */
	if( sp_page->page_ref != 0 )
		return -ENOMEM;
	*page = sp_page;
	LIST_REMOVE(*page, page_link);

	__page_clear(*page);
	return 0;
}

/**
 * @brief Allocates a physical page from a pool of unused physical memory,
 * preferring the calling process's cache colors.
 *
 * Zeroes the page.
 *
 * @param[out] page  set to point to the Page struct
 *                   of the newly allocated page
 *
 * @return ESUCCESS on success
 * @return -ENOMEM  otherwise
 */
error_t upage_alloc(struct proc* p, page_t** page)
{
	spin_lock_irqsave(&colored_page_free_list_lock);
	ssize_t ret = __colored_page_alloc(p->cache_colors_map,
	                                   page, p->next_cache_color);
	spin_unlock_irqsave(&colored_page_free_list_lock);

	if(ret >= 0) {
		memset(page2kva(*page), 0, PGSIZE);
		/* Round-robin the starting color so allocations spread across the
		 * process's colors */
		p->next_cache_color = (ret + 1) % llc_cache->num_colors;
		return ESUCCESS;
	}
	return ret;
}
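
/* Usage sketch (illustrative only; assumes a valid struct proc *p whose
 * cache_colors_map has at least one color allocated):
 *
 *	page_t *pg;
 *	if (upage_alloc(p, &pg) == ESUCCESS) {
 *		// pg is zeroed and drawn from one of p's cache colors; it
 *		// still needs to be mapped into p's address space, and its
 *		// refcount must be managed by the caller.
 *	}
 */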

error_t kpage_alloc(page_t** page)
{
	static size_t next_color = 0;
	ssize_t ret;
	spin_lock_irqsave(&colored_page_free_list_lock);
	/* Scan from next_color to the last color, then wrap around to color 0 */
	if((ret = __page_alloc_from_color_range(page, next_color,
	                           llc_cache->num_colors - next_color)) < 0)
		ret = __page_alloc_from_color_range(page, 0, next_color);

	if(ret >= 0) {
		next_color = ret;
		page_incref(*page);
		ret = ESUCCESS;
	}
	spin_unlock_irqsave(&colored_page_free_list_lock);

	return ret;
}
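
/* Usage sketch (illustrative only): grab a kernel page, touch it through its
 * kernel virtual address, and drop the reference when done:
 *
 *	page_t *pg;
 *	if (kpage_alloc(&pg) == ESUCCESS) {
 *		void *kva = page2kva(pg);
 *		memset(kva, 0, PGSIZE);	// kpage_alloc does not zero the page
 *		// ... use the page ...
 *		page_decref(pg);	// frees it once the refcount hits 0
 *	}
 */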

/**
 * @brief Allocates 2^order contiguous physical pages.  Increments the
 * reference count for each page.
 *
 * @param[in] order order of the allocation
 * @param[in] flags memory allocation flags
 *
 * @return The KVA of the first page, NULL otherwise.
 */
void *get_cont_pages(size_t order, int flags)
{
	size_t npages = 1 << order;

	// Find 'npages' free consecutive pages, scanning down from the top of
	// memory.  'i' is the top ppn of the candidate range [i-(npages-1), i].
	int first = -1;
	spin_lock_irqsave(&colored_page_free_list_lock);
	for(int i = naddrpages - 1; i >= (int)(npages - 1); i--) {
		int j;
		for(j = i; j >= (i - ((int)npages - 1)); j--) {
			if( !page_is_free(j) ) {
				// No range containing page j can work; the next candidate
				// tops out at j - 1 (the i-- below takes us there).
				i = j;
				break;
			}
		}
		// The inner loop ran to completion iff all npages pages were free
		if( j == (i - ((int)npages - 1) - 1)) {
			first = j + 1;
			break;
		}
	}
	// If we couldn't find them, return NULL
	if( first == -1 ) {
		spin_unlock_irqsave(&colored_page_free_list_lock);
		return NULL;
	}

	// Claim each page in the range and take a reference on it
	for(int i = 0; i < npages; i++) {
		page_t* page;
		__page_alloc_specific(&page, first + i);
		page_incref(page);
	}
	spin_unlock_irqsave(&colored_page_free_list_lock);
	return ppn2kva(first);
}
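
/* Usage sketch (illustrative only): a caller needing a physically contiguous
 * buffer of four pages (order 2):
 *
 *	void *buf = get_cont_pages(2, 0);	// 2^2 = 4 contiguous pages
 *	if (buf != NULL) {
 *		// buf is the KVA of the first page; the pages are refcounted
 *		free_cont_pages(buf, 2);	// free with the same order
 *	}
 */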

void free_cont_pages(void *buf, size_t order)
{
	size_t npages = 1 << order;
	spin_lock_irqsave(&colored_page_free_list_lock);
	for (int i = kva2ppn(buf); i < kva2ppn(buf) + npages; i++) {
		__page_decref(ppn2page(i));
		assert(page_is_free(i));
	}
	spin_unlock_irqsave(&colored_page_free_list_lock);
}

/*
 * Allocates a specific physical page.
 * Does NOT set the contents of the physical page to zero -
 * the caller must do that if necessary.
 *
 * ppn         -- the page number to allocate
 * *page       -- is set to point to the Page struct
 *                of the newly allocated page
 *
 * RETURNS
 *   ESUCCESS  -- on success
 *   -ENOMEM   -- otherwise
 */
error_t upage_alloc_specific(struct proc* p, page_t** page, size_t ppn)
{
	spin_lock_irqsave(&colored_page_free_list_lock);
	error_t ret = __page_alloc_specific(page, ppn);
	spin_unlock_irqsave(&colored_page_free_list_lock);
	return ret;
}

error_t kpage_alloc_specific(page_t** page, size_t ppn)
{
	spin_lock_irqsave(&colored_page_free_list_lock);
	error_t ret = __page_alloc_specific(page, ppn);
	/* Only take a reference if the allocation actually succeeded */
	if (ret == ESUCCESS)
		page_incref(*page);
	spin_unlock_irqsave(&colored_page_free_list_lock);
	return ret;
}

/*
 * Return a page to the free list.
 * (This function should only be called when pp->page_ref reaches 0.)
 * You must hold the page_free list lock before calling this.
 */
static error_t __page_free(page_t* page)
{
	__page_clear(page);

	/* Hang the page back off the free list for its cache color */
	LIST_INSERT_HEAD(
	   &(colored_page_free_list[get_page_color(page2ppn(page), llc_cache)]),
	   page,
	   page_link
	);

	return ESUCCESS;
}

error_t page_free(page_t *SAFE page)
{
	error_t retval;
	spin_lock_irqsave(&colored_page_free_list_lock);
	retval = __page_free(page);
	spin_unlock_irqsave(&colored_page_free_list_lock);
	return retval;
}

/*
 * Check if the page with the given physical page number is free
 */
int page_is_free(size_t ppn) {
	page_t* page = ppn2page(ppn);
	if( page->page_ref == 0 )
		return TRUE;
	return FALSE;
}

/*
 * Increment the reference count on a page
 */
void page_incref(page_t *page)
{
	__page_incref(page);
}

void __page_incref(page_t *page)
{
	page->page_ref++;
}

/*
 * Decrement the reference count on a page,
 * freeing it if there are no more refs.
 */
void page_decref(page_t *page)
{
	spin_lock_irqsave(&colored_page_free_list_lock);
	__page_decref(page);
	spin_unlock_irqsave(&colored_page_free_list_lock);
}

/*
 * Decrement the reference count on a page, freeing it if there are no more
 * refs.  The caller must hold the page_free list lock.
 */
static void __page_decref(page_t *page)
{
	if (page->page_ref == 0)
		panic("Trying to free an already freed page: %d...\n",
		      page2ppn(page));
	if (--page->page_ref == 0)
		__page_free(page);
}
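
/* Lifecycle sketch (illustrative only): every holder of a page owns one
 * reference, and the page returns to its color's free list when the last
 * reference is dropped:
 *
 *	page_t *pg;
 *	kpage_alloc(&pg);	// refcount: 1
 *	page_incref(pg);	// a second holder, e.g. a shared mapping: 2
 *	page_decref(pg);	// back to 1 -- page still live
 *	page_decref(pg);	// 0 -- __page_decref() calls __page_free()
 */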

/*
 * Set the reference count on a page to a specific value
 */
void page_setref(page_t *page, size_t val)
{
	page->page_ref = val;
}

/*
 * Get the reference count on a page
 */
size_t page_getref(page_t *page)
{
	return page->page_ref;
}