[akaros.git] / user / vmm / sched.c
/* Copyright (c) 2016 Google Inc.
 * Barret Rhoden <brho@cs.berkeley.edu>
 * See LICENSE for details.
 *
 * 2LS for virtual machines */

#include <vmm/sched.h>
#include <vmm/vmm.h>
#include <sys/mman.h>
#include <stdlib.h>
#include <assert.h>
#include <parlib/spinlock.h>
#include <parlib/event.h>
#include <parlib/ucq.h>
#include <parlib/arch/trap.h>
#include <parlib/ros_debug.h>
#include <parlib/vcore_tick.h>

int vmm_sched_period_usec = 1000;

/* For now, we only have one VM managed by the 2LS.  If we ever expand that,
 * we'll need something analogous to current_uthread, so the 2LS knows which VM
 * it is working on. */
static struct virtual_machine *current_vm;

static struct spin_pdr_lock queue_lock = SPINPDR_INITIALIZER;
/* Runnable queues, broken up by thread type. */
static struct vmm_thread_tq rnbl_tasks = TAILQ_HEAD_INITIALIZER(rnbl_tasks);
static struct vmm_thread_tq rnbl_guests = TAILQ_HEAD_INITIALIZER(rnbl_guests);
/* Counts of *unblocked* threads.  Unblocked = Running + Runnable. */
static atomic_t nr_unblk_tasks;
static atomic_t nr_unblk_guests;
/* Global evq for all syscalls.  Could make this per vcore or whatever. */
static struct event_queue *sysc_evq;

static void vmm_sched_entry(void);
static void vmm_thread_runnable(struct uthread *uth);
static void vmm_thread_paused(struct uthread *uth);
static void vmm_thread_blockon_sysc(struct uthread *uth, void *sysc);
static void vmm_thread_has_blocked(struct uthread *uth, uth_sync_t *sync_obj,
                                   int flags);
static void vmm_thread_refl_fault(struct uthread *uth,
                                  struct user_context *ctx);
static void vmm_thread_exited(struct uthread *uth);
static struct uthread *vmm_thread_create(void *(*func)(void *), void *arg);

struct schedule_ops vmm_sched_ops = {
        .sched_entry = vmm_sched_entry,
        .thread_runnable = vmm_thread_runnable,
        .thread_paused = vmm_thread_paused,
        .thread_blockon_sysc = vmm_thread_blockon_sysc,
        .thread_has_blocked = vmm_thread_has_blocked,
        .thread_refl_fault = vmm_thread_refl_fault,
        .thread_exited = vmm_thread_exited,
        .thread_create = vmm_thread_create,
};

/* Helpers */
static void vmm_handle_syscall(struct event_msg *ev_msg, unsigned int ev_type,
                               void *data);
static void acct_thread_blocked(struct vmm_thread *vth);
static void acct_thread_unblocked(struct vmm_thread *vth);
static void enqueue_vmm_thread(struct vmm_thread *vth);
static struct vmm_thread *alloc_vmm_thread(struct virtual_machine *vm,
                                           int type);
static void *__alloc_stack(size_t stacksize);
static void __free_stack(void *stacktop, size_t stacksize);

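/* Restarts a uthread that was blocked on the syscall sysc, which has now
 * completed. */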
static void restart_thread(struct syscall *sysc)
{
        struct uthread *ut_restartee = (struct uthread*)sysc->u_data;

        /* uthread stuff here: */
        assert(ut_restartee);
        assert(ut_restartee->sysc == sysc);     /* set in uthread.c */
        ut_restartee->sysc = 0; /* so we don't 'reblock' on this later */
        vmm_thread_runnable(ut_restartee);
}

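/* Event handler for syscall completion: the kernel told us a syscall we
 * blocked on is done, so restart the thread that was waiting on it. */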
static void vmm_handle_syscall(struct event_msg *ev_msg, unsigned int ev_type,
                               void *data)
{
        struct syscall *sysc;

        /* I think we can make this assert now.  If not, check pthread.c. (concern
         * was having old ev_qs firing and running this handler). */
        assert(ev_msg);
        sysc = ev_msg->ev_arg3;
        assert(sysc);
        restart_thread(sysc);
}

/* Helper: allocates a UCQ-based event queue suitable for syscalls.  Will
 * attempt to route the notifs/IPIs to vcoreid */
static struct event_queue *setup_sysc_evq(int vcoreid)
{
        struct event_queue *evq;
        uintptr_t mmap_block;

        mmap_block = (uintptr_t)mmap(0, PGSIZE * 2,
                                     PROT_WRITE | PROT_READ,
                                     MAP_POPULATE | MAP_ANONYMOUS, -1, 0);
        evq = get_eventq_raw();
        assert(mmap_block && evq);
        evq->ev_flags = EVENT_IPI | EVENT_INDIR | EVENT_SPAM_INDIR | EVENT_WAKEUP;
        evq->ev_vcore = vcoreid;
        evq->ev_mbox->type = EV_MBOX_UCQ;
        ucq_init_raw(&evq->ev_mbox->ucq, mmap_block, mmap_block + PGSIZE);
        return evq;
}

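/* Library constructor: initializes the uthread code, sets up thread0 (the
 * caller's context) as a task thread, and registers our 2LS ops and the
 * syscall event handler. */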
static void __attribute__((constructor)) vmm_lib_init(void)
{
        struct task_thread *thread0;

        parlib_init_once_racy(return);
        uthread_lib_init();

        /* Note that thread0 doesn't belong to a VM.  We can set this during
         * vmm_init() if we need to. */
        thread0 = (struct task_thread*)alloc_vmm_thread(0, VMM_THREAD_TASK);
        assert(thread0);
        acct_thread_unblocked((struct vmm_thread*)thread0);
        thread0->stacksize = USTACK_NUM_PAGES * PGSIZE;
        thread0->stacktop = (void*)USTACKTOP;
        /* for lack of a better vcore, might as well send to 0 */
        sysc_evq = setup_sysc_evq(0);
        uthread_2ls_init((struct uthread*)thread0, &vmm_sched_ops,
                         vmm_handle_syscall, NULL);
}

/* The scheduling policy is encapsulated in the next few functions (from here
 * down to sched_entry()). */

static int desired_nr_vcores(void)
{
        /* Sanity checks on our accounting. */
        assert(atomic_read(&nr_unblk_guests) >= 0);
        assert(atomic_read(&nr_unblk_tasks) >= 0);
        /* Lockless peek.  This is always an estimate.  Some of our tasks busy-wait,
         * so it's not enough to just give us one vcore for all tasks, yet. */
        return atomic_read(&nr_unblk_guests) + atomic_read(&nr_unblk_tasks);
}

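/* Helper: dequeues and returns the first thread on the queue, or NULL if the
 * queue is empty.  The caller must hold queue_lock. */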
static struct vmm_thread *__pop_first(struct vmm_thread_tq *tq)
{
        struct vmm_thread *vth;

        vth = TAILQ_FIRST(tq);
        if (vth)
                TAILQ_REMOVE(tq, vth, tq_next);
        return vth;
}

static struct vmm_thread *pick_a_thread_degraded(void)
{
        struct vmm_thread *vth = 0;
        static int next_class = VMM_THREAD_GUEST;

        /* We don't have a lot of cores (maybe 0), so we'll alternate which type of
         * thread we look at first.  Basically, we're RR within a class of threads,
         * and we'll toggle between those two classes. */
        spin_pdr_lock(&queue_lock);
        if (next_class == VMM_THREAD_GUEST) {
                if (!vth)
                        vth = __pop_first(&rnbl_guests);
                if (!vth)
                        vth = __pop_first(&rnbl_tasks);
                next_class = VMM_THREAD_TASK;
        } else {
                if (!vth)
                        vth = __pop_first(&rnbl_tasks);
                if (!vth)
                        vth = __pop_first(&rnbl_guests);
                next_class = VMM_THREAD_GUEST;
        }
        spin_pdr_unlock(&queue_lock);
        return vth;
}

/* We have plenty of cores - run whatever we want.  We'll prioritize tasks. */
static struct vmm_thread *pick_a_thread_plenty(void)
{
        struct vmm_thread *vth = 0;

        spin_pdr_lock(&queue_lock);
        if (!vth)
                vth = __pop_first(&rnbl_tasks);
        if (!vth)
                vth = __pop_first(&rnbl_guests);
        spin_pdr_unlock(&queue_lock);
        return vth;
}

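/* Helper: stops this vcore's current uthread, if any, and puts it back on the
 * runnable queue.  Used when the vcore tick fires and we are short on cores. */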
static void yield_current_uth(void)
{
        struct vmm_thread *vth;

        if (!current_uthread)
                return;
        vth = (struct vmm_thread*)stop_current_uthread();
        enqueue_vmm_thread(vth);
}

/* Helper, tries to get the right number of vcores.  Returns TRUE if we think we
 * have enough, FALSE otherwise.
 *
 * TODO: this doesn't handle a lot of issues, like preemption, how to
 * run/yield our vcores, dynamic changes in the number of runnables, where
 * to send events, how to avoid interfering with gpcs, etc. */
static bool try_to_get_vcores(void)
{
        int nr_vcores_wanted = desired_nr_vcores();
        bool have_enough = nr_vcores_wanted <= num_vcores();

        if (have_enough) {
                vcore_tick_disable();
                return TRUE;
        }
        vcore_tick_enable(vmm_sched_period_usec);
        vcore_request_total(nr_vcores_wanted);
        return FALSE;
}

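/* 2LS entry point, called in vcore context whenever a vcore starts up or has
 * nothing else to do.  Runs the vcore's current uthread if it still has one
 * (unless the scheduler tick says to rotate), otherwise picks a runnable
 * thread and runs it.  Never returns. */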
static void __attribute__((noreturn)) vmm_sched_entry(void)
{
        struct vmm_thread *vth;
        bool have_enough;

        have_enough = try_to_get_vcores();
        if (!have_enough && vcore_tick_poll()) {
                /* slightly less than ideal: we grab the queue lock twice */
                yield_current_uth();
        }
        if (current_uthread)
                run_current_uthread();
        if (have_enough)
                vth = pick_a_thread_plenty();
        else
                vth = pick_a_thread_degraded();
        if (!vth)
                vcore_yield_or_restart();
        run_uthread((struct uthread*)vth);
}

static void vmm_thread_runnable(struct uthread *uth)
{
        /* A thread that was blocked is now runnable.  This counts as becoming
         * unblocked (running + runnable) */
        acct_thread_unblocked((struct vmm_thread*)uth);
        enqueue_vmm_thread((struct vmm_thread*)uth);
}

static void vmm_thread_paused(struct uthread *uth)
{
        /* The thread stopped for some reason, usually a preemption.  We'd like to
         * just run it whenever we get a chance.  Note that it didn't become
         * 'blocked' - it's still runnable. */
        enqueue_vmm_thread((struct vmm_thread*)uth);
}

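/* Called when uth issues a syscall that didn't complete immediately.  Account
 * for the thread being blocked and register for a completion event on our
 * global syscall evq; if we lose the race with the syscall finishing, restart
 * the thread ourselves. */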
static void vmm_thread_blockon_sysc(struct uthread *uth, void *syscall)
{
        struct syscall *sysc = (struct syscall*)syscall;

        acct_thread_blocked((struct vmm_thread*)uth);
        sysc->u_data = uth;
        if (!register_evq(sysc, sysc_evq)) {
                /* Lost the race with the call being done.  The kernel won't send the
                 * event.  Just restart him. */
                restart_thread(sysc);
        }
        /* GIANT WARNING: do not touch the thread after this point. */
}

static void vmm_thread_has_blocked(struct uthread *uth, uth_sync_t *sync_obj,
                                   int flags)
{
        /* The thread blocked on something like a mutex.  It's not runnable, so we
         * don't need to put it on a list, but we do need to account for it not
         * running.  We'll find out (via thread_runnable) when it starts up again.
         */
        acct_thread_blocked((struct vmm_thread*)uth);
        if (sync_obj)
                __uth_default_sync_enqueue(uth, sync_obj);
}

static void refl_error(struct uthread *uth, unsigned int trap_nr,
                       unsigned int err, unsigned long aux)
{
        printf("Thread has unhandled fault: %d, err: %d, aux: %p\n",
               trap_nr, err, aux);
        /* Note that uthread.c already copied out our ctx into the uth
         * struct */
        print_user_context(&uth->u_ctx);
        printf("Turn on printx to spew unhandled, malignant trap info\n");
        exit(-1);
}

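/* Handles a reflected page fault.  If the faulting address is within a
 * VMR-backed region, kick off an async SYS_populate_va and block the uthread
 * until it completes; otherwise return FALSE so the caller treats the fault
 * as fatal. */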
static bool handle_page_fault(struct uthread *uth, unsigned int err,
                              unsigned long aux)
{
        if (!(err & PF_VMR_BACKED))
                return FALSE;
        syscall_async(&uth->local_sysc, SYS_populate_va, aux, 1);
        __block_uthread_on_async_sysc(uth);
        return TRUE;
}

static void vmm_thread_refl_hw_fault(struct uthread *uth,
                                     unsigned int trap_nr,
                                     unsigned int err, unsigned long aux)
{
        switch (trap_nr) {
        case HW_TRAP_PAGE_FAULT:
                if (!handle_page_fault(uth, err, aux))
                        refl_error(uth, trap_nr, err, aux);
                break;
        default:
                refl_error(uth, trap_nr, err, aux);
        }
}

/* Yield callback for __ctlr_entry */
static void __swap_to_gth(struct uthread *uth, void *dummy)
{
        struct ctlr_thread *cth = (struct ctlr_thread*)uth;

        /* We just immediately run our buddy.  The ctlr and the guest are accounted
         * together ("pass the token" back and forth). */
        current_uthread = NULL;
        run_uthread((struct uthread*)cth->buddy);
        assert(0);
}

/* All ctlr threads start here, each time their guest has a fault.  They can
 * block and unblock along the way.  Once a ctlr does its final uthread_yield,
 * the next time it will start again from the top. */
static void __ctlr_entry(void)
{
        struct ctlr_thread *cth = (struct ctlr_thread*)current_uthread;
        struct virtual_machine *vm = gth_to_vm(cth->buddy);

        if (!handle_vmexit(cth->buddy)) {
                struct vm_trapframe *vm_tf = gth_to_vmtf(cth->buddy);

                fprintf(stderr, "vmm: handle_vmexit returned false\n");
                fprintf(stderr, "Note: this may be a kernel module, not the kernel\n");
                fprintf(stderr, "RSP was %p, ", (void *)vm_tf->tf_rsp);
                fprintf(stderr, "RIP was %p:\n", (void *)vm_tf->tf_rip);
                /* TODO: properly walk the kernel page tables to map the tf_rip
                 * to a physical address. For now, however, this hack is good
                 * enough.
                 */
                hexdump(stderr, (void *)(vm_tf->tf_rip & 0x3fffffff), 16);
                showstatus(stderr, cth->buddy);
                exit(0);
        }
        /* We want to atomically yield and start/reenqueue our buddy.  We do so in
         * vcore context on the other side of the yield. */
        uthread_yield(FALSE, __swap_to_gth, 0);
}

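/* A guest thread had a VM exit.  Hand control to its controller (ctlr) buddy,
 * which always starts fresh from __ctlr_entry to handle the exit. */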
static void vmm_thread_refl_vm_fault(struct uthread *uth)
{
        struct guest_thread *gth = (struct guest_thread*)uth;
        struct ctlr_thread *cth = gth->buddy;

        /* The ctlr starts from the top every time we get a new fault. */
        cth->uthread.flags |= UTHREAD_SAVED;
        init_user_ctx(&cth->uthread.u_ctx, (uintptr_t)&__ctlr_entry,
                      (uintptr_t)(cth->stacktop));
        /* We just immediately run our buddy.  The ctlr and the guest are accounted
         * together ("pass the token" back and forth). */
        current_uthread = NULL;
        run_uthread((struct uthread*)cth);
        assert(0);
}

static void vmm_thread_refl_fault(struct uthread *uth,
                                  struct user_context *ctx)
{
        switch (ctx->type) {
        case ROS_HW_CTX:
                /* Guests should only ever VM exit */
                assert(((struct vmm_thread*)uth)->type != VMM_THREAD_GUEST);
                vmm_thread_refl_hw_fault(uth, __arch_refl_get_nr(ctx),
                                         __arch_refl_get_err(ctx),
                                         __arch_refl_get_aux(ctx));
                break;
        case ROS_VM_CTX:
                vmm_thread_refl_vm_fault(uth);
                break;
        default:
                assert(0);
        }
}

static void vmm_thread_exited(struct uthread *uth)
{
        struct vmm_thread *vth = (struct vmm_thread*)uth;
        struct task_thread *tth = (struct task_thread*)uth;

        /* Catch bugs.  Right now, only task threads can exit. */
        assert(vth->type == VMM_THREAD_TASK);

        acct_thread_blocked((struct vmm_thread*)tth);
        uthread_cleanup(uth);
        __free_stack(tth->stacktop, tth->stacksize);
        free(tth);
}

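/* Tears down a guest thread and its ctlr buddy, freeing the ctlr's stack and
 * both uthreads. */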
static void destroy_guest_thread(struct guest_thread *gth)
{
        struct ctlr_thread *cth = gth->buddy;

        __free_stack(cth->stacktop, cth->stacksize);
        uthread_cleanup((struct uthread*)cth);
        free(cth);
        uthread_cleanup((struct uthread*)gth);
        free(gth);
}

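/* Allocates and initializes a guest thread for guest pcore gpcoreid, along
 * with the ctlr thread that handles its VM exits.  Returns NULL on failure. */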
static struct guest_thread *create_guest_thread(struct virtual_machine *vm,
                                                unsigned int gpcoreid)
{
        struct guest_thread *gth;
        struct ctlr_thread *cth;
        /* Guests won't use TLS; they always operate in Ring V.  The controller
         * might - not because of anything we do, but because of glibc calls. */
        struct uth_thread_attr gth_attr = {.want_tls = FALSE};
        struct uth_thread_attr cth_attr = {.want_tls = TRUE};

        gth = (struct guest_thread*)alloc_vmm_thread(vm, VMM_THREAD_GUEST);
        cth = (struct ctlr_thread*)alloc_vmm_thread(vm, VMM_THREAD_CTLR);
        if (!gth || !cth) {
                free(gth);
                free(cth);
                return 0;
        }
        gth->buddy = cth;
        cth->buddy = gth;
        gth->gpc_id = gpcoreid;
        cth->stacksize = VMM_THR_STACKSIZE;
        cth->stacktop = __alloc_stack(cth->stacksize);
        if (!cth->stacktop) {
                free(gth);
                free(cth);
                return 0;
        }
        gth->uthread.u_ctx.type = ROS_VM_CTX;
        gth->uthread.u_ctx.tf.vm_tf.tf_guest_pcoreid = gpcoreid;
        /* No need to init the ctlr.  It gets re-init'd each time it starts. */
        uthread_init((struct uthread*)gth, &gth_attr);
        uthread_init((struct uthread*)cth, &cth_attr);
        /* TODO: give it a correct FP state.  Our current one is probably fine */
        restore_fp_state(&gth->uthread.as);
        gth->uthread.flags |= UTHREAD_FPSAVED;
        gth->halt_mtx = uth_mutex_alloc();
        gth->halt_cv = uth_cond_var_alloc();
        return gth;
}

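/* Initializes the 2LS for the VM: registers the guest pcores with the kernel
 * (SYS_vmm_setup), creates a guest thread (plus ctlr buddy) per guest pcore,
 * and switches the process to MCP mode.  Only one VM per process is
 * supported.  Returns 0 on success, -1 on error.
 *
 * A minimal usage sketch (illustrative only; how nr_gpcs/gpcis and the flags
 * value get filled in is up to the caller and isn't prescribed by this file):
 *
 *      struct virtual_machine vm = { .nr_gpcs = 1, .gpcis = my_gpcis };
 *
 *      if (vmm_init(&vm, 0))
 *              handle_error();
 *      for (int i = 0; i < vm.nr_gpcs; i++)
 *              start_guest_thread(vm.gths[i]);
 */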
int vmm_init(struct virtual_machine *vm, int flags)
{
        struct guest_thread **gths;

        if (current_vm)
                return -1;
        current_vm = vm;
        if (syscall(SYS_vmm_setup, vm->nr_gpcs, vm->gpcis, flags) != vm->nr_gpcs)
                return -1;
        gths = malloc(vm->nr_gpcs * sizeof(struct guest_thread *));
        if (!gths)
                return -1;
        for (int i = 0; i < vm->nr_gpcs; i++) {
                gths[i] = create_guest_thread(vm, i);
                if (!gths[i]) {
                        for (int j = 0; j < i; j++)
                                destroy_guest_thread(gths[j]);
                        free(gths);
                        return -1;
                }
        }
        vm->gths = gths;
        uthread_mcp_init();
        return 0;
}

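/* Accounts for the guest thread as unblocked and queues it to run. */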
void start_guest_thread(struct guest_thread *gth)
{
        acct_thread_unblocked((struct vmm_thread*)gth);
        enqueue_vmm_thread((struct vmm_thread*)gth);
}

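/* Entry point for task threads: runs the task's function and exits the
 * uthread with its return value. */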
static void __task_thread_run(void)
{
        struct task_thread *tth = (struct task_thread*)current_uthread;

        uth_2ls_thread_exit(tth->func(tth->arg));
}

/* Helper, creates and starts a task thread. */
static struct task_thread *__vmm_run_task(struct virtual_machine *vm,
                                          void *(*func)(void *), void *arg,
                                          struct uth_thread_attr *tth_attr)
{
        struct task_thread *tth;

        tth = (struct task_thread*)alloc_vmm_thread(vm, VMM_THREAD_TASK);
        if (!tth)
                return 0;
        tth->stacksize = VMM_THR_STACKSIZE;
        tth->stacktop = __alloc_stack(tth->stacksize);
        if (!tth->stacktop) {
                free(tth);
                return 0;
        }
        tth->func = func;
        tth->arg = arg;
        init_user_ctx(&tth->uthread.u_ctx, (uintptr_t)&__task_thread_run,
                      (uintptr_t)(tth->stacktop));
        uthread_init((struct uthread*)tth, tth_attr);
        acct_thread_unblocked((struct vmm_thread*)tth);
        enqueue_vmm_thread((struct vmm_thread*)tth);
        return tth;
}

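/* Runs func(arg) as a detached task thread associated with vm.  Returns the
 * new thread, or NULL on failure.
 *
 * Illustrative usage (the task function and its argument below are made up
 * for the example):
 *
 *      static void *pinger(void *arg)
 *      {
 *              do_periodic_work(arg);
 *              return NULL;
 *      }
 *
 *      if (!vmm_run_task(vm, pinger, some_arg))
 *              fprintf(stderr, "failed to start task\n");
 */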
struct task_thread *vmm_run_task(struct virtual_machine *vm,
                                 void *(*func)(void *), void *arg)
{
        struct uth_thread_attr tth_attr = {.want_tls = TRUE, .detached = TRUE};

        return __vmm_run_task(vm, func, arg, &tth_attr);
}

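/* 2LS thread_create op: makes a generic, joinable (non-detached) task thread
 * that isn't tied to any VM. */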
static struct uthread *vmm_thread_create(void *(*func)(void *), void *arg)
{
        struct uth_thread_attr tth_attr = {.want_tls = TRUE, .detached = FALSE};
        struct task_thread *tth;

        /* It's OK to not have a VM for a generic thread */
        tth = __vmm_run_task(NULL, func, arg, &tth_attr);
        /* But just in case, let's poison it */
        ((struct vmm_thread*)tth)->vm = (void*)0xdeadbeef;
        return (struct uthread*)tth;
}

/* Helpers for tracking nr_unblk_* threads. */
static void acct_thread_blocked(struct vmm_thread *vth)
{
        switch (vth->type) {
        case VMM_THREAD_GUEST:
        case VMM_THREAD_CTLR:
                atomic_dec(&nr_unblk_guests);
                break;
        case VMM_THREAD_TASK:
                atomic_dec(&nr_unblk_tasks);
                break;
        }
}

static void acct_thread_unblocked(struct vmm_thread *vth)
{
        switch (vth->type) {
        case VMM_THREAD_GUEST:
        case VMM_THREAD_CTLR:
                atomic_inc(&nr_unblk_guests);
                break;
        case VMM_THREAD_TASK:
                atomic_inc(&nr_unblk_tasks);
                break;
        }
}

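/* Puts a runnable thread on the queue matching its type, then pokes vcore
 * management so there's a chance a vcore will pick it up. */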
static void enqueue_vmm_thread(struct vmm_thread *vth)
{
        spin_pdr_lock(&queue_lock);
        switch (vth->type) {
        case VMM_THREAD_GUEST:
        case VMM_THREAD_CTLR:
                TAILQ_INSERT_TAIL(&rnbl_guests, vth, tq_next);
                break;
        case VMM_THREAD_TASK:
                TAILQ_INSERT_TAIL(&rnbl_tasks, vth, tq_next);
                break;
        }
        spin_pdr_unlock(&queue_lock);
        try_to_get_vcores();
}

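/* Allocates a zeroed vmm_thread of the given type, aligned for struct
 * vmm_thread.  Returns NULL on failure. */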
static struct vmm_thread *alloc_vmm_thread(struct virtual_machine *vm, int type)
{
        struct vmm_thread *vth;
        int ret;

        ret = posix_memalign((void**)&vth, __alignof__(struct vmm_thread),
                             sizeof(struct vmm_thread));
        if (ret)
                return 0;
        memset(vth, 0, sizeof(struct vmm_thread));
        vth->type = type;
        vth->vm = vm;
        return vth;
}

static void __free_stack(void *stacktop, size_t stacksize)
{
        munmap(stacktop - stacksize, stacksize);
}

static void *__alloc_stack(size_t stacksize)
{
        int force_a_page_fault;
        void *stacktop;
        void *stackbot = mmap(0, stacksize, PROT_READ | PROT_WRITE | PROT_EXEC,
                              MAP_ANONYMOUS, -1, 0);

        if (stackbot == MAP_FAILED)
                return 0;
        stacktop = stackbot + stacksize;
        /* Want the top of the stack populated, but not the rest of the stack;
         * that'll grow on demand (up to stacksize, then will clobber memory). */
        force_a_page_fault = ACCESS_ONCE(*(int*)(stacktop - sizeof(int)));
        return stacktop;
}