akaros/user/parlib/vcore.c
#include <parlib/arch/arch.h>
#include <stdbool.h>
#include <errno.h>
#include <parlib/vcore.h>
#include <parlib/mcs.h>
#include <sys/param.h>
#include <parlib/parlib.h>
#include <unistd.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <parlib/event.h>
#include <parlib/uthread.h>
#include <parlib/ucq.h>
#include <ros/arch/membar.h>
#include <parlib/printf-ext.h>
#include <parlib/poke.h>
#include <parlib/assert.h>
#include <parlib/stdio.h>

__thread int __vcoreid = 0;
__thread bool __vcore_context = FALSE;

__thread struct syscall __vcore_one_sysc = {.flags = (atomic_t)SC_DONE, 0};

/* Per-vcore entry function used when reentering at the top of a vcore's
 * stack. */
static __thread void (*__vcore_reentry_func)(void) = NULL;

/* The default user vcore_entry function. */
void __attribute__((noreturn)) __vcore_entry(void)
{
        extern void uthread_vcore_entry(void);

        uthread_vcore_entry();
        fprintf(stderr, "vcore_entry() should never return!\n");
        abort();
        __builtin_unreachable();
}
void vcore_entry(void) __attribute__((weak, alias ("__vcore_entry")));
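
/* vcore_entry() is a weak alias, so an application or 2LS that wants full
 * control of vcore context can override it by defining its own strong symbol.
 * Illustrative sketch only (my_vcore_entry() is not part of parlib):
 *
 *      void __attribute__((noreturn)) vcore_entry(void)
 *      {
 *              my_vcore_entry();       // never returns
 *      }
 */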

/* TODO: probably don't want to dealloc.  Considering caching */
static void free_transition_tls(int id)
{
        if (get_vcpd_tls_desc(id)) {
                /* Note we briefly have no TLS desc in VCPD.  This is fine so
                 * long as that vcore doesn't get started fresh before we put in
                 * a new desc */
                free_tls(get_vcpd_tls_desc(id));
                set_vcpd_tls_desc(id, NULL);
        }
}

static int allocate_transition_tls(int id)
{
        /* Libc function to initialize TLS-based locale info for ctype
         * functions. */
        extern void __ctype_init(void);

        /* We want to free and then reallocate the tls rather than simply
         * reinitializing it because its size may have changed.  TODO: not sure
         * if this is right.  0-ing is one thing, but freeing and reallocating
         * can be expensive, esp if syscalls are involved.  Check out glibc's
         * allocatestack.c for what might work. */
        free_transition_tls(id);

        void *tcb = allocate_tls();

        if (!tcb) {
                errno = ENOMEM;
                return -1;
        }

        /* Set up some initial TLS data for the newly allocated transition
         * TLS. */
        void *temp_tcb = get_tls_desc();

        set_tls_desc(tcb);
        begin_safe_access_tls_vars();
        __vcoreid = id;
        __vcore_context = TRUE;
        __ctype_init();
        end_safe_access_tls_vars();
        set_tls_desc(temp_tcb);

        /* Install the new tls into the vcpd. */
        set_vcpd_tls_desc(id, tcb);
        return 0;
}

static void free_vcore_stack(int id)
{
        /* Don't actually free stacks; they stay in the VCPD and get reused by
         * allocate_vcore_stack(). */
}

static int allocate_vcore_stack(int id)
{
        struct preempt_data *vcpd = vcpd_of(id);

        if (vcpd->vcore_stack)
                return 0; /* reuse old stack */

        void* stackbot = mmap(0, TRANSITION_STACK_SIZE,
                              PROT_READ | PROT_WRITE | PROT_EXEC,
                              MAP_POPULATE | MAP_ANONYMOUS | MAP_PRIVATE, -1,
                              0);

        if (stackbot == MAP_FAILED)
                return -1; /* errno set by mmap */

        vcpd->vcore_stack = (uintptr_t)stackbot + TRANSITION_STACK_SIZE;

        return 0;
}

/* Helper: prepares a vcore for use.  Takes a block of 4 pages for the UCQs
 * (two each for the public and private event mboxes).
 *
 * Vcores need certain things, such as a stack and TLS.  These are determined by
 * userspace.  Every vcore needs these set up before we drop into vcore context
 * on that vcore.  This means we need to prep before asking the kernel for those
 * vcores.
 *
 * We could have this function do its own mmap, at the expense of O(n) syscalls
 * when we prepare the extra vcores. */
static void __prep_vcore(int vcoreid, uintptr_t mmap_block)
{
        struct preempt_data *vcpd = vcpd_of(vcoreid);
        int ret;

        ret = allocate_vcore_stack(vcoreid);
        assert(!ret);
        ret = allocate_transition_tls(vcoreid);
        assert(!ret);

        vcpd->ev_mbox_public.type = EV_MBOX_UCQ;
        ucq_init_raw(&vcpd->ev_mbox_public.ucq,
                     mmap_block + 0 * PGSIZE,
                     mmap_block + 1 * PGSIZE);
        vcpd->ev_mbox_private.type = EV_MBOX_UCQ;
        ucq_init_raw(&vcpd->ev_mbox_private.ucq,
                     mmap_block + 2 * PGSIZE,
                     mmap_block + 3 * PGSIZE);

        /* Set the lowest level entry point for each vcore. */
        vcpd->vcore_entry = (uintptr_t)__kernel_vcore_entry;
}

static void prep_vcore_0(void)
{
        uintptr_t mmap_block;

        mmap_block = (uintptr_t)mmap(0, PGSIZE * 4,
                                     PROT_WRITE | PROT_READ,
                                     MAP_POPULATE | MAP_ANONYMOUS | MAP_PRIVATE,
                                     -1, 0);
        assert((void*)mmap_block != MAP_FAILED);
        __prep_vcore(0, mmap_block);
}

static void prep_remaining_vcores(void)
{
        uintptr_t mmap_block;

        mmap_block = (uintptr_t)mmap(0, PGSIZE * 4 * (max_vcores() - 1),
                                     PROT_WRITE | PROT_READ,
                                     MAP_POPULATE | MAP_ANONYMOUS | MAP_PRIVATE,
                                     -1, 0);
        assert((void*)mmap_block != MAP_FAILED);
        for (int i = 1; i < max_vcores(); i++)
                __prep_vcore(i, mmap_block + 4 * (i - 1) * PGSIZE);
}

/* Run libc-specific early setup code. */
static void vcore_libc_init(void)
{
        register_printf_specifier('r', printf_errstr, printf_errstr_info);
        /* TODO: register for other kevents/signals and whatnot (can probably
         * reuse the simple ev_q).  Could also do this via explicit functions
         * from the program. */
}

/* We need to separate the guts of vcore_lib_ctor() into a separate function,
 * since the uthread ctor depends on this ctor running first.
 *
 * Also note that if you make a global ctor (not static, like this used to be),
 * any shared objects that you load when the binary is built with -rdynamic will
 * run the global ctor from the binary, not the one from the .so. */
void vcore_lib_init(void)
{
        /* Note this is racy, but okay.  The first time through, we are _S.
         * Also, this is the "lowest" level constructor for now, so we don't
         * need to call any other init functions after our run_once() call. This
         * may change in the future. */
        parlib_init_once_racy(return);
        /* Need to alloc vcore0's transition stuff here (technically, just the
         * TLS) so that schedulers can use vcore0's transition TLS before it
         * comes up in vcore_entry() */
        prep_vcore_0();
        assert(!in_vcore_context());
        vcore_libc_init();
}

static void __attribute__((constructor)) vcore_lib_ctor(void)
{
        if (__in_fake_parlib())
                return;
        vcore_lib_init();
}

/* Helper functions used to reenter at the top of a vcore's stack for an
 * arbitrary function */
static void __attribute__((noinline, noreturn)) __vcore_reenter(void)
{
        __vcore_reentry_func();
        assert(0);
}

void vcore_reenter(void (*entry_func)(void))
{
        assert(in_vcore_context());
        struct preempt_data *vcpd = vcpd_of(vcore_id());

        __vcore_reentry_func = entry_func;
        set_stack_pointer((void*)vcpd->vcore_stack);
        cmb();
        __vcore_reenter();
}
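
/* Example (sketch): from vcore context, a 2LS can discard the current stack
 * frame and restart at a known entry point.  my_sched_loop() is a hypothetical
 * noreturn 2LS function, not part of parlib:
 *
 *      assert(in_vcore_context());
 *      vcore_reenter(my_sched_loop);
 */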

/* Helper, picks some sane defaults and changes the process into an MCP */
void vcore_change_to_m(void)
{
        int ret;

        prep_remaining_vcores();
        __procdata.res_req[RES_CORES].amt_wanted = 1;
        __procdata.res_req[RES_CORES].amt_wanted_min = 1;       /* whatever */
        assert(!in_multi_mode());
        assert(!in_vcore_context());
        ret = sys_change_to_m();
        assert(!ret);
        assert(in_multi_mode());
        assert(!in_vcore_context());
}

static void __vc_req_poke(void *nr_vc_wanted)
{
        long nr_vcores_wanted = *(long*)nr_vc_wanted;

        /* We init'd up to max_vcores() VCs during init.  This assumes the
         * kernel doesn't magically change that value (which it should not
         * do). */
        nr_vcores_wanted = MIN(nr_vcores_wanted, max_vcores());
        if (nr_vcores_wanted > __procdata.res_req[RES_CORES].amt_wanted)
                __procdata.res_req[RES_CORES].amt_wanted = nr_vcores_wanted;
        if (nr_vcores_wanted > num_vcores())
                sys_poke_ksched(0, RES_CORES);  /* 0 -> poke for ourselves */
}

static struct poke_tracker vc_req_poke = POKE_INITIALIZER(__vc_req_poke);

/* Requests that the kernel give us a total of nr_vcores_wanted vcores.
 *
 * This is callable by multiple threads/vcores concurrently.  Exactly one of
 * them will actually run __vc_req_poke.  The others will just return.
 *
 * This means that two threads could ask for differing amounts, and only one of
 * them will succeed.  This is no different than a racy write to a shared
 * variable.  The poke provides a single-threaded environment, so that we don't
 * worry about racing on VCPDs or hitting the kernel with excessive SYS_pokes.
 *
 * Since we're using the post-and-poke style, we can do a 'last write wins'
 * policy for the value used in the poke (and subsequent pokes). */
void vcore_request_total(long nr_vcores_wanted)
{
        static long nr_vc_wanted;

        if (parlib_never_vc_request || !parlib_wants_to_be_mcp)
                return;
        if (nr_vcores_wanted == __procdata.res_req[RES_CORES].amt_wanted)
                return;

        /* We race to "post our work" here.  Whoever handles the poke will get
         * the latest value written here. */
        nr_vc_wanted = nr_vcores_wanted;
        poke(&vc_req_poke, &nr_vc_wanted);
}

/* This tries to get "more vcores", based on the number we currently have.
 *
 * What happens is we can have a bunch of threads trying to get "another vcore",
 * which currently means more than num_vcores().  If you have someone ask for
 * two more, and then someone else ask for one more, how many you ultimately ask
 * for depends on if the kernel heard you and adjusted num_vcores in between the
 * two calls.  Or maybe your amt_wanted already was num_vcores + 5, so neither
 * call is telling the kernel anything new.  It comes down to "one more than I
 * have" vs "one more than I've already asked for".
 *
 * So for now, this will keep the older behavior (one more than I have).  This
 * is all quite racy, so we can just guess and request a total number of vcores.
 */
void vcore_request_more(long nr_new_vcores)
{
        vcore_request_total(nr_new_vcores + num_vcores());
}
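
/* Example (sketch): a hypothetical 2LS hook that just made a new uthread
 * runnable and wants another core for it could call:
 *
 *      vcore_request_more(1);
 *
 * The request is capped at max_vcores() and coalesced through the poke tracker
 * above, so calling this from many threads at once is fine. */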

/* This can return, if you failed to yield due to a concurrent event.  Note
 * we're atomically setting the CAN_RCV flag, and aren't bothering with CASing
 * (either with the kernel or uthread's handle_indirs()).  We don't particularly
 * care what other code does - we intend to set those flags no matter what. */
void vcore_yield(bool preempt_pending)
{
        unsigned long old_nr;
        uint32_t vcoreid = vcore_id();
        struct preempt_data *vcpd = vcpd_of(vcoreid);

        if (!preempt_pending && parlib_never_yield)
                return;
        __sync_fetch_and_and(&vcpd->flags, ~VC_CAN_RCV_MSG);
        /* no wrmb() necessary, handle_events() has an mb() if it is checking */
        /* Clears notif pending and tries to handle events.  This is an
         * optimization to avoid the yield syscall if we have an event pending.
         * If there is one, we want to unwind and return to the 2LS loop, where
         * we may not want to yield anymore.
         *
         * Note that the kernel only cares about CAN_RCV_MSG for the desired
         * vcore; when spamming, it relies on membership of lists within the
         * kernel.  Look at spam_list_member() for more info (k/s/event.c). */
        if (handle_events(vcoreid)) {
                __sync_fetch_and_or(&vcpd->flags, VC_CAN_RCV_MSG);
                return;
        }
        /* If we are yielding since we don't want the core, tell the kernel we
         * want one less vcore (vc_yield assumes a dumb 2LS).
         *
         * If yield fails (slight race), we may end up having more vcores than
         * amt_wanted for a while, and might lose one later on (after a
         * preempt/timeslicing) - the 2LS will have to notice eventually if it
         * actually needs more vcores (which it already needs to do).
         * amt_wanted could even be 0.
         *
         * In general, any time userspace decrements or sets to 0, it could get
         * preempted, so the kernel will still give us at least one, until the
         * last vcore properly yields without missing a message (and becomes a
         * WAITING proc, which the ksched will not give cores to).
         *
         * I think it's possible for userspace to do this (lock, read
         * amt_wanted, check all message queues for all vcores, subtract
         * amt_wanted (not set to 0), unlock) so long as every event handler +1s
         * the amt wanted, but that's a huge pain, and we already have event
         * handling code making sure a process can't sleep (transition to
         * WAITING) if a message arrives (can't yield if notif_pending, can't go
         * WAITING without yielding, and the event posting the notif_pending
         * will find the online VC or be delayed by spinlock til the proc is
         * WAITING). */
        if (!preempt_pending) {
                do {
                        old_nr = __procdata.res_req[RES_CORES].amt_wanted;
                        if (old_nr == 0)
                                break;
                } while (!__sync_bool_compare_and_swap(
                             &__procdata.res_req[RES_CORES].amt_wanted,
                             old_nr, old_nr - 1));
        }
        /* We can probably yield.  This may pop back up if notif_pending became
         * set by the kernel after we cleared it and we lost the race. */
        sys_yield(preempt_pending);
        __sync_fetch_and_or(&vcpd->flags, VC_CAN_RCV_MSG);
}
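
/* Example (sketch): a hypothetical 2LS scheduling loop with nothing to run can
 * try to give the core back, looping because vcore_yield() may return if an
 * event slipped in:
 *
 *      struct uthread *uth;
 *
 *      while (!(uth = my_pick_next_uthread()))
 *              vcore_yield(FALSE);
 *      run_uthread(uth);
 *
 * my_pick_next_uthread() is illustrative only. */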

/* Enables notifs, and deals with missed notifs by self notifying.  This should
 * be rare, so the syscall overhead isn't a big deal.  The other alternative
 * would be to uthread_yield(), which would require us to revert some uthread
 * interface changes. */
void enable_notifs(uint32_t vcoreid)
{
        __enable_notifs(vcoreid);
        wrmb(); /* need to read after the write that enabled notifs */
        /* Note we could get migrated before executing this.  If that happens,
         * our vcore had gone into vcore context (which is what we wanted), and
         * this self_notify to our old vcore is spurious and harmless. */
        if (vcpd_of(vcoreid)->notif_pending)
                sys_self_notify(vcoreid, EV_NONE, 0, TRUE);
}

/* Helper to disable notifs.  It simply checks to make sure we disabled uthread
 * migration, which is a common mistake. */
void disable_notifs(uint32_t vcoreid)
{
        if (!in_vcore_context() && current_uthread)
                assert(current_uthread->flags & UTHREAD_DONT_MIGRATE);
        __disable_notifs(vcoreid);
}

/* Like smp_idle(), this will put the core in a state that it can only be woken
 * up by an IPI.  For now, this is a halt.  Maybe an mwait in the future.
 *
 * This will return if an event was pending (could be the one you were waiting
 * for) or if the halt failed for some reason, such as a concurrent RKM.  If
 * successful, this will not return at all, and the vcore will restart from the
 * top next time it wakes.  Any sort of IRQ will wake the core.
 *
 * Alternatively, I might make this so it never returns, if that's easier to
 * work with (similar issues with yield). */
void vcore_idle(void)
{
        uint32_t vcoreid = vcore_id();

        /* Once we enable notifs, the calling context will be treated like a
         * uthread (saved into the uth slot).  We don't want to ever run it
         * again, so we need to make sure there's no cur_uth. */
        assert(!current_uthread);
        /* This clears notif_pending (check, signal, check again pattern). */
        if (handle_events(vcoreid))
                return;
        /* This enables notifs, but also checks notif pending.  At this point,
         * any new notifs will restart the vcore from the top. */
        enable_notifs(vcoreid);
        /* From now until we get into the kernel, any notifs will permanently
         * destroy this context and start the VC from the top.
         *
         * Once we're in the kernel, any messages (__notify, __preempt), will be
         * RKMs.  halt will need to check for those atomically.  Checking for
         * notif_pending in the kernel (sleep only if not set) is not enough,
         * since not all reasons for the kernel to stay awake set notif_pending
         * (e.g., __preempts and __death).
         *
         * At this point, we're out of VC ctx, so anyone who sets notif_pending
         * should also send an IPI / __notify. */
        sys_halt_core(0);
        /* in case halt returns without actually restarting the VC ctx. */
        disable_notifs(vcoreid);
}

/* Helper that actually makes sure a vcore is running.  Call this if you really
 * want vcoreid.  More often, you'll want to call the regular version. */
static void __ensure_vcore_runs(uint32_t vcoreid)
{
        if (vcore_is_preempted(vcoreid)) {
                printd("[vcore]: VC %d changing to VC %d\n", vcore_id(),
                       vcoreid);
                /* Note that at this moment, the vcore could still be mapped
                 * (we're racing with __preempt).  If that happens, we'll just
                 * fail the sys_change_vcore(), and next time __ensure runs
                 * we'll get it. */
                /* We want to recover them from preemption.  Since we know they
                 * have notifs disabled, they will need to be directly
                 * restarted, so we can skip the other logic and cut straight to
                 * the sys_change_vcore() */
                sys_change_vcore(vcoreid, FALSE);
        }
}

/* Helper, looks for any preempted vcores, making sure each of them runs at some
 * point.  This is pretty heavy-weight, and should be used to help get out of
 * weird deadlocks (spinning in vcore context, waiting on another vcore).  If
 * you might know which vcore you are waiting on, use ensure_vcore_runs(). */
static void __ensure_all_run(void)
{
        for (int i = 0; i < max_vcores(); i++)
                __ensure_vcore_runs(i);
}

/* Makes sure a vcore is running.  If it is preempted, we'll switch to
 * it.  This will return, either immediately if the vcore is running, or later
 * when someone preempt-recovers us.
 *
 * If you pass in your own vcoreid, this will make sure all other preempted
 * vcores run. */
void ensure_vcore_runs(uint32_t vcoreid)
{
        /* if the vcoreid is ourselves, make sure everyone else is running */
        if (vcoreid == vcore_id()) {
                __ensure_all_run();
                return;
        }
        __ensure_vcore_runs(vcoreid);
}

#define NR_RELAX_SPINS 1000
/* If you are spinning and waiting on another vcore, call this.  Pass in the
 * vcoreid of the core you are waiting on, or your own vcoreid if you don't
 * know.  It will spin for a bit before firing up the potentially expensive
 * __ensure_all_run(). */
void cpu_relax_vc(uint32_t other_vcoreid)
{
        static __thread unsigned int __vc_relax_spun = 0;

        /* Uthreads with notifs enabled can just spin normally.  This actually
         * depends on the 2LS preemption policy.  Currently, we receive notifs
         * whenever another core is preempted, so we don't need to poll. */
        if (notif_is_enabled(vcore_id())) {
                cpu_relax();
                return;
        }
        if (__vc_relax_spun++ >= NR_RELAX_SPINS) {
                /* if other_vcoreid == vcore_id(), this might be expensive */
                ensure_vcore_runs(other_vcoreid);
                __vc_relax_spun = 0;
        }
        cpu_relax();
}
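
/* Example (sketch): busy-waiting in vcore context on state owned by another
 * vcore.  flag and owner_vcoreid are hypothetical; the point is to relax with
 * the owner's vcoreid so a preempted owner eventually gets to run:
 *
 *      while (!__atomic_load_n(&flag, __ATOMIC_ACQUIRE))
 *              cpu_relax_vc(owner_vcoreid);
 */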

/* Check with the kernel to determine what vcore we are.  Normally, you should
 * never call this, since your vcoreid is stored in your TLS.  Also, if you call
 * it from a uthread, you could get migrated, so you should drop into some form
 * of vcore context (DONT_MIGRATE on). */
uint32_t get_vcoreid(void)
{
        if (!in_vcore_context()) {
                assert(current_uthread);
                assert(current_uthread->flags & UTHREAD_DONT_MIGRATE);
        }
        return __get_vcoreid();
}

/* Debugging helper.  Pass in the string you want printed if your vcoreid is
 * wrong, and pass in what vcoreid you think you are.  Don't call from uthread
 * context unless migrations are disabled.  Will print some stuff and return
 * FALSE if you were wrong. */
bool check_vcoreid(const char *str, uint32_t vcoreid)
{
        uint32_t kvcoreid = get_vcoreid();

        if (vcoreid != kvcoreid) {
                printf("%s: VC %d thought it was VC %d\n", str, kvcoreid,
                       vcoreid);
                return FALSE;
        }
        return TRUE;
}

/* Helper.  Yields the vcore, or restarts it from scratch. */
void __attribute__((noreturn)) vcore_yield_or_restart(void)
{
        struct preempt_data *vcpd = vcpd_of(vcore_id());

        vcore_yield(FALSE);
        /* If vcore_yield returns, we have an event.  Just restart vcore
         * context. */
        set_stack_pointer((void*)vcpd->vcore_stack);
        vcore_entry();
}

void vcore_wake(uint32_t vcoreid, bool force_ipi)
{
        struct preempt_data *vcpd = vcpd_of(vcoreid);

        vcpd->notif_pending = true;
        if (vcoreid == vcore_id())
                return;
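        /* The notif_pending write above may be enough to wake a target that is
         * monitoring it (e.g. via mwait); otherwise, or if the caller insists,
         * poke it with a notification. */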
        if (force_ipi || !arch_has_mwait())
                sys_self_notify(vcoreid, EV_NONE, 0, true);
}