akaros/kern/src/schedule.c
/* Copyright (c) 2009, 2012 The Regents of the University of California
 * Barret Rhoden <brho@cs.berkeley.edu>
 * See LICENSE for details.
 *
 * Scheduling and dispatching. */

#include <schedule.h>
#include <corerequest.h>
#include <process.h>
#include <monitor.h>
#include <stdio.h>
#include <assert.h>
#include <atomic.h>
#include <smp.h>
#include <manager.h>
#include <alarm.h>
#include <sys/queue.h>
#include <arsc_server.h>
#include <hashtable.h>

/* Process Lists.  'unrunnable' is a holding list for SCPs that are running or
 * waiting or otherwise not considered for sched decisions. */
struct proc_list unrunnable_scps = TAILQ_HEAD_INITIALIZER(unrunnable_scps);
struct proc_list runnable_scps = TAILQ_HEAD_INITIALIZER(runnable_scps);
/* mcp lists.  we actually could get by with one list and a TAILQ_CONCAT, but
 * I'm expecting to want the flexibility of the pointers later. */
struct proc_list all_mcps_1 = TAILQ_HEAD_INITIALIZER(all_mcps_1);
struct proc_list all_mcps_2 = TAILQ_HEAD_INITIALIZER(all_mcps_2);
struct proc_list *primary_mcps = &all_mcps_1;
struct proc_list *secondary_mcps = &all_mcps_2;
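/* __run_mcp_ksched() drains primary_mcps onto secondary_mcps each pass and
 * then swaps these two pointers, so 'primary' is always the list to scan on
 * the next invocation. */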

/* Helpers, defined below */
static void __core_request(struct proc *p, uint32_t amt_needed);
static void add_to_list(struct proc *p, struct proc_list *list);
static void remove_from_list(struct proc *p, struct proc_list *list);
static void switch_lists(struct proc *p, struct proc_list *old,
                         struct proc_list *new);
static void __run_mcp_ksched(void *arg);        /* don't call directly */
static uint32_t get_cores_needed(struct proc *p);

/* Locks / sync tools */

/* poke-style ksched - ensures the MCP ksched only runs once at a time.  since
 * only one mcp ksched runs at a time, while this is set, the ksched knows no
 * cores are being allocated by other code (though they could be deallocated,
 * due to yield).
 *
 * The main value of this sync method is that it makes the 'make sure the
 * ksched runs only once at a time and that it actually runs' invariant/desire
 * wait-free, so that it can be called anywhere (deep event code, etc).
 *
 * As the ksched gets smarter, we'll probably embed this poker in a bigger
 * struct that can handle the posting of different types of work. */
struct poke_tracker ksched_poker = POKE_INITIALIZER(__run_mcp_ksched);
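
/* A minimal sketch of the idea behind the poke-style sync above, using C11
 * atomics.  This is NOT the real poke()/poke_tracker implementation (see
 * kern/src/atomic.c for that); fake_tracker/fake_poke are made-up names, and
 * the block sits under #if 0 so it is never compiled.  It shows the property
 * the comment relies on: any number of callers can post "run this again",
 * only one runner executes the function at a time, and a post is never lost,
 * all without any caller waiting on another. */
#if 0
#include <stdatomic.h>
#include <stdbool.h>

struct fake_tracker {
        atomic_bool need_to_run;        /* a poke arrived since the last run */
        atomic_bool run_in_progress;    /* someone currently owns the func */
        void (*func)(void *arg);
};

static void fake_poke(struct fake_tracker *t, void *arg)
{
        /* Post our request first, so any current runner is obliged to see it
         * when it rechecks after finishing. */
        atomic_store(&t->need_to_run, true);
        while (atomic_load(&t->need_to_run)) {
                bool expected = false;

                /* Only one caller at a time wins the right to run func. */
                if (!atomic_compare_exchange_strong(&t->run_in_progress,
                                                    &expected, true))
                        return; /* the current runner will honor our post */
                atomic_store(&t->need_to_run, false);
                t->func(arg);
                atomic_store(&t->run_in_progress, false);
                /* Loop: a poke posted while we ran must trigger another run. */
        }
}
#endif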

/* this 'big ksched lock' protects a bunch of things, which I may make
 * fine-grained: */
/* - protects the integrity of proc tailqs/structures, as well as the membership
 * of a proc on those lists.  proc lifetime within the ksched but outside this
 * lock is protected by the proc kref. */
//spinlock_t proclist_lock = SPINLOCK_INITIALIZER; /* subsumed by bksl */
/* - protects the provisioning assignment, and the integrity of all prov
 * lists (the lists of each proc). */
//spinlock_t prov_lock = SPINLOCK_INITIALIZER;
/* - protects allocation structures */
//spinlock_t alloc_lock = SPINLOCK_INITIALIZER;
spinlock_t sched_lock = SPINLOCK_INITIALIZER;

/* Alarm struct, for our example 'timer tick' */
struct alarm_waiter ksched_waiter;

#define TIMER_TICK_USEC 10000   /* 10msec */

/* Helper: Sets up a timer tick on the calling core to go off 10 msec from now.
 * This assumes the calling core is an LL core, etc. */
static void set_ksched_alarm(void)
{
        set_awaiter_rel(&ksched_waiter, TIMER_TICK_USEC);
        set_alarm(&per_cpu_info[core_id()].tchain, &ksched_waiter);
}

/* RKM alarm, to run the scheduler tick (not in interrupt context) and reset the
 * alarm.  Note that interrupts will be disabled, but this is not the same as
 * interrupt context.  We're a routine kmsg, which means the core is in a
 * quiescent state. */
static void __ksched_tick(struct alarm_waiter *waiter)
{
        /* TODO: imagine doing some accounting here */
        run_scheduler();
        /* Set our alarm to go off, relative to now.  This means we might lag a
         * bit, and our ticks won't match wall clock time.  But if we set it
         * relative to when the previous tick was *supposed* to fire, we'd
         * punish the next process whenever the kernel ran long for the
         * previous one.  Ultimately, if we really care, we should account for
         * the actual time used. */
        set_awaiter_rel(&ksched_waiter, TIMER_TICK_USEC);
        set_alarm(&per_cpu_info[core_id()].tchain, &ksched_waiter);
}

void schedule_init(void)
{
        spin_lock(&sched_lock);
        assert(!core_id());             /* want the alarm on core0 for now */
        init_awaiter(&ksched_waiter, __ksched_tick);
        set_ksched_alarm();
        corealloc_init();
        spin_unlock(&sched_lock);

#ifdef CONFIG_ARSC_SERVER
        /* Most likely we'll have a syscall and a process that dedicates itself
         * to running this.  Or if it's a kthread, we don't need a core. */
        #error "Find a way to get a core.  Probably a syscall to run a server."
        int arsc_coreid = get_any_idle_core();
        assert(arsc_coreid >= 0);
        send_kernel_message(arsc_coreid, arsc_server, 0, 0, 0, KMSG_ROUTINE);
        printk("Using core %d for the ARSC server\n", arsc_coreid);
#endif /* CONFIG_ARSC_SERVER */
}

/* Adds p to the tail of the list, so repeated remove/add round-robins procs
 * on whatever list they are on. */
static void add_to_list(struct proc *p, struct proc_list *new)
{
        assert(!(p->ksched_data.cur_list));
        TAILQ_INSERT_TAIL(new, p, ksched_data.proc_link);
        p->ksched_data.cur_list = new;
}

static void remove_from_list(struct proc *p, struct proc_list *old)
{
        assert(p->ksched_data.cur_list == old);
        TAILQ_REMOVE(old, p, ksched_data.proc_link);
        p->ksched_data.cur_list = 0;
}

static void switch_lists(struct proc *p, struct proc_list *old,
                         struct proc_list *new)
{
        remove_from_list(p, old);
        add_to_list(p, new);
}

/* Removes from whatever list p is on */
static void remove_from_any_list(struct proc *p)
{
        if (p->ksched_data.cur_list) {
                TAILQ_REMOVE(p->ksched_data.cur_list, p, ksched_data.proc_link);
                p->ksched_data.cur_list = 0;
        }
}
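
/* Note: all of the list helpers above assume the caller holds sched_lock (the
 * 'big ksched lock'), which is what protects list integrity and membership. */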

/************** Process Management Callbacks **************/
/* a couple notes:
 * - the proc lock is NOT held for any of these calls.  currently, there is no
 *   lock ordering between the sched lock and the proc lock.  since the proc
 *   code doesn't know what we do, it doesn't hold its lock when calling our
 *   CBs.
 * - since the proc lock isn't held, the proc could be dying, which means we
 *   will receive a __sched_proc_destroy() either before or after some of these
 *   other CBs.  the CBs related to list management need to check and abort if
 *   DYING */
void __sched_proc_register(struct proc *p)
{
        assert(!proc_is_dying(p));
        /* one ref for the proc's existence, cradle-to-grave */
        proc_incref(p, 1); /* need at least this OR the 'one for existing' */
        spin_lock(&sched_lock);
        corealloc_proc_init(p);
        add_to_list(p, &unrunnable_scps);
        spin_unlock(&sched_lock);
}

/* Sched callback for an SCP transitioning to an MCP.  Aborts quietly if the
 * proc is already dying. */
void __sched_proc_change_to_m(struct proc *p)
{
        spin_lock(&sched_lock);
        /* Need to make sure they aren't dying.  if so, we already dealt with
         * their list membership, etc (or soon will).  taking advantage of the
         * 'immutable state' of dying (so long as refs are held). */
        if (proc_is_dying(p)) {
                spin_unlock(&sched_lock);
                return;
        }
        /* Catch user bugs */
        if (!p->procdata->res_req[RES_CORES].amt_wanted) {
                printk("[kernel] process needs to specify amt_wanted\n");
                p->procdata->res_req[RES_CORES].amt_wanted = 1;
        }
        /* For now, this should only ever be called on an unrunnable.  It's
         * probably a bug, at this stage in development, to do o/w. */
        remove_from_list(p, &unrunnable_scps);
        //remove_from_any_list(p);      /* ^^ instead of this */
        add_to_list(p, primary_mcps);
        spin_unlock(&sched_lock);
        //poke_ksched(p, RES_CORES);
}

/* Sched callback called when the proc dies.  pc_arr holds the cores the proc
 * had, if any, and nr_cores tells us how many are in the array.
 *
 * An external, edible ref is passed in.  when we return and they decref,
 * __proc_free will be called (when the last one is done). */
void __sched_proc_destroy(struct proc *p, uint32_t *pc_arr, uint32_t nr_cores)
{
        spin_lock(&sched_lock);
        /* Unprovision any cores.  Note this is different than
         * track_core_dealloc.  The latter does bookkeeping when an allocation
         * changes.  This is a bulk *provisioning* change. */
        __unprovision_all_cores(p);
        /* Remove from whatever list we are on (if any - might not be on one if
         * it was in the middle of __run_mcp_ksched) */
        remove_from_any_list(p);
        if (nr_cores)
                __track_core_dealloc_bulk(p, pc_arr, nr_cores);
        spin_unlock(&sched_lock);
        /* Drop the cradle-to-the-grave reference, jet-li */
        proc_decref(p);
}

/* ksched callbacks.  p just woke up and is UNLOCKED. */
void __sched_mcp_wakeup(struct proc *p)
{
        spin_lock(&sched_lock);
        if (proc_is_dying(p)) {
                spin_unlock(&sched_lock);
                return;
        }
        /* could try and prioritize p somehow (move it to the front of the
         * list). */
        spin_unlock(&sched_lock);
        /* note they could be dying at this point too. */
        poke(&ksched_poker, p);
}

/* ksched callbacks.  p just woke up and is UNLOCKED. */
void __sched_scp_wakeup(struct proc *p)
{
        spin_lock(&sched_lock);
        if (proc_is_dying(p)) {
                spin_unlock(&sched_lock);
                return;
        }
        /* might not be on a list if it is new.  o/w, it should be unrunnable */
        remove_from_any_list(p);
        add_to_list(p, &runnable_scps);
        spin_unlock(&sched_lock);
        /* we could be on a CG core, and all the mgmt cores could be halted.  if
         * we don't tell one of them about the new proc, they will sleep until
         * the timer tick goes off. */
        if (!management_core()) {
                /* TODO: pick a better core and only send if halted.
                 *
                 * ideally, we'd know if a specific mgmt core is sleeping and
                 * wake it up.  o/w, we could interrupt an already-running mgmt
                 * core that won't get to our new proc anytime soon.  also, by
                 * poking core 0, a different mgmt core could remain idle (and
                 * this process would sleep) until its tick goes off */
                send_ipi(0, I_POKE_CORE);
        }
}

/* Callback to return a core to the ksched, which tracks it as idle and
 * deallocated from p.  The proclock is held (__core_req depends on that).
 *
 * This also is a trigger, telling us we have more cores.  We could/should make
 * a scheduling decision (or at least plan to). */
void __sched_put_idle_core(struct proc *p, uint32_t coreid)
{
        spin_lock(&sched_lock);
        __track_core_dealloc(p, coreid);
        spin_unlock(&sched_lock);
}

/* Callback, bulk interface for put_idle. The proclock is held for this. */
void __sched_put_idle_cores(struct proc *p, uint32_t *pc_arr, uint32_t num)
{
        spin_lock(&sched_lock);
        __track_core_dealloc_bulk(p, pc_arr, num);
        spin_unlock(&sched_lock);
        /* could trigger a sched decision here */
}

/* mgmt/LL cores should call this to schedule the calling core and give it to an
 * SCP.  will also prune the dead SCPs from the list.  hold the lock before
 * calling.  returns TRUE if it scheduled a proc. */
static bool __schedule_scp(void)
{
        // TODO: sort out lock ordering (proc_run_s also locks)
        struct proc *p;
        uint32_t pcoreid = core_id();
        struct per_cpu_info *pcpui = &per_cpu_info[pcoreid];

        /* if there are any runnables, run them here and put any currently
         * running SCP on the tail of the runnable queue. */
        if ((p = TAILQ_FIRST(&runnable_scps))) {
                /* someone is currently running, dequeue them */
                if (pcpui->owning_proc) {
                        spin_lock(&pcpui->owning_proc->proc_lock);
                        /* process might be dying, with a KMSG to clean it up
                         * waiting on this core.  can't do much, so we'll
                         * attempt to restart */
                        if (proc_is_dying(pcpui->owning_proc)) {
                                run_as_rkm(run_scheduler);
                                spin_unlock(&pcpui->owning_proc->proc_lock);
                                return FALSE;
                        }
                        printd("Descheduled %d in favor of %d\n",
                               pcpui->owning_proc->pid, p->pid);
                        __proc_set_state(pcpui->owning_proc, PROC_RUNNABLE_S);
                        /* Saving FP state aggressively.  Odds are, the SCP was
                         * hit by an IRQ and has a HW ctx, in which case we must
                         * save. */
                        __proc_save_fpu_s(pcpui->owning_proc);
                        __proc_save_context_s(pcpui->owning_proc);
                        vcore_account_offline(pcpui->owning_proc, 0);
                        __seq_start_write(&p->procinfo->coremap_seqctr);
                        __unmap_vcore(p, 0);
                        __seq_end_write(&p->procinfo->coremap_seqctr);
                        spin_unlock(&pcpui->owning_proc->proc_lock);
                        /* round-robin the SCPs (inserts at the end of the
                         * queue) */
                        switch_lists(pcpui->owning_proc, &unrunnable_scps,
                                     &runnable_scps);
                        clear_owning_proc(pcoreid);
                        /* Note we abandon core.  It's not strictly necessary.
                         * If we didn't, the TLB would still be loaded with the
                         * old one, til we proc_run_s, and the various paths in
                         * proc_run_s would pick it up.  This way is a bit safer
                         * for future changes, but has an extra (empty) TLB
                         * flush.  */
                        abandon_core();
                }
                /* Run the new proc */
                switch_lists(p, &runnable_scps, &unrunnable_scps);
                printd("PID of the SCP i'm running: %d\n", p->pid);
                proc_run_s(p);  /* gives it core we're running on */
                return TRUE;
        }
        return FALSE;
}

/* Returns how many new cores p needs.  This doesn't lock the proc, so your
 * answer might be stale. */
static uint32_t get_cores_needed(struct proc *p)
{
        uint32_t amt_wanted, amt_granted;

        amt_wanted = p->procdata->res_req[RES_CORES].amt_wanted;
        /* Help them out - if they ask for something impossible, give them 1 so
         * they can make some progress. (this is racy, and unnecessary). */
        if (amt_wanted > p->procinfo->max_vcores) {
                printk("[kernel] proc %d wanted more than max, wanted %d\n",
                       p->pid, amt_wanted);
                p->procdata->res_req[RES_CORES].amt_wanted = 1;
                amt_wanted = 1;
        }
        /* There are a few cases where amt_wanted is 0, but they are still
         * RUNNABLE (involving yields, events, and preemptions).  In these
         * cases, give them at least 1, so they can make progress and yield
         * properly.  If they are not WAITING, they did not yield and may have
         * missed a message. */
        if (!amt_wanted) {
                /* could ++, but there could be a race and we don't want to give
                 * them more than they ever asked for (in case they haven't
                 * prepped) */
                p->procdata->res_req[RES_CORES].amt_wanted = 1;
                amt_wanted = 1;
        }
        /* amt_granted is racy - they could be *yielding*, but currently they
         * can't be getting any new cores if the caller is in the mcp_ksched.
         * this is okay - we won't accidentally give them more cores than they
         * *ever* wanted (which could crash them), but our answer might be a
         * little stale. */
        amt_granted = p->procinfo->res_grant[RES_CORES];
        /* Do not do an assert like this: it could fail (yield in progress): */
        //assert(amt_granted == p->procinfo->num_vcores);
        if (amt_wanted <= amt_granted)
                return 0;
        return amt_wanted - amt_granted;
}
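
/* Example: if an MCP has amt_wanted = 5 and res_grant[RES_CORES] = 3, the
 * function above returns 2.  If amt_wanted has dropped to 2 while 3 cores are
 * still granted, it returns 0 - the ksched never revokes cores here just
 * because wanted fell below granted. */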

/* Actual work of the MCP kscheduler.  if we were called by poke_ksched, *arg
 * might be the process who wanted special service.  this would be the case if
 * we weren't already running the ksched.  It's a rough way to "post work", and
 * it's only an optimization. */
static void __run_mcp_ksched(void *arg)
{
        struct proc *p, *temp;
        uint32_t amt_needed;
        struct proc_list *temp_mcp_list;

        /* locking to protect the MCP lists' integrity and membership */
        spin_lock(&sched_lock);
        /* 2-pass scheme: check each proc on the primary list (FCFS).  if they
         * need nothing, put them on the secondary list.  if they need
         * something, rip them off the list, service them, and if they are still
         * not dying, put them on the secondary list.  We cull the entire
         * primary list, so that when we start from the beginning each time, we
         * aren't repeatedly checking procs we looked at on previous waves.
         * (There is a stripped-down sketch of this list-swap pattern after this
         * function.)
         *
         * TODO: we could modify this such that procs that we failed to service
         * move to yet another list or something.  We can also move the WAITINGs
         * to another list and have wakeup move them back, etc. */
        while (!TAILQ_EMPTY(primary_mcps)) {
                TAILQ_FOREACH_SAFE(p, primary_mcps, ksched_data.proc_link, temp)
                {
                        /* unlocked peek at the state */
                        if (p->state == PROC_WAITING) {
                                switch_lists(p, primary_mcps, secondary_mcps);
                                continue;
                        }
                        amt_needed = get_cores_needed(p);
                        if (!amt_needed) {
                                switch_lists(p, primary_mcps, secondary_mcps);
                                continue;
                        }
                        /* o/w, we want to give cores to this proc */
                        remove_from_list(p, primary_mcps);
                        /* now it won't die, but it could get removed from lists
                         * and have its stuff unprov'd when we unlock */
                        proc_incref(p, 1);
                        /* GIANT WARNING: __core_req will unlock the sched lock
                         * for a bit.  It will return with it locked still.  We
                         * could unlock before we pass in, but they will relock
                         * right away. */
                        /* for mouse-eyed viewers */
                        // notionally_unlock(&ksched_lock);
                        __core_request(p, amt_needed);
                        // notionally_lock(&ksched_lock);
                        /* Peeking at the state is okay, since we hold a ref.
                         * Once it is DYING, it'll remain DYING until we decref.
                         * And if there is a concurrent death, that will spin on
                         * the ksched lock (which we hold, and which protects
                         * the proc lists). */
                        if (!proc_is_dying(p))
                                add_to_list(p, secondary_mcps);
                        proc_decref(p); /* fyi, this may trigger __proc_free */
                        /* need to break: the proc lists may have changed when
                         * we unlocked in core_req in ways that the FOREACH_SAFE
                         * can't handle. */
                        break;
                }
        }
        /* at this point, we moved all the procs over to the secondary list, and
         * attempted to service the ones that wanted something.  now just swap
         * the lists for the next invocation of the ksched. */
        temp_mcp_list = primary_mcps;
        primary_mcps = secondary_mcps;
        secondary_mcps = temp_mcp_list;
        spin_unlock(&sched_lock);
}
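
/* Stripped-down sketch of the two-list scan-and-swap pattern used by
 * __run_mcp_ksched(), with the locking, refcounting, and core-request details
 * removed so only the control flow is visible.  needs_service() and service()
 * are made-up stand-ins for the WAITING / get_cores_needed() checks and for
 * __core_request(); the block sits under #if 0 and is never compiled. */
#if 0
static void two_pass_sketch(void)
{
        struct proc *p;
        struct proc_list *swap_tmp;

        while (!TAILQ_EMPTY(primary_mcps)) {
                p = TAILQ_FIRST(primary_mcps);
                if (!needs_service(p)) {
                        /* Done with p for this pass: park it on the other list
                         * so we never rescan it this round. */
                        switch_lists(p, primary_mcps, secondary_mcps);
                        continue;
                }
                /* Pull p off, service it, then park it.  The real ksched
                 * restarts its scan here because servicing drops the lock. */
                remove_from_list(p, primary_mcps);
                service(p);
                add_to_list(p, secondary_mcps);
        }
        /* Everything now sits on the secondary list; swap the pointers so the
         * next ksched run scans them again as 'primary'. */
        swap_tmp = primary_mcps;
        primary_mcps = secondary_mcps;
        secondary_mcps = swap_tmp;
}
#endif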

/* Something has changed, and for whatever reason the scheduler should
 * reevaluate things.
 *
 * Don't call this if you are processing a syscall or otherwise care about your
 * kthread variables, cur_proc/owning_proc, etc.
 *
 * Don't call this from interrupt context (grabs proclocks). */
void run_scheduler(void)
{
        /* MCP scheduling: post work, then poke.  for now, I just want the func
         * to run again, so merely a poke is sufficient. */
        poke(&ksched_poker, 0);
        if (management_core()) {
                spin_lock(&sched_lock);
                __schedule_scp();
                spin_unlock(&sched_lock);
        }
}

/* A process is asking the ksched to look at its resource desires.  The
 * scheduler is free to ignore this, for its own reasons, so long as it
 * eventually gets around to looking at resource desires. */
void poke_ksched(struct proc *p, unsigned int res_type)
{
        /* ignoring res_type for now.  could post that if we wanted (would need
         * some other structs/flags) */
        if (!__proc_is_mcp(p))
                return;
        poke(&ksched_poker, p);
}

/* The calling cpu/core has nothing to do and plans to idle/halt.  This is an
 * opportunity to pick the nature of that halting (low power state, etc), or
 * provide some other work (_Ss on LL cores).  Note that interrupts are
 * disabled, and if you return, the core will cpu_halt(). */
void cpu_bored(void)
{
        bool new_proc = FALSE;
        if (!management_core())
                return;
        spin_lock(&sched_lock);
        new_proc = __schedule_scp();
        spin_unlock(&sched_lock);
        /* if we just scheduled a proc, we need to manually restart it, instead
         * of returning.  if we return, the core will halt. */
        if (new_proc) {
                proc_restartcore();
                assert(0);
        }
        /* Could drop into the monitor if there are no processes at all.  For
         * now, the 'call of the giraffe' suffices. */
}

/* Available resources changed (plus or minus).  Some parts of the kernel may
 * call this if a particular resource that is 'quantity-based' changes.  Things
 * like available RAM to processes, bandwidth, etc.  Cores would probably be
 * inappropriate, since we need to know which specific core is now free. */
void avail_res_changed(int res_type, long change)
{
        printk("[kernel] ksched doesn't track any resources yet!\n");
}

/* This deals with a request for more cores.  The amt of new cores needed is
 * passed in.  The ksched lock is held, but we are free to unlock if we want
 * (and we must, if calling out of the ksched to anything high-level).
 *
 * Side note: if we want to warn, then we can't deal with this proc's prov'd
 * cores until the alarm goes off.  we'd need to put all alarmed cores on a
 * list and wait til the alarm goes off to do the full preempt.  and when
 * those cores come in voluntarily, we'd need to know to give them to this
 * proc. */
static void __core_request(struct proc *p, uint32_t amt_needed)
{
        uint32_t nr_to_grant = 0;
        uint32_t corelist[num_cores];
        uint32_t pcoreid;
        struct proc *proc_to_preempt;
        bool success;

        /* we come in holding the ksched lock, and we hold it here to protect
         * allocations and provisioning. */
        /* get all available cores from their prov_not_alloc list.  the list
         * might change when we unlock (new cores added to it, or the entire
         * list emptied, but no core allocations will happen (we hold the
         * poke)). */
        while (nr_to_grant != amt_needed) {
                /* Find the next best core to allocate to p. It may be a core
                 * provisioned to p, and it might not be. */
                pcoreid = __find_best_core_to_alloc(p);
                /* If no core is returned, we know that there are no more cores
                 * to give out, so we exit the loop. */
                if (pcoreid == -1)
                        break;
                /* If the pcore chosen currently has a proc allocated to it, we
                 * know it must be provisioned to p, but not allocated to it. We
                 * need to try to preempt. After this block, the core will be
                 * track_dealloc'd and on the idle list (regardless of whether
                 * we had to preempt or not) */
                if (get_alloc_proc(pcoreid)) {
                        proc_to_preempt = get_alloc_proc(pcoreid);
                        /* would break both preemption and maybe the later
                         * decref */
                        assert(proc_to_preempt != p);
                        /* need to keep a valid, external ref when we unlock */
                        proc_incref(proc_to_preempt, 1);
                        spin_unlock(&sched_lock);
                        /* sending no warning time for now - just an immediate
                         * preempt. */
                        success = proc_preempt_core(proc_to_preempt, pcoreid,
                                                    0);
                        /* reacquire locks to protect provisioning and idle
                         * lists */
                        spin_lock(&sched_lock);
                        if (success) {
                                /* we preempted it before the proc could yield
                                 * or die.  alloc_proc should not have changed
                                 * (it'll change in death and idle CBs).  the
                                 * core is not on the idle core list.  (if we
                                 * ever have proc alloc lists, it'll still be on
                                 * the old proc's list). */
                                assert(get_alloc_proc(pcoreid));
                                /* regardless of whether or not it is still prov
                                 * to p, we need to note its dealloc.  we are
                                 * doing some excessive checking of p ==
                                 * prov_proc, but using this helper is a lot
                                 * clearer. */
                                __track_core_dealloc(proc_to_preempt, pcoreid);
                        } else {
                                /* the preempt failed, which should only happen
                                 * if the pcore was unmapped (could be dying,
                                 * could be yielding, but NOT preempted).
                                 * whoever unmapped it also triggered (or will
                                 * soon trigger) a track_core_dealloc and put it
                                 * on the idle list.  Our signal for this is
                                 * get_alloc_proc() being 0. We need to spin and
                                 * let whoever is trying to free the core grab
                                 * the ksched lock.  We could use an
                                 * 'ignore_next_idle' flag per sched_pcore, but
                                 * it's not critical anymore.
                                 *
                                 * Note, we're relying on us being the only
                                 * preemptor - if the core was unmapped by
                                 * *another* preemptor, there would be no way of
                                 * knowing the core was made idle *yet* (the
                                 * success branch in another thread).  likewise,
                                 * if there were another allocator, the pcore
                                 * could have been put on the idle list and then
                                 * quickly removed/allocated. */
                                cmb();
                                while (get_alloc_proc(pcoreid)) {
                                        /* this loop should be very rare */
                                        spin_unlock(&sched_lock);
                                        udelay(1);
                                        spin_lock(&sched_lock);
                                }
                        }
                        /* no longer need to keep p_to_pre alive */
                        proc_decref(proc_to_preempt);
                        /* might not be prov to p anymore (rare race).  pcoreid
                         * is idle - we might get it later, or maybe we'll give
                         * it to its rightful proc */
                        if (get_prov_proc(pcoreid) != p)
                                continue;
                }
                /* At this point, the pcore is idle, regardless of how we got
                 * here (successful preempt, failed preempt, or it was idle in
                 * the first place).  We also know the core is still provisioned
                 * to us.  Lets add it to the corelist for p (so we can give it
                 * to p in bulk later), and track its allocation with p (so our
                 * internal data structures stay in sync). We rely on the fact
                 * that we are the only allocator (pcoreid is still idle,
                 * despite (potentially) unlocking during the preempt attempt
                 * above).  It is guaranteed to be track_dealloc'd() (regardless
                 * of how we got here). */
                corelist[nr_to_grant] = pcoreid;
                nr_to_grant++;
                __track_core_alloc(p, pcoreid);
        }
        /* Now, actually give them out */
        if (nr_to_grant) {
                /* Need to unlock before calling out to proc code.  We are
                 * somewhat relying on being the only one allocating 'thread'
                 * here, since another allocator could have seen these cores (if
                 * they are prov to some proc) and could be trying to give them
                 * out (and assuming they are already on the idle list). */
                spin_unlock(&sched_lock);
                /* give them the cores.  this will start up the extras if
                 * RUNNING_M. */
                spin_lock(&p->proc_lock);
                /* if they fail, it is because they are WAITING or DYING.  we
                 * could give the cores to another proc or whatever.  for the
                 * current type of ksched, we'll just put them back on the pile
                 * and return.  Note, the ksched could check the states after
                 * locking, but it isn't necessary: just need to check at some
                 * point in the ksched loop. */
                if (__proc_give_cores(p, corelist, nr_to_grant)) {
                        spin_unlock(&p->proc_lock);
                        /* we failed, put the cores and track their dealloc.
                         * lock is protecting those structures. */
                        spin_lock(&sched_lock);
                        __track_core_dealloc_bulk(p, corelist, nr_to_grant);
                } else {
                        /* at some point after giving cores, call proc_run_m()
                         * (harmless on RUNNING_Ms).  You can give small groups
                         * of cores, then run them (which is more efficient than
                         * interleaving runs with the gives for bulk preempted
                         * processes). */
                        __proc_run_m(p);
                        spin_unlock(&p->proc_lock);
                        /* main mcp_ksched wants this held (it came to
                         * __core_req held) */
                        spin_lock(&sched_lock);
                }
        }
        /* note the ksched lock is still held */
}

/* Provision a core to a process. This function wraps the primary logic
 * implemented in __provision_core, with a lock, error checking, etc. */
int provision_core(struct proc *p, uint32_t pcoreid)
{
        /* Make sure we aren't asking for something that doesn't exist (bounds
         * check on the pcore array) */
        if (!(pcoreid < num_cores)) {
                set_errno(ENXIO);
                return -1;
        }
        /* Don't allow the provisioning of LL cores */
        if (is_ll_core(pcoreid)) {
                set_errno(EBUSY);
                return -1;
        }
        /* Note the sched lock protects the tailqs for all procs in this code.
         * If we need a finer grained sched lock, this is one place where we
         * could have a different lock */
        spin_lock(&sched_lock);
        __provision_core(p, pcoreid);
        spin_unlock(&sched_lock);
        return 0;
}
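
/* An illustrative usage sketch: how a caller (e.g. a syscall handler or a
 * kernel test hook) might pin a handful of specific pcores to a proc via
 * provision_core() and report failures.  provision_a_few() is a made-up name
 * for illustration only; the block sits under #if 0 and is never compiled. */
#if 0
static int provision_a_few(struct proc *p, const uint32_t *pcores, int count)
{
        for (int i = 0; i < count; i++) {
                if (provision_core(p, pcores[i])) {
                        /* provision_core() already set errno (ENXIO for a bad
                         * pcore, EBUSY for an LL core); bail on the first
                         * failure. */
                        printk("[kernel] can't provision pcore %d to PID %d\n",
                               pcores[i], p->pid);
                        return -1;
                }
        }
        /* Provisioning only records intent: the cores are actually handed out
         * later by the MCP ksched, which favors cores provisioned to p. */
        return 0;
}
#endif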

/************** Debugging **************/
void sched_diag(void)
{
        struct proc *p;

        spin_lock(&sched_lock);
        TAILQ_FOREACH(p, &runnable_scps, ksched_data.proc_link)
                printk("Runnable _S PID: %d\n", p->pid);
        TAILQ_FOREACH(p, &unrunnable_scps, ksched_data.proc_link)
                printk("Unrunnable _S PID: %d\n", p->pid);
        TAILQ_FOREACH(p, primary_mcps, ksched_data.proc_link)
                printk("Primary MCP PID: %d\n", p->pid);
        TAILQ_FOREACH(p, secondary_mcps, ksched_data.proc_link)
                printk("Secondary MCP PID: %d\n", p->pid);
        spin_unlock(&sched_lock);
        return;
}

void print_resources(struct proc *p)
{
        printk("--------------------\n");
        printk("PID: %d\n", p->pid);
        printk("--------------------\n");
        for (int i = 0; i < MAX_NUM_RESOURCES; i++)
                printk("Res type: %02d, amt wanted: %08d, amt granted: %08d\n",
                       i, p->procdata->res_req[i].amt_wanted,
                       p->procinfo->res_grant[i]);
}

void print_all_resources(void)
{
        /* Hash helper */
        void __print_resources(void *item, void *opaque)
        {
                print_resources((struct proc*)item);
        }
        spin_lock(&pid_hash_lock);
        hash_for_each(pid_hash, __print_resources, NULL);
        spin_unlock(&pid_hash_lock);
}

void next_core_to_alloc(uint32_t pcoreid)
{
        spin_lock(&sched_lock);
        __next_core_to_alloc(pcoreid);
        spin_unlock(&sched_lock);
}

void sort_idle_cores(void)
{
        spin_lock(&sched_lock);
        __sort_idle_cores();
        spin_unlock(&sched_lock);
}