/* Copyright (c) 2009, 2012, 2015 The Regents of the University of California
 * Barret Rhoden <brho@cs.berkeley.edu>
 * Valmon Leymarie <leymariv@berkeley.edu>
 * Kevin Klues <klueska@cs.berkeley.edu>
 * See LICENSE for details.
 */
11 /* Provision a core to proc p. This code assumes that the scheduler that uses
12 * it holds a lock for the duration of the call. */
13 void __provision_core(struct proc *p, uint32_t pcoreid)
15 struct sched_pcore *spc = pcoreid2spc(pcoreid);
16 struct sched_pcore_tailq *prov_list;
18 /* If the core is already prov to someone else, take it away. (last write
19 * wins, some other layer or new func can handle permissions). */
21 /* the list the spc is on depends on whether it is alloced to the
23 prov_list = (spc->alloc_proc == spc->prov_proc ?
24 &spc->prov_proc->ksched_data.crd.prov_alloc_me :
25 &spc->prov_proc->ksched_data.crd.prov_not_alloc_me);
26 TAILQ_REMOVE(prov_list, spc, prov_next);
28 /* Now prov it to p. Again, the list it goes on depends on whether it is
29 * alloced to p or not. Callers can also send in 0 to de-provision. */
31 if (spc->alloc_proc == p) {
32 TAILQ_INSERT_TAIL(&p->ksched_data.crd.prov_alloc_me, spc,
35 /* this is be the victim list, which can be sorted so that we pick
36 * the right victim (sort by alloc_proc reverse priority, etc). */
37 TAILQ_INSERT_TAIL(&p->ksched_data.crd.prov_not_alloc_me, spc,
44 /* Unprovisions any pcores for the given list */
45 static void __unprov_pcore_list(struct sched_pcore_tailq *list_head)
47 struct sched_pcore *spc_i;
48 /* We can leave them connected within the tailq, since the scps don't have a
49 * default list (if they aren't on a proc's list, then we don't care about
50 * them), and since the INSERTs don't care what list you were on before
51 * (chummy with the implementation). Pretty sure this is right. If there's
52 * suspected list corruption, be safer here. */
53 TAILQ_FOREACH(spc_i, list_head, prov_next)
55 TAILQ_INIT(list_head);
58 /* Unprovision all cores from proc p. This code assumes that the scheduler
59 * that uses * it holds a lock for the duration of the call. */
60 void __unprovision_all_cores(struct proc *p)
62 __unprov_pcore_list(&p->ksched_data.crd.prov_alloc_me);
63 __unprov_pcore_list(&p->ksched_data.crd.prov_not_alloc_me);
66 /* Print a list of the cores currently provisioned to p. */
67 void print_proc_coreprov(struct proc *p)
69 struct sched_pcore *spc_i;
73 printk("Prov cores alloced to proc %d (%p)\n----------\n", p->pid, p);
74 TAILQ_FOREACH(spc_i, &p->ksched_data.crd.prov_alloc_me, prov_next)
75 printk("Pcore %d\n", spc2pcoreid(spc_i));
76 printk("Prov cores not alloced to proc %d (%p)\n----------\n", p->pid, p);
77 TAILQ_FOREACH(spc_i, &p->ksched_data.crd.prov_not_alloc_me, prov_next)
78 printk("Pcore %d (alloced to %d (%p))\n", spc2pcoreid(spc_i),
79 spc_i->alloc_proc ? spc_i->alloc_proc->pid : 0,
83 /* Print the processes attached to each provisioned core. */
84 void print_coreprov_map(void)
86 struct sched_pcore *spc_i;
87 /* Doing this unlocked, which is dangerous, but won't deadlock */
88 printk("Which cores are provisioned to which procs:\n------------------\n");
89 for (int i = 0; i < num_cores; i++) {
90 spc_i = pcoreid2spc(i);
91 printk("Core %02d, prov: %d(%p) alloc: %d(%p)\n", i,
92 spc_i->prov_proc ? spc_i->prov_proc->pid : 0, spc_i->prov_proc,
93 spc_i->alloc_proc ? spc_i->alloc_proc->pid : 0,