#pragma once
-/* The core request algorithm maintains an internal array of these: the
- * global pcore map. Note the prov_proc and alloc_proc are weak (internal)
- * references, and should only be used as a ref source while the ksched has a
- * valid kref. */
-struct sched_pcore {
- TAILQ_ENTRY(sched_pcore) prov_next; /* on a proc's prov list */
- TAILQ_ENTRY(sched_pcore) alloc_next; /* on an alloc list (idle)*/
- struct proc *prov_proc; /* who this is prov to */
- struct proc *alloc_proc; /* who this is alloc to */
-};
-TAILQ_HEAD(sched_pcore_tailq, sched_pcore);
-
-struct core_request_data {
- struct sched_pcore_tailq prov_alloc_me; /* prov cores alloced us */
- struct sched_pcore_tailq prov_not_alloc_me; /* maybe alloc to others */
-};
+#include <stdbool.h>
+#include <arch/topology.h>
+#if defined(CONFIG_COREALLOC_FCFS)
+ #include <corealloc_fcfs.h>
+#endif
/* Initialize any data associated with doing core allocation. */
void corealloc_init(void);
-/* Initialize any data associated with provisiong cores to a process. */
-void coreprov_proc_init(struct proc *p);
+/* Initialize any data associated with allocating cores to a process. */
+void corealloc_proc_init(struct proc *p);
/* Find the best core to allocate to a process as dictated by the core
- * allocation algorithm. This code assumes that the scheduler that uses it
- * holds a lock for the duration of the call. */
-struct sched_pcore *__find_best_core_to_alloc(struct proc *p);
+ * allocation algorithm. If no core is found, return -1 (i.e., (uint32_t)-1,
+ * since the return type is unsigned). This code assumes that the scheduler
+ * that uses it holds a lock for the duration of the call. */
+uint32_t __find_best_core_to_alloc(struct proc *p);
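+/* Hypothetical usage sketch (not part of this header): a ksched makes the
+ * call under its own lock and checks for the -1 sentinel. 'sched_lock' is
+ * an assumed name for that lock:
+ *
+ *	spin_lock(&sched_lock);
+ *	uint32_t pcoreid = __find_best_core_to_alloc(p);
+ *	if (pcoreid != (uint32_t)-1)
+ *		__track_core_alloc(p, pcoreid);
+ *	spin_unlock(&sched_lock);
+ */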
/* Track the pcore properly when it is allocated to p. This code assumes that
 * the scheduler that uses it holds a lock for the duration of the call. */
+void __track_core_alloc(struct proc *p, uint32_t pcoreid);
/* Provision a core to proc p. This code assumes that the scheduler that uses
* it holds a lock for the duration of the call. */
-void __provision_core(struct proc *p, struct sched_pcore *spc);
+void __provision_core(struct proc *p, uint32_t pcoreid);
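+/* Illustrative sketch only: a provisioning request (e.g. from a syscall)
+ * might guard against re-provisioning like so, with 'sched_lock' again an
+ * assumed scheduler lock:
+ *
+ *	spin_lock(&sched_lock);
+ *	if (get_prov_proc(pcoreid) != p)
+ *		__provision_core(p, pcoreid);
+ *	spin_unlock(&sched_lock);
+ */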
/* Unprovision all cores from proc p. This code assumes that the scheduler
 * that uses it holds a lock for the duration of the call. */
void __unprovision_all_cores(struct proc *p);
/* Print the processes attached to each provisioned core. */
void print_coreprov_map(void);
-static inline uint32_t spc2pcoreid(struct sched_pcore *spc)
+static inline struct proc *get_alloc_proc(uint32_t pcoreid)
{
extern struct sched_pcore *all_pcores;
- return spc - all_pcores;
+ return all_pcores[pcoreid].alloc_proc;
}
-static inline struct sched_pcore *pcoreid2spc(uint32_t pcoreid)
+static inline struct proc *get_prov_proc(uint32_t pcoreid)
{
extern struct sched_pcore *all_pcores;
- return &all_pcores[pcoreid];
+ return all_pcores[pcoreid].prov_proc;
}
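+/* These accessors replace the old spc-pointer helpers (spc2pcoreid() and
+ * friends): callers pass a pcoreid and never touch struct sched_pcore
+ * directly. An illustrative scan, assuming the ksched lock is held:
+ *
+ *	for (uint32_t i = 0; i < num_cores; i++)
+ *		if (get_alloc_proc(i) == p)
+ *			nr_cores_held++;	(core i belongs to p)
+ */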
-static inline struct proc *get_alloc_proc(struct sched_pcore *c)
+/* TODO: need more thorough CG/LL management. For now, core0 is the only LL
+ * core. This won't play well with the ghetto shit in schedule_init() if you do
+ * anything like 'DEDICATED_MONITOR' or the ARSC server. All that needs an
+ * overhaul. */
+static inline bool is_ll_core(uint32_t pcoreid)
{
- return c->alloc_proc;
+	return pcoreid == 0;
}
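+/* For example, an allocator scanning for a CG core to hand out would skip
+ * the LL cores (illustrative sketch, not this file's algorithm):
+ *
+ *	for (uint32_t i = 0; i < num_cores; i++) {
+ *		if (is_ll_core(i))
+ *			continue;
+ *		if (!get_alloc_proc(i))
+ *			return i;	(i is a free CG core)
+ *	}
+ */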
-static inline struct proc *get_prov_proc(struct sched_pcore *c)
+/* Return the max number of vcores p can have; normally this is the max
+ * number of CG cores that could ever be available to a proc. */
+static inline uint32_t max_vcores(struct proc *p)
{
- return c->prov_proc;
+/* TODO: (CG/LL) */
+#ifdef CONFIG_DISABLE_SMT
+ return num_cores >> 1;
+#else
+ return num_cores - 1; /* reserving core 0 */
+#endif /* CONFIG_DISABLE_SMT */
}
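+/* Checking the math: on a 16-core machine, max_vcores() is 8 with
+ * CONFIG_DISABLE_SMT (half the cores) and 15 without it (all cores minus
+ * the one LL core, core 0). A ksched might clamp a request with something
+ * like the following, where 'amt_wanted' is an assumed variable:
+ *
+ *	amt_granted = MIN(amt_wanted, max_vcores(p));
+ */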