Finalize arg, env, aux migration (1/3) (CXX) (BB)
[akaros.git] / kern / include / smp.h
index 5ca27b9..fd62d33 100644 (file)
 #include <alarm.h>
 #include <trace.h>
 
-#ifdef __SHARC__
-typedef sharC_env_t;
-#endif
+#define CPU_STATE_IRQ                  0
+#define CPU_STATE_KERNEL               1
+#define CPU_STATE_USER                 2
+#define CPU_STATE_IDLE                 3
+#define NR_CPU_STATES                  4
+
+static char *cpu_state_names[NR_CPU_STATES] =
+{
+       "irq",
+       "kern",
+       "user",
+       "idle",
+};
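(The names array pairs with the state_ticks[] counters added to struct per_cpu_info below. A hypothetical dump routine — print_cpu_states() is not in this patch, but it uses only names from this header plus tsc2usec(), which appears further down — might look like:

        static void print_cpu_states(int coreid)
        {
                struct per_cpu_info *pcpui = &per_cpu_info[coreid];

                for (int i = 0; i < NR_CPU_STATES; i++)
                        printk("Core %d %s: %llu usec\n", coreid,
                               cpu_state_names[i],
                               tsc2usec(pcpui->state_ticks[i]));
        }
)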
 
 struct per_cpu_info {
-#ifdef CONFIG_X86_64
-       uintptr_t stacktop;
+#ifdef CONFIG_X86
+       uintptr_t stacktop;                     /* must be first */
+       int coreid;                                     /* must be second */
+       struct vmcs *vmxarea;
+       pseudodesc_t host_gdt;
+       int vmx_enabled;
+       void *local_vcpu;
 #endif
        spinlock_t lock;
        /* Process management */
@@ -37,18 +52,14 @@ struct per_cpu_info {
        struct user_context actual_ctx; /* storage for cur_ctx */
        uint32_t __ctx_depth;           /* don't access directly.  see trap.h. */
        int __lock_checking_enabled;/* == 1, enables spinlock depth checking */
-       struct syscall *cur_sysc;       /* ptr is into cur_proc's address space */
+       struct kthread *cur_kthread;/* tracks the running kernel context */
        struct kthread *spare;          /* useful when restarting */
        struct timer_chain tchain;      /* for the per-core alarm */
        unsigned int lock_depth;
        struct trace_ring traces;
-
-#ifdef __SHARC__
-       // held spin-locks. this will have to go elsewhere if multiple kernel
-       // threads can share a CPU.
-       // zra: Used by Ivy. Let me know if this should go elsewhere.
-       sharC_env_t sharC_env;
-#endif
+       int cpu_state;
+       uint64_t last_tick_cnt;
+       uint64_t state_ticks[NR_CPU_STATES];
        /* TODO: 64b (not sure if we'll need these at all) */
 #ifdef CONFIG_X86
        taskstate_t *tss;
@@ -56,9 +67,11 @@ struct per_cpu_info {
 #endif
        /* KMSGs */
        spinlock_t immed_amsg_lock;
-       struct kernel_msg_list NTPTV(a0t) NTPTV(a1t) NTPTV(a2t) immed_amsgs;
+       struct kernel_msg_list immed_amsgs;
        spinlock_t routine_amsg_lock;
-       struct kernel_msg_list NTPTV(a0t) NTPTV(a1t) NTPTV(a2t) routine_amsgs;
+       struct kernel_msg_list routine_amsgs;
+       /* profiling -- opaque to all but the profiling code. */
+       void *profiling;
 }__attribute__((aligned(ARCH_CL_SIZE)));
 
 /* Allows the kernel to figure out what process is running on this core.  Can be
@@ -70,10 +83,10 @@ struct per_cpu_info {
  * interrupted contexts via iret/etc.  We never do that for user contexts. */
 #define current_ctx per_cpu_info[core_id()].cur_ctx
 
-typedef struct per_cpu_info NTPTV(t) NTPTV(a0t) NTPTV(a1t) NTPTV(a2t) per_cpu_info_t;
+typedef struct per_cpu_info  per_cpu_info_t;
 
-extern per_cpu_info_t (RO per_cpu_info)[MAX_NUM_CPUS];
-extern volatile uint32_t RO num_cpus;
+extern per_cpu_info_t per_cpu_info[MAX_NUM_CPUS];
+extern volatile uint32_t num_cpus;
 
 /* SMP bootup functions */
 void smp_boot(void);
@@ -81,14 +94,17 @@ void smp_idle(void) __attribute__((noreturn));
 void smp_percpu_init(void); // this must be called by each core individually
 void __arch_pcpu_init(uint32_t coreid);        /* each arch has one of these */
 
+void __set_cpu_state(struct per_cpu_info *pcpui, int state);
+void reset_cpu_state_ticks(int coreid);
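(A minimal sketch of what __set_cpu_state() might do with the cpu_state, last_tick_cnt, and state_ticks fields added above — assuming read_tsc() and callers running with IRQs disabled; the actual accounting lives in smp.c:

        void __set_cpu_state(struct per_cpu_info *pcpui, int state)
        {
                uint64_t now_ticks = read_tsc();

                /* Charge the time since the last transition to the old state,
                 * then start the clock on the new one. */
                pcpui->state_ticks[pcpui->cpu_state] +=
                        now_ticks - pcpui->last_tick_cnt;
                pcpui->cpu_state = state;
                pcpui->last_tick_cnt = now_ticks;
        }
)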
+
 /* SMP utility functions */
-int smp_call_function_self(poly_isr_t handler, TV(t) data,
-                           handler_wrapper_t** wait_wrapper);
-int smp_call_function_all(poly_isr_t handler, TV(t) data,
-                          handler_wrapper_t** wait_wrapper);
-int smp_call_function_single(uint32_t dest, poly_isr_t handler, TV(t) data,
-                             handler_wrapper_t** wait_wrapper);
-int smp_call_wait(handler_wrapper_t*SAFE wrapper);
+int smp_call_function_self(isr_t handler, void *data,
+                           handler_wrapper_t **wait_wrapper);
+int smp_call_function_all(isr_t handler, void *data,
+                          handler_wrapper_t **wait_wrapper);
+int smp_call_function_single(uint32_t dest, isr_t handler, void *data,
+                             handler_wrapper_t **wait_wrapper);
+int smp_call_wait(handler_wrapper_t *wrapper);
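(With the Ivy annotations gone, call sites are plain C. A usage sketch — assuming isr_t is the usual (struct hw_trapframe *, void *) handler type, with a hypothetical counter and wrapper function:

        static atomic_t nr_kicked;

        static void count_core(struct hw_trapframe *hw_tf, void *data)
        {
                atomic_inc((atomic_t*)data);
        }

        void kick_all_cores(void)
        {
                handler_wrapper_t *wrapper;

                smp_call_function_all(count_core, &nr_kicked, &wrapper);
                smp_call_wait(wrapper); /* blocks until all cores have run it */
        }
)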
 
 /* PCPUI Trace Rings: */
 struct pcpu_trace_event {
@@ -102,7 +118,8 @@ struct pcpu_trace_event {
  * pcpui_tr_handlers in smp.c. */
 #define PCPUI_TR_TYPE_NULL             0
 #define PCPUI_TR_TYPE_KMSG             1
-#define PCPUI_NR_TYPES                 2
+#define PCPUI_TR_TYPE_LOCKS            2
+#define PCPUI_NR_TYPES                 3
 
 #ifdef CONFIG_TRACE_KMSGS
 
@@ -121,6 +138,25 @@ struct pcpu_trace_event {
 
 #endif /* CONFIG_TRACE_KMSGS */
 
+
+#ifdef CONFIG_TRACE_LOCKS
+
+# define pcpui_trace_locks(pcpui, lock)                                        \
+{                                                                              \
+       struct pcpu_trace_event *e = get_trace_slot_overwrite(&pcpui->traces);     \
+       if (e) {                                                                   \
+               e->type = PCPUI_TR_TYPE_LOCKS;                                         \
+               e->arg0 = (int)tsc2usec(read_tsc());                                   \
+               e->arg1 = (uintptr_t)lock;                                             \
+       }                                                                          \
+}
+
+#else
+
+# define pcpui_trace_locks(pcpui, lock)
+
+#endif /* CONFIG_TRACE_LOCKS */
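(The intended caller is presumably the lock code itself; a sketch of a debug acquisition path — spin_lock_traced() is hypothetical, while the macro and trace ring come from this header:

        void spin_lock_traced(spinlock_t *lock)
        {
                struct per_cpu_info *pcpui = &per_cpu_info[core_id()];

                pcpui_trace_locks(pcpui, lock); /* no-op without CONFIG_TRACE_LOCKS */
                spin_lock(lock);
        }
)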
+
 /* Run the handlers for all events in a pcpui ring.  Can run on all cores, or
  * just one core.  'type' selects which event type is handled (0 for all). */
 void pcpui_tr_foreach(int coreid, int type);
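(For example, a monitor command could replay just the lock events recorded on core 0 with pcpui_tr_foreach(0, PCPUI_TR_TYPE_LOCKS), or pass 0 as the type to run the handlers for every event type.)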