BNX2X: limit queues to 2
[akaros.git] / kern / include / smp.h
index 7b7f765..c31ad34 100644 (file)
 #include <syscall.h>
 #include <alarm.h>
 #include <trace.h>
+#ifdef CONFIG_X86
+#include <arch/vm.h>
+#endif
 
 #ifdef __SHARC__
 typedef sharC_env_t;
 #endif
 
+#define CPU_STATE_IRQ                  0
+#define CPU_STATE_KERNEL               1
+#define CPU_STATE_USER                 2
+#define CPU_STATE_IDLE                 3
+#define NR_CPU_STATES                  4
+
+/* Printable names for the CPU_STATE_* indices above; const-qualified so the
+ * string literals can never be written through (that would be UB). */
+static const char *const cpu_state_names[NR_CPU_STATES] = {
+       "irq",
+       "kern",
+       "user",
+       "idle",
+};
+
 struct per_cpu_info {
-#ifdef CONFIG_X86_64
-       uintptr_t stacktop;
+#ifdef CONFIG_X86
+       uintptr_t stacktop;                     /* must be first */
+       int coreid;                                     /* must be second */
+       /* virtual machines */
+       /* this is all kind of gross, but so it goes. Kmalloc
+        * the vmxarea. It varies in size depending on the architecture.
+        */
+       struct vmcs *vmxarea;
+       struct vmcs *vmcs;
+       pseudodesc_t host_gdt;
+       int vmx_enabled;
+       void *local_vcpu;
 #endif
        spinlock_t lock;
        /* Process management */
@@ -37,14 +64,14 @@ struct per_cpu_info {
        struct user_context actual_ctx; /* storage for cur_ctx */
        uint32_t __ctx_depth;           /* don't access directly.  see trap.h. */
        int __lock_checking_enabled;/* == 1, enables spinlock depth checking */
-       struct syscall *cur_sysc;       /* ptr is into cur_proc's address space */
-       void *cur_errbuf;                       /* ptr to current err stack buffer */
        struct kthread *cur_kthread;/* tracks the running kernel context */
        struct kthread *spare;          /* useful when restarting */
        struct timer_chain tchain;      /* for the per-core alarm */
        unsigned int lock_depth;
        struct trace_ring traces;
-
+       int cpu_state;
+       uint64_t last_tick_cnt;
+       uint64_t state_ticks[NR_CPU_STATES];
 #ifdef __SHARC__
        // held spin-locks. this will have to go elsewhere if multiple kernel
        // threads can share a CPU.
@@ -61,6 +88,8 @@ struct per_cpu_info {
        struct kernel_msg_list NTPTV(a0t) NTPTV(a1t) NTPTV(a2t) immed_amsgs;
        spinlock_t routine_amsg_lock;
        struct kernel_msg_list NTPTV(a0t) NTPTV(a1t) NTPTV(a2t) routine_amsgs;
+       /* profiling -- opaque to all but the profiling code. */
+       void *profiling;
 }__attribute__((aligned(ARCH_CL_SIZE)));
 
 /* Allows the kernel to figure out what process is running on this core.  Can be
@@ -83,14 +112,17 @@ void smp_idle(void) __attribute__((noreturn));
 void smp_percpu_init(void); // this must be called by each core individually
 void __arch_pcpu_init(uint32_t coreid);        /* each arch has one of these */
 
+void __set_cpu_state(struct per_cpu_info *pcpui, int state);
+void reset_cpu_state_ticks(int coreid);
+
 /* SMP utility functions */
-int smp_call_function_self(poly_isr_t handler, TV(t) data,
-                           handler_wrapper_t** wait_wrapper);
-int smp_call_function_all(poly_isr_t handler, TV(t) data,
-                          handler_wrapper_t** wait_wrapper);
-int smp_call_function_single(uint32_t dest, poly_isr_t handler, TV(t) data,
-                             handler_wrapper_t** wait_wrapper);
-int smp_call_wait(handler_wrapper_t*SAFE wrapper);
+int smp_call_function_self(isr_t handler, void *data,
+                           handler_wrapper_t **wait_wrapper);
+int smp_call_function_all(isr_t handler, void *data,
+                          handler_wrapper_t **wait_wrapper);
+int smp_call_function_single(uint32_t dest, isr_t handler, void *data,
+                             handler_wrapper_t **wait_wrapper);
+int smp_call_wait(handler_wrapper_t *wrapper);
 
 /* PCPUI Trace Rings: */
 struct pcpu_trace_event {