Moved Ivy lock tracking into per cpu info
[akaros.git] / kern / include / smp.h
/*
 * Copyright (c) 2009 The Regents of the University of California
 * Barret Rhoden <brho@cs.berkeley.edu>
 * See LICENSE for details.
 */

#ifndef ROS_INC_SMP_H
#define ROS_INC_SMP_H

/* SMP related functions */

#include <arch/smp.h>
#include <ros/common.h>
#include <trap.h>
#include <atomic.h>
#include <process.h>
#include <workqueue.h>
#include <env.h>

#ifdef __SHARC__
typedef sharC_env_t;
#endif
// will want this padded out to cacheline alignment
struct per_cpu_info {
        spinlock_t lock;
        bool preempt_pending;
        struct workqueue NTPTV(t) workqueue;

#ifdef __SHARC__
        // held spin-locks. this will have to go elsewhere if multiple kernel
        // threads can share a CPU.
        // zra: Used by Ivy. Let me know if this should go elsewhere.
        sharC_env_t sharC_env;
#endif

#ifdef __i386__
        spinlock_t amsg_lock;
        unsigned LCKD(&amsg_lock) amsg_current;
        active_message_t LCKD(&amsg_lock) (RO active_msgs)[NUM_ACTIVE_MESSAGES];
#endif
};

typedef struct per_cpu_info NTPTV(t) NTPTV(a0t) NTPTV(a1t) NTPTV(a2t) per_cpu_info_t;

extern per_cpu_info_t (RO per_cpu_info)[MAX_NUM_CPUS];
extern volatile uint8_t RO num_cpus;
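
/* Per-core state lives in one per_cpu_info entry per core, indexed by core
 * id.  Illustrative sketch only, not part of this header; it assumes the
 * arch layer provides core_id() for the calling core and that
 * spin_lock()/spin_unlock() from atomic.h guard the per-core lock:
 *
 *      struct per_cpu_info *my_info = &per_cpu_info[core_id()];
 *      spin_lock(&my_info->lock);
 *      // ... touch this core's state ...
 *      spin_unlock(&my_info->lock);
 */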

/* SMP bootup functions */
void smp_boot(void);
void smp_idle(void);

/* SMP utility functions */
int smp_call_function_self(poly_isr_t handler, TV(t) data,
                           handler_wrapper_t** wait_wrapper);
int smp_call_function_all(poly_isr_t handler, TV(t) data,
                          handler_wrapper_t** wait_wrapper);
int smp_call_function_single(uint32_t dest, poly_isr_t handler, TV(t) data,
                             handler_wrapper_t** wait_wrapper);
int smp_call_wait(handler_wrapper_t*SAFE wrapper);
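
/* Typical call-and-wait pattern (illustrative sketch, not part of this
 * header).  The handler name is made up, and the sketch assumes a poly_isr_t
 * handler takes a trapframe pointer plus the opaque data argument (per
 * trap.h), and that passing a wait_wrapper out-param lets the caller later
 * block in smp_call_wait() until every target core has run the handler:
 *
 *      static void example_handler(trapframe_t *tf, void *data)
 *      {
 *              // runs on each core that receives the IPI
 *      }
 *
 *      handler_wrapper_t *wrapper;
 *      smp_call_function_all(example_handler, 0, &wrapper);
 *      // do other work, then wait for all cores to finish the handler
 *      smp_call_wait(wrapper);
 */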

#endif /* !ROS_INC_SMP_H */