Bochs compatible SMP booting and enable_irqsave()
[akaros.git] / inc / atomic.h
1 #ifndef ROS_INC_ATOMIC_H
2 #define ROS_INC_ATOMIC_H
3
4 #include <inc/types.h>
5 #include <inc/mmu.h>
6 #include <inc/x86.h>
7
8 /* //linux style atomic ops
9 typedef struct {uint32_t real_num;} atomic_t;
10 #define atomic_read(atom) ((atom)->real_num)
11 #define atomic_set(atom, val) (((atom)->real_num) = (val))
12 #define atomic_init(i) {(i)}
13 //and the atomic incs, etc take an atomic_t ptr, deref inside
14 */
15
16 static inline void spin_lock(volatile uint32_t* lock);
17 static inline void spin_unlock(volatile uint32_t* lock);
18 static inline void spin_lock_irqsave(volatile uint32_t* lock);
19 static inline void spin_unlock_irqsave(volatile uint32_t* lock);
20 static inline void atomic_inc(volatile uint32_t* number);
21 static inline void atomic_dec(volatile uint32_t* number);
22
23
// Test-and-set spinlock: spin (issuing PAUSE) while the low byte of *lock
// is nonzero, then attempt to take the lock with an atomic XCHG; if another
// CPU beat us to it, go back to spinning.  Held == low byte 1, free == 0.
static inline void spin_lock(volatile uint32_t* lock)
{
	__asm__ __volatile__(
			"1:                       "
			"	cmpb $0, %0;          "
			"	je 2f;                "
			"	pause;                "
			"	jmp 1b;               "
			"2:                       "
			"	movb $1, %%al;        "
			"	xchgb %%al, %0;       "
			"	cmpb $0, %%al;        "
			"	jne 1b;               "
			// "+m": the asm both reads AND writes *lock (xchgb stores
			// into it) -- the old input-only "m" constraint let the
			// compiler assume the lock word was unchanged.
			// "memory" clobber: acts as an acquire-side compiler
			// barrier so critical-section accesses are not hoisted
			// above the lock acquisition.
		: "+m"(*lock) : : "eax", "cc", "memory");
}
39
// Release the lock by clearing it.  The empty asm with a "memory" clobber
// is a compiler barrier: a volatile store alone does not order preceding
// non-volatile stores, so without it the compiler could sink critical-
// section writes past the unlock.  x86's store ordering makes a plain
// store sufficient at the hardware level.
static inline void spin_unlock(volatile uint32_t* lock)
{
	__asm__ __volatile__("" : : : "memory");
	*lock = 0;
}
44
45 // If ints are enabled, disable them and note it in the top bit of the lock
46 // There is an assumption about releasing locks in order here...
47 static inline void spin_lock_irqsave(volatile uint32_t* lock)
48 {
49         uint32_t eflags;
50         eflags = read_eflags();
51         disable_irq();
52         spin_lock(lock);
53         if (eflags & FL_IF)
54                 *lock |= 0x80000000;
55 }
56
// Drop the lock; if its top bit recorded that interrupts were enabled when
// the lock was taken, turn them back on after releasing.
static inline void spin_unlock_irqsave(volatile uint32_t* lock)
{
	int reenable_irqs = (*lock & 0x80000000) != 0;

	*lock = 0;
	if (reenable_irqs)
		enable_irq();
}
66
// Atomically increment *number.  LOCK-prefixed INCL is a full atomic
// read-modify-write on the memory operand (%0 must be the memory address,
// hence the pointer parameter and deref).
static inline void atomic_inc(volatile uint32_t* number)
{
	// "+m": incl reads and writes the operand.  The original "=m"
	// (write-only output) constraint told the compiler the previous
	// value was dead, which could miscompile surrounding code.
	__asm__ __volatile__("lock incl %0" : "+m"(*number) : : "cc");
}
72
// Atomically decrement *number via LOCK-prefixed DECL (atomic RMW).
static inline void atomic_dec(volatile uint32_t* number)
{
	// "+m" (not "=m"): decl reads the old value before writing -- a
	// write-only constraint is incorrect for a read-modify-write op.
	__asm__ __volatile__("lock decl %0" : "+m"(*number) : : "cc");
}
77 #endif /* !ROS_INC_ATOMIC_H */