#ifndef _LINUX_PERCPU_COUNTER_H
#define _LINUX_PERCPU_COUNTER_H
/*
 * A simple "approximate counter" for use in ext2 and ext3 superblocks.
 *
 * WARNING: these things are HUGE.  4 kbytes per counter on 32-way P4.
 */
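/*
 * Minimal usage sketch (illustrative only; "nr_free" is a hypothetical
 * counter, and GFP_KERNEL is assumed to be an acceptable gfp for
 * allocating the per-CPU array):
 *
 *	struct percpu_counter nr_free;
 *
 *	percpu_counter_init(&nr_free, 0, GFP_KERNEL);
 *	percpu_counter_inc(&nr_free);	// cheap, per-CPU fast path
 *	percpu_counter_sub(&nr_free, 2);
 *	printk("approx %lld exact %lld\n",
 *	       percpu_counter_read(&nr_free),
 *	       percpu_counter_sum(&nr_free));
 *	percpu_counter_destroy(&nr_free);
 */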

#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/list.h>
#include <linux/threads.h>
#include <linux/percpu.h>
#include <linux/types.h>
#include <linux/gfp.h>

#ifdef CONFIG_SMP

struct percpu_counter {
        spinlock_t lock;
        int64_t count;
#ifdef CONFIG_HOTPLUG_CPU
        struct list_head list;  /* All percpu_counters are on a list */
#endif
        int32_t __percpu *counters;
};

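/*
 * Fold threshold: writers accumulate into their per-CPU slot and fold the
 * delta into fbc->count under the lock once it reaches +/- batch, so a
 * plain read of fbc->count can be off by up to nr_cpus * batch.
 */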
extern int percpu_counter_batch;

int __percpu_counter_init(struct percpu_counter *fbc, int64_t amount, gfp_t gfp,
                          struct lock_class_key *key);

#define percpu_counter_init(fbc, value, gfp)                            \
        ({                                                              \
                static struct lock_class_key __key;                     \
                                                                        \
                __percpu_counter_init(fbc, value, gfp, &__key);         \
        })

void percpu_counter_destroy(struct percpu_counter *fbc);
void percpu_counter_set(struct percpu_counter *fbc, int64_t amount);
void percpu_counter_add_batch(struct percpu_counter *fbc, int64_t amount,
                              int32_t batch);
int64_t __percpu_counter_sum(struct percpu_counter *fbc);
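/*
 * Compare the counter with rhs.  In the Linux implementation this header
 * mirrors, the cheap approximate count settles the comparison when it
 * differs from rhs by more than the worst-case per-CPU error; otherwise an
 * exact (and expensive) __percpu_counter_sum() breaks the tie.
 */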
int __percpu_counter_compare(struct percpu_counter *fbc, int64_t rhs,
                             int32_t batch);

static inline int percpu_counter_compare(struct percpu_counter *fbc,
                                         int64_t rhs)
{
        return __percpu_counter_compare(fbc, rhs, percpu_counter_batch);
}
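
/*
 * Example (sketch, with a hypothetical "nr_free" counter): fail fast when
 * fewer than nr blocks remain, paying for an exact sum only when the
 * approximate count is too close to call.
 *
 *	if (percpu_counter_compare(&nr_free, nr) < 0)
 *		return -ENOSPC;
 */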

static inline void percpu_counter_add(struct percpu_counter *fbc,
                                      int64_t amount)
{
        percpu_counter_add_batch(fbc, amount, percpu_counter_batch);
}

static inline int64_t percpu_counter_sum_positive(struct percpu_counter *fbc)
{
        int64_t ret = __percpu_counter_sum(fbc);
        return ret < 0 ? 0 : ret;
}

static inline int64_t percpu_counter_sum(struct percpu_counter *fbc)
{
        return __percpu_counter_sum(fbc);
}

static inline int64_t percpu_counter_read(struct percpu_counter *fbc)
{
        return fbc->count;
}
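
/*
 * Note: percpu_counter_read() is a racy snapshot of the folded count only;
 * deltas still parked in the per-CPU slots are not included.  Use
 * percpu_counter_sum() when an exact value is worth a pass over all CPUs.
 */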

/*
 * It is possible for percpu_counter_read() to return a small negative
 * number for a counter that should never be negative: unfolded per-CPU
 * deltas can briefly leave fbc->count below zero.
 */
static inline int64_t percpu_counter_read_positive(struct percpu_counter *fbc)
{
        int64_t ret = fbc->count;

        cmb();          /* Prevent reloads of fbc->count */
        if (ret >= 0)
                return ret;
        return 0;
}

static inline int percpu_counter_initialized(struct percpu_counter *fbc)
{
        return (fbc->counters != NULL);
}

#else /* !CONFIG_SMP */

struct percpu_counter {
        int64_t count;
};

static inline int percpu_counter_init(struct percpu_counter *fbc, int64_t amount,
                                      gfp_t gfp)
{
        fbc->count = amount;
        return 0;
}

static inline void percpu_counter_destroy(struct percpu_counter *fbc)
{
}

static inline void percpu_counter_set(struct percpu_counter *fbc,
                                      int64_t amount)
{
        fbc->count = amount;
}

static inline int percpu_counter_compare(struct percpu_counter *fbc,
                                         int64_t rhs)
{
        if (fbc->count > rhs)
                return 1;
        else if (fbc->count < rhs)
                return -1;
        else
                return 0;
}

static inline int
__percpu_counter_compare(struct percpu_counter *fbc, int64_t rhs,
                         int32_t batch)
{
        return percpu_counter_compare(fbc, rhs);
}

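/*
 * UP still needs preemption disabled around the update: the 64-bit
 * read-modify-write of fbc->count must not be interleaved with another
 * context doing the same.
 */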
static inline void
percpu_counter_add(struct percpu_counter *fbc, int64_t amount)
{
        preempt_disable();
        fbc->count += amount;
        preempt_enable();
}

static inline void
percpu_counter_add_batch(struct percpu_counter *fbc, int64_t amount,
                         int32_t batch)
{
        percpu_counter_add(fbc, amount);
}

static inline int64_t percpu_counter_read(struct percpu_counter *fbc)
{
        return fbc->count;
}

/*
 * percpu_counter is intended to track positive numbers.  In the UP case the
 * number should never be negative.
 */
static inline int64_t percpu_counter_read_positive(struct percpu_counter *fbc)
{
        return fbc->count;
}

static inline int64_t percpu_counter_sum_positive(struct percpu_counter *fbc)
{
        return percpu_counter_read_positive(fbc);
}

static inline int64_t percpu_counter_sum(struct percpu_counter *fbc)
{
        return percpu_counter_read(fbc);
}

static inline int percpu_counter_initialized(struct percpu_counter *fbc)
{
        return 1;
}

#endif  /* CONFIG_SMP */

static inline void percpu_counter_inc(struct percpu_counter *fbc)
{
        percpu_counter_add(fbc, 1);
}

static inline void percpu_counter_dec(struct percpu_counter *fbc)
{
        percpu_counter_add(fbc, -1);
}

static inline void percpu_counter_sub(struct percpu_counter *fbc,
                                      int64_t amount)
{
        percpu_counter_add(fbc, -amount);
}

#endif /* _LINUX_PERCPU_COUNTER_H */