alarm: Force unset_alarm to grab the CV lock
[akaros.git] / kern / include / percpu_counter.h
1 /* Copyright (C) 1991-2017, the Linux Kernel authors */
2
3 #ifndef _LINUX_PERCPU_COUNTER_H
4 #define _LINUX_PERCPU_COUNTER_H
5 /*
6  * A simple "approximate counter".
7  */
8
9 #include <atomic.h>
10 #include <list.h>
11
/*
 * Approximate counter: a global 64-bit 'count' plus an array of 32-bit
 * deltas ('counters' — presumably one slot per cpu; confirm against
 * percpu_counter.c).  Readers that tolerate slack use 'count' directly;
 * exact readers call __percpu_counter_sum().
 */
struct percpu_counter {
	/* NOTE(review): presumably serializes updates that fold per-cpu
	 * deltas into 'count' — confirm in percpu_counter.c */
	spinlock_t lock;
	int64_t count;		/* global, approximate value; read unlocked by
				 * percpu_counter_read() below */
#ifdef CONFIG_HOTPLUG_CPU
	struct list_head list;	/* All percpu_counters are on a list */
#endif
	int32_t *counters;	/* per-cpu delta array; non-NULL once set up
				 * (see percpu_counter_initialized()) */
};
20
/* Default 'batch' threshold passed by the inline wrappers below. */
extern int percpu_counter_batch;

/* Set up @fbc with initial value @amount; @gfp flags the allocation of the
 * per-cpu array.  NOTE(review): error convention (0 vs -errno on failure)
 * not visible here — confirm against percpu_counter.c. */
int percpu_counter_init(struct percpu_counter *fbc, int64_t amount, int gfp);
/* Tear down @fbc and release its per-cpu array. */
void percpu_counter_destroy(struct percpu_counter *fbc);
/* Force the counter to exactly @amount. */
void percpu_counter_set(struct percpu_counter *fbc, int64_t amount);
/* Add @amount, folding into the global count once a per-cpu delta exceeds
 * @batch — TODO confirm exact fold condition in percpu_counter.c. */
void percpu_counter_add_batch(struct percpu_counter *fbc, int64_t amount,
                              int32_t batch);
/* Exact sum of the global count and all per-cpu deltas; may be negative. */
int64_t __percpu_counter_sum(struct percpu_counter *fbc);
/* Three-way compare of the counter against @rhs, using @batch to decide
 * when the approximate count is trustworthy enough to skip an exact sum. */
int __percpu_counter_compare(struct percpu_counter *fbc, int64_t rhs,
                             int32_t batch);
32 static inline int percpu_counter_compare(struct percpu_counter *fbc,
33                                          int64_t rhs)
34 {
35         return __percpu_counter_compare(fbc, rhs, percpu_counter_batch);
36 }
37
38 static inline void percpu_counter_add(struct percpu_counter *fbc,
39                                       int64_t amount)
40 {
41         percpu_counter_add_batch(fbc, amount, percpu_counter_batch);
42 }
43
/* Exact sum, clamped so callers never see a negative value. */
static inline int64_t percpu_counter_sum_positive(struct percpu_counter *fbc)
{
	int64_t total = __percpu_counter_sum(fbc);

	if (total < 0)
		return 0;
	return total;
}
49
/*
 * Exact sum; unlike percpu_counter_sum_positive(), a negative result is
 * passed straight through to the caller.
 */
static inline int64_t percpu_counter_sum(struct percpu_counter *fbc)
{
	int64_t total = __percpu_counter_sum(fbc);

	return total;
}
54
55 static inline int64_t percpu_counter_read(struct percpu_counter *fbc)
56 {
57         return fbc->count;
58 }
59
60 /*
61  * It is possible for the percpu_counter_read() to return a small negative
62  * number for some counter which should never be negative.
63  *
64  */
65 static inline int64_t percpu_counter_read_positive(struct percpu_counter *fbc)
66 {
67         int64_t ret = fbc->count;
68
69         cmb();          /* Prevent reloads of fbc->count */
70         if (ret >= 0)
71                 return ret;
72         return 0;
73 }
74
75 static inline int percpu_counter_initialized(struct percpu_counter *fbc)
76 {
77         return (fbc->counters != NULL);
78 }
79
80 static inline void percpu_counter_inc(struct percpu_counter *fbc)
81 {
82         percpu_counter_add(fbc, 1);
83 }
84
85 static inline void percpu_counter_dec(struct percpu_counter *fbc)
86 {
87         percpu_counter_add(fbc, -1);
88 }
89
90 static inline void percpu_counter_sub(struct percpu_counter *fbc,
91                                       int64_t amount)
92 {
93         percpu_counter_add(fbc, -amount);
94 }
95
96 #endif /* _LINUX_PERCPU_COUNTER_H */