Port Linux's percpu_counters
kern/lib/percpu_counter.c (akaros.git)
/* Copyright (C) 1991-2017, the Linux Kernel authors */

/*
 * Fast batching percpu counters.
 */

#include <percpu_counter.h>
#include <linux_compat.h>

void percpu_counter_set(struct percpu_counter *fbc, int64_t amount)
{
        int cpu;
        unsigned long flags __maybe_unused;

        spin_lock_irqsave(&fbc->lock);
        for_each_possible_cpu(cpu) {
                int32_t *pcount = _PERCPU_VARPTR(*fbc->counters, cpu);
                *pcount = 0;
        }
        fbc->count = amount;
        spin_unlock_irqsave(&fbc->lock);
}

/**
 * This function is both preempt and irq safe. The former is due to explicit
 * preemption disable. The latter is guaranteed by the fact that the slow path
 * is explicitly protected by an irq-safe spinlock, whereas the fast path uses
 * this_cpu_add(), which is irq-safe by definition. Hence there is no need to
 * muck with irq state before calling this one.
 */
void percpu_counter_add_batch(struct percpu_counter *fbc, int64_t amount,
                              int32_t batch)
{
        int64_t count;

        preempt_disable();
        count = __this_cpu_read(*fbc->counters) + amount;
        if (count >= batch || count <= -batch) {
                unsigned long flags __maybe_unused;

                spin_lock_irqsave(&fbc->lock);
                fbc->count += count;
                __this_cpu_sub(*fbc->counters, count - amount);
                spin_unlock_irqsave(&fbc->lock);
        } else {
                this_cpu_add(*fbc->counters, amount);
        }
        preempt_enable();
}

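/*
 * Illustrative sketch, not part of the original file: callers normally do not
 * pass a batch themselves.  In the Linux-style API the header supplies thin
 * inline wrappers that feed the global percpu_counter_batch to the function
 * above; the exact wrappers in Akaros's percpu_counter.h may differ from this
 * guess.
 */
#if 0	/* hypothetical header-side wrappers, shown for context only */
static inline void percpu_counter_add(struct percpu_counter *fbc,
                                      int64_t amount)
{
        percpu_counter_add_batch(fbc, amount, percpu_counter_batch);
}

static inline void percpu_counter_inc(struct percpu_counter *fbc)
{
        percpu_counter_add(fbc, 1);
}

static inline void percpu_counter_dec(struct percpu_counter *fbc)
{
        percpu_counter_add(fbc, -1);
}
#endif
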
/*
 * Add up all the per-cpu counts, return the result.  This is a more accurate
 * but much slower version of percpu_counter_read_positive().
 */
int64_t __percpu_counter_sum(struct percpu_counter *fbc)
{
        int64_t ret;
        int cpu;
        unsigned long flags __maybe_unused;

        spin_lock_irqsave(&fbc->lock);
        ret = fbc->count;
        for_each_online_cpu(cpu) {
                int32_t *pcount = _PERCPU_VARPTR(*fbc->counters, cpu);
                ret += *pcount;
        }
        spin_unlock_irqsave(&fbc->lock);
        return ret;
}

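/*
 * Context sketch, not part of the original file: percpu_counter_read(), used
 * by __percpu_counter_compare() below, is typically a header inline that just
 * returns fbc->count without locking or touching the per-cpu deltas, trading
 * accuracy for speed.  The real definitions live in the header and may differ
 * from this guess.
 */
#if 0	/* hypothetical header-side readers, shown for context only */
static inline int64_t percpu_counter_read(struct percpu_counter *fbc)
{
        return fbc->count;
}

static inline int64_t percpu_counter_sum(struct percpu_counter *fbc)
{
        return __percpu_counter_sum(fbc);
}
#endif
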
int percpu_counter_init(struct percpu_counter *fbc, int64_t amount, gfp_t gfp)
{
        unsigned long flags __maybe_unused;

        spinlock_init_irqsave(&fbc->lock);
        fbc->count = amount;
        fbc->counters = alloc_percpu_gfp(int32_t, gfp);
        if (!fbc->counters)
                return -ENOMEM;
        return 0;
}

void percpu_counter_destroy(struct percpu_counter *fbc)
{
        unsigned long flags __maybe_unused;

        if (!fbc->counters)
                return;
        free_percpu(fbc->counters);
        fbc->counters = NULL;
}

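/*
 * Hypothetical usage sketch, not part of the original file: the expected
 * lifecycle of a counter.  The names and the GFP_KERNEL flag (from the Linux
 * compat layer) are illustrative assumptions.
 */
#if 0
static struct percpu_counter nr_things;

static int things_init(void)
{
        return percpu_counter_init(&nr_things, 0, GFP_KERNEL);
}

static void track_thing(void)
{
        /* Cheap: usually stays in the per-cpu delta, no lock taken */
        percpu_counter_add_batch(&nr_things, 1, percpu_counter_batch);
}

static void things_teardown(void)
{
        printk("roughly %lld things\n", percpu_counter_read(&nr_things));
        percpu_counter_destroy(&nr_things);
}
#endif
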
int percpu_counter_batch __read_mostly = 32;

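/*
 * percpu_counter_add_batch() folds a per-cpu delta into fbc->count once it
 * reaches +/-batch, so each per-cpu delta normally stays below batch in
 * absolute value.  The unlocked fbc->count is therefore expected to be within
 * roughly batch * num_online_cpus() of the true value (with the default batch
 * of 32, that is under about 256 on an 8-core machine), which is exactly the
 * slack __percpu_counter_compare() allows itself below.
 */
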
/*
 * Compare counter against given value.
 * Return 1 if greater, 0 if equal and -1 if less
 */
int __percpu_counter_compare(struct percpu_counter *fbc, int64_t rhs,
                             int32_t batch)
{
        int64_t count;

        count = percpu_counter_read(fbc);
        /* Check to see if rough count will be sufficient for comparison */
        if (abs(count - rhs) > (batch * num_online_cpus())) {
                if (count > rhs)
                        return 1;
                else
                        return -1;
        }
        /* Need to use precise count */
        count = percpu_counter_sum(fbc);
        if (count > rhs)
                return 1;
        else if (count < rhs)
                return -1;
        else
                return 0;
}
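
/*
 * Hypothetical usage sketch, not part of the original file: enforcing a limit
 * without paying for a precise sum on every check.  The rough
 * percpu_counter_read() answer decides the common case; only borderline
 * counts fall back to the locked sum.  'over_limit', 'nr_items', and 'limit'
 * are made-up names.
 */
#if 0
static bool over_limit(struct percpu_counter *nr_items, int64_t limit)
{
        return __percpu_counter_compare(nr_items, limit,
                                        percpu_counter_batch) >= 0;
}
#endif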