Import Linux's percpu_counter
author Barret Rhoden <brho@cs.berkeley.edu>
Thu, 3 May 2018 19:43:02 +0000 (15:43 -0400)
committer Barret Rhoden <brho@cs.berkeley.edu>
Tue, 22 May 2018 21:53:04 +0000 (17:53 -0400)
From commit 569dbb88e80d ("Linux 4.13").
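
These are Linux's batching approximate counters: updates go to a small
per-cpu delta and are folded into the shared 64-bit count only when the
delta crosses a batch threshold, so writers rarely touch the shared cache
line.  A minimal usage sketch (the counter name and error handling are
illustrative, not part of this import):

	struct percpu_counter nr_items;

	/* May allocate; can fail under memory pressure. */
	if (percpu_counter_init(&nr_items, 0, GFP_KERNEL))
		return -ENOMEM;

	percpu_counter_inc(&nr_items);		/* fast path: per-cpu delta */
	percpu_counter_add(&nr_items, 10);

	/* Cheap approximate read vs. exact (lock + per-cpu walk) sum. */
	printk("approx %lld, exact %lld\n",
	       percpu_counter_read(&nr_items),
	       percpu_counter_sum(&nr_items));

	percpu_counter_destroy(&nr_items);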

Signed-off-by: Barret Rhoden <brho@cs.berkeley.edu>
kern/include/percpu_counter.h [new file with mode: 0644]
kern/lib/percpu_counter.c [new file with mode: 0644]

diff --git a/kern/include/percpu_counter.h b/kern/include/percpu_counter.h
new file mode 100644 (file)
index 0000000..ec06538
--- /dev/null
@@ -0,0 +1,191 @@
+#ifndef _LINUX_PERCPU_COUNTER_H
+#define _LINUX_PERCPU_COUNTER_H
+/*
+ * A simple "approximate counter" for use in ext2 and ext3 superblocks.
+ *
+ * WARNING: these things are HUGE.  4 kbytes per counter on 32-way P4.
+ */
+
+#include <linux/spinlock.h>
+#include <linux/smp.h>
+#include <linux/list.h>
+#include <linux/threads.h>
+#include <linux/percpu.h>
+#include <linux/types.h>
+#include <linux/gfp.h>
+
+#ifdef CONFIG_SMP
+
+struct percpu_counter {
+       raw_spinlock_t lock;
+       s64 count;
+#ifdef CONFIG_HOTPLUG_CPU
+       struct list_head list;  /* All percpu_counters are on a list */
+#endif
+       s32 __percpu *counters;
+};
+
+extern int percpu_counter_batch;
+
+int __percpu_counter_init(struct percpu_counter *fbc, s64 amount, gfp_t gfp,
+                         struct lock_class_key *key);
+
+#define percpu_counter_init(fbc, value, gfp)                           \
+       ({                                                              \
+               static struct lock_class_key __key;                     \
+                                                                       \
+               __percpu_counter_init(fbc, value, gfp, &__key);         \
+       })
+
+void percpu_counter_destroy(struct percpu_counter *fbc);
+void percpu_counter_set(struct percpu_counter *fbc, s64 amount);
+void percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount,
+                             s32 batch);
+s64 __percpu_counter_sum(struct percpu_counter *fbc);
+int __percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch);
+
+static inline int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs)
+{
+       return __percpu_counter_compare(fbc, rhs, percpu_counter_batch);
+}
+
+static inline void percpu_counter_add(struct percpu_counter *fbc, s64 amount)
+{
+       percpu_counter_add_batch(fbc, amount, percpu_counter_batch);
+}
+
+static inline s64 percpu_counter_sum_positive(struct percpu_counter *fbc)
+{
+       s64 ret = __percpu_counter_sum(fbc);
+       return ret < 0 ? 0 : ret;
+}
+
+static inline s64 percpu_counter_sum(struct percpu_counter *fbc)
+{
+       return __percpu_counter_sum(fbc);
+}
+
+static inline s64 percpu_counter_read(struct percpu_counter *fbc)
+{
+       return fbc->count;
+}
+
+/*
+ * It is possible for percpu_counter_read() to return a small negative
+ * number for a counter that should never be negative, since per-cpu
+ * deltas may not yet have been folded into fbc->count.
+ */
+static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc)
+{
+       s64 ret = fbc->count;
+
+       barrier();              /* Prevent reloads of fbc->count */
+       if (ret >= 0)
+               return ret;
+       return 0;
+}
+
+static inline int percpu_counter_initialized(struct percpu_counter *fbc)
+{
+       return (fbc->counters != NULL);
+}
+
+#else /* !CONFIG_SMP */
+
+struct percpu_counter {
+       s64 count;
+};
+
+static inline int percpu_counter_init(struct percpu_counter *fbc, s64 amount,
+                                     gfp_t gfp)
+{
+       fbc->count = amount;
+       return 0;
+}
+
+static inline void percpu_counter_destroy(struct percpu_counter *fbc)
+{
+}
+
+static inline void percpu_counter_set(struct percpu_counter *fbc, s64 amount)
+{
+       fbc->count = amount;
+}
+
+static inline int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs)
+{
+       if (fbc->count > rhs)
+               return 1;
+       else if (fbc->count < rhs)
+               return -1;
+       else
+               return 0;
+}
+
+static inline int
+__percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch)
+{
+       return percpu_counter_compare(fbc, rhs);
+}
+
+static inline void
+percpu_counter_add(struct percpu_counter *fbc, s64 amount)
+{
+       preempt_disable();
+       fbc->count += amount;
+       preempt_enable();
+}
+
+static inline void
+percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount, s32 batch)
+{
+       percpu_counter_add(fbc, amount);
+}
+
+static inline s64 percpu_counter_read(struct percpu_counter *fbc)
+{
+       return fbc->count;
+}
+
+/*
+ * percpu_counter is intended to track positive numbers. In the UP case the
+ * number should never be negative.
+ */
+static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc)
+{
+       return fbc->count;
+}
+
+static inline s64 percpu_counter_sum_positive(struct percpu_counter *fbc)
+{
+       return percpu_counter_read_positive(fbc);
+}
+
+static inline s64 percpu_counter_sum(struct percpu_counter *fbc)
+{
+       return percpu_counter_read(fbc);
+}
+
+static inline int percpu_counter_initialized(struct percpu_counter *fbc)
+{
+       return 1;
+}
+
+#endif /* CONFIG_SMP */
+
+static inline void percpu_counter_inc(struct percpu_counter *fbc)
+{
+       percpu_counter_add(fbc, 1);
+}
+
+static inline void percpu_counter_dec(struct percpu_counter *fbc)
+{
+       percpu_counter_add(fbc, -1);
+}
+
+static inline void percpu_counter_sub(struct percpu_counter *fbc, s64 amount)
+{
+       percpu_counter_add(fbc, -amount);
+}
+
+#endif /* _LINUX_PERCPU_COUNTER_H */
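
Accuracy note: each CPU's local delta is folded into fbc->count once its
magnitude reaches the batch size, so any per-cpu delta is always smaller
than the batch in absolute value.  percpu_counter_read() can therefore
drift from the exact sum by just under batch * num_online_cpus(); with the
default batch of 32 on a 4-CPU box, by up to 124.  This is the same bound
__percpu_counter_compare() (below) uses to decide when the cheap read
suffices for a comparison.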
diff --git a/kern/lib/percpu_counter.c b/kern/lib/percpu_counter.c
new file mode 100644 (file)
index 0000000..3bf4a99
--- /dev/null
@@ -0,0 +1,238 @@
+/*
+ * Fast batching percpu counters.
+ */
+
+#include <linux/percpu_counter.h>
+#include <linux/notifier.h>
+#include <linux/mutex.h>
+#include <linux/init.h>
+#include <linux/cpu.h>
+#include <linux/module.h>
+#include <linux/debugobjects.h>
+
+#ifdef CONFIG_HOTPLUG_CPU
+static LIST_HEAD(percpu_counters);
+static DEFINE_SPINLOCK(percpu_counters_lock);
+#endif
+
+#ifdef CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER
+
+static struct debug_obj_descr percpu_counter_debug_descr;
+
+static bool percpu_counter_fixup_free(void *addr, enum debug_obj_state state)
+{
+       struct percpu_counter *fbc = addr;
+
+       switch (state) {
+       case ODEBUG_STATE_ACTIVE:
+               percpu_counter_destroy(fbc);
+               debug_object_free(fbc, &percpu_counter_debug_descr);
+               return true;
+       default:
+               return false;
+       }
+}
+
+static struct debug_obj_descr percpu_counter_debug_descr = {
+       .name           = "percpu_counter",
+       .fixup_free     = percpu_counter_fixup_free,
+};
+
+static inline void debug_percpu_counter_activate(struct percpu_counter *fbc)
+{
+       debug_object_init(fbc, &percpu_counter_debug_descr);
+       debug_object_activate(fbc, &percpu_counter_debug_descr);
+}
+
+static inline void debug_percpu_counter_deactivate(struct percpu_counter *fbc)
+{
+       debug_object_deactivate(fbc, &percpu_counter_debug_descr);
+       debug_object_free(fbc, &percpu_counter_debug_descr);
+}
+
+#else  /* CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER */
+static inline void debug_percpu_counter_activate(struct percpu_counter *fbc)
+{ }
+static inline void debug_percpu_counter_deactivate(struct percpu_counter *fbc)
+{ }
+#endif /* CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER */
+
+void percpu_counter_set(struct percpu_counter *fbc, s64 amount)
+{
+       int cpu;
+       unsigned long flags;
+
+       raw_spin_lock_irqsave(&fbc->lock, flags);
+       for_each_possible_cpu(cpu) {
+               s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
+               *pcount = 0;
+       }
+       fbc->count = amount;
+       raw_spin_unlock_irqrestore(&fbc->lock, flags);
+}
+EXPORT_SYMBOL(percpu_counter_set);
+
+/**
+ * This function is both preempt and irq safe.  The former is due to explicit
+ * preemption disable.  The latter is guaranteed by the fact that the slow
+ * path is explicitly protected by an irq-safe spinlock, whereas the fast path
+ * uses this_cpu_add(), which is irq-safe by definition.  Hence there is no
+ * need to muck with irq state before calling this one.
+ */
+void percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount, s32 batch)
+{
+       s64 count;
+
+       preempt_disable();
+       count = __this_cpu_read(*fbc->counters) + amount;
+       if (count >= batch || count <= -batch) {
+               unsigned long flags;
+               raw_spin_lock_irqsave(&fbc->lock, flags);
+               fbc->count += count;
+               __this_cpu_sub(*fbc->counters, count - amount);
+               raw_spin_unlock_irqrestore(&fbc->lock, flags);
+       } else {
+               this_cpu_add(*fbc->counters, amount);
+       }
+       preempt_enable();
+}
+EXPORT_SYMBOL(percpu_counter_add_batch);
+
+/*
+ * Add up all the per-cpu counts, return the result.  This is a more accurate
+ * but much slower version of percpu_counter_read_positive()
+ */
+s64 __percpu_counter_sum(struct percpu_counter *fbc)
+{
+       s64 ret;
+       int cpu;
+       unsigned long flags;
+
+       raw_spin_lock_irqsave(&fbc->lock, flags);
+       ret = fbc->count;
+       for_each_online_cpu(cpu) {
+               s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
+               ret += *pcount;
+       }
+       raw_spin_unlock_irqrestore(&fbc->lock, flags);
+       return ret;
+}
+EXPORT_SYMBOL(__percpu_counter_sum);
+
+int __percpu_counter_init(struct percpu_counter *fbc, s64 amount, gfp_t gfp,
+                         struct lock_class_key *key)
+{
+       unsigned long flags __maybe_unused;
+
+       raw_spin_lock_init(&fbc->lock);
+       lockdep_set_class(&fbc->lock, key);
+       fbc->count = amount;
+       fbc->counters = alloc_percpu_gfp(s32, gfp);
+       if (!fbc->counters)
+               return -ENOMEM;
+
+       debug_percpu_counter_activate(fbc);
+
+#ifdef CONFIG_HOTPLUG_CPU
+       INIT_LIST_HEAD(&fbc->list);
+       spin_lock_irqsave(&percpu_counters_lock, flags);
+       list_add(&fbc->list, &percpu_counters);
+       spin_unlock_irqrestore(&percpu_counters_lock, flags);
+#endif
+       return 0;
+}
+EXPORT_SYMBOL(__percpu_counter_init);
+
+void percpu_counter_destroy(struct percpu_counter *fbc)
+{
+       unsigned long flags __maybe_unused;
+
+       if (!fbc->counters)
+               return;
+
+       debug_percpu_counter_deactivate(fbc);
+
+#ifdef CONFIG_HOTPLUG_CPU
+       spin_lock_irqsave(&percpu_counters_lock, flags);
+       list_del(&fbc->list);
+       spin_unlock_irqrestore(&percpu_counters_lock, flags);
+#endif
+       free_percpu(fbc->counters);
+       fbc->counters = NULL;
+}
+EXPORT_SYMBOL(percpu_counter_destroy);
+
+int percpu_counter_batch __read_mostly = 32;
+EXPORT_SYMBOL(percpu_counter_batch);
+
+static int compute_batch_value(unsigned int cpu)
+{
+       int nr = num_online_cpus();
+
+       percpu_counter_batch = max(32, nr*2);
+       return 0;
+}
+
+static int percpu_counter_cpu_dead(unsigned int cpu)
+{
+#ifdef CONFIG_HOTPLUG_CPU
+       struct percpu_counter *fbc;
+
+       compute_batch_value(cpu);
+
+       spin_lock_irq(&percpu_counters_lock);
+       list_for_each_entry(fbc, &percpu_counters, list) {
+               s32 *pcount;
+
+               raw_spin_lock(&fbc->lock);
+               pcount = per_cpu_ptr(fbc->counters, cpu);
+               fbc->count += *pcount;
+               *pcount = 0;
+               raw_spin_unlock(&fbc->lock);
+       }
+       spin_unlock_irq(&percpu_counters_lock);
+#endif
+       return 0;
+}
+
+/*
+ * Compare counter against given value.
+ * Return 1 if greater, 0 if equal and -1 if less
+ */
+int __percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch)
+{
+       s64     count;
+
+       count = percpu_counter_read(fbc);
+       /* Check to see if rough count will be sufficient for comparison */
+       if (abs(count - rhs) > (batch * num_online_cpus())) {
+               if (count > rhs)
+                       return 1;
+               else
+                       return -1;
+       }
+       /* Need to use precise count */
+       count = percpu_counter_sum(fbc);
+       if (count > rhs)
+               return 1;
+       else if (count < rhs)
+               return -1;
+       else
+               return 0;
+}
+EXPORT_SYMBOL(__percpu_counter_compare);
+
+static int __init percpu_counter_startup(void)
+{
+       int ret;
+
+       ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "lib/percpu_cnt:online",
+                               compute_batch_value, NULL);
+       WARN_ON(ret < 0);
+       ret = cpuhp_setup_state_nocalls(CPUHP_PERCPU_CNT_DEAD,
+                                       "lib/percpu_cnt:dead", NULL,
+                                       percpu_counter_cpu_dead);
+       WARN_ON(ret < 0);
+       return 0;
+}
+module_init(percpu_counter_startup);
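
The compare helper is the piece most callers want: "is the counter above or
below some threshold" can usually be answered from the cheap approximate
read, falling back to the exact sum only when the approximation is within
batch * num_online_cpus() of the threshold.  A sketch (the function and
parameter names here are illustrative):

	/* True if at least 'needed' units remain in 'free'. */
	static bool enough_left(struct percpu_counter *free, s64 needed)
	{
		return __percpu_counter_compare(free, needed,
						percpu_counter_batch) >= 0;
	}

Callers with unusually hot counters can pass a larger batch here (and to
percpu_counter_add_batch()) to trade read accuracy for less contention on
fbc->lock.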