* - http://lwn.net/Articles/336224/
* - Linux's Documentation/kref.txt
*
- * We differ a bit in that we currently ref count items that are on lists. If
- * an item is stored on a list, that counts as a reference. No need to lock
- * around kref_put, nor do you need to kref_get your list reference *if* you
- * take the reference out of the list. You need to kref_get() (if you want to
- * use the reference later) before allowing someone else access to the list,
- * which is still IAW Linux's style. They might even do this for some lists. If
- * we have lists that are unsynchronized where two threads can get references to
- * the same item at the same time, then we'll need to lock to deal with that.
- *
- * We also allow incrementing by more than one, which helps in some cases. We
- * don't allow decrementing by more than one to catch bugs (for now).
- *
- * As far as usage goes, kref users don't make much of a distinction between
- * internal and external references yet.
- *
- * kref rules (paraphrasing the linux ones):
- * 1. If you pass a pointer somewhere or store it, kref_get() it first. You can
- * do this with no locking if you have a valid reference.
- * 2. When you are done, kref_put() it. You can usually do this without
- * locking.
- * 3. If you never kref_get without already holding a valid reference, you don't
- * need to lock for Rule 2. If you ever grab a reference without already having
- * one, you need some form of sync to prevent a kref_put() from happening
- * while you kref_get().
- *
- * The closest we get to mucking with it is with the proc hash table, though we
- * don't require a lock on every proc kref_put(). If you're
- * curious about these sorts of things, note how easy it is for a list where you
- * are added or removed (like the runnable list) compared to a structure where
- * we make a copy of the reference (like the pid2proc hash). */
+ * See our Documentation/kref.txt for more info. */
+
+#pragma once
#include <atomic.h>
#include <assert.h>
void (*release)(struct kref *kref);
};
+/* Helper for some debugging situations */
+static long kref_refcnt(struct kref *kref)
+{
+ return atomic_read(&kref->refcount);
+}
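+
+/* A small usage sketch: the value from kref_refcnt() is only a snapshot and
+ * can change as soon as it is read, so it is meant for debug output and
+ * sanity checks.  'foo' here is a hypothetical object with an embedded kref:
+ *
+ *	printk("foo %p refcnt %ld\n", foo, kref_refcnt(&foo->kref));
+ */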
+
static void kref_init(struct kref *kref, void (*release)(struct kref *kref),
unsigned int init)
{
kref->release = release;
}
-static struct kref *kref_get(struct kref *kref, unsigned int inc)
+/* Will blindly incref, with no check for 0.  Only use this when something
+ * else already guarantees the object is alive (e.g. a lock or an existing
+ * reference). */
+static struct kref *__kref_get(struct kref *kref, unsigned int inc)
{
- assert(atomic_read(&kref->refcount));
atomic_add(&kref->refcount, inc);
return kref;
}
-static void kref_put(struct kref *kref)
+/* Returns the kref ptr on success, 0 if the count was already 0 (the object
+ * is already being released) */
+static struct kref *kref_get_not_zero(struct kref *kref, unsigned int inc)
+{
+ if (atomic_add_not_zero(&kref->refcount, inc))
+ return kref;
+ else
+ return 0;
+}
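+
+/* A usage sketch for kref_get_not_zero(): use it when grabbing a reference
+ * out of a shared structure (a list or hash), where another thread could be
+ * dropping the last reference at the same time.  Hold that structure's lock
+ * and skip objects whose count already hit 0.  The names below (foo_lock,
+ * foo_lookup, struct foo) are hypothetical:
+ *
+ *	spin_lock(&foo_lock);
+ *	foo = foo_lookup(id);
+ *	if (foo && !kref_get_not_zero(&foo->kref, 1))
+ *		foo = 0;		// it was mid-release; act like we missed it
+ *	spin_unlock(&foo_lock);
+ */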
+
+/* Will panic on zero */
+static struct kref *kref_get(struct kref *kref, unsigned int inc)
+{
+ kref = kref_get_not_zero(kref, inc);
+ assert(kref);
+ return kref;
+}
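+
+/* A usage sketch for kref_get(): call it when you already hold a valid
+ * reference and are storing or handing out another one, so a count of 0 here
+ * would be a bug.  'foo' and 'bar' are hypothetical:
+ *
+ *	kref_get(&foo->kref, 1);
+ *	bar->foo = foo;			// bar now owns its own reference
+ */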
+
+/* Returns True if we hit 0 and executed 'release', False otherwise */
+static bool kref_put(struct kref *kref)
{
- assert(atomic_read(&kref->refcount) > 0); /* catch some bugs */
- if (atomic_sub_and_test(&kref->refcount, 1))
+ assert(kref_refcnt(kref) > 0); /* catch some bugs */
+ if (atomic_sub_and_test(&kref->refcount, 1)) {
kref->release(kref);
+ return TRUE;
+ }
+ return FALSE;
+}
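+
+/* A lifecycle sketch tying init/put together, assuming a hypothetical struct
+ * foo plus kmalloc()/kfree() and a container_of() macro:
+ *
+ *	struct foo {
+ *		struct kref kref;
+ *		...
+ *	};
+ *
+ *	static void foo_release(struct kref *kref)
+ *	{
+ *		struct foo *foo = container_of(kref, struct foo, kref);
+ *
+ *		kfree(foo);
+ *	}
+ *
+ *	foo = kmalloc(sizeof(struct foo), 0);
+ *	kref_init(&foo->kref, foo_release, 1);	// start with one reference
+ *	...
+ *	kref_put(&foo->kref);	// runs foo_release() when the count hits 0
+ */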
+
+/* Dev / debugging function to catch the attempted freeing of objects we don't
+ * know how to free yet. */
+static void fake_release(struct kref *kref)
+{
+ panic("Cleaning up this object is not supported!\n");
}
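+
+/* A sketch of using fake_release(): pass it to kref_init() for objects that
+ * should never be freed (or whose cleanup path isn't written yet), so an
+ * unexpected final kref_put() panics loudly instead of silently leaking or
+ * corrupting.  'foo' is hypothetical:
+ *
+ *	kref_init(&foo->kref, fake_release, 1);
+ */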