akaros/kern/arch/x86/atomic.h
/* Copyright (c) 2009-2011 The Regents of the University of California
 * Barret Rhoden <brho@cs.berkeley.edu>
 * See LICENSE for details.
 *
 * x86 atomics and locking functions. */

#pragma once

#include <ros/common.h>
#include <arch/membar.h>
#include <arch/x86.h>
#include <arch/arch.h>

static inline void atomic_andb(volatile uint8_t *number, uint8_t mask);
static inline void atomic_orb(volatile uint8_t *number, uint8_t mask);

/* Inlined functions declared above */
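/* Aligned, native-word loads and stores are atomic on x86, so init/read/set
 * only need a single mov; the volatile asm forces exactly one untorn access
 * to the atomic_t. */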
static inline void atomic_init(atomic_t *number, long val)
{
        asm volatile("mov %1,%0" : "=m"(*number) : "r"(val));
}

static inline long atomic_read(atomic_t *number)
{
        long val;
        asm volatile("mov %1,%0" : "=r"(val) : "m"(*number));
        return val;
}

static inline void atomic_set(atomic_t *number, long val)
{
        asm volatile("mov %1,%0" : "=m"(*number) : "r"(val));
}

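/* The __sync_* builtins used below compile to lock-prefixed read-modify-write
 * instructions on x86 and act as full memory barriers. */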
static inline void atomic_add(atomic_t *number, long val)
{
        __sync_fetch_and_add(number, val);
}

static inline void atomic_inc(atomic_t *number)
{
        __sync_fetch_and_add(number, 1);
}

static inline void atomic_dec(atomic_t *number)
{
        __sync_fetch_and_sub(number, 1);
}

static inline long atomic_fetch_and_add(atomic_t *number, long val)
{
        return (long)__sync_fetch_and_add(number, val);
}

static inline void atomic_and(atomic_t *number, long mask)
{
        __sync_fetch_and_and(number, mask);
}

static inline void atomic_or(atomic_t *number, long mask)
{
        __sync_fetch_and_or(number, mask);
}

static inline long atomic_swap(atomic_t *addr, long val)
{
        /* The poorly named builtin below is really an atomic xchg */
        return (long)__sync_lock_test_and_set(addr, val);
}

static inline void *atomic_swap_ptr(void **addr, void *val)
{
        return (void*)__sync_lock_test_and_set(addr, val);
}

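/* The CAS helpers return TRUE if *addr held exp_val and was atomically
 * replaced with new_val; on failure, nothing is written and FALSE is
 * returned. */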
static inline bool atomic_cas(atomic_t *addr, long exp_val, long new_val)
{
        return __sync_bool_compare_and_swap(addr, exp_val, new_val);
}

static inline bool atomic_cas_ptr(void **addr, void *exp_val, void *new_val)
{
        return __sync_bool_compare_and_swap(addr, exp_val, new_val);
}

static inline bool atomic_cas_u32(uint32_t *addr, uint32_t exp_val,
                                  uint32_t new_val)
{
        return __sync_bool_compare_and_swap(addr, exp_val, new_val);
}

/* Adds val to number, so long as number was not zero.  Returns TRUE if the
 * operation succeeded (added, not zero), returns FALSE if number is zero. */
static inline bool atomic_add_not_zero(atomic_t *number, long val)
{
        long old_num, new_num;
        do {
                old_num = atomic_read(number);
                if (!old_num)
                        return FALSE;
                new_num = old_num + val;
        } while (!atomic_cas(number, old_num, new_num));
        return TRUE;
}
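
/* Hypothetical usage sketch (obj and refcnt are illustrative names): grab a
 * reference only while the object is still live:
 *
 *        if (!atomic_add_not_zero(&obj->refcnt, 1))
 *                return NULL;
 */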

/* Subtracts val from number, returning TRUE if the new value is 0. */
static inline bool atomic_sub_and_test(atomic_t *number, long val)
{
        bool b;
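        /* The lock prefix makes the subtract atomic; setz then captures ZF,
         * which is set iff the result of the subtract was zero. */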
        asm volatile("lock sub %2,%1; setz %0" : "=q"(b), "=m"(*number)
                                               : "r"(val), "m"(*number)
                                               : "cc" );
        return b;
}

static inline void atomic_andb(volatile uint8_t *number, uint8_t mask)
{
        __sync_fetch_and_and(number, mask);
}

static inline void atomic_orb(volatile uint8_t *number, uint8_t mask)
{
        __sync_fetch_and_or(number, mask);
}

static inline bool spin_locked(spinlock_t *lock)
{
        /* The lock status is the lowest byte of the lock */
        return lock->rlock & 0xff;
}

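/* Test-and-test-and-set: spin read-only (with a pause hint) until the lock
 * byte looks free, then try to grab it with an xchg; if another core got
 * there first, go back to spinning. */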
static inline void __spin_lock_raw(volatile uint32_t *rlock)
{
        uint8_t temp = 0;
        asm volatile("1:                      "
                     "  cmpb $0, %0;          "
                     "  je 2f;                "
                     "  pause;                "
                     "  jmp 1b;               "
                     "2:                      "
                     "  movb $1, %1;          "
                     "  xchgb %1, %0;         "
                     "  cmpb $0, %1;          "
                     "  jne 1b;               "
                     : : "m"(*rlock), "r"(temp) : "cc");
        cmb();  /* need cmb(), the CPU mb() was handled by the xchg */
}

static inline void __spin_lock(spinlock_t *lock)
{
        __spin_lock_raw(&lock->rlock);
}

static inline bool __spin_trylock(spinlock_t *lock)
{
        /* since this is an or, we're not going to clobber the top bytes (if
         * that matters) */
        return !__sync_fetch_and_or(&lock->rlock, 1);
}

static inline void __spin_unlock(spinlock_t *lock)
{
        /* Need to prevent the compiler from reordering older stores. */
        wmb();
        rwmb(); /* x86 makes both of these a cmb() */
        lock->rlock = 0;
}

static inline void __spinlock_init(spinlock_t *lock)
{
        lock->rlock = 0;
}
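
/* Illustrative sketch of using the arch-level spinlock helpers directly
 * (higher-level kernel code normally goes through its own lock wrappers):
 *
 *        spinlock_t lock;
 *
 *        __spinlock_init(&lock);
 *        __spin_lock(&lock);
 *        ... critical section ...
 *        __spin_unlock(&lock);
 */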