akaros/tools/compilers/gcc-glibc/gcc-4.9.2-akaros/libgomp/config/akaros/bar.c
/* Copyright (C) 2005-2014 Free Software Foundation, Inc.
   Contributed by Richard Henderson <rth@redhat.com>.

   This file is part of the GNU OpenMP Library (libgomp).

   Libgomp is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY
   WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
   FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
   more details.

   Under Section 7 of GPL version 3, you are granted additional
   permissions described in the GCC Runtime Library Exception, version
   3.1, as published by the Free Software Foundation.

   You should have received a copy of the GNU General Public License and
   a copy of the GCC Runtime Library Exception along with this program;
   see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
   <http://www.gnu.org/licenses/>.  */

/* This is the Akaros port of the Linux-specific barrier synchronization
   mechanism for libgomp.  This type is private to the library.  The
   implementation uses atomic instructions and a futex-style wait/wake
   interface (see "wait.h").  */

#include <limits.h>
#include "wait.h"


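/* Second half of a simple barrier wait.  If STATE says this thread was the
   last to arrive, reset the arrival count for the next use of the barrier,
   advance the generation and wake every waiter; otherwise sleep on the
   generation word until the last arriver advances it.  */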
void
gomp_barrier_wait_end (gomp_barrier_t *bar, gomp_barrier_state_t state)
{
  if (__builtin_expect (state & BAR_WAS_LAST, 0))
    {
      /* Next time we'll be awaiting TOTAL threads again.  */
      bar->awaited = bar->total;
      __atomic_store_n (&bar->generation, bar->generation + BAR_INCR,
                        MEMMODEL_RELEASE);
      futex_wake ((int *) &bar->generation, INT_MAX);
    }
  else
    {
      do
        do_wait ((int *) &bar->generation, state);
      while (__atomic_load_n (&bar->generation, MEMMODEL_ACQUIRE) == state);
    }
}

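/* Arrive at the barrier BAR and block until all BAR->total threads have
   arrived.  */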
void
gomp_barrier_wait (gomp_barrier_t *bar)
{
  gomp_barrier_wait_end (bar, gomp_barrier_wait_start (bar));
}

/* Like gomp_barrier_wait, except that if the encountering thread
   is not the last one to hit the barrier, it returns immediately.
   The intended usage is that a thread which intends to gomp_barrier_destroy
   this barrier calls gomp_barrier_wait, while all other threads
   call gomp_barrier_wait_last.  When gomp_barrier_wait returns,
   the barrier can be safely destroyed.  */

void
gomp_barrier_wait_last (gomp_barrier_t *bar)
{
  gomp_barrier_state_t state = gomp_barrier_wait_start (bar);
  if (state & BAR_WAS_LAST)
    gomp_barrier_wait_end (bar, state);
}

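/* Wake COUNT threads blocked on the team barrier BAR, or all of them when
   COUNT is zero.  */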
void
gomp_team_barrier_wake (gomp_barrier_t *bar, int count)
{
  futex_wake ((int *) &bar->generation, count == 0 ? INT_MAX : count);
}

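/* Second half of a team barrier wait.  Unlike the simple barrier, threads
   blocked here also execute tasks queued on the barrier: the last arriver
   either releases everyone directly or, if tasks are still outstanding,
   helps run them first, and every waiter likewise services BAR_TASK_PENDING
   work until the generation finally advances.  */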
void
gomp_team_barrier_wait_end (gomp_barrier_t *bar, gomp_barrier_state_t state)
{
  unsigned int generation, gen;

  if (__builtin_expect (state & BAR_WAS_LAST, 0))
    {
      /* Next time we'll be awaiting TOTAL threads again.  */
      struct gomp_thread *thr = gomp_thread ();
      struct gomp_team *team = thr->ts.team;

      bar->awaited = bar->total;
      team->work_share_cancelled = 0;
      if (__builtin_expect (team->task_count, 0))
        {
          /* Tasks are still outstanding: help run them, then wait below
             like everybody else for the barrier to be released.  */
          gomp_barrier_handle_tasks (state);
          state &= ~BAR_WAS_LAST;
        }
      else
        {
          /* Nothing left to do: advance the generation and release all
             waiters immediately.  */
          state &= ~BAR_CANCELLED;
          state += BAR_INCR - BAR_WAS_LAST;
          __atomic_store_n (&bar->generation, state, MEMMODEL_RELEASE);
          futex_wake ((int *) &bar->generation, INT_MAX);
          return;
        }
    }

  generation = state;
  state &= ~BAR_CANCELLED;
  do
    {
      do_wait ((int *) &bar->generation, generation);
      gen = __atomic_load_n (&bar->generation, MEMMODEL_ACQUIRE);
      if (__builtin_expect (gen & BAR_TASK_PENDING, 0))
        {
          /* Help execute tasks queued on the barrier while waiting.  */
          gomp_barrier_handle_tasks (state);
          gen = __atomic_load_n (&bar->generation, MEMMODEL_ACQUIRE);
        }
      generation |= gen & BAR_WAITING_FOR_TASK;
    }
  while (gen != state + BAR_INCR);
}

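/* Arrive at the team barrier BAR and wait for all threads, executing any
   tasks that are queued on it in the meantime.  */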
void
gomp_team_barrier_wait (gomp_barrier_t *bar)
{
  gomp_team_barrier_wait_end (bar, gomp_barrier_wait_start (bar));
}

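/* Like gomp_team_barrier_wait, but for the team's final barrier; it also
   maintains the separate bar->awaited_final counter used by that barrier.  */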
void
gomp_team_barrier_wait_final (gomp_barrier_t *bar)
{
  gomp_barrier_state_t state = gomp_barrier_wait_final_start (bar);
  if (__builtin_expect (state & BAR_WAS_LAST, 0))
    bar->awaited_final = bar->total;
  gomp_team_barrier_wait_end (bar, state);
}

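/* Cancellable variant of gomp_team_barrier_wait_end.  Returns true if the
   barrier was cancelled (BAR_CANCELLED was observed) before it completed,
   false once all threads have arrived.  */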
bool
gomp_team_barrier_wait_cancel_end (gomp_barrier_t *bar,
                                   gomp_barrier_state_t state)
{
  unsigned int generation, gen;

  if (__builtin_expect (state & BAR_WAS_LAST, 0))
    {
      /* Next time we'll be awaiting TOTAL threads again.  */
      /* BAR_CANCELLED should never be set in state here, because
         cancellation means that at least one of the threads has been
         cancelled, thus on a cancellable barrier we should never see
         all of the threads arrive.  */
      struct gomp_thread *thr = gomp_thread ();
      struct gomp_team *team = thr->ts.team;

      bar->awaited = bar->total;
      team->work_share_cancelled = 0;
      if (__builtin_expect (team->task_count, 0))
        {
          /* Tasks are still outstanding: help run them, then wait below
             like everybody else.  */
          gomp_barrier_handle_tasks (state);
          state &= ~BAR_WAS_LAST;
        }
      else
        {
          /* Nothing left to do: advance the generation and release all
             waiters immediately.  */
          state += BAR_INCR - BAR_WAS_LAST;
          __atomic_store_n (&bar->generation, state, MEMMODEL_RELEASE);
          futex_wake ((int *) &bar->generation, INT_MAX);
          return false;
        }
    }

  if (__builtin_expect (state & BAR_CANCELLED, 0))
    return true;

  generation = state;
  do
    {
      do_wait ((int *) &bar->generation, generation);
      gen = __atomic_load_n (&bar->generation, MEMMODEL_ACQUIRE);
      if (__builtin_expect (gen & BAR_CANCELLED, 0))
        return true;
      if (__builtin_expect (gen & BAR_TASK_PENDING, 0))
        {
          /* Help execute tasks queued on the barrier while waiting.  */
          gomp_barrier_handle_tasks (state);
          gen = __atomic_load_n (&bar->generation, MEMMODEL_ACQUIRE);
        }
      generation |= gen & BAR_WAITING_FOR_TASK;
    }
  while (gen != state + BAR_INCR);

  return false;
}

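/* Arrive at the cancellable team barrier BAR.  Returns true if the barrier
   was cancelled before all threads arrived.  */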
bool
gomp_team_barrier_wait_cancel (gomp_barrier_t *bar)
{
  return gomp_team_barrier_wait_cancel_end (bar, gomp_barrier_wait_start (bar));
}

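/* Cancel TEAM's barrier: set BAR_CANCELLED under the task lock (if it is not
   already set) and wake all waiters so they can observe the cancellation.  */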
void
gomp_team_barrier_cancel (struct gomp_team *team)
{
  gomp_mutex_lock (&team->task_lock);
  if (team->barrier.generation & BAR_CANCELLED)
    {
      gomp_mutex_unlock (&team->task_lock);
      return;
    }
  team->barrier.generation |= BAR_CANCELLED;
  gomp_mutex_unlock (&team->task_lock);
  futex_wake ((int *) &team->barrier.generation, INT_MAX);
}