#include <limits.h>
#include "libgomp.h"
-/* For cpu_relax.  */
-#include "doacross.h"
-
-/* Assuming ADDR is &bar->generation, return bar.  Copied from
-   rtems/bar.c.  */
+void
+gomp_barrier_wait_end (gomp_barrier_t *bar, gomp_barrier_state_t state)
+{
+  if (__builtin_expect (state & BAR_WAS_LAST, 0))
+    {
+      /* Next time we'll be awaiting TOTAL threads again.  */
+      bar->awaited = bar->total;
+      __atomic_store_n (&bar->generation, bar->generation + BAR_INCR,
+                        MEMMODEL_RELEASE);
+    }
+  if (bar->total > 1)
+    asm ("bar.sync 1, %0;" : : "r" (32 * bar->total));
+}
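Editorial note on the '32 *' scaling, not part of the patch: PTX 'bar.sync id, cnt' counts hardware threads, and the count must be a multiple of the 32-thread warp size; the nvptx port runs each logical OpenMP thread on its own warp, so a team of bar->total threads occupies 32 * bar->total hardware threads. A minimal sketch of that assumption, with a hypothetical nvptx_bar_sync helper:

```c
/* Hypothetical helper, illustration only: synchronize NTHREADS logical
   (one-per-warp) OpenMP threads on named barrier 1.  */
static inline void
nvptx_bar_sync (unsigned nthreads)
{
  /* 'bar.sync' counts hardware threads, in multiples of the warp size;
     each logical thread here is a full 32-thread warp.  */
  if (nthreads > 1)
    asm volatile ("bar.sync 1, %0;" : : "r" (32 * nthreads) : "memory");
}
```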
-static gomp_barrier_t *
-generation_to_barrier (int *addr)
+void
+gomp_barrier_wait (gomp_barrier_t *bar)
{
-  char *bar
-    = (char *) addr - __builtin_offsetof (gomp_barrier_t, generation);
-  return (gomp_barrier_t *)bar;
+  gomp_barrier_wait_end (bar, gomp_barrier_wait_start (bar));
}
-/* Implement futex_wait-like behaviour to plug into the linux/bar.c
-   implementation.  Assumes ADDR is &bar->generation.  */
+/* Like gomp_barrier_wait, except that if the encountering thread
+   is not the last one to hit the barrier, it returns immediately.
+   The intended usage is that a thread which intends to gomp_barrier_destroy
+   this barrier calls gomp_barrier_wait, while all other threads
+   call gomp_barrier_wait_last.  When gomp_barrier_wait returns,
+   the barrier can be safely destroyed.  */
-static inline void
-futex_wait (int *addr, int val)
+void
+gomp_barrier_wait_last (gomp_barrier_t *bar)
{
-  gomp_barrier_t *bar = generation_to_barrier (addr);
+  /* The behavior described above matches 'bar.arrive' exactly.  */
+  if (bar->total > 1)
+    asm ("bar.arrive 1, %0;" : : "r" (32 * bar->total));
+}
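An illustrative usage sketch of the protocol described in the comment above; 'is_destroyer' and the wrapper are hypothetical, only the gomp_barrier_* calls come from this file's interface:

```c
/* Illustration only: one designated thread tears the barrier down,
   the others merely signal arrival and move on.  */
static void
leave_and_maybe_destroy (gomp_barrier_t *bar, bool is_destroyer)
{
  if (is_destroyer)
    {
      gomp_barrier_wait (bar);     /* returns once all threads arrived */
      gomp_barrier_destroy (bar);  /* safe now, per the comment above */
    }
  else
    gomp_barrier_wait_last (bar);  /* 'bar.arrive': non-blocking arrival */
}
```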
-  if (bar->total < 2)
-    /* A barrier with less than two threads, nop.  */
-    return;
+/* Barriers are implemented mainly using 'bar.red.or', which combines a
+   'bar.sync' operation with an OR-reduction of "team->task_count != 0"
+   across all threads.  Task processing is done only after synchronization,
+   and only if task_count was non-zero in at least one of the team threads.
-  gomp_mutex_lock (&bar->lock);
+   This use of simple barriers, queuing tasks until the end, is deemed more
+   efficient for GPUs in the common offloading case than implementing
+   futex-wait/wake operations so that tasks can be processed concurrently in
+   the CPU-thread manner (which is not easy to implement efficiently on
+   GPUs).  */
-  /* Futex semantics: only go to sleep if *addr == val.  */
-  if (__builtin_expect (__atomic_load_n (addr, MEMMODEL_ACQUIRE) != val, 0))
-    {
-      gomp_mutex_unlock (&bar->lock);
-      return;
-    }
+void
+gomp_team_barrier_wait_end (gomp_barrier_t *bar, gomp_barrier_state_t state)
+{
+  struct gomp_thread *thr = gomp_thread ();
+  struct gomp_team *team = thr->ts.team;
-  /* Register as waiter.  */
-  unsigned int waiters
-    = __atomic_add_fetch (&bar->waiters, 1, MEMMODEL_ACQ_REL);
-  if (waiters == 0)
-    __builtin_abort ();
-  unsigned int waiter_id = waiters;
+  bool run_tasks = (team->task_count != 0);
+  if (bar->total > 1)
+    run_tasks = __builtin_nvptx_bar_red_or (1, 32 * bar->total, true,
+                                            (team->task_count != 0));
-  if (waiters > 1)
+  if (__builtin_expect (state & BAR_WAS_LAST, 0))
    {
-      /* Wake other threads in bar.sync.  */
-      asm volatile ("bar.sync 1, %0;" : : "r" (32 * waiters));
-
-      /* Ensure that they have updated waiters.  */
-      asm volatile ("bar.sync 1, %0;" : : "r" (32 * waiters));
+      /* Next time we'll be awaiting TOTAL threads again.  */
+      bar->awaited = bar->total;
+      team->work_share_cancelled = 0;
    }
-  gomp_mutex_unlock (&bar->lock);
-
-  while (1)
+  if (__builtin_expect (run_tasks == true, 0))
    {
-      /* Wait for next thread in barrier.  */
-      asm volatile ("bar.sync 1, %0;" : : "r" (32 * (waiters + 1)));
+      while (__atomic_load_n (&bar->generation, MEMMODEL_ACQUIRE)
+             & BAR_TASK_PENDING)
+        gomp_barrier_handle_tasks (state);
-      /* Get updated waiters.  */
-      unsigned int updated_waiters
-        = __atomic_load_n (&bar->waiters, MEMMODEL_ACQUIRE);
-
-      /* Notify that we have updated waiters.  */
-      asm volatile ("bar.sync 1, %0;" : : "r" (32 * (waiters + 1)));
-
-      waiters = updated_waiters;
+      if (bar->total > 1)
+        asm volatile ("bar.sync 1, %0;" : : "r" (32 * bar->total));
+    }
+}
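Editorial sketch of the core step above, in terms of this file's types ('team_has_pending_tasks' is a hypothetical name): a team-barrier round fuses CTA-wide synchronization with an OR-reduction of each thread's "tasks queued?" flag, and only a true result sends the team into the task-processing loop.

```c
/* Hypothetical wrapper, illustration only.  */
static bool
team_has_pending_tasks (gomp_barrier_t *bar, struct gomp_team *team)
{
  /* Blocks like 'bar.sync' on named barrier 1 across 32 * bar->total
     hardware threads, then returns the OR of all contributed flags.  */
  return __builtin_nvptx_bar_red_or (1, 32 * bar->total, true,
                                     team->task_count != 0);
}
```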
-      if (waiter_id > waiters)
-        /* A wake happened, and we're in the group of woken threads.  */
-        break;
+void
+gomp_team_barrier_wait (gomp_barrier_t *bar)
+{
+  gomp_team_barrier_wait_end (bar, gomp_barrier_wait_start (bar));
+}
-      /* Continue waiting.  */
-    }
+void
+gomp_team_barrier_wait_final (gomp_barrier_t *bar)
+{
+  gomp_barrier_state_t state = gomp_barrier_wait_final_start (bar);
+  if (__builtin_expect (state & BAR_WAS_LAST, 0))
+    bar->awaited_final = bar->total;
+  gomp_team_barrier_wait_end (bar, state);
}
-/* Implement futex_wake-like behaviour to plug into the linux/bar.c
-   implementation.  Assumes ADDR is &bar->generation.  */
+/* See also comments for gomp_team_barrier_wait_end.  */
-static inline void
-futex_wake (int *addr, int count)
+bool
+gomp_team_barrier_wait_cancel_end (gomp_barrier_t *bar,
+                                   gomp_barrier_state_t state)
{
-  gomp_barrier_t *bar = generation_to_barrier (addr);
+  struct gomp_thread *thr = gomp_thread ();
+  struct gomp_team *team = thr->ts.team;
-  if (bar->total < 2)
-    /* A barrier with less than two threads, nop.  */
-    return;
+  bool run_tasks = (team->task_count != 0);
+  if (bar->total > 1)
+    run_tasks = __builtin_nvptx_bar_red_or (1, 32 * bar->total, true,
+                                            (team->task_count != 0));
+  if (state & BAR_CANCELLED)
+    return true;
-  gomp_mutex_lock (&bar->lock);
-  unsigned int waiters = __atomic_load_n (&bar->waiters, MEMMODEL_ACQUIRE);
-  if (waiters == 0)
+  if (__builtin_expect (state & BAR_WAS_LAST, 0))
    {
-      /* No threads to wake.  */
-      gomp_mutex_unlock (&bar->lock);
-      return;
+      /* Note: BAR_CANCELLED should never be set in state here, because
+         cancellation means that at least one of the threads has been
+         cancelled, so on a cancellable barrier we should never see
+         all threads arrive.  */
+
+      /* Next time we'll be awaiting TOTAL threads again.  */
+      bar->awaited = bar->total;
+      team->work_share_cancelled = 0;
    }
-  if (count == INT_MAX)
-    /* Release all threads.  */
-    __atomic_store_n (&bar->waiters, 0, MEMMODEL_RELEASE);
-  else if (count < bar->total)
-    /* Release count threads.  */
-    __atomic_add_fetch (&bar->waiters, -count, MEMMODEL_ACQ_REL);
-  else
-    /* Count has an illegal value.  */
-    __builtin_abort ();
-
-  /* Wake other threads in bar.sync.  */
-  asm volatile ("bar.sync 1, %0;" : : "r" (32 * (waiters + 1)));
+  if (__builtin_expect (run_tasks == true, 0))
+    {
+      while (__atomic_load_n (&bar->generation, MEMMODEL_ACQUIRE)
+             & BAR_TASK_PENDING)
+        gomp_barrier_handle_tasks (state);
-  /* Let them get the updated waiters.  */
-  asm volatile ("bar.sync 1, %0;" : : "r" (32 * (waiters + 1)));
+      if (bar->total > 1)
+        asm volatile ("bar.sync 1, %0;" : : "r" (32 * bar->total));
+    }
-  gomp_mutex_unlock (&bar->lock);
+  return false;
}
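A brief usage note (editorial): the boolean result distinguishes a cancelled barrier round from a completed one. A hypothetical caller ('work_then_sync' is illustrative; team->barrier is the real field):

```c
/* Illustration only: abandon the region when the round was cancelled.  */
static void
work_then_sync (struct gomp_team *team)
{
  /* ... work-shared portion ... */
  if (gomp_team_barrier_wait_cancel (&team->barrier))
    return;  /* cancelled: skip everything past the barrier */
  /* ... code after a completed barrier ... */
}
```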
-/* Copied from linux/wait.h.  */
-
-static inline int do_spin (int *addr, int val)
+bool
+gomp_team_barrier_wait_cancel (gomp_barrier_t *bar)
{
-  /* The current implementation doesn't spin.  */
-  return 1;
+  return gomp_team_barrier_wait_cancel_end (bar, gomp_barrier_wait_start (bar));
}
-/* Copied from linux/wait.h.  */
-
-static inline void do_wait (int *addr, int val)
+void
+gomp_team_barrier_cancel (struct gomp_team *team)
{
-  if (do_spin (addr, val))
-    futex_wait (addr, val);
-}
+  gomp_mutex_lock (&team->task_lock);
+  if (team->barrier.generation & BAR_CANCELLED)
    {
+      gomp_mutex_unlock (&team->task_lock);
+      return;
    }
+  team->barrier.generation |= BAR_CANCELLED;
+  gomp_mutex_unlock (&team->task_lock);
-/* Reuse the linux implementation.  */
-#define GOMP_WAIT_H 1
-#include "../linux/bar.c"
+  /* The 'exit' instruction cancels this thread; exited threads no longer
+     count toward barrier arrival, so any other CTA threads waiting on
+     barriers are released rather than left blocked.  */
+  asm volatile ("exit;");
+}
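For host-side context (editorial): a '#pragma omp cancel parallel' that takes effect is what ultimately routes to gomp_team_barrier_cancel for the team, and cancellation must be enabled at runtime (OMP_CANCELLATION=true). 'error_condition' below is a stand-in:

```c
#include <omp.h>

void
example (void)
{
  #pragma omp parallel
  {
    int error_condition = 0;
    /* ... */
    if (error_condition)
      {
        #pragma omp cancel parallel
      }
    /* Remaining threads observe this at their next cancellation point.  */
  }
}
```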