diff --git a/src/include/port/atomics/arch-ppc.h b/src/include/port/atomics/arch-ppc.h
index ed1cd9d1b9..7cf8c8ef97 100644
--- a/src/include/port/atomics/arch-ppc.h
+++ b/src/include/port/atomics/arch-ppc.h
@@ -23,4 +23,13 @@
 #define pg_memory_barrier_impl()	__asm__ __volatile__ ("sync" : : : "memory")
 #define pg_read_barrier_impl()		__asm__ __volatile__ ("lwsync" : : : "memory")
 #define pg_write_barrier_impl()	__asm__ __volatile__ ("lwsync" : : : "memory")
 #endif
+
+#if defined(__IBMC__) || defined(__IBMCPP__)
+
+/* Prefer the IBM compilers' barrier built-ins; see also generic-xlc.h. */
+#define pg_memory_barrier_impl()	__sync()
+#define pg_read_barrier_impl()		__lwsync()
+#define pg_write_barrier_impl()	__lwsync()
+
+#endif
diff --git a/src/include/port/atomics/generic-xlc.h b/src/include/port/atomics/generic-xlc.h
index f854612d39..e1dd3310a5 100644
--- a/src/include/port/atomics/generic-xlc.h
+++ b/src/include/port/atomics/generic-xlc.h
@@ -48,7 +48,7 @@ pg_atomic_compare_exchange_u32_impl(volatile pg_atomic_uint32 *ptr,
 	 * consistency only, do not use it here.  GCC atomics observe the same
 	 * restriction; see its rs6000_pre_atomic_barrier().
 	 */
-	__asm__ __volatile__ ("	sync \n" ::: "memory");
+	__sync();
 
 	/*
 	 * XXX: __compare_and_swap is defined to take signed parameters, but that
@@ -73,11 +73,19 @@ pg_atomic_compare_exchange_u32_impl(volatile pg_atomic_uint32 *ptr,
 static inline uint32
 pg_atomic_fetch_add_u32_impl(volatile pg_atomic_uint32 *ptr, int32 add_)
 {
+	uint32		ret;
+
 	/*
-	 * __fetch_and_add() emits a leading "sync" and trailing "isync", thereby
-	 * providing sequential consistency.  This is undocumented.
+	 * Use __sync() before and __isync() after, like in compare-exchange
+	 * above.
 	 */
-	return __fetch_and_add((volatile int *)&ptr->value, add_);
+	__sync();
+
+	ret = __fetch_and_add((volatile int *)&ptr->value, add_);
+
+	__isync();
+
+	return ret;
 }
 
 #ifdef PG_HAVE_ATOMIC_U64_SUPPORT
@@ -89,7 +97,7 @@ pg_atomic_compare_exchange_u64_impl(volatile pg_atomic_uint64 *ptr,
 {
 	bool		ret;
 
-	__asm__ __volatile__ ("	sync \n" ::: "memory");
+	__sync();
 
 	ret = __compare_and_swaplp((volatile long*)&ptr->value,
 							   (long *)expected, (long)newval);
@@ -103,7 +111,15 @@ pg_atomic_compare_exchange_u64_impl(volatile pg_atomic_uint64 *ptr,
 static inline uint64
 pg_atomic_fetch_add_u64_impl(volatile pg_atomic_uint64 *ptr, int64 add_)
 {
-	return __fetch_and_addlp((volatile long *)&ptr->value, add_);
+	uint64		ret;
+
+	__sync();
+
+	ret = __fetch_and_addlp((volatile long *)&ptr->value, add_);
+
+	__isync();
+
+	return ret;
 }
 
 #endif							/* PG_HAVE_ATOMIC_U64_SUPPORT */