Diffstat (limited to 'erts/include/internal/win')
-rw-r--r--   erts/include/internal/win/ethr_atomic.h  | 415
-rw-r--r--   erts/include/internal/win/ethr_event.h   |  16
-rw-r--r--   erts/include/internal/win/ethread.h      |   6
3 files changed, 307 insertions, 130 deletions
diff --git a/erts/include/internal/win/ethr_atomic.h b/erts/include/internal/win/ethr_atomic.h
index 500459dd6c..60def01a7e 100644
--- a/erts/include/internal/win/ethr_atomic.h
+++ b/erts/include/internal/win/ethr_atomic.h
@@ -22,223 +22,394 @@
  * Author: Rickard Green
  */
 
-#ifndef ETHR_WIN_ATOMIC_H__
-#define ETHR_WIN_ATOMIC_H__
-
-#ifdef _MSC_VER
-#  if _MSC_VER < 1300
-#    define ETHR_IMMED_ATOMIC_SET_GET_SAFE__ 0 /* Dont trust really old compilers */
-#  else
-#    if defined(_M_IX86)
-#      define ETHR_IMMED_ATOMIC_SET_GET_SAFE__ 1
-#    else /* I.e. IA64 */
-#      if _MSC_VER >= 1400
-#        define ETHR_IMMED_ATOMIC_SET_GET_SAFE__ 1
-#      else
-#        define ETHR_IMMED_ATOMIC_SET_GET_SAFE__ 0
-#      endif
-#    endif
-#  endif
-#  if _MSC_VER >= 1400
-#    include <intrin.h>
-#    undef ETHR_COMPILER_BARRIER
-#    define ETHR_COMPILER_BARRIER _ReadWriteBarrier()
-#  endif
-#pragma intrinsic(_ReadWriteBarrier)
-#pragma intrinsic(_InterlockedAnd)
-#pragma intrinsic(_InterlockedOr)
+#undef ETHR_INCLUDE_ATOMIC_IMPL__
+#if !defined(ETHR_WIN_ATOMIC32_H__) && defined(ETHR_ATOMIC_WANT_32BIT_IMPL__)
+#define ETHR_WIN_ATOMIC32_H__
+#define ETHR_INCLUDE_ATOMIC_IMPL__ 4
+#undef ETHR_ATOMIC_WANT_32BIT_IMPL__
+#elif !defined(ETHR_WIN_ATOMIC64_H__) && defined(ETHR_ATOMIC_WANT_64BIT_IMPL__)
+#define ETHR_WIN_ATOMIC64_H__
+#ifdef ETHR_HAVE__INTERLOCKEDCOMPAREEXCHANGE64
+/* _InterlockedCompareExchange64() required... */
+#define ETHR_INCLUDE_ATOMIC_IMPL__ 8
+#endif
+#undef ETHR_ATOMIC_WANT_64BIT_IMPL__
+#endif
+
+#ifdef ETHR_INCLUDE_ATOMIC_IMPL__
+
+#if defined(_MSC_VER) && _MSC_VER >= 1400
+
+#ifndef ETHR_WIN_ATOMIC_COMMON__
+#define ETHR_WIN_ATOMIC_COMMON__
+
+#define ETHR_HAVE_NATIVE_ATOMICS 1
+
+#if defined(_M_IX86) || defined(_M_AMD64) || defined(_M_IA64)
+#  define ETHR_READ_AND_SET_WITHOUT_INTERLOCKED_OP__ 1
 #else
-#  define ETHR_IMMED_ATOMIC_SET_GET_SAFE__ 0
+#  define ETHR_READ_AND_SET_WITHOUT_INTERLOCKED_OP__ 0
 #endif
 
+#if defined(_M_AMD64) || (defined(_M_IX86) \
+                          && !defined(ETHR_PRE_PENTIUM4_COMPAT))
+#  define ETHR_READ_ACQB_AND_SET_RELB_COMPILER_BARRIER_ONLY__ 1
+#else
+#  define ETHR_READ_ACQB_AND_SET_RELB_COMPILER_BARRIER_ONLY__ 0
+#endif
+
 /*
- * No configure test checking for _Interlocked*_{acq,rel} and
- * Interlocked*{Acquire,Release} have been written yet...
+ * No configure test checking for interlocked acquire/release
+ * versions have been written, yet. It should define
+ * ETHR_HAVE_INTERLOCKED_ACQUIRE_RELEASE_BARRIERS if, and
+ * only if, all used interlocked operations with barriers
+ * exists.
  *
  * Note, that these are pure optimizations for the itanium
  * processor.
  */
-#ifdef ETHR_HAVE_INTERLOCKEDCOMPAREEXCHANGE_ACQ
-#pragma intrinsic(_InterlockedCompareExchange_acq)
+
+#include <intrin.h>
+#undef ETHR_COMPILER_BARRIER
+#define ETHR_COMPILER_BARRIER _ReadWriteBarrier()
+#pragma intrinsic(_ReadWriteBarrier)
+#pragma intrinsic(_InterlockedCompareExchange)
+
+#if defined(_M_AMD64) || (defined(_M_IX86) \
+                          && !defined(ETHR_PRE_PENTIUM4_COMPAT))
+#include <emmintrin.h>
+#include <mmintrin.h>
+#pragma intrinsic(_mm_mfence)
+#define ETHR_MEMORY_BARRIER _mm_mfence()
+#pragma intrinsic(_mm_sfence)
+#define ETHR_WRITE_MEMORY_BARRIER _mm_sfence()
+#pragma intrinsic(_mm_lfence)
+#define ETHR_READ_MEMORY_BARRIER _mm_lfence()
+#define ETHR_READ_DEPEND_MEMORY_BARRIER ETHR_COMPILER_BARRIER
+
+#else
+
+#define ETHR_MEMORY_BARRIER \
+do { \
+    volatile long x___ = 0; \
+    _InterlockedCompareExchange(&x___, (long) 1, (long) 0); \
+} while (0)
+
 #endif
-#ifdef ETHR_HAVE_INTERLOCKEDCOMPAREEXCHANGE_REL
+
+#endif /* ETHR_WIN_ATOMIC_COMMON__ */
+
+#if ETHR_INCLUDE_ATOMIC_IMPL__ == 4
+
+#define ETHR_HAVE_NATIVE_ATOMIC32 1
+
+/*
+ * All used operations available as 32-bit intrinsics
+ */
+
+#pragma intrinsic(_InterlockedDecrement)
+#pragma intrinsic(_InterlockedIncrement)
+#pragma intrinsic(_InterlockedExchangeAdd)
+#pragma intrinsic(_InterlockedExchange)
+#pragma intrinsic(_InterlockedAnd)
+#pragma intrinsic(_InterlockedOr)
+#ifdef ETHR_HAVE_INTERLOCKED_ACQUIRE_RELEASE_BARRIERS
+#pragma intrinsic(_InterlockedExchangeAdd_acq)
+#pragma intrinsic(_InterlockedIncrement_acq)
+#pragma intrinsic(_InterlockedDecrement_rel)
+#pragma intrinsic(_InterlockedCompareExchange_acq)
 #pragma intrinsic(_InterlockedCompareExchange_rel)
 #endif
+
+#define ETHR_ILCKD__(X) _Interlocked ## X
+#ifdef ETHR_HAVE_INTERLOCKED_ACQUIRE_RELEASE_BARRIERS
+#define ETHR_ILCKD_ACQ__(X) _Interlocked ## X ## _acq
+#define ETHR_ILCKD_REL__(X) _Interlocked ## X ## _rel
+#else
+#define ETHR_ILCKD_ACQ__(X) _Interlocked ## X
+#define ETHR_ILCKD_REL__(X) _Interlocked ## X
+#endif
+
+#define ETHR_NATMC_FUNC__(X) ethr_native_atomic32_ ## X
+#define ETHR_ATMC_T__ ethr_native_atomic32_t
+#define ETHR_AINT_T__ ethr_sint32_t
+
+#elif ETHR_INCLUDE_ATOMIC_IMPL__ == 8
+
+#define ETHR_HAVE_NATIVE_ATOMIC64 1
+
+/*
+ * _InterlockedCompareExchange64() is required. The other may not
+ * be available, but if so, we can generate them.
+ */
+#pragma intrinsic(_InterlockedCompareExchange64)
+
+#if ETHR_READ_AND_SET_WITHOUT_INTERLOCKED_OP__
+#define ETHR_OWN_ILCKD_INIT_VAL__(PTR) *(PTR)
+#else
+#define ETHR_OWN_ILCKD_INIT_VAL__(PTR) (__int64) 0
+#endif
+
+#define ETHR_OWN_ILCKD_BODY_IMPL__(FUNC, PTR, NEW, ACT, EXP, OPS, RET) \
+{ \
+    __int64 NEW, ACT, EXP; \
+    ACT = ETHR_OWN_ILCKD_INIT_VAL__(PTR); \
+    do { \
+        EXP = ACT; \
+        { OPS; } \
+        ACT = _InterlockedCompareExchange64(PTR, NEW, EXP); \
+    } while (ACT != EXP); \
+    return RET; \
+}
+
+#define ETHR_OWN_ILCKD_1_IMPL__(FUNC, NEW, ACT, EXP, OPS, RET) \
+static __forceinline __int64 \
+FUNC(__int64 volatile *ptr) \
+ETHR_OWN_ILCKD_BODY_IMPL__(FUNC, ptr, NEW, ACT, EXP, OPS, RET)
+
+#define ETHR_OWN_ILCKD_2_IMPL__(FUNC, NEW, ACT, EXP, OPS, ARG, RET) \
+static __forceinline __int64 \
+FUNC(__int64 volatile *ptr, __int64 ARG) \
+ETHR_OWN_ILCKD_BODY_IMPL__(FUNC, ptr, NEW, ACT, EXP, OPS, RET)
+
+
+#ifdef ETHR_HAVE__INTERLOCKEDDECREMENT64
+#pragma intrinsic(_InterlockedDecrement64)
+#else
+ETHR_OWN_ILCKD_1_IMPL__(_InterlockedDecrement64, new, act, exp,
+                        new = act - 1, new)
+#endif
+#ifdef ETHR_HAVE__INTERLOCKEDINCREMENT64
+#pragma intrinsic(_InterlockedIncrement64)
+#else
+ETHR_OWN_ILCKD_1_IMPL__(_InterlockedIncrement64, new, act, exp,
+                        new = act + 1, new)
+#endif
+#ifdef ETHR_HAVE__INTERLOCKEDEXCHANGEADD64
+#pragma intrinsic(_InterlockedExchangeAdd64)
+#else
+ETHR_OWN_ILCKD_2_IMPL__(_InterlockedExchangeAdd64, new, act, exp,
+                        new = act + arg, arg, act)
+#endif
+#ifdef ETHR_HAVE__INTERLOCKEDEXCHANGE64
+#pragma intrinsic(_InterlockedExchange64)
+#else
+ETHR_OWN_ILCKD_2_IMPL__(_InterlockedExchange64, new, act, exp,
+                        new = arg, arg, act)
+#endif
+#ifdef ETHR_HAVE__INTERLOCKEDAND64
+#pragma intrinsic(_InterlockedAnd64)
+#else
+ETHR_OWN_ILCKD_2_IMPL__(_InterlockedAnd64, new, act, exp,
+                        new = act & arg, arg, act)
+#endif
+#ifdef ETHR_HAVE__INTERLOCKEDOR64
+#pragma intrinsic(_InterlockedOr64)
+#else
+ETHR_OWN_ILCKD_2_IMPL__(_InterlockedOr64, new, act, exp,
+                        new = act | arg, arg, act)
+#endif
+#ifdef ETHR_HAVE_INTERLOCKED_ACQUIRE_RELEASE_BARRIERS
+#pragma intrinsic(_InterlockedExchangeAdd64_acq)
+#pragma intrinsic(_InterlockedIncrement64_acq)
+#pragma intrinsic(_InterlockedDecrement64_rel)
+#pragma intrinsic(_InterlockedCompareExchange64_acq)
+#pragma intrinsic(_InterlockedCompareExchange64_rel)
+#endif
+
+#define ETHR_ILCKD__(X) _Interlocked ## X ## 64
+#ifdef ETHR_HAVE_INTERLOCKED_ACQUIRE_RELEASE_BARRIERS
+#define ETHR_ILCKD_ACQ__(X) _Interlocked ## X ## 64_acq
+#define ETHR_ILCKD_REL__(X) _Interlocked ## X ## 64_rel
+#else
+#define ETHR_ILCKD_ACQ__(X) _Interlocked ## X ## 64
+#define ETHR_ILCKD_REL__(X) _Interlocked ## X ## 64
+#endif
+
+#define ETHR_NATMC_FUNC__(X) ethr_native_atomic64_ ## X
+#define ETHR_ATMC_T__ ethr_native_atomic64_t
+#define ETHR_AINT_T__ ethr_sint64_t
+
+#else
+#error "Unsupported integer size"
+#endif
 
 typedef struct {
-    volatile LONG value;
-} ethr_native_atomic_t;
+    volatile ETHR_AINT_T__ value;
+} ETHR_ATMC_T__;
 
-#define ETHR_MEMORY_BARRIER \
-do { \
-    volatile LONG x___ = 0; \
-    _InterlockedCompareExchange(&x___, (LONG) 1, (LONG) 0); \
-} while (0)
+#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_ATOMIC_IMPL__)
 
-#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_AUX_IMPL__)
+static ETHR_INLINE ETHR_AINT_T__ *
+ETHR_NATMC_FUNC__(addr)(ETHR_ATMC_T__ *var)
+{
+    return (ETHR_AINT_T__ *) &var->value;
+}
+
 static ETHR_INLINE void
-ethr_native_atomic_init(ethr_native_atomic_t *var, long i)
+ETHR_NATMC_FUNC__(init)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ i)
 {
-    var->value = (LONG) i;
+#if ETHR_READ_AND_SET_WITHOUT_INTERLOCKED_OP__
+    var->value = i;
+#else
+    (void) ETHR_ILCKD__(Exchange)(&var->value, i);
+#endif
 }
 
 static ETHR_INLINE void
-ethr_native_atomic_set(ethr_native_atomic_t *var, long i)
+ETHR_NATMC_FUNC__(set)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ i)
 {
-#if ETHR_IMMED_ATOMIC_SET_GET_SAFE__
-    var->value = (LONG) i;
+#if ETHR_READ_AND_SET_WITHOUT_INTERLOCKED_OP__
+    var->value = i;
 #else
-    (void) InterlockedExchange(&var->value, (LONG) i);
+    (void) ETHR_ILCKD__(Exchange)(&var->value, i);
 #endif
 }
 
-static ETHR_INLINE long
-ethr_native_atomic_read(ethr_native_atomic_t *var)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(read)(ETHR_ATMC_T__ *var)
 {
-#if ETHR_IMMED_ATOMIC_SET_GET_SAFE__
+#if ETHR_READ_AND_SET_WITHOUT_INTERLOCKED_OP__
     return var->value;
 #else
-    return InterlockedExchangeAdd(&var->value, (LONG) 0);
+    return ETHR_ILCKD__(ExchangeAdd)(&var->value, (ETHR_AINT_T__) 0);
 #endif
 }
 
 static ETHR_INLINE void
-ethr_native_atomic_add(ethr_native_atomic_t *var, long incr)
+ETHR_NATMC_FUNC__(add)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ incr)
 {
-    (void) InterlockedExchangeAdd(&var->value, (LONG) incr);
+    (void) ETHR_ILCKD__(ExchangeAdd)(&var->value, incr);
 }
 
-static ETHR_INLINE long
-ethr_native_atomic_add_return(ethr_native_atomic_t *var, long i)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(add_return)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ i)
 {
-    LONG tmp = InterlockedExchangeAdd(&var->value, (LONG) i);
-    return tmp + i;
+    return ETHR_ILCKD__(ExchangeAdd)(&var->value, i) + i;
 }
 
 static ETHR_INLINE void
-ethr_native_atomic_inc(ethr_native_atomic_t *var)
+ETHR_NATMC_FUNC__(inc)(ETHR_ATMC_T__ *var)
 {
-    (void) InterlockedIncrement(&var->value);
+    (void) ETHR_ILCKD__(Increment)(&var->value);
 }
 
 static ETHR_INLINE void
-ethr_native_atomic_dec(ethr_native_atomic_t *var)
+ETHR_NATMC_FUNC__(dec)(ETHR_ATMC_T__ *var)
 {
-    (void) InterlockedDecrement(&var->value);
+    (void) ETHR_ILCKD__(Decrement)(&var->value);
 }
 
-static ETHR_INLINE long
-ethr_native_atomic_inc_return(ethr_native_atomic_t *var)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(inc_return)(ETHR_ATMC_T__ *var)
 {
-    return (long) InterlockedIncrement(&var->value);
+    return ETHR_ILCKD__(Increment)(&var->value);
 }
 
-static ETHR_INLINE long
-ethr_native_atomic_dec_return(ethr_native_atomic_t *var)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(dec_return)(ETHR_ATMC_T__ *var)
 {
-    return (long) InterlockedDecrement(&var->value);
+    return ETHR_ILCKD__(Decrement)(&var->value);
 }
 
-static ETHR_INLINE long
-ethr_native_atomic_and_retold(ethr_native_atomic_t *var, long mask)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(and_retold)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ mask)
 {
-    return (long) _InterlockedAnd(&var->value, mask);
+    return ETHR_ILCKD__(And)(&var->value, mask);
 }
 
-static ETHR_INLINE long
-ethr_native_atomic_or_retold(ethr_native_atomic_t *var, long mask)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(or_retold)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ mask)
 {
-    return (long) _InterlockedOr(&var->value, mask);
+    return ETHR_ILCKD__(Or)(&var->value, mask);
 }
 
-
-static ETHR_INLINE long
-ethr_native_atomic_cmpxchg(ethr_native_atomic_t *var, long new, long old)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(cmpxchg)(ETHR_ATMC_T__ *var,
+                           ETHR_AINT_T__ new,
+                           ETHR_AINT_T__ old)
 {
-    return (long) _InterlockedCompareExchange(&var->value, (LONG) new, (LONG) old);
+    return ETHR_ILCKD__(CompareExchange)(&var->value, new, old);
 }
 
-static ETHR_INLINE long
-ethr_native_atomic_xchg(ethr_native_atomic_t *var, long new)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(xchg)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ new)
 {
-    return (long) InterlockedExchange(&var->value, (LONG) new);
+    return ETHR_ILCKD__(Exchange)(&var->value, new);
 }
 
 /*
  * Atomic ops with at least specified barriers.
  */
 
-static ETHR_INLINE long
-ethr_native_atomic_read_acqb(ethr_native_atomic_t *var)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(read_acqb)(ETHR_ATMC_T__ *var)
 {
-#ifdef ETHR_HAVE_INTERLOCKEDEXCHANGEADDACQUIRE
-    return (long) InterlockedExchangeAddAcquire(&var->value, (LONG) 0);
+#if ETHR_READ_ACQB_AND_SET_RELB_COMPILER_BARRIER_ONLY__
+    ETHR_AINT_T__ val = var->value;
+    ETHR_COMPILER_BARRIER;
+    return val;
 #else
-    return (long) InterlockedExchangeAdd(&var->value, (LONG) 0);
+    return ETHR_ILCKD_ACQ__(ExchangeAdd)(&var->value, (ETHR_AINT_T__) 0);
 #endif
 }
 
-static ETHR_INLINE long
-ethr_native_atomic_inc_return_acqb(ethr_native_atomic_t *var)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(inc_return_acqb)(ETHR_ATMC_T__ *var)
 {
-#ifdef ETHR_HAVE_INTERLOCKEDINCREMENTACQUIRE
-    return (long) InterlockedIncrementAcquire(&var->value);
-#else
-    return (long) InterlockedIncrement(&var->value);
-#endif
+    return ETHR_ILCKD_ACQ__(Increment)(&var->value);
 }
 
 static ETHR_INLINE void
-ethr_native_atomic_set_relb(ethr_native_atomic_t *var, long i)
+ETHR_NATMC_FUNC__(set_relb)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ i)
 {
-    (void) InterlockedExchange(&var->value, (LONG) i);
+#if ETHR_READ_ACQB_AND_SET_RELB_COMPILER_BARRIER_ONLY__
+    ETHR_COMPILER_BARRIER;
+    var->value = i;
+#else
+    (void) ETHR_ILCKD_REL__(Exchange)(&var->value, i);
+#endif
 }
 
 static ETHR_INLINE void
-ethr_native_atomic_dec_relb(ethr_native_atomic_t *var)
+ETHR_NATMC_FUNC__(dec_relb)(ETHR_ATMC_T__ *var)
 {
-#ifdef ETHR_HAVE_INTERLOCKEDDECREMENTRELEASE
-    (void) InterlockedDecrementRelease(&var->value);
-#else
-    (void) InterlockedDecrement(&var->value);
-#endif
+    (void) ETHR_ILCKD_REL__(Decrement)(&var->value);
 }
 
-static ETHR_INLINE long
-ethr_native_atomic_dec_return_relb(ethr_native_atomic_t *var)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(dec_return_relb)(ETHR_ATMC_T__ *var)
 {
-#ifdef ETHR_HAVE_INTERLOCKEDDECREMENTRELEASE
-    return (long) InterlockedDecrementRelease(&var->value);
-#else
-    return (long) InterlockedDecrement(&var->value);
-#endif
+    return ETHR_ILCKD_REL__(Decrement)(&var->value);
 }
 
-static ETHR_INLINE long
-ethr_native_atomic_cmpxchg_acqb(ethr_native_atomic_t *var, long new, long old)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(cmpxchg_acqb)(ETHR_ATMC_T__ *var,
+                                ETHR_AINT_T__ new,
+                                ETHR_AINT_T__ old)
 {
-#ifdef ETHR_HAVE_INTERLOCKEDCOMPAREEXCHANGE_ACQ
-    return (long) _InterlockedCompareExchange_acq(&var->value, (LONG) new, (LONG) old);
-#else
-    return (long) _InterlockedCompareExchange(&var->value, (LONG) new, (LONG) old);
-#endif
+    return ETHR_ILCKD_ACQ__(CompareExchange)(&var->value, new, old);
 }
 
-static ETHR_INLINE long
-ethr_native_atomic_cmpxchg_relb(ethr_native_atomic_t *var, long new, long old)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(cmpxchg_relb)(ETHR_ATMC_T__ *var,
+                                ETHR_AINT_T__ new,
+                                ETHR_AINT_T__ old)
 {
-
-#ifdef ETHR_HAVE_INTERLOCKEDCOMPAREEXCHANGE_REL
-    return (long) _InterlockedCompareExchange_rel(&var->value, (LONG) new, (LONG) old);
-#else
-    return (long) _InterlockedCompareExchange(&var->value, (LONG) new, (LONG) old);
-#endif
+    return ETHR_ILCKD_REL__(CompareExchange)(&var->value, new, old);
 }
 
-#endif
+#endif /* ETHR_TRY_INLINE_FUNCS */
 
-#endif
+#undef ETHR_ILCKD__
+#undef ETHR_ILCKD_ACQ__
+#undef ETHR_ILCKD_REL__
+#undef ETHR_NATMC_FUNC__
+#undef ETHR_ATMC_T__
+#undef ETHR_AINT_T__
+#undef ETHR_READ_AND_SET_WITHOUT_INTERLOCKED_OP__
+#undef ETHR_READ_ACQB_AND_SET_RELB_COMPILER_BARRIER_ONLY__
+
+#endif /* _MSC_VER */
+
+#endif /* ETHR_INCLUDE_ATOMIC_IMPL__ */
diff --git a/erts/include/internal/win/ethr_event.h b/erts/include/internal/win/ethr_event.h
index af57c20f91..598816b2c6 100644
--- a/erts/include/internal/win/ethr_event.h
+++ b/erts/include/internal/win/ethr_event.h
@@ -21,22 +21,24 @@
  * Author: Rickard Green
  */
 
-#define ETHR_EVENT_OFF_WAITER__ ((LONG) -1)
-#define ETHR_EVENT_OFF__ ((LONG) 1)
-#define ETHR_EVENT_ON__ ((LONG) 0)
+#define ETHR_EVENT_OFF_WAITER__ ((long) -1)
+#define ETHR_EVENT_OFF__ ((long) 1)
+#define ETHR_EVENT_ON__ ((long) 0)
 
 typedef struct {
-    volatile LONG state;
+    volatile long state;
     HANDLE handle;
 } ethr_event;
 
 #if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_EVENT_IMPL__)
 
+#pragma intrinsic(_InterlockedExchange)
+
 static ETHR_INLINE void
 ETHR_INLINE_FUNC_NAME_(ethr_event_set)(ethr_event *e)
 {
-    /* InterlockedExchange() imply a full memory barrier which is important */
-    LONG state = InterlockedExchange(&e->state, ETHR_EVENT_ON__);
+    /* _InterlockedExchange() imply a full memory barrier which is important */
+    long state = _InterlockedExchange(&e->state, ETHR_EVENT_ON__);
     if (state == ETHR_EVENT_OFF_WAITER__) {
         if (!SetEvent(e->handle))
             ETHR_FATAL_ERROR__(ethr_win_get_errno__());
@@ -46,7 +48,7 @@ ETHR_INLINE_FUNC_NAME_(ethr_event_set)(ethr_event *e)
 
 static ETHR_INLINE void
 ETHR_INLINE_FUNC_NAME_(ethr_event_reset)(ethr_event *e)
 {
-    /* InterlockedExchange() imply a full memory barrier which is important */
+    /* _InterlockedExchange() imply a full memory barrier which is important */
     InterlockedExchange(&e->state, ETHR_EVENT_OFF__);
 }
 
diff --git a/erts/include/internal/win/ethread.h b/erts/include/internal/win/ethread.h
index b52710f6a3..c01b17cf14 100644
--- a/erts/include/internal/win/ethread.h
+++ b/erts/include/internal/win/ethread.h
@@ -25,7 +25,11 @@
 #ifndef ETHREAD_WIN_H__
 #define ETHREAD_WIN_H__
 
+#define ETHR_ATOMIC_WANT_32BIT_IMPL__
 #include "ethr_atomic.h"
-#define ETHR_HAVE_NATIVE_ATOMICS 1
+#if ETHR_SIZEOF_PTR == 8
+#  define ETHR_ATOMIC_WANT_64BIT_IMPL__
+#  include "ethr_atomic.h"
+#endif
 
 #endif
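
A note on the ETHR_OWN_ILCKD_* macros in the ethr_atomic.h hunk above: when one of the 64-bit interlocked intrinsics is missing, the patch synthesizes it from _InterlockedCompareExchange64 (the only intrinsic the 64-bit variant strictly requires) using a compare-exchange retry loop. The hand-expanded sketch below shows roughly what ETHR_OWN_ILCKD_2_IMPL__(_InterlockedExchangeAdd64, ...) generates; it is illustrative only, not the literal preprocessor output.

    #include <intrin.h>
    #pragma intrinsic(_InterlockedCompareExchange64)

    /* Only relevant when the real _InterlockedExchangeAdd64 intrinsic is
     * missing (ETHR_HAVE__INTERLOCKEDEXCHANGEADD64 not defined). */
    static __forceinline __int64
    _InterlockedExchangeAdd64(__int64 volatile *ptr, __int64 arg)
    {
        __int64 new, act, exp;
        act = *ptr;           /* initial guess; the macro starts from 0 on
                               * targets where a plain 64-bit read may not
                               * be atomic */
        do {
            exp = act;        /* value we expect *ptr to still hold */
            new = act + arg;  /* the OPS argument: new = act + arg */
            act = _InterlockedCompareExchange64(ptr, new, exp);
        } while (act != exp); /* another thread raced us; retry */
        return act;           /* previous value, as ExchangeAdd returns */
    }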
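
The common block likewise defines ETHR_MEMORY_BARRIER in two ways: SSE2-capable targets (AMD64, and x86 unless ETHR_PRE_PENTIUM4_COMPAT is defined) map the full/read/write barriers onto _mm_mfence/_mm_lfence/_mm_sfence, while the fallback issues a dummy interlocked compare-exchange on a local variable and relies on its full-barrier semantics. A minimal stand-alone sketch of the fallback; the wrapper name here is made up for illustration.

    #include <intrin.h>
    #pragma intrinsic(_InterlockedCompareExchange)

    /* Hypothetical wrapper around the fallback barrier: the locked
     * compare-exchange on a throwaway variable orders all preceding
     * loads/stores before all following ones. */
    static __forceinline void
    ethr_fallback_full_barrier(void)
    {
        volatile long x___ = 0;
        _InterlockedCompareExchange(&x___, (long) 1, (long) 0);
    }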
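
Finally, the ethread.h hunk shows how the parameterized header is consumed: it is included once with ETHR_ATOMIC_WANT_32BIT_IMPL__ defined and, when ETHR_SIZEOF_PTR == 8, a second time with ETHR_ATOMIC_WANT_64BIT_IMPL__. Each pass instantiates ETHR_NATMC_FUNC__/ETHR_ATMC_T__/ETHR_AINT_T__ for one width and then #undefs them. The sketch below shows what the two passes boil down to for a single operation (illustrative only; ethr_sint32_t/ethr_sint64_t and ETHR_INLINE come from other ethread headers).

    /* First pass: ETHR_INCLUDE_ATOMIC_IMPL__ == 4 */
    typedef struct { volatile ethr_sint32_t value; } ethr_native_atomic32_t;

    static ETHR_INLINE ethr_sint32_t
    ethr_native_atomic32_inc_return(ethr_native_atomic32_t *var)
    {
        return _InterlockedIncrement(&var->value);   /* ETHR_ILCKD__(Increment) */
    }

    /* Second pass (64-bit pointers only): ETHR_INCLUDE_ATOMIC_IMPL__ == 8 */
    typedef struct { volatile ethr_sint64_t value; } ethr_native_atomic64_t;

    static ETHR_INLINE ethr_sint64_t
    ethr_native_atomic64_inc_return(ethr_native_atomic64_t *var)
    {
        return _InterlockedIncrement64(&var->value); /* ETHR_ILCKD__(Increment) */
    }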