author     Rickard Green <[email protected]>    2011-01-02 10:03:54 +0100
committer  Rickard Green <[email protected]>    2011-06-14 11:40:19 +0200
commit     7f19af0423934f85c74ccb75546e5e3a6b6d10e8 (patch)
tree       612d1010f37517f813a94d8a5f38cfd0126ce3f8 /erts/include/internal/sparc32/ethr_membar.h
parent     4a5a75811e2cd590b5c94f71864a5245fd511ccf (diff)
Improve ethread atomics
The ethread atomics API now also provides double word size atomics.
Double word size atomics are implemented using native atomic
instructions on x86 (when the cmpxchg8b instruction is available)
and on x86_64 (when the cmpxchg16b instruction is available). On
other hardware where 32-bit atomics or word size atomics are
available, an optimized fallback is used; otherwise, a spinlock- or
mutex-based fallback is used.
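
To illustrate the lock based fallback path, a minimal sketch of a
double word cmpxchg protected by a spinlock is shown below. The type
and function names are hypothetical and are not the actual ethread
implementation, which uses its own lock and integer types.

#include <pthread.h>
#include <string.h>

typedef struct { long sint[2]; } dw_t;  /* illustrative double word type */

static pthread_spinlock_t dw_lock;      /* initialize with pthread_spin_init() */

/* If *var equals *old, store *val into *var and return non-zero;
 * otherwise copy the current value of *var into *old and return zero. */
static int
dw_cmpxchg_fallback(dw_t *var, dw_t *val, dw_t *old)
{
    int ok;
    pthread_spin_lock(&dw_lock);
    ok = (memcmp((void *) var, (void *) old, sizeof(dw_t)) == 0);
    if (ok)
        *var = *val;
    else
        *old = *var;
    pthread_spin_unlock(&dw_lock);
    return ok;
}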
The ethread library now performs runtime tests for the presence of
hardware features, such as SSE2 instructions, instead of requiring
this to be determined at compile time.
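
On x86 and x86_64 such a runtime test can, for example, be performed
with the cpuid instruction. The sketch below uses GCC's <cpuid.h>; the
actual probing code in ethread may look different.

#include <cpuid.h>

/* Non-zero if the CPU reports SSE2 support (CPUID.1:EDX bit 26). */
static int
have_sse2(void)
{
    unsigned eax, ebx, ecx, edx;
    if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
        return 0;
    return (edx & (1u << 26)) != 0;
}

/* Non-zero if the CPU reports cmpxchg16b support (CPUID.1:ECX bit 13). */
static int
have_cmpxchg16b(void)
{
    unsigned eax, ebx, ecx, edx;
    if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
        return 0;
    return (ecx & (1u << 13)) != 0;
}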
There are now functions implementing each atomic operation with the
following implied memory barrier semantics: none, read, write,
acquire, release, and full. Some of the operation-barrier
combinations aren't especially useful, but instead of filtering the
less useful ones out, and potentially missing one that is needed, we
implement them all.
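
A barrier variant of an operation can, for example, be composed from
a plain (barrier-free) native operation and the ETHR_MEMBAR macro from
the header added by this commit. In the sketch below atomic32_set__()
is a hypothetical barrier-free native store; ethr_sint32_t is the
ethread 32-bit integer type.

/* Release semantics: no earlier load or store may be reordered past
 * the store performed by the operation. */
static __inline__ void
atomic32_set_relb(volatile ethr_sint32_t *var, ethr_sint32_t val)
{
    ETHR_MEMBAR(ETHR_LoadStore|ETHR_StoreStore);
    atomic32_set__(var, val);
}

/* Full barrier semantics: barriers on both sides of the operation. */
static __inline__ void
atomic32_set_mb(volatile ethr_sint32_t *var, ethr_sint32_t val)
{
    ETHR_MEMBAR(ETHR_LoadStore|ETHR_StoreStore);
    atomic32_set__(var, val);
    ETHR_MEMBAR(ETHR_StoreLoad|ETHR_StoreStore);
}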
A much smaller set of native atomic functionality is now required to
be implemented than before. More or less only cmpxchg and a membar
macro need to be implemented for each atomic size; the other
functions are automatically constructed from these. It is, of course,
often wise to implement more than this when possible, from a
performance perspective.
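
For instance, an add-and-return operation can be derived from nothing
but a native cmpxchg, as in the sketch below. native_cmpxchg32__() is
a hypothetical primitive that stores new into *var iff *var equals exp
and returns the value actually observed in *var.

static __inline__ ethr_sint32_t
atomic32_add_return(volatile ethr_sint32_t *var, ethr_sint32_t incr)
{
    ethr_sint32_t exp, act = *var;
    do {
        exp = act;
        act = native_cmpxchg32__(var, exp + incr, exp);
    } while (act != exp);
    return exp + incr;
}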
Diffstat (limited to 'erts/include/internal/sparc32/ethr_membar.h')
-rw-r--r--  erts/include/internal/sparc32/ethr_membar.h  115
1 file changed, 115 insertions, 0 deletions
diff --git a/erts/include/internal/sparc32/ethr_membar.h b/erts/include/internal/sparc32/ethr_membar.h
new file mode 100644
index 0000000000..6eb0c5a1d6
--- /dev/null
+++ b/erts/include/internal/sparc32/ethr_membar.h
@@ -0,0 +1,115 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2011. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Description: Memory barriers for sparc-v9
+ * Author: Rickard Green
+ */
+
+#ifndef ETHR_SPARC_V9_MEMBAR_H__
+#define ETHR_SPARC_V9_MEMBAR_H__
+
+#if defined(ETHR_SPARC_TSO)
+/* --- Total Store Order ------------------------------------------------ */
+
+#define ETHR_LoadLoad 0
+#define ETHR_LoadStore 0
+#define ETHR_StoreLoad 1
+#define ETHR_StoreStore 0
+
+static __inline__ void
+ethr_cb__(void)
+{
+    __asm__ __volatile__ ("" : : : "memory");
+}
+
+static __inline__ void
+ethr_StoreLoad__(void)
+{
+    __asm__ __volatile__ ("membar #StoreLoad\n\t" : : : "memory");
+}
+
+
+#define ETHR_MEMBAR(B) \
+  ETHR_CHOOSE_EXPR((B), ethr_StoreLoad__(), ethr_cb__())
+
+#elif defined(ETHR_SPARC_PSO)
+/* --- Partial Store Order ---------------------------------------------- */
+
+#define ETHR_LoadLoad 0
+#define ETHR_LoadStore 0
+#define ETHR_StoreLoad (1 << 0)
+#define ETHR_StoreStore (1 << 1)
+
+static __inline__ void
+ethr_cb__(void)
+{
+    __asm__ __volatile__ ("" : : : "memory");
+}
+
+static __inline__ void
+ethr_StoreLoad__(void)
+{
+    __asm__ __volatile__ ("membar #StoreLoad\n\t" : : : "memory");
+}
+
+static __inline__ void
+ethr_StoreStore__(void)
+{
+    __asm__ __volatile__ ("membar #StoreStore\n\t" : : : "memory");
+}
+
+static __inline__ void
+ethr_StoreLoad_StoreStore__(void)
+{
+    __asm__ __volatile__ ("membar #StoreLoad|StoreStore\n\t" : : : "memory");
+}
+
+#define ETHR_MEMBAR(B) \
+  ETHR_CHOOSE_EXPR( \
+    (B) == ETHR_StoreLoad, \
+    ethr_StoreLoad__(), \
+    ETHR_CHOOSE_EXPR( \
+      (B) == ETHR_StoreStore, \
+      ethr_StoreStore__(), \
+      ETHR_CHOOSE_EXPR( \
+        (B) == (ETHR_StoreLoad|ETHR_StoreStore), \
+        ethr_StoreLoad_StoreStore__(), \
+        ethr_cb__())))
+
+#elif defined(ETHR_SPARC_RMO)
+/* --- Relaxed Memory Order --------------------------------------------- */
+
+# define ETHR_LoadLoad #LoadLoad
+# define ETHR_LoadStore #LoadStore
+# define ETHR_StoreLoad #StoreLoad
+# define ETHR_StoreStore #StoreStore
+
+# define ETHR_MEMBAR_AUX__(B) \
+    __asm__ __volatile__("membar " #B "\n\t" : : : "memory")
+
+# define ETHR_MEMBAR(B) ETHR_MEMBAR_AUX__(B)
+
+#else
+
+# error "No memory order defined"
+
+#endif
+
+#endif
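
Usage sketch for the ETHR_MEMBAR macro above, assuming that
ETHR_CHOOSE_EXPR (provided by the other ethread headers) selects one
of its branches at compile time when the condition is constant.
Callers request barriers by OR-ing the constants together, and the
macro issues the weakest membar that satisfies the request.

ETHR_MEMBAR(ETHR_LoadLoad);                  /* compiler barrier only under TSO/PSO */
ETHR_MEMBAR(ETHR_StoreLoad);                 /* membar #StoreLoad */
ETHR_MEMBAR(ETHR_StoreLoad|ETHR_StoreStore); /* under RMO the constants expand to
                                                #StoreLoad|#StoreStore and are
                                                stringified into the instruction */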