author    Erlang/OTP <[email protected]>  2009-11-20 14:54:40 +0000
committer Erlang/OTP <[email protected]>  2009-11-20 14:54:40 +0000
commit    84adefa331c4159d432d22840663c38f155cd4c1 (patch)
tree      bff9a9c66adda4df2106dfd0e5c053ab182a12bd  /erts/include
download  otp-84adefa331c4159d432d22840663c38f155cd4c1.tar.gz
          otp-84adefa331c4159d432d22840663c38f155cd4c1.tar.bz2
          otp-84adefa331c4159d432d22840663c38f155cd4c1.zip

The R13B03 release. (tag: OTP_R13B03)
Diffstat (limited to 'erts/include')
-rw-r--r--  erts/include/erl_fixed_size_int_types.h             160
-rw-r--r--  erts/include/erl_int_sizes_config.h.in               33
-rw-r--r--  erts/include/erl_memory_trace_parser.h              156
-rw-r--r--  erts/include/internal/README                         28
-rw-r--r--  erts/include/internal/erl_errno.h                    51
-rw-r--r--  erts/include/internal/erl_memory_trace_protocol.h   245
-rw-r--r--  erts/include/internal/erl_misc_utils.h               53
-rw-r--r--  erts/include/internal/erl_printf.h                   57
-rw-r--r--  erts/include/internal/erl_printf_format.h            46
-rw-r--r--  erts/include/internal/erts_internal.mk.in            24
-rw-r--r--  erts/include/internal/ethread.h                    1448
-rw-r--r--  erts/include/internal/ethread.mk.in                  39
-rw-r--r--  erts/include/internal/ethread_header_config.h.in     55
-rw-r--r--  erts/include/internal/i386/atomic.h                 155
-rw-r--r--  erts/include/internal/i386/ethread.h                 34
-rw-r--r--  erts/include/internal/i386/rwlock.h                 134
-rw-r--r--  erts/include/internal/i386/spinlock.h                92
-rw-r--r--  erts/include/internal/ppc32/atomic.h                209
-rw-r--r--  erts/include/internal/ppc32/ethread.h                34
-rw-r--r--  erts/include/internal/ppc32/rwlock.h                153
-rw-r--r--  erts/include/internal/ppc32/spinlock.h               93
-rw-r--r--  erts/include/internal/sparc32/atomic.h              173
-rw-r--r--  erts/include/internal/sparc32/ethread.h              34
-rw-r--r--  erts/include/internal/sparc32/rwlock.h              142
-rw-r--r--  erts/include/internal/sparc32/spinlock.h             81
-rw-r--r--  erts/include/internal/sparc64/ethread.h              20
-rw-r--r--  erts/include/internal/tile/atomic.h                 128
-rw-r--r--  erts/include/internal/tile/ethread.h                 30
-rw-r--r--  erts/include/internal/x86_64/ethread.h               20
29 files changed, 3927 insertions, 0 deletions
diff --git a/erts/include/erl_fixed_size_int_types.h b/erts/include/erl_fixed_size_int_types.h
new file mode 100644
index 0000000000..3bbc37aea7
--- /dev/null
+++ b/erts/include/erl_fixed_size_int_types.h
@@ -0,0 +1,160 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2004-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+
+/*
+ * Description:
+ *
+ * Author: Rickard Green
+ */
+
+#ifndef FIXED_SIZE_INT_TYPES_H__
+#define FIXED_SIZE_INT_TYPES_H__
+
+#ifdef SIZEOF_CHAR
+# define SIZEOF_CHAR_SAVED__ SIZEOF_CHAR
+# undef SIZEOF_CHAR
+#endif
+
+#ifdef SIZEOF_SHORT
+# define SIZEOF_SHORT_SAVED__ SIZEOF_SHORT
+# undef SIZEOF_SHORT
+#endif
+
+#ifdef SIZEOF_INT
+# define SIZEOF_INT_SAVED__ SIZEOF_INT
+# undef SIZEOF_INT
+#endif
+
+#ifdef SIZEOF_LONG
+# define SIZEOF_LONG_SAVED__ SIZEOF_LONG
+# undef SIZEOF_LONG
+#endif
+
+#ifdef SIZEOF_LONG_LONG
+# define SIZEOF_LONG_LONG_SAVED__ SIZEOF_LONG_LONG
+# undef SIZEOF_LONG_LONG
+#endif
+
+#include "erl_int_sizes_config.h"
+
+#ifdef SIZEOF_CHAR_SAVED__
+# if SIZEOF_CHAR != SIZEOF_CHAR_SAVED__
+# error char type size mismatch
+# endif
+# undef SIZEOF_CHAR_SAVED__
+#endif
+
+#ifdef SIZEOF_SHORT_SAVED__
+# if SIZEOF_SHORT != SIZEOF_SHORT_SAVED__
+# error short type size mismatch
+# endif
+# undef SIZEOF_SHORT_SAVED__
+#endif
+
+#ifdef SIZEOF_INT_SAVED__
+# if SIZEOF_INT != SIZEOF_INT_SAVED__
+# error int type size mismatch
+# endif
+# undef SIZEOF_INT_SAVED__
+#endif
+
+#ifdef SIZEOF_LONG_SAVED__
+# if SIZEOF_LONG != SIZEOF_LONG_SAVED__
+# error long type size mismatch
+# endif
+# undef SIZEOF_LONG_SAVED__
+#endif
+
+#ifdef SIZEOF_LONG_LONG_SAVED__
+# if SIZEOF_LONG_LONG != SIZEOF_LONG_LONG_SAVED__
+# error long long type size mismatch
+# endif
+# undef SIZEOF_LONG_LONG_SAVED__
+#endif
+
+
+#if SIZEOF_LONG == 8
+#define HAVE_INT_64 1
+typedef unsigned long usgnd_int_64;
+typedef signed long sgnd_int_64;
+#define USGND_INT_64_FSTR "lu"
+#define SGND_INT_64_FSTR "ld"
+#elif SIZEOF_LONG_LONG == 8
+#define HAVE_INT_64 1
+typedef unsigned long long usgnd_int_64;
+typedef signed long long sgnd_int_64;
+#define USGND_INT_64_FSTR "llu"
+#define SGND_INT_64_FSTR "lld"
+#else
+#define HAVE_INT_64 0
+#endif
+
+#if SIZEOF_LONG == 4
+typedef unsigned long usgnd_int_32;
+typedef signed long sgnd_int_32;
+#define USGND_INT_32_FSTR "lu"
+#define SGND_INT_32_FSTR "ld"
+#elif SIZEOF_INT == 4
+typedef unsigned int usgnd_int_32;
+typedef signed int sgnd_int_32;
+#define USGND_INT_32_FSTR "u"
+#define SGND_INT_32_FSTR "d"
+#else
+#error Found no appropriate type to use for 'usgnd_int_32' and 'sgnd_int_32'
+#endif
+
+#if SIZEOF_INT == 2
+typedef unsigned int usgnd_int_16;
+typedef signed int sgnd_int_16;
+#define USGND_INT_16_FSTR "u"
+#define SGND_INT_16_FSTR "d"
+#elif SIZEOF_SHORT == 2
+typedef unsigned short usgnd_int_16;
+typedef signed short sgnd_int_16;
+#define USGND_INT_16_FSTR "u"
+#define SGND_INT_16_FSTR "d"
+#else
+#error Found no appropriate type to use for 'usgnd_int_16' and 'sgnd_int_16'
+#endif
+
+#if SIZEOF_CHAR == 1
+typedef unsigned char usgnd_int_8;
+typedef signed char sgnd_int_8;
+#define USGND_INT_8_FSTR "u"
+#define SGND_INT_8_FSTR "d"
+#else
+/* This should *never* happen! */
+#error Found no appropriate type to use for 'usgnd_int_8' and 'sgnd_int_8'
+#endif
+
+
+#if HAVE_INT_64
+typedef usgnd_int_64 usgnd_int_max;
+typedef sgnd_int_64 sgnd_int_max;
+#define USGND_INT_MAX_FSTR USGND_INT_64_FSTR
+#define SGND_INT_MAX_FSTR SGND_INT_64_FSTR
+#else
+typedef usgnd_int_32 usgnd_int_max;
+typedef sgnd_int_32 sgnd_int_max;
+#define USGND_INT_MAX_FSTR USGND_INT_32_FSTR
+#define SGND_INT_MAX_FSTR SGND_INT_32_FSTR
+#endif
+
+#endif /* #ifndef FIXED_SIZE_INT_TYPES_H__ */
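A minimal standalone sketch, not part of the patch above, showing how the fixed-size typedefs and their *_FSTR macros are meant to be combined with printf-style formatting: the FSTR macros expand to the conversion specifier without the leading '%', so they are concatenated into the format string.

    #include <stdio.h>
    #include "erl_fixed_size_int_types.h"

    int main(void)
    {
        usgnd_int_32 u32 = 4711;
        sgnd_int_max smax = -17;

        printf("u32  = %" USGND_INT_32_FSTR "\n", u32);
        printf("smax = %" SGND_INT_MAX_FSTR "\n", smax);
    #if HAVE_INT_64
        {
            usgnd_int_64 u64 = 1;
            printf("u64  = %" USGND_INT_64_FSTR "\n", u64);
        }
    #endif
        return 0;
    }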
diff --git a/erts/include/erl_int_sizes_config.h.in b/erts/include/erl_int_sizes_config.h.in
new file mode 100644
index 0000000000..ef49995732
--- /dev/null
+++ b/erts/include/erl_int_sizes_config.h.in
@@ -0,0 +1,33 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2004-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/* The number of bytes in a char. */
+#undef SIZEOF_CHAR
+
+/* The number of bytes in a short. */
+#undef SIZEOF_SHORT
+
+/* The number of bytes in a int. */
+#undef SIZEOF_INT
+
+/* The number of bytes in a long. */
+#undef SIZEOF_LONG
+
+/* The number of bytes in a long long. */
+#undef SIZEOF_LONG_LONG
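This template is filled in by the configure script; the generated erl_int_sizes_config.h is what erl_fixed_size_int_types.h includes and cross-checks above. As a purely hypothetical example, on a typical LP64 Unix platform the generated file would contain values along these lines (the real values always come from the configure run):

    #define SIZEOF_CHAR 1
    #define SIZEOF_SHORT 2
    #define SIZEOF_INT 4
    #define SIZEOF_LONG 8
    #define SIZEOF_LONG_LONG 8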
diff --git a/erts/include/erl_memory_trace_parser.h b/erts/include/erl_memory_trace_parser.h
new file mode 100644
index 0000000000..3b6f76d2fd
--- /dev/null
+++ b/erts/include/erl_memory_trace_parser.h
@@ -0,0 +1,156 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2004-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+
+/*
+ * Description:
+ *
+ * Author: Rickard Green
+ */
+
+#ifndef ERL_MTRACE_PARSER_H__
+#define ERL_MTRACE_PARSER_H__
+
+#include <stdlib.h>
+#include "erl_fixed_size_int_types.h"
+
+/* emtp_parse() return values */
+#define EMTP_MIN_ERROR EMTP_NO_TRACE_ERROR
+
+#define EMTP_NO_TRACE_ERROR (-11)
+#define EMTP_HEADER_TAG_IN_BODY_ERROR (-10)
+#define EMTP_BODY_TAG_IN_HEADER_ERROR ( -9)
+#define EMTP_NOT_SUPPORTED_MTRACE_VERSION_ERROR ( -8)
+#define EMTP_NOT_AN_ERL_MTRACE_ERROR ( -7)
+#define EMTP_NO_MEMORY_ERROR ( -6)
+#define EMTP_BAD_OP_SIZE_ERROR ( -5)
+#define EMTP_NO_OPERATIONS_ERROR ( -4)
+#define EMTP_NOT_SUPPORTED_64_BITS_TRACE_ERROR ( -3)
+#define EMTP_PARSE_ERROR ( -2)
+#define EMTP_UNKNOWN_TAG_ERROR ( -1)
+#define EMTP_END_OF_TRACE ( 0)
+#define EMTP_END_OF_TRACE_GARBAGE_FOLLOWS ( 1)
+#define EMTP_ALL_OPS_FILLED ( 2)
+#define EMTP_NEED_MORE_TRACE ( 3)
+#define EMTP_HEADER_PARSED ( 4)
+
+/* Allocator flags */
+#define EMTP_ALLOCATOR_FLAG_HAVE_USED_CARRIERS_INFO (1 << 0)
+
+/* Block type flags */
+/* #define EMTP_BLOCK_TYPE_FLAG_X */
+
+
+typedef struct {
+ usgnd_int_32 major;
+ usgnd_int_32 minor;
+} emtp_version;
+
+typedef struct {
+ emtp_version parser;
+ emtp_version trace;
+} emtp_versions;
+
+typedef struct {
+ int valid;
+ usgnd_int_32 flags;
+ char * name;
+ struct {
+ usgnd_int_16 no_providers;
+ usgnd_int_16 * provider;
+ } carrier;
+} emtp_allocator;
+
+typedef struct {
+ int valid;
+ usgnd_int_32 flags;
+ char * name;
+ sgnd_int_32 allocator;
+} emtp_block_type;
+
+typedef struct {
+ emtp_versions version;
+ int bits;
+ char * nodename;
+ char * hostname;
+ char * pid;
+ struct {
+ usgnd_int_32 year;
+ usgnd_int_32 month;
+ usgnd_int_32 day;
+ usgnd_int_32 hour;
+ usgnd_int_32 minute;
+ usgnd_int_32 second;
+ usgnd_int_32 micro_second;
+ } start_time;
+ usgnd_int_16 segment_ix;
+ usgnd_int_16 max_allocator_ix;
+ emtp_allocator ** allocator;
+ usgnd_int_16 max_block_type_ix;
+ emtp_block_type ** block_type;
+ int have_carrier_info;
+ int have_segment_carrier_info;
+} emtp_info;
+
+typedef struct emtp_state_ emtp_state;
+
+enum emtp_op_type_ {
+ EMTP_UNDEF = 0,
+ EMTP_ALLOC = 1,
+ EMTP_REALLOC = 2,
+ EMTP_FREE = 3,
+ EMTP_CARRIER_ALLOC = 4,
+ EMTP_CARRIER_REALLOC = 5,
+ EMTP_CARRIER_FREE = 6,
+ EMTP_STOP = 7,
+ EMTP_EXIT = 8
+};
+
+typedef enum emtp_op_type_ emtp_op_type;
+
+typedef struct {
+ usgnd_int_16 type;
+ usgnd_int_16 carrier_type;
+ usgnd_int_max new_ptr;
+ usgnd_int_max prev_ptr;
+ usgnd_int_max new_size;
+} emtp_block_op;
+
+typedef struct {
+ emtp_op_type type;
+ struct {
+ usgnd_int_32 secs;
+ usgnd_int_32 usecs;
+ } time;
+ union {
+ emtp_block_op block;
+ usgnd_int_32 exit_status;
+ } u;
+} emtp_operation;
+
+const char *emtp_error_string(int);
+int emtp_get_info(emtp_info *ip, size_t *isz, emtp_state *sp);
+emtp_state *emtp_state_new(void * (*alloc)(size_t),
+ void * (*realloc)(void *, size_t),
+ void (*free)(void *));
+void emtp_state_destroy(emtp_state *sp);
+int emtp_parse(emtp_state *sp,
+ usgnd_int_8 **tracepp, size_t *trace_lenp,
+ emtp_operation *op_start, size_t op_size, size_t *op_lenp);
+#endif
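A hedged usage sketch, not part of the commit: create a parser state with the caller's allocators, feed raw trace data to emtp_parse(), and consume the emtp_operation records it fills in. The refill/continuation protocol and the interpretation of the op_size/op_lenp arguments are assumptions based on the prototypes and return codes above; read_chunk() is a hypothetical input function.

    #include <stdio.h>
    #include <stdlib.h>
    #include "erl_memory_trace_parser.h"

    extern size_t read_chunk(usgnd_int_8 *buf, size_t max); /* hypothetical */

    int parse_trace(void)
    {
        emtp_state *sp = emtp_state_new(malloc, realloc, free);
        emtp_operation ops[256];
        usgnd_int_8 buf[4096];
        int res = EMTP_NEED_MORE_TRACE;

        if (!sp)
            return EMTP_NO_MEMORY_ERROR;

        do {
            usgnd_int_8 *tracep = buf;
            size_t trace_len = read_chunk(buf, sizeof(buf));
            size_t op_len = sizeof(ops) / sizeof(ops[0]);

            if (trace_len == 0)
                break;
            res = emtp_parse(sp, &tracep, &trace_len,
                             ops, sizeof(emtp_operation), &op_len);
            if (res < 0) {
                fprintf(stderr, "parse failed: %s\n", emtp_error_string(res));
                break;
            }
            /* ...consume the operations now stored in ops[]... */
        } while (res != EMTP_END_OF_TRACE);

        emtp_state_destroy(sp);
        return res;
    }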
diff --git a/erts/include/internal/README b/erts/include/internal/README
new file mode 100644
index 0000000000..f7b78a3468
--- /dev/null
+++ b/erts/include/internal/README
@@ -0,0 +1,28 @@
+
+ %CopyrightBegin%
+
+ Copyright Ericsson AB 2004-2009. All Rights Reserved.
+
+ The contents of this file are subject to the Erlang Public License,
+ Version 1.1, (the "License"); you may not use this file except in
+ compliance with the License. You should have received a copy of the
+ Erlang Public License along with this software. If not, it can be
+ retrieved online at http://www.erlang.org/.
+
+ Software distributed under the License is distributed on an "AS IS"
+ basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ the License for the specific language governing rights and limitations
+ under the License.
+
+ %CopyrightEnd%
+
+------------------------------------------------------------------------
+------------------------------------------------------------------------
+
+ Files in this directory are *not* for public use and should *only*
+ be used by Erlang/OTP applications. The content of this directory
+ and the interfaces present in this directory may be changed at any
+ time without prior notice.
+
+------------------------------------------------------------------------
+------------------------------------------------------------------------
diff --git a/erts/include/internal/erl_errno.h b/erts/include/internal/erl_errno.h
new file mode 100644
index 0000000000..2e095e9f64
--- /dev/null
+++ b/erts/include/internal/erl_errno.h
@@ -0,0 +1,51 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+#ifndef ERL_ERRNO_H__
+#define ERL_ERRNO_H__
+
+#include <errno.h>
+/*
+ * Make sure that ENOTSUP is defined.
+ */
+#ifndef ENOTSUP
+# ifdef EOPNOTSUPP
+# define ENOTSUP EOPNOTSUPP
+#else
+# define ENOTSUP INT_MAX
+# endif
+#endif
+
+#ifdef __WIN32__
+# ifndef EWOULDBLOCK
+# define EWOULDBLOCK (10035) /* WSAEWOULDBLOCK */
+# endif
+# ifndef ETIMEDOUT
+# define ETIMEDOUT (10060) /* WSAETIMEDOUT */
+# endif
+#else
+# ifndef EWOULDBLOCK
+# define EWOULDBLOCK EAGAIN
+# endif
+# ifndef ETIMEDOUT
+# define ETIMEDOUT EAGAIN
+# endif
+#endif
+
+#endif
diff --git a/erts/include/internal/erl_memory_trace_protocol.h b/erts/include/internal/erl_memory_trace_protocol.h
new file mode 100644
index 0000000000..bda1f65c87
--- /dev/null
+++ b/erts/include/internal/erl_memory_trace_protocol.h
@@ -0,0 +1,245 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2004-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+
+/*
+ * Description:
+ *
+ * Author: Rickard Green
+ */
+
+#ifndef ERL_MEMORY_TRACE_PROTOCOL_H__
+#define ERL_MEMORY_TRACE_PROTOCOL_H__
+
+/*
+ * Increase ERTS_MT_MAJOR_VSN and set ERTS_MT_MINOR_VSN to 0
+ * when backward incompatible changes are made in the protocol.
+ *
+ * Increase ERTS_MT_MINOR_VSN when backward compatible changes are
+ * made in the protocol.
+ */
+#define ERTS_MT_MAJOR_VSN (2)
+#define ERTS_MT_MINOR_VSN (0)
+
+/* Trace flags */
+
+#define ERTS_MT_64_BIT_FLAG (1 << 0)
+#define ERTS_MT_CRR_INFO (1 << 1)
+#define ERTS_MT_SEG_CRR_INFO (1 << 2)
+
+/* Header flags */
+/* Allocator flags */
+
+#define ERTS_MT_ALLCTR_USD_CRR_INFO (1 << 0)
+
+/* Block type flags */
+
+
+
+/* Entry tags */
+
+#define ERTS_MT_V1_ALLOCATOR_TAG (1)
+#define ERTS_MT_V1_BLOCK_TYPE_TAG (2)
+#define ERTS_MT_V1_ALLOC_TAG (3)
+#define ERTS_MT_V1_REALLOC_NPB_TAG (4)
+#define ERTS_MT_V1_REALLOC_MV_TAG (5)
+#define ERTS_MT_V1_REALLOC_NMV_TAG (6)
+#define ERTS_MT_V1_FREE_TAG (7)
+#define ERTS_MT_V1_TIME_INC_TAG (8)
+#define ERTS_MT_V1_STOP_TAG (9)
+#define ERTS_MT_V1_EXIT_TAG (10)
+
+#define ERTS_MT_END_OF_HDR_TAG (0)
+#define ERTS_MT_ALLOCATOR_HDR_TAG (1)
+#define ERTS_MT_BLOCK_TYPE_HDR_TAG (2)
+
+#define ERTS_MT_EXIT_BDY_TAG (0)
+#define ERTS_MT_STOP_BDY_TAG (1)
+#define ERTS_MT_ALLOC_BDY_TAG (2)
+#define ERTS_MT_REALLOC_BDY_TAG (3)
+#define ERTS_MT_FREE_BDY_TAG (4)
+#define ERTS_MT_CRR_ALLOC_BDY_TAG (5)
+#define ERTS_MT_CRR_REALLOC_BDY_TAG (6)
+#define ERTS_MT_CRR_FREE_BDY_TAG (7)
+#define ERTS_MT_TIME_INC_BDY_TAG (8)
+#define ERTS_MT_X_BDY_TAG (9)
+
+/* X subtags */
+#if 0
+#define ERTS_MT_X_ _BDY_TAG (0)
+#endif
+
+#define ERTS_MT_START_WORD (0xfff04711)
+/* Entry header fields */
+
+#define ERTS_MT_UI8_MSB_EHDR_FLD_SZ (0)
+#define ERTS_MT_UI16_MSB_EHDR_FLD_SZ (1)
+#define ERTS_MT_UI32_MSB_EHDR_FLD_SZ (2)
+#define ERTS_MT_UI64_MSB_EHDR_FLD_SZ (3)
+#define ERTS_MT_UI_MSB_EHDR_FLD_SZ ERTS_MT_UI64_MSB_EHDR_FLD_SZ
+#define ERTS_MT_TAG_EHDR_FLD_SZ (4)
+
+#define ERTS_MT_UI8_MSB_EHDR_FLD_MSK ((1 << ERTS_MT_UI8_MSB_EHDR_FLD_SZ)-1)
+#define ERTS_MT_UI16_MSB_EHDR_FLD_MSK ((1 << ERTS_MT_UI16_MSB_EHDR_FLD_SZ)-1)
+#define ERTS_MT_UI32_MSB_EHDR_FLD_MSK ((1 << ERTS_MT_UI32_MSB_EHDR_FLD_SZ)-1)
+#define ERTS_MT_UI64_MSB_EHDR_FLD_MSK ((1 << ERTS_MT_UI64_MSB_EHDR_FLD_SZ)-1)
+#define ERTS_MT_UI_MSB_EHDR_FLD_MSK ERTS_MT_UI64_MSB_EHDR_FLD_MSK
+#define ERTS_MT_TAG_EHDR_FLD_MSK ((1 << ERTS_MT_TAG_EHDR_FLD_SZ)-1)
+
+/* Time increment word */
+#define ERTS_MT_TIME_INC_SECS_SHIFT 20
+#define ERTS_MT_TIME_INC_USECS_SHIFT 0
+
+#define ERTS_MT_TIME_INC_SECS_MASK ((1 << 12) - 1)
+#define ERTS_MT_TIME_INC_USECS_MASK ((1 << 20) - 1)
+
+
+#define ERTS_MT_MAX_V1_HEADER_ENTRY_SIZE (2 + 2 + 1 + 255 + 2)
+/* Largest v1 header entry is block type entry (ERTS_MT_V1_BLOCK_TYPE_TAG) */
+#define ERTS_MT_MAX_V1_BODY_ENTRY_SIZE (2 + 8 + 8 + 8 + 4)
+/* Largest body entry is realloc moved entry (ERTS_MT_V1_REALLOC_MV_TAG) */
+
+
+#define ERTS_MT_MAX_HEADER_ENTRY_SIZE (1 + 2 + 2 + 1 + 255 + 2)
+/* Largest header entry is block type entry (ERTS_MT_BLOCK_TYPE_HDR_TAG) */
+#define ERTS_MT_MAX_BODY_ENTRY_SIZE ERTS_MT_MAX_CRR_REALLOC_SIZE
+/* Largest body entry is carrier realloc entry (ERTS_MT_CRR_REALLOC_BDY_TAG) */
+
+/*
+ *
+ * Entry header:
+ *
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | ... |MSB2|MSB1|
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *
+ * Time inc entry field:
+ *
+ * 31 23 0
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Seconds | Micro Seconds |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ */
+
+#define ERTS_MT_MAX_CRR_ALLOC_SIZE (1 + 2 + 2 + 2 + 8 + 8 + 4)
+
+/*
+ * ERTS_MT_CRR_ALLOC_BDY_TAG:
+ * N 1 2 3 4 5
+ * MSB 1-0 1-0 7|3-0 7|3-0 3-0
+ * SZ 1 2 2-1 2-1 8|4-1 8|4-1 4-1
+ * UIT UI8 UI16 UI16 UI16 UI64|UI32 UI64|UI32 UI32
+ * +---+----+...--+...--+...-------+...-------+...-------+
+ * |Tag| Hdr|CType| Type| Out ptr | In size | Time inc |
+ * +---+----+...--+...--+...-------+...-------+...-------+
+ *
+ */
+
+#define ERTS_MT_MAX_ALLOC_SIZE (1 + 2 + 2 + 8 + 8 + 4)
+/*
+ * ERTS_MT_ALLOC_BDY_TAG:
+ * N 1 2 3 4
+ * MSB 1-0 7|3-0 7|3-0 3-0
+ * SZ 1 2 2-1 8|4-1 8|4-1 4-1
+ * UIT UI8 UI16 UI16 UI64|UI32 UI64|UI32 UI32
+ * +---+----+...--+...-------+...-------+...-------+
+ * |Tag| Hdr| Type| Out ptr | In size | Time inc |
+ * +---+----+...--+...-------+...-------+...-------+
+ *
+ */
+
+#define ERTS_MT_MAX_CRR_REALLOC_SIZE (1 + 2 + 2 + 2 + 8 + 8 + 8 + 4)
+/*
+ * ERTS_MT_CRR_REALLOC_BDY_TAG:
+ * N 1 2 3 4 5 6
+ * MSB 1-0 1-0 7|3-0 7|3-0 7|3-0 3-0
+ * SZ 1 2 2-1 2-1 8|4-1 8|4-1 8|4-1 4-1
+ * UIT UI8 UI16 UI16 UI16 UI64|UI32 UI64|UI32 UI64|UI32 UI32
+ * +---+----+...--+...--+...-------+...-------+...-------+...-------+
+ * |Tag| Hdr|CType| Type| Out ptr | In ptr | In size | Time inc |
+ * +---+----+...--+...--+...-------+...-------+...-------+...-------+
+ *
+ */
+
+#define ERTS_MT_MAX_REALLOC_SIZE (1 + 2 + 2 + 8 + 8 + 8 + 4)
+/*
+ * ERTS_MT_REALLOC_BDY_TAG:
+ * N 1 2 3 4 5
+ * MSB 1-0 7|3-0 7|3-0 7|3-0 3-0
+ * SZ 1 2 2-1 8|4-1 8|4-1 8|4-1 4-1
+ * UIT UI8 UI16 UI16 UI64|UI32 UI64|UI32 UI64|UI32 UI32
+ * +---+----+...--+...-------+...-------+...-------+...-------+
+ * |Tag| Hdr| Type| Out ptr | In ptr | In size | Time inc |
+ * +---+----+...--+...-------+...-------+...-------+...-------+
+ *
+ */
+
+#define ERTS_MT_MAX_CRR_FREE_SIZE (1 + 2 + 2 + 2 + 8 + 4)
+/*
+ * ERTS_MT_CRR_FREE_BDY_TAG:
+ * N 1 2 3 4
+ * MSB 1-0 1-0 7|3-0 3-0
+ * SZ 1 2 2-1 2-1 8|4-1 4-1
+ * UIT UI8 UI16 UI16 UI16 UI64|UI32 UI32
+ * +---+----+...--+...--+...-------+...-------+
+ * |Tag| Hdr|CType| Type| In ptr | Time inc |
+ * +---+----+...--+...--+...-------+...-------+
+ *
+ */
+
+#define ERTS_MT_MAX_FREE_SIZE (1 + 2 + 2 + 8 + 4)
+/*
+ * ERTS_MT_FREE_BDY_TAG:
+ * N 1 2 3
+ * MSB 1-0 7|3-0 3-0
+ * SZ 1 2 2-1 8|4-1 4-1
+ * UIT UI8 UI16 UI16 UI64|UI32 UI32
+ * +---+----+...--+...-------+...-------+
+ * |Tag| Hdr| Type| In ptr | Time inc |
+ * +---+----+...--+...-------+...-------+
+ *
+ */
+
+/*
+ * ERTS_MT_X_BDY_TAG:
+ * N
+ * MSB
+ * SZ 1 2 1
+ * UIT UI8 UI16 UI8
+ * +---+-----+------+... ...+
+ * |Tag|TotSz|SubTag| |
+ * +---+-----+------+... ...+
+ *
+ * ^ ^
+ * | |
+ * +------ TotSz bytes -----+
+ *
+ * X for extension
+ *
+ * * Tag equals ERTS_MT_X_BDY_TAG.
+ * * TotSz contains the total size of the entry.
+ * * SubTag is used to distinguish between different sub entries
+ * passed in X entries.
+ *
+ */
+
+
+
+#endif /* #ifndef ERL_MEMORY_TRACE_PROTOCOL_H__ */
+
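The time-increment word described above packs seconds and microseconds into one 32-bit value. A small illustrative helper, not part of the commit, that unpacks it using only the shift and mask macros defined in this header:

    #include "erl_memory_trace_protocol.h"

    static void decode_time_inc(unsigned long ti,
                                unsigned long *secs, unsigned long *usecs)
    {
        *secs  = (ti >> ERTS_MT_TIME_INC_SECS_SHIFT)  & ERTS_MT_TIME_INC_SECS_MASK;
        *usecs = (ti >> ERTS_MT_TIME_INC_USECS_SHIFT) & ERTS_MT_TIME_INC_USECS_MASK;
    }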
diff --git a/erts/include/internal/erl_misc_utils.h b/erts/include/internal/erl_misc_utils.h
new file mode 100644
index 0000000000..82e9ba3798
--- /dev/null
+++ b/erts/include/internal/erl_misc_utils.h
@@ -0,0 +1,53 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2006-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+#ifndef ERL_MISC_UTILS_H_
+#define ERL_MISC_UTILS_H_
+
+#include "erl_errno.h"
+
+typedef struct erts_cpu_info_t_ erts_cpu_info_t;
+typedef struct {
+ int node;
+ int processor;
+ int processor_node;
+ int core;
+ int thread;
+ int logical;
+} erts_cpu_topology_t;
+
+erts_cpu_info_t *erts_cpu_info_create(void);
+void erts_cpu_info_destroy(erts_cpu_info_t *cpuinfo);
+void erts_cpu_info_update(erts_cpu_info_t *cpuinfo);
+int erts_get_cpu_configured(erts_cpu_info_t *cpuinfo);
+int erts_get_cpu_online(erts_cpu_info_t *cpuinfo);
+int erts_get_cpu_available(erts_cpu_info_t *cpuinfo);
+char *erts_get_unbind_from_cpu_str(erts_cpu_info_t *cpuinfo);
+int erts_get_available_cpu(erts_cpu_info_t *cpuinfo, int no);
+int erts_get_cpu_topology_size(erts_cpu_info_t *cpuinfo);
+int erts_get_cpu_topology(erts_cpu_info_t *cpuinfo,
+ erts_cpu_topology_t *topology);
+int erts_is_cpu_available(erts_cpu_info_t *cpuinfo, int id);
+int erts_bind_to_cpu(erts_cpu_info_t *cpuinfo, int cpu);
+int erts_unbind_from_cpu(erts_cpu_info_t *cpuinfo);
+int erts_unbind_from_cpu_str(char *str);
+
+int erts_milli_sleep(long);
+
+#endif /* #ifndef ERL_MISC_UTILS_H_ */
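A hedged usage sketch, not part of the commit: create a CPU-info handle, query the configured/online/available counts, and release the handle. Error conventions (for instance what the getters return when the information is unavailable) are not specified by the header and are left unhandled here.

    #include <stdio.h>
    #include "erl_misc_utils.h"

    void print_cpu_counts(void)
    {
        erts_cpu_info_t *cpuinfo = erts_cpu_info_create();
        if (!cpuinfo)
            return;
        printf("configured: %d\n", erts_get_cpu_configured(cpuinfo));
        printf("online:     %d\n", erts_get_cpu_online(cpuinfo));
        printf("available:  %d\n", erts_get_cpu_available(cpuinfo));
        erts_cpu_info_destroy(cpuinfo);
    }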
diff --git a/erts/include/internal/erl_printf.h b/erts/include/internal/erl_printf.h
new file mode 100644
index 0000000000..5bc93a979b
--- /dev/null
+++ b/erts/include/internal/erl_printf.h
@@ -0,0 +1,57 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2005-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+#ifndef ERL_PRINTF_H_
+#define ERL_PRINTF_H_
+#include <stdio.h>
+#include <stdarg.h>
+
+extern int (*erts_printf_stdout_func)(char *, va_list);
+extern int (*erts_printf_stderr_func)(char *, va_list);
+extern int erts_printf_add_cr_to_stdout;
+extern int erts_printf_add_cr_to_stderr;
+extern int (*erts_printf_block_fpe)(void);
+extern void (*erts_printf_unblock_fpe)(int);
+
+typedef struct erts_dsprintf_buf_t_ erts_dsprintf_buf_t;
+
+struct erts_dsprintf_buf_t_ {
+ char *str;
+ size_t str_len;
+ size_t size;
+ erts_dsprintf_buf_t *(*grow)(erts_dsprintf_buf_t *, size_t);
+};
+
+#define ERTS_DSPRINTF_BUF_INITER(GFUNC) {NULL, 0, 0, (GFUNC)}
+
+int erts_printf(const char *, ...);
+int erts_fprintf(FILE *, const char *, ...);
+int erts_fdprintf(int, const char *, ...);
+int erts_sprintf(char *, const char *, ...);
+int erts_snprintf(char *, size_t, const char *, ...);
+int erts_dsprintf(erts_dsprintf_buf_t *, const char *, ...);
+
+int erts_vprintf(const char *, va_list);
+int erts_vfprintf(FILE *, const char *, va_list);
+int erts_vfdprintf(int, const char *, va_list);
+int erts_vsprintf(char *, const char *, va_list);
+int erts_vsnprintf(char *, size_t, const char *, va_list);
+int erts_vdsprintf(erts_dsprintf_buf_t *, const char *, va_list);
+
+#endif /* #ifndef ERL_PRINTF_H_ */
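The dynamically sized string buffer is driven through the grow callback stored in erts_dsprintf_buf_t. Below is a hedged sketch, not part of the commit, of a realloc-based grow function and its use with erts_dsprintf(); the exact contract of grow (how much space it must guarantee and how failure is signalled) is an assumption here, not taken from this header.

    #include <stdlib.h>
    #include "erl_printf.h"

    static erts_dsprintf_buf_t *grow_buf(erts_dsprintf_buf_t *dsbufp, size_t need)
    {
        size_t new_size = dsbufp->size ? dsbufp->size : 64;
        char *new_str;

        while (new_size < dsbufp->str_len + need + 1)
            new_size *= 2;
        new_str = realloc(dsbufp->str, new_size);
        if (!new_str)
            return NULL;
        dsbufp->str = new_str;
        dsbufp->size = new_size;
        return dsbufp;
    }

    void dsprintf_example(void)
    {
        erts_dsprintf_buf_t dsbuf = ERTS_DSPRINTF_BUF_INITER(grow_buf);

        if (erts_dsprintf(&dsbuf, "pid = %d", 42) >= 0 && dsbuf.str) {
            /* dsbuf.str now holds the formatted text (dsbuf.str_len characters). */
            free(dsbuf.str);
        }
    }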
diff --git a/erts/include/internal/erl_printf_format.h b/erts/include/internal/erl_printf_format.h
new file mode 100644
index 0000000000..45818079ea
--- /dev/null
+++ b/erts/include/internal/erl_printf_format.h
@@ -0,0 +1,46 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2005-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+#ifndef ERL_PRINTF_FORMAT_H__
+#define ERL_PRINTF_FORMAT_H__
+
+#ifdef VXWORKS
+#include <vxWorks.h>
+#endif
+
+#include <sys/types.h>
+#include <stdarg.h>
+#include <stdlib.h>
+
+typedef int (*fmtfn_t)(void*, char*, size_t);
+
+extern int erts_printf_format(fmtfn_t, void*, char*, va_list);
+
+extern int erts_printf_char(fmtfn_t, void*, char);
+extern int erts_printf_string(fmtfn_t, void*, char *);
+extern int erts_printf_buf(fmtfn_t, void*, char *, size_t);
+extern int erts_printf_pointer(fmtfn_t, void*, void *);
+extern int erts_printf_ulong(fmtfn_t, void*, char, int, int, unsigned long);
+extern int erts_printf_slong(fmtfn_t, void*, char, int, int, signed long);
+extern int erts_printf_double(fmtfn_t, void *, char, int, int, double);
+
+extern int (*erts_printf_eterm_func)(fmtfn_t, void*, unsigned long, long);
+
+#endif
+
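erts_printf_format() writes its output through a caller-supplied fmtfn_t sink. A hedged sketch, not part of the commit, of a sink that appends each chunk to a FILE*; the (arg, buf, len) interpretation and the meaning of the callback's return value are assumptions based on the typedef above.

    #include <stdio.h>
    #include <stdarg.h>
    #include "erl_printf_format.h"

    static int file_sink(void *arg, char *buf, size_t len)
    {
        return fwrite(buf, 1, len, (FILE *) arg) == len ? (int) len : -1;
    }

    int fprintf_via_format(FILE *fp, char *fmt, ...)
    {
        va_list ap;
        int res;

        va_start(ap, fmt);
        res = erts_printf_format(file_sink, (void *) fp, fmt, ap);
        va_end(ap);
        return res;
    }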
diff --git a/erts/include/internal/erts_internal.mk.in b/erts/include/internal/erts_internal.mk.in
new file mode 100644
index 0000000000..489531372c
--- /dev/null
+++ b/erts/include/internal/erts_internal.mk.in
@@ -0,0 +1,24 @@
+#
+# %CopyrightBegin%
+#
+# Copyright Ericsson AB 2009. All Rights Reserved.
+#
+# The contents of this file are subject to the Erlang Public License,
+# Version 1.1, (the "License"); you may not use this file except in
+# compliance with the License. You should have received a copy of the
+# Erlang Public License along with this software. If not, it can be
+# retrieved online at http://www.erlang.org/.
+#
+# Software distributed under the License is distributed on an "AS IS"
+# basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+# the License for the specific language governing rights and limitations
+# under the License.
+#
+# %CopyrightEnd%
+#
+
+# ----------------------------------------------------------------------
+
+ERTS_INTERNAL_X_LIBS=@ERTS_INTERNAL_X_LIBS@
+
+# ----------------------------------------------------------------------
diff --git a/erts/include/internal/ethread.h b/erts/include/internal/ethread.h
new file mode 100644
index 0000000000..934a79c6f9
--- /dev/null
+++ b/erts/include/internal/ethread.h
@@ -0,0 +1,1448 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2004-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Description: Thread library for use in the ERTS and other OTP
+ * applications.
+ * Author: Rickard Green
+ */
+
+#ifndef ETHREAD_H__
+#define ETHREAD_H__
+
+#ifndef ETHR_HAVE_ETHREAD_DEFINES
+# include "ethread_header_config.h"
+#endif
+
+#include <stdlib.h>
+#include "erl_errno.h"
+
+/*
+ * Extra memory barrier requirements:
+ * - ethr_atomic_or_old() needs to enforce a memory barrier sufficient
+ * for a lock operation.
+ * - ethr_atomic_and_old() needs to enforce a memory barrier sufficient
+ * for an unlock operation.
+ * - ethr_atomic_cmpxchg() needs to enforce a memory barrier sufficient
+ * for a lock and unlock operation.
+ */
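The requirements above suggest that these atomics are used to build lock-like constructs. A purely illustrative sketch, not part of the commit (using the ethr_atomic_* API declared further down in this header, with hypothetical names), of the kind of flag lock that motivates them: setting the lock bit with ethr_atomic_or_old() must act as an acquire, and clearing it with ethr_atomic_and_old() as a release.

    #define MY_LOCK_BIT 1 /* illustrative name */

    static void my_flag_lock(ethr_atomic_t *flag)
    {
        long old;
        do {
            ethr_atomic_or_old(flag, MY_LOCK_BIT, &old);  /* needs acquire semantics */
        } while (old & MY_LOCK_BIT);                      /* spin until we set the bit */
    }

    static void my_flag_unlock(ethr_atomic_t *flag)
    {
        long old;
        ethr_atomic_and_old(flag, ~MY_LOCK_BIT, &old);    /* needs release semantics */
    }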
+
+
+#undef ETHR_USE_RWMTX_FALLBACK
+#undef ETHR_HAVE_OPTIMIZED_ATOMIC_OPS
+#undef ETHR_HAVE_OPTIMIZED_LOCKS
+
+typedef struct {
+ long tv_sec;
+ long tv_nsec;
+} ethr_timeval;
+
+#if defined(DEBUG)
+# undef ETHR_XCHK
+# define ETHR_XCHK 1
+#else
+# ifndef ETHR_XCHK
+# define ETHR_XCHK 0
+# endif
+#endif
+
+#undef ETHR_INLINE
+#if defined(__GNUC__)
+# define ETHR_INLINE __inline__
+#elif defined(__WIN32__)
+# define ETHR_INLINE __forceinline
+#endif
+#if defined(DEBUG) || !defined(ETHR_INLINE) || ETHR_XCHK \
+ || (defined(__GNUC__) && defined(ERTS_MIXED_CYGWIN_VC))
+# undef ETHR_INLINE
+# define ETHR_INLINE
+# undef ETHR_TRY_INLINE_FUNCS
+#endif
+#ifdef ETHR_FORCE_INLINE_FUNCS
+# define ETHR_TRY_INLINE_FUNCS
+#endif
+
+#if !defined(ETHR_DISABLE_NATIVE_IMPLS) \
+ && (defined(PURIFY) || defined(VALGRIND) || defined(ERTS_MIXED_CYGWIN_VC))
+# define ETHR_DISABLE_NATIVE_IMPLS
+#endif
+
+#define ETHR_RWMUTEX_INITIALIZED 0x99999999
+#define ETHR_MUTEX_INITIALIZED 0x77777777
+#define ETHR_COND_INITIALIZED 0x55555555
+
+#define ETHR_CACHE_LINE_SIZE 64
+
+#ifdef ETHR_INLINE_FUNC_NAME_
+# define ETHR_CUSTOM_INLINE_FUNC_NAME_
+#else
+# define ETHR_INLINE_FUNC_NAME_(X) X
+#endif
+
+#define ETHR_COMPILER_BARRIER ethr_compiler_barrier()
+#ifdef __GNUC__
+# undef ETHR_COMPILER_BARRIER
+# define ETHR_COMPILER_BARRIER __asm__ __volatile__("":::"memory")
+#endif
+
+#ifdef DEBUG
+#define ETHR_ASSERT(A) \
+ ((void) ((A) ? 1 : ethr_assert_failed(__FILE__, __LINE__, #A)))
+int ethr_assert_failed(char *f, int l, char *a);
+#else
+#define ETHR_ASSERT(A) ((void) 1)
+#endif
+
+#if defined(ETHR_PTHREADS)
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\
+ * The pthread implementation *
+\* */
+
+#if defined(__linux__) && !defined(_GNU_SOURCE)
+#error "_GNU_SOURCE not defined. Please, compile all files with -D_GNU_SOURCE."
+#endif
+
+#if defined(ETHR_HAVE_MIT_PTHREAD_H)
+#include <pthread/mit/pthread.h>
+#elif defined(ETHR_HAVE_PTHREAD_H)
+#include <pthread.h>
+#endif
+
+/* Types */
+
+typedef pthread_t ethr_tid;
+
+typedef struct ethr_mutex_ ethr_mutex;
+struct ethr_mutex_ {
+ pthread_mutex_t pt_mtx;
+ int is_rec_mtx;
+ ethr_mutex *prev;
+ ethr_mutex *next;
+#if ETHR_XCHK
+ int initialized;
+#endif
+};
+
+typedef struct ethr_cond_ ethr_cond;
+struct ethr_cond_ {
+ pthread_cond_t pt_cnd;
+#if ETHR_XCHK
+ int initialized;
+#endif
+};
+
+#ifndef ETHR_HAVE_PTHREAD_RWLOCK_INIT
+#define ETHR_USE_RWMTX_FALLBACK
+#else
+typedef struct ethr_rwmutex_ ethr_rwmutex;
+struct ethr_rwmutex_ {
+ pthread_rwlock_t pt_rwlock;
+#if ETHR_XCHK
+ int initialized;
+#endif
+};
+#endif
+
+/* Static initializers */
+#if ETHR_XCHK
+#define ETHR_MUTEX_XCHK_INITER , ETHR_MUTEX_INITIALIZED
+#define ETHR_COND_XCHK_INITER , ETHR_COND_INITIALIZED
+#else
+#define ETHR_MUTEX_XCHK_INITER
+#define ETHR_COND_XCHK_INITER
+#endif
+
+#define ETHR_MUTEX_INITER {PTHREAD_MUTEX_INITIALIZER, 0, NULL, NULL ETHR_MUTEX_XCHK_INITER}
+#define ETHR_COND_INITER {PTHREAD_COND_INITIALIZER ETHR_COND_XCHK_INITER}
+
+#if defined(ETHR_HAVE_PTHREAD_MUTEXATTR_SETTYPE) \
+ || defined(ETHR_HAVE_PTHREAD_MUTEXATTR_SETKIND_NP)
+# define ETHR_HAVE_ETHR_REC_MUTEX_INIT 1
+# ifdef PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP
+# define ETHR_REC_MUTEX_INITER \
+ {PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP, 1, NULL, NULL ETHR_MUTEX_XCHK_INITER}
+# endif
+#else
+# undef ETHR_HAVE_ETHR_REC_MUTEX_INIT
+#endif
+
+#ifndef ETHR_HAVE_PTHREAD_ATFORK
+# define ETHR_NO_FORKSAFETY 1
+#endif
+
+typedef pthread_key_t ethr_tsd_key;
+
+#define ETHR_HAVE_ETHR_SIG_FUNCS 1
+
+#ifdef ETHR_TRY_INLINE_FUNCS
+
+static ETHR_INLINE int
+ETHR_INLINE_FUNC_NAME_(ethr_mutex_trylock)(ethr_mutex *mtx)
+{
+ return pthread_mutex_trylock(&mtx->pt_mtx);
+}
+
+static ETHR_INLINE int
+ETHR_INLINE_FUNC_NAME_(ethr_mutex_lock)(ethr_mutex *mtx)
+{
+ return pthread_mutex_lock(&mtx->pt_mtx);
+}
+
+static ETHR_INLINE int
+ETHR_INLINE_FUNC_NAME_(ethr_mutex_unlock)(ethr_mutex *mtx)
+{
+ return pthread_mutex_unlock(&mtx->pt_mtx);
+}
+
+#ifdef ETHR_HAVE_PTHREAD_RWLOCK_INIT
+
+static ETHR_INLINE int
+ETHR_INLINE_FUNC_NAME_(ethr_rwmutex_tryrlock)(ethr_rwmutex *rwmtx)
+{
+ return pthread_rwlock_tryrdlock(&rwmtx->pt_rwlock);
+}
+
+static ETHR_INLINE int
+ETHR_INLINE_FUNC_NAME_(ethr_rwmutex_rlock)(ethr_rwmutex *rwmtx)
+{
+ return pthread_rwlock_rdlock(&rwmtx->pt_rwlock);
+}
+
+static ETHR_INLINE int
+ETHR_INLINE_FUNC_NAME_(ethr_rwmutex_runlock)(ethr_rwmutex *rwmtx)
+{
+ return pthread_rwlock_unlock(&rwmtx->pt_rwlock);
+}
+
+static ETHR_INLINE int
+ETHR_INLINE_FUNC_NAME_(ethr_rwmutex_tryrwlock)(ethr_rwmutex *rwmtx)
+{
+ return pthread_rwlock_trywrlock(&rwmtx->pt_rwlock);
+}
+
+static ETHR_INLINE int
+ETHR_INLINE_FUNC_NAME_(ethr_rwmutex_rwlock)(ethr_rwmutex *rwmtx)
+{
+ return pthread_rwlock_wrlock(&rwmtx->pt_rwlock);
+}
+
+static ETHR_INLINE int
+ETHR_INLINE_FUNC_NAME_(ethr_rwmutex_rwunlock)(ethr_rwmutex *rwmtx)
+{
+ return pthread_rwlock_unlock(&rwmtx->pt_rwlock);
+}
+
+#endif /* ETHR_HAVE_PTHREAD_RWLOCK_INIT */
+
+#endif /* ETHR_TRY_INLINE_FUNCS */
+
+#elif defined(ETHR_WIN32_THREADS)
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\
+ * The native win32 threads implementation *
+\* */
+
+#if !defined(_WIN32_WINNT)
+#error "_WIN32_WINNT not defined. Please, compile all files with -D_WIN32_WINNT=0x0403"
+#elif _WIN32_WINNT < 0x0403
+#error "_WIN32_WINNT defined to a value less than 0x0403. Please, compile all files with -D_WIN32_WINNT=0x0403"
+#endif
+
+#ifdef WIN32_LEAN_AND_MEAN
+# define ETHR_WIN32_LEAN_AND_MEAN_ALREADY_DEFINED
+#else
+# define WIN32_LEAN_AND_MEAN
+#endif
+#include <windows.h>
+#ifndef ETHR_WIN32_LEAN_AND_MEAN_ALREADY_DEFINED
+# undef WIN32_LEAN_AND_MEAN
+#endif
+
+/* Types */
+typedef long ethr_tid; /* thread id type */
+typedef struct {
+ volatile int initialized;
+ CRITICAL_SECTION cs;
+#if ETHR_XCHK
+ int is_rec_mtx;
+#endif
+} ethr_mutex;
+
+typedef struct cnd_wait_event__ cnd_wait_event_;
+
+typedef struct {
+ volatile int initialized;
+ CRITICAL_SECTION cs;
+ cnd_wait_event_ *queue;
+ cnd_wait_event_ *queue_end;
+} ethr_cond;
+
+#define ETHR_USE_RWMTX_FALLBACK
+
+/* Static initializers */
+
+#define ETHR_MUTEX_INITER {0}
+#define ETHR_COND_INITER {0}
+
+#define ETHR_REC_MUTEX_INITER ETHR_MUTEX_INITER
+
+#define ETHR_HAVE_ETHR_REC_MUTEX_INIT 1
+
+typedef DWORD ethr_tsd_key;
+
+#undef ETHR_HAVE_ETHR_SIG_FUNCS
+
+#ifdef ETHR_TRY_INLINE_FUNCS
+int ethr_fake_static_mutex_init(ethr_mutex *mtx);
+
+static ETHR_INLINE int
+ETHR_INLINE_FUNC_NAME_(ethr_mutex_trylock)(ethr_mutex *mtx)
+{
+ if (!mtx->initialized) {
+ int res = ethr_fake_static_mutex_init(mtx);
+ if (res != 0)
+ return res;
+ }
+ return TryEnterCriticalSection(&mtx->cs) ? 0 : EBUSY;
+}
+
+static ETHR_INLINE int
+ETHR_INLINE_FUNC_NAME_(ethr_mutex_lock)(ethr_mutex *mtx)
+{
+ if (!mtx->initialized) {
+ int res = ethr_fake_static_mutex_init(mtx);
+ if (res != 0)
+ return res;
+ }
+ EnterCriticalSection(&mtx->cs);
+ return 0;
+}
+
+static ETHR_INLINE int
+ETHR_INLINE_FUNC_NAME_(ethr_mutex_unlock)(ethr_mutex *mtx)
+{
+ LeaveCriticalSection(&mtx->cs);
+ return 0;
+}
+
+#endif /* #ifdef ETHR_TRY_INLINE_FUNCS */
+
+#ifdef ERTS_MIXED_CYGWIN_VC
+
+/* atomics */
+
+#ifdef _MSC_VER
+# if _MSC_VER < 1300
+#    define ETHR_IMMED_ATOMIC_SET_GET_SAFE__ 0 /* Don't trust really old compilers */
+# else
+# if defined(_M_IX86)
+# define ETHR_IMMED_ATOMIC_SET_GET_SAFE__ 1
+# else /* I.e. IA64 */
+# if _MSC_VER >= 1400
+# define ETHR_IMMED_ATOMIC_SET_GET_SAFE__ 1
+# else
+# define ETHR_IMMED_ATOMIC_SET_GET_SAFE__ 0
+# endif
+# endif
+# endif
+# if _MSC_VER >= 1400
+# include <intrin.h>
+# undef ETHR_COMPILER_BARRIER
+# define ETHR_COMPILER_BARRIER _ReadWriteBarrier()
+# endif
+#pragma intrinsic(_ReadWriteBarrier)
+#pragma intrinsic(_InterlockedAnd)
+#pragma intrinsic(_InterlockedOr)
+#else
+# define ETHR_IMMED_ATOMIC_SET_GET_SAFE__ 0
+#endif
+
+#define ETHR_HAVE_OPTIMIZED_ATOMIC_OPS 1
+#define ETHR_HAVE_OPTIMIZED_LOCKS 1
+
+typedef struct {
+ volatile LONG value;
+} ethr_atomic_t;
+
+typedef struct {
+ volatile LONG locked;
+} ethr_spinlock_t;
+
+typedef struct {
+ volatile LONG counter;
+} ethr_rwlock_t;
+#define ETHR_WLOCK_FLAG__ (((LONG) 1) << 30)
+
+#ifdef ETHR_TRY_INLINE_FUNCS
+
+static ETHR_INLINE int
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_init)(ethr_atomic_t *var, long i)
+{
+#if ETHR_IMMED_ATOMIC_SET_GET_SAFE__
+ var->value = (LONG) i;
+#else
+ (void) InterlockedExchange(&var->value, (LONG) i);
+#endif
+ return 0;
+}
+
+static ETHR_INLINE int
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_set)(ethr_atomic_t *var, long i)
+{
+#if ETHR_IMMED_ATOMIC_SET_GET_SAFE__
+ var->value = (LONG) i;
+#else
+ (void) InterlockedExchange(&var->value, (LONG) i);
+#endif
+ return 0;
+}
+
+static ETHR_INLINE int
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_read)(ethr_atomic_t *var, long *i)
+{
+#if ETHR_IMMED_ATOMIC_SET_GET_SAFE__
+ *i = var->value;
+#else
+ *i = InterlockedExchangeAdd(&var->value, (LONG) 0);
+#endif
+ return 0;
+}
+
+static ETHR_INLINE int
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_add)(ethr_atomic_t *var, long incr)
+{
+ (void) InterlockedExchangeAdd(&var->value, (LONG) incr);
+ return 0;
+}
+
+static ETHR_INLINE int
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_addtest)(ethr_atomic_t *var,
+ long i,
+ long *testp)
+{
+ *testp = InterlockedExchangeAdd(&var->value, (LONG) i);
+ *testp += i;
+ return 0;
+}
+
+static ETHR_INLINE int
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_inc)(ethr_atomic_t *var)
+{
+ (void) InterlockedIncrement(&var->value);
+ return 0;
+}
+
+static ETHR_INLINE int
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_dec)(ethr_atomic_t *var)
+{
+ (void) InterlockedDecrement(&var->value);
+ return 0;
+}
+
+static ETHR_INLINE int
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_inctest)(ethr_atomic_t *var, long *testp)
+{
+ *testp = (long) InterlockedIncrement(&var->value);
+ return 0;
+}
+
+static ETHR_INLINE int
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_dectest)(ethr_atomic_t *var, long *testp)
+{
+ *testp = (long) InterlockedDecrement(&var->value);
+ return 0;
+}
+
+static ETHR_INLINE int
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_and_old)(ethr_atomic_t *var,
+ long mask,
+ long *old)
+{
+ /*
+ * See "Extra memory barrier requirements" note at the top
+ * of the file.
+ *
+ * According to msdn _InterlockedAnd() provides a full
+ * memory barrier.
+ */
+ *old = (long) _InterlockedAnd(&var->value, mask);
+ return 0;
+}
+
+static ETHR_INLINE int
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_or_old)(ethr_atomic_t *var,
+ long mask,
+ long *old)
+{
+ /*
+ * See "Extra memory barrier requirements" note at the top
+ * of the file.
+ *
+ * According to msdn _InterlockedOr() provides a full
+ * memory barrier.
+ */
+ *old = (long) _InterlockedOr(&var->value, mask);
+ return 0;
+}
+
+static ETHR_INLINE int
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_cmpxchg)(ethr_atomic_t *var,
+ long new,
+ long expected,
+ long *old)
+{
+ /*
+ * See "Extra memory barrier requirements" note at the top
+ * of the file.
+ *
+ * According to msdn _InterlockedCompareExchange() provides a full
+ * memory barrier.
+ */
+ *old = _InterlockedCompareExchange(&var->value, (LONG) new, (LONG) expected);
+ return 0;
+}
+
+static ETHR_INLINE int
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_xchg)(ethr_atomic_t *var,
+ long new,
+ long *old)
+{
+ *old = (long) InterlockedExchange(&var->value, (LONG) new);
+ return 0;
+}
+
+/*
+ * According to msdn InterlockedExchange() provides a full
+ * memory barrier.
+ */
+
+static ETHR_INLINE int
+ETHR_INLINE_FUNC_NAME_(ethr_spinlock_init)(ethr_spinlock_t *lock)
+{
+#if ETHR_IMMED_ATOMIC_SET_GET_SAFE__
+ lock->locked = (LONG) 0;
+#else
+ (void) InterlockedExchange(&lock->locked, (LONG) 0);
+#endif
+ return 0;
+}
+
+static ETHR_INLINE int
+ETHR_INLINE_FUNC_NAME_(ethr_spinlock_destroy)(ethr_spinlock_t *lock)
+{
+ return 0;
+}
+
+
+static ETHR_INLINE int
+ETHR_INLINE_FUNC_NAME_(ethr_spin_unlock)(ethr_spinlock_t *lock)
+{
+ ETHR_COMPILER_BARRIER;
+ {
+#ifdef DEBUG
+ LONG old =
+#endif
+ InterlockedExchange(&lock->locked, (LONG) 0);
+#ifdef DEBUG
+ ETHR_ASSERT(old == 1);
+#endif
+ }
+ return 0;
+}
+
+static ETHR_INLINE int
+ETHR_INLINE_FUNC_NAME_(ethr_spin_lock)(ethr_spinlock_t *lock)
+{
+ LONG old;
+ do {
+ old = InterlockedExchange(&lock->locked, (LONG) 1);
+ } while (old != (LONG) 0);
+ ETHR_COMPILER_BARRIER;
+ return 0;
+}
+
+/*
+ * According to MSDN, InterlockedIncrement(), InterlockedDecrement(),
+ * InterlockedExchangeAdd(), _InterlockedAnd(), and _InterlockedOr()
+ * provide full memory barriers.
+ */
+static ETHR_INLINE int
+ETHR_INLINE_FUNC_NAME_(ethr_rwlock_init)(ethr_rwlock_t *lock)
+{
+#if ETHR_IMMED_ATOMIC_SET_GET_SAFE__
+ lock->counter = (LONG) 0;
+#else
+ (void) InterlockedExchange(&lock->counter, (LONG) 0);
+#endif
+ return 0;
+}
+
+static ETHR_INLINE int
+ETHR_INLINE_FUNC_NAME_(ethr_rwlock_destroy)(ethr_rwlock_t *lock)
+{
+ return 0;
+}
+
+static ETHR_INLINE int
+ETHR_INLINE_FUNC_NAME_(ethr_read_unlock)(ethr_rwlock_t *lock)
+{
+ ETHR_COMPILER_BARRIER;
+ {
+#ifdef DEBUG
+ LONG old =
+#endif
+ InterlockedDecrement(&lock->counter);
+ ETHR_ASSERT(old != 0);
+ }
+ return 0;
+}
+
+static ETHR_INLINE int
+ETHR_INLINE_FUNC_NAME_(ethr_read_lock)(ethr_rwlock_t *lock)
+{
+ while (1) {
+ LONG old = InterlockedIncrement(&lock->counter);
+ if ((old & ETHR_WLOCK_FLAG__) == 0)
+ break; /* Got read lock */
+ /* Restore and wait for writers to unlock */
+ old = InterlockedDecrement(&lock->counter);
+ while (old & ETHR_WLOCK_FLAG__) {
+#if ETHR_IMMED_ATOMIC_SET_GET_SAFE__
+ old = lock->counter;
+#else
+ old = InterlockedExchangeAdd(&lock->counter, (LONG) 0);
+#endif
+ }
+ }
+ ETHR_COMPILER_BARRIER;
+ return 0;
+}
+
+static ETHR_INLINE int
+ETHR_INLINE_FUNC_NAME_(ethr_write_unlock)(ethr_rwlock_t *lock)
+{
+ ETHR_COMPILER_BARRIER;
+ {
+#ifdef DEBUG
+ LONG old =
+#endif
+ _InterlockedAnd(&lock->counter, ~ETHR_WLOCK_FLAG__);
+ ETHR_ASSERT(old & ETHR_WLOCK_FLAG__);
+ }
+ return 0;
+}
+
+static ETHR_INLINE int
+ETHR_INLINE_FUNC_NAME_(ethr_write_lock)(ethr_rwlock_t *lock)
+{
+ LONG old;
+ do {
+ old = _InterlockedOr(&lock->counter, ETHR_WLOCK_FLAG__);
+ } while (old & ETHR_WLOCK_FLAG__);
+ /* We got the write part of the lock; wait for readers to unlock */
+ while ((old & ~ETHR_WLOCK_FLAG__) != 0) {
+#if ETHR_IMMED_ATOMIC_SET_GET_SAFE__
+ old = lock->counter;
+#else
+ old = InterlockedExchangeAdd(&lock->counter, (LONG) 0);
+#endif
+ ETHR_ASSERT(old & ETHR_WLOCK_FLAG__);
+ }
+ ETHR_COMPILER_BARRIER;
+ return 0;
+}
+
+#endif /* #ifdef ETHR_TRY_INLINE_FUNCS */
+
+#endif /* #ifdef ERTS_MIXED_CYGWIN_VC */
+
+#else /* No supported thread lib found */
+
+#ifdef ETHR_NO_SUPP_THR_LIB_NOT_FATAL
+#define ETHR_NO_THREAD_LIB
+#else
+#error "No supported thread lib found"
+#endif
+
+#endif
+
+/* __builtin_expect() is needed by both native atomics code
+ * and the fallback code */
+#if !defined(__GNUC__) || (__GNUC__ < 2) || (__GNUC__ == 2 && __GNUC_MINOR__ < 96)
+#define __builtin_expect(X, Y) (X)
+#endif
+
+/* For CPU-optimised atomics, spinlocks, and rwlocks. */
+#if !defined(ETHR_DISABLE_NATIVE_IMPLS) && defined(__GNUC__)
+# if ETHR_SIZEOF_PTR == 4
+# if defined(__i386__)
+# include "i386/ethread.h"
+# elif (defined(__powerpc__) || defined(__ppc__)) && !defined(__powerpc64__)
+# include "ppc32/ethread.h"
+# elif defined(__sparc__)
+# include "sparc32/ethread.h"
+# elif defined(__tile__)
+# include "tile/ethread.h"
+# endif
+# elif ETHR_SIZEOF_PTR == 8
+# if defined(__x86_64__)
+# include "x86_64/ethread.h"
+# elif defined(__sparc__) && defined(__arch64__)
+# include "sparc64/ethread.h"
+# endif
+# endif
+#endif /* !defined(ETHR_DISABLE_NATIVE_IMPLS) && defined(__GNUC__) */
+
+#ifdef ETHR_HAVE_OPTIMIZED_ATOMIC_OPS
+# undef ETHR_HAVE_NATIVE_ATOMICS
+#endif
+#ifdef ETHR_HAVE_OPTIMIZED_LOCKS
+# undef ETHR_HAVE_NATIVE_LOCKS
+#endif
+
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+#define ETHR_HAVE_OPTIMIZED_ATOMIC_OPS 1
+#endif
+#ifdef ETHR_HAVE_NATIVE_LOCKS
+#define ETHR_HAVE_OPTIMIZED_LOCKS 1
+#endif
+
+typedef struct {
+ unsigned open;
+ ethr_mutex mtx;
+ ethr_cond cnd;
+} ethr_gate;
+
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+/*
+ * Map ethread native atomics to ethread API atomics.
+ */
+typedef ethr_native_atomic_t ethr_atomic_t;
+#endif
+
+#ifdef ETHR_HAVE_NATIVE_LOCKS
+/*
+ * Map ethread native spinlocks to ethread API spinlocks.
+ */
+typedef ethr_native_spinlock_t ethr_spinlock_t;
+/*
+ * Map ethread native rwlocks to ethread API rwlocks.
+ */
+typedef ethr_native_rwlock_t ethr_rwlock_t;
+#endif
+
+#ifdef ETHR_USE_RWMTX_FALLBACK
+typedef struct {
+ ethr_mutex mtx;
+ ethr_cond rcnd;
+ ethr_cond wcnd;
+ unsigned readers;
+ unsigned waiting_readers;
+ unsigned waiting_writers;
+#if ETHR_XCHK
+ int initialized;
+#endif
+} ethr_rwmutex;
+#endif
+
+#ifndef ETHR_HAVE_OPTIMIZED_ATOMIC_OPS
+typedef long ethr_atomic_t;
+#endif
+
+#ifndef ETHR_HAVE_OPTIMIZED_LOCKS
+
+#if defined(ETHR_WIN32_THREADS)
+typedef struct {
+ CRITICAL_SECTION cs;
+} ethr_spinlock_t;
+typedef struct {
+ CRITICAL_SECTION cs;
+ unsigned counter;
+} ethr_rwlock_t;
+
+int ethr_do_spinlock_init(ethr_spinlock_t *lock);
+int ethr_do_rwlock_init(ethr_rwlock_t *lock);
+
+#define ETHR_RWLOCK_WRITERS (((unsigned) 1) << 31)
+
+#elif defined(ETHR_HAVE_PTHREAD_SPIN_LOCK)
+typedef struct {
+ pthread_spinlock_t spnlck;
+} ethr_spinlock_t;
+typedef struct {
+ pthread_spinlock_t spnlck;
+ unsigned counter;
+} ethr_rwlock_t;
+#define ETHR_RWLOCK_WRITERS (((unsigned) 1) << 31)
+
+#else /* ethr mutex/rwmutex */
+
+typedef struct {
+ ethr_mutex mtx;
+} ethr_spinlock_t;
+
+typedef struct {
+ ethr_rwmutex rwmtx;
+} ethr_rwlock_t;
+
+#endif /* end mutex/rwmutex */
+#endif /* ETHR_HAVE_OPTIMIZED_LOCKS */
+
+typedef struct {
+ void *(*alloc)(size_t);
+ void *(*realloc)(void *, size_t);
+ void (*free)(void *);
+ void *(*thread_create_prepare_func)(void);
+ void (*thread_create_parent_func)(void *);
+ void (*thread_create_child_func)(void *);
+} ethr_init_data;
+
+#define ETHR_INIT_DATA_DEFAULT_INITER {malloc, realloc, free, NULL, NULL, NULL}
+
+typedef struct {
+ int detached; /* boolean (default false) */
+ int suggested_stack_size; /* kilo words (default sys dependent) */
+} ethr_thr_opts;
+
+#define ETHR_THR_OPTS_DEFAULT_INITER {0, -1}
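A hedged usage sketch, not part of the commit, tying the initializers above to the thread functions declared below: initialize the library once with the default allocators, then create and join a single thread with default options.

    #include "ethread.h"

    static void *worker(void *arg)
    {
        return arg;
    }

    int run_one_thread(void)
    {
        ethr_init_data id = ETHR_INIT_DATA_DEFAULT_INITER;
        ethr_thr_opts opts = ETHR_THR_OPTS_DEFAULT_INITER;
        ethr_tid tid;
        void *res;
        int err;

        if ((err = ethr_init(&id)) != 0)      /* once per process */
            return err;
        if ((err = ethr_thr_create(&tid, worker, NULL, &opts)) != 0)
            return err;
        return ethr_thr_join(tid, &res);
    }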
+
+#if defined(ETHR_CUSTOM_INLINE_FUNC_NAME_) || !defined(ETHR_TRY_INLINE_FUNCS)
+# define ETHR_NEED_MTX_PROTOTYPES__
+# define ETHR_NEED_RWMTX_PROTOTYPES__
+# define ETHR_NEED_SPINLOCK_PROTOTYPES__
+# define ETHR_NEED_ATOMIC_PROTOTYPES__
+#endif
+
+#if !defined(ETHR_NEED_RWMTX_PROTOTYPES__) && defined(ETHR_USE_RWMTX_FALLBACK)
+# define ETHR_NEED_RWMTX_PROTOTYPES__
+#endif
+
+int ethr_init(ethr_init_data *);
+int ethr_install_exit_handler(void (*funcp)(void));
+int ethr_thr_create(ethr_tid *, void * (*)(void *), void *, ethr_thr_opts *);
+int ethr_thr_join(ethr_tid, void **);
+int ethr_thr_detach(ethr_tid);
+void ethr_thr_exit(void *);
+ethr_tid ethr_self(void);
+int ethr_equal_tids(ethr_tid, ethr_tid);
+int ethr_mutex_init(ethr_mutex *);
+#ifdef ETHR_HAVE_ETHR_REC_MUTEX_INIT
+int ethr_rec_mutex_init(ethr_mutex *);
+#endif
+int ethr_mutex_destroy(ethr_mutex *);
+int ethr_mutex_set_forksafe(ethr_mutex *);
+int ethr_mutex_unset_forksafe(ethr_mutex *);
+#ifdef ETHR_NEED_MTX_PROTOTYPES__
+int ethr_mutex_trylock(ethr_mutex *);
+int ethr_mutex_lock(ethr_mutex *);
+int ethr_mutex_unlock(ethr_mutex *);
+#endif
+int ethr_cond_init(ethr_cond *);
+int ethr_cond_destroy(ethr_cond *);
+int ethr_cond_signal(ethr_cond *);
+int ethr_cond_broadcast(ethr_cond *);
+int ethr_cond_wait(ethr_cond *, ethr_mutex *);
+int ethr_cond_timedwait(ethr_cond *, ethr_mutex *, ethr_timeval *);
+
+int ethr_rwmutex_init(ethr_rwmutex *);
+int ethr_rwmutex_destroy(ethr_rwmutex *);
+#ifdef ETHR_NEED_RWMTX_PROTOTYPES__
+int ethr_rwmutex_tryrlock(ethr_rwmutex *);
+int ethr_rwmutex_rlock(ethr_rwmutex *);
+int ethr_rwmutex_runlock(ethr_rwmutex *);
+int ethr_rwmutex_tryrwlock(ethr_rwmutex *);
+int ethr_rwmutex_rwlock(ethr_rwmutex *);
+int ethr_rwmutex_rwunlock(ethr_rwmutex *);
+#endif
+
+#ifdef ETHR_NEED_ATOMIC_PROTOTYPES__
+int ethr_atomic_init(ethr_atomic_t *, long);
+int ethr_atomic_set(ethr_atomic_t *, long);
+int ethr_atomic_read(ethr_atomic_t *, long *);
+int ethr_atomic_inctest(ethr_atomic_t *, long *);
+int ethr_atomic_dectest(ethr_atomic_t *, long *);
+int ethr_atomic_inc(ethr_atomic_t *);
+int ethr_atomic_dec(ethr_atomic_t *);
+int ethr_atomic_addtest(ethr_atomic_t *, long, long *);
+int ethr_atomic_add(ethr_atomic_t *, long);
+int ethr_atomic_and_old(ethr_atomic_t *, long, long *);
+int ethr_atomic_or_old(ethr_atomic_t *, long, long *);
+int ethr_atomic_xchg(ethr_atomic_t *, long, long *);
+int ethr_atomic_cmpxchg(ethr_atomic_t *, long, long, long *);
+#endif
+
+#ifdef ETHR_NEED_SPINLOCK_PROTOTYPES__
+int ethr_spinlock_init(ethr_spinlock_t *);
+int ethr_spinlock_destroy(ethr_spinlock_t *);
+int ethr_spin_unlock(ethr_spinlock_t *);
+int ethr_spin_lock(ethr_spinlock_t *);
+
+int ethr_rwlock_init(ethr_rwlock_t *);
+int ethr_rwlock_destroy(ethr_rwlock_t *);
+int ethr_read_unlock(ethr_rwlock_t *);
+int ethr_read_lock(ethr_rwlock_t *);
+int ethr_write_unlock(ethr_rwlock_t *);
+int ethr_write_lock(ethr_rwlock_t *);
+#endif
+
+int ethr_time_now(ethr_timeval *);
+int ethr_tsd_key_create(ethr_tsd_key *);
+int ethr_tsd_key_delete(ethr_tsd_key);
+int ethr_tsd_set(ethr_tsd_key, void *);
+void *ethr_tsd_get(ethr_tsd_key);
+
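A hedged sketch, not part of the commit, of the thread-specific data calls declared above; the key would normally be created once during start-up rather than on every call.

    #include "ethread.h"

    static ethr_tsd_key my_key; /* illustrative name */

    int stash_value(void *value)
    {
        int err = ethr_tsd_key_create(&my_key); /* normally done only once */
        if (err != 0)
            return err;
        return ethr_tsd_set(my_key, value);
    }
    /* later, in the same thread: void *v = ethr_tsd_get(my_key); */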
+int ethr_gate_init(ethr_gate *);
+int ethr_gate_destroy(ethr_gate *);
+int ethr_gate_close(ethr_gate *);
+int ethr_gate_let_through(ethr_gate *, unsigned);
+int ethr_gate_wait(ethr_gate *);
+int ethr_gate_swait(ethr_gate *, int);
+
+#ifdef ETHR_HAVE_ETHR_SIG_FUNCS
+#include <signal.h>
+int ethr_sigmask(int how, const sigset_t *set, sigset_t *oset);
+int ethr_sigwait(const sigset_t *set, int *sig);
+#endif
+
+void ethr_compiler_barrier(void);
+
+#ifdef ETHR_TRY_INLINE_FUNCS
+
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+
+static ETHR_INLINE int
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_init)(ethr_atomic_t *var, long i)
+{
+ ethr_native_atomic_init(var, i);
+ return 0;
+}
+
+static ETHR_INLINE int
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_set)(ethr_atomic_t *var, long i)
+{
+ ethr_native_atomic_set(var, i);
+ return 0;
+}
+
+static ETHR_INLINE int
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_read)(ethr_atomic_t *var, long *i)
+{
+ *i = ethr_native_atomic_read(var);
+ return 0;
+}
+
+static ETHR_INLINE int
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_add)(ethr_atomic_t *var, long incr)
+{
+ ethr_native_atomic_add(var, incr);
+ return 0;
+}
+
+static ETHR_INLINE int
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_addtest)(ethr_atomic_t *var,
+ long i,
+ long *testp)
+{
+ *testp = ethr_native_atomic_add_return(var, i);
+ return 0;
+}
+
+static ETHR_INLINE int
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_inc)(ethr_atomic_t *var)
+{
+ ethr_native_atomic_inc(var);
+ return 0;
+}
+
+static ETHR_INLINE int
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_dec)(ethr_atomic_t *var)
+{
+ ethr_native_atomic_dec(var);
+ return 0;
+}
+
+static ETHR_INLINE int
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_inctest)(ethr_atomic_t *var, long *testp)
+{
+ *testp = ethr_native_atomic_inc_return(var);
+ return 0;
+}
+
+static ETHR_INLINE int
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_dectest)(ethr_atomic_t *var, long *testp)
+{
+ *testp = ethr_native_atomic_dec_return(var);
+ return 0;
+}
+
+static ETHR_INLINE int
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_and_old)(ethr_atomic_t *var,
+ long mask,
+ long *old)
+{
+ /*
+ * See "Extra memory barrier requirements" note at the top
+ * of the file.
+ */
+ *old = ethr_native_atomic_and_retold(var, mask);
+ return 0;
+}
+
+static ETHR_INLINE int
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_or_old)(ethr_atomic_t *var,
+ long mask,
+ long *old)
+{
+ /*
+ * See "Extra memory barrier requirements" note at the top
+ * of the file.
+ */
+ *old = ethr_native_atomic_or_retold(var, mask);
+ return 0;
+}
+
+static ETHR_INLINE int
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_xchg)(ethr_atomic_t *var,
+ long new,
+ long *old)
+{
+ *old = ethr_native_atomic_xchg(var, new);
+ return 0;
+}
+
+/*
+ * If *var == expected, replace *var with new, else do nothing.
+ * In any case return the original value of *var in *old.
+ */
+static ETHR_INLINE int
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_cmpxchg)(ethr_atomic_t *var,
+ long new,
+ long expected,
+ long *old)
+{
+ /*
+ * See "Extra memory barrier requirements" note at the top
+ * of the file.
+ */
+ *old = ethr_native_atomic_cmpxchg(var, new, expected);
+ return 0;
+}
+
+#endif /* ETHR_HAVE_NATIVE_ATOMICS */
+
+#ifdef ETHR_HAVE_NATIVE_LOCKS
+
+static ETHR_INLINE int
+ETHR_INLINE_FUNC_NAME_(ethr_spinlock_init)(ethr_spinlock_t *lock)
+{
+ ethr_native_spinlock_init(lock);
+ return 0;
+}
+
+static ETHR_INLINE int
+ETHR_INLINE_FUNC_NAME_(ethr_spinlock_destroy)(ethr_spinlock_t *lock)
+{
+ return 0;
+}
+
+static ETHR_INLINE int
+ETHR_INLINE_FUNC_NAME_(ethr_spin_unlock)(ethr_spinlock_t *lock)
+{
+ ethr_native_spin_unlock(lock);
+ return 0;
+}
+
+static ETHR_INLINE int
+ETHR_INLINE_FUNC_NAME_(ethr_spin_lock)(ethr_spinlock_t *lock)
+{
+ ethr_native_spin_lock(lock);
+ return 0;
+}
+
+static ETHR_INLINE int
+ETHR_INLINE_FUNC_NAME_(ethr_rwlock_init)(ethr_rwlock_t *lock)
+{
+ ethr_native_rwlock_init(lock);
+ return 0;
+}
+
+static ETHR_INLINE int
+ETHR_INLINE_FUNC_NAME_(ethr_rwlock_destroy)(ethr_rwlock_t *lock)
+{
+ return 0;
+}
+
+static ETHR_INLINE int
+ETHR_INLINE_FUNC_NAME_(ethr_read_unlock)(ethr_rwlock_t *lock)
+{
+ ethr_native_read_unlock(lock);
+ return 0;
+}
+
+static ETHR_INLINE int
+ETHR_INLINE_FUNC_NAME_(ethr_read_lock)(ethr_rwlock_t *lock)
+{
+ ethr_native_read_lock(lock);
+ return 0;
+}
+
+static ETHR_INLINE int
+ETHR_INLINE_FUNC_NAME_(ethr_write_unlock)(ethr_rwlock_t *lock)
+{
+ ethr_native_write_unlock(lock);
+ return 0;
+}
+
+static ETHR_INLINE int
+ETHR_INLINE_FUNC_NAME_(ethr_write_lock)(ethr_rwlock_t *lock)
+{
+ ethr_native_write_lock(lock);
+ return 0;
+}
+
+#endif /* ETHR_HAVE_NATIVE_LOCKS */
+
+#endif /* ETHR_TRY_INLINE_FUNCS */
+
+/*
+ * Fallbacks for atomics used in absence of optimized implementation.
+ */
+#ifndef ETHR_HAVE_OPTIMIZED_ATOMIC_OPS
+
+#define ETHR_ATOMIC_ADDR_BITS 4
+#define ETHR_ATOMIC_ADDR_SHIFT 3
+
+typedef struct {
+ union {
+#if defined(ETHR_HAVE_PTHREAD_SPIN_LOCK)
+ pthread_spinlock_t spnlck;
+#else
+ ethr_mutex mtx;
+#endif
+ char buf[ETHR_CACHE_LINE_SIZE];
+ } u;
+} ethr_atomic_protection_t;
+
+extern ethr_atomic_protection_t ethr_atomic_protection__[1 << ETHR_ATOMIC_ADDR_BITS];
+
+
+#if defined(ETHR_HAVE_PTHREAD_SPIN_LOCK)
+
+#define ETHR_ATOMIC_PTR2LCK__(PTR) \
+(&ethr_atomic_protection__[((((unsigned long) (PTR)) >> ETHR_ATOMIC_ADDR_SHIFT) \
+ & ((1 << ETHR_ATOMIC_ADDR_BITS) - 1))].u.spnlck)
+
+
+#define ETHR_ATOMIC_OP_FALLBACK_IMPL__(AP, EXPS) \
+do { \
+ pthread_spinlock_t *slp__ = ETHR_ATOMIC_PTR2LCK__((AP)); \
+ int res__ = pthread_spin_lock(slp__); \
+ if (res__ != 0) \
+ return res__; \
+ { EXPS; } \
+ return pthread_spin_unlock(slp__); \
+} while (0)
+
+#else /* ethread mutex */
+
+#define ETHR_ATOMIC_PTR2LCK__(PTR) \
+(&ethr_atomic_protection__[((((unsigned long) (PTR)) >> ETHR_ATOMIC_ADDR_SHIFT) \
+ & ((1 << ETHR_ATOMIC_ADDR_BITS) - 1))].u.mtx)
+
+#define ETHR_ATOMIC_OP_FALLBACK_IMPL__(AP, EXPS) \
+do { \
+ ethr_mutex *mtxp__ = ETHR_ATOMIC_PTR2LCK__((AP)); \
+ int res__ = ETHR_INLINE_FUNC_NAME_(ethr_mutex_lock)(mtxp__); \
+ if (res__ != 0) \
+ return res__; \
+ { EXPS; } \
+ return ETHR_INLINE_FUNC_NAME_(ethr_mutex_unlock)(mtxp__); \
+} while (0)
+
+#endif /* end ethread mutex */
+
+#ifdef ETHR_TRY_INLINE_FUNCS
+
+static ETHR_INLINE int
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_init)(ethr_atomic_t *var, long i)
+{
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, *var = (ethr_atomic_t) i);
+}
+
+static ETHR_INLINE int
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_set)(ethr_atomic_t *var, long i)
+{
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, *var = (ethr_atomic_t) i);
+}
+
+static ETHR_INLINE int
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_read)(ethr_atomic_t *var, long *i)
+{
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, *i = (long) *var);
+}
+
+static ETHR_INLINE int
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_inctest)(ethr_atomic_t *incp, long *testp)
+{
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(incp, *testp = (long) ++(*incp));
+}
+
+static ETHR_INLINE int
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_dectest)(ethr_atomic_t *decp, long *testp)
+{
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(decp, *testp = (long) --(*decp));
+}
+
+static ETHR_INLINE int
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_add)(ethr_atomic_t *var, long incr)
+{
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, *var += incr);
+}
+
+static ETHR_INLINE int
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_addtest)(ethr_atomic_t *incp,
+ long i,
+ long *testp)
+{
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(incp, *incp += i; *testp = *incp);
+}
+
+static ETHR_INLINE int
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_inc)(ethr_atomic_t *incp)
+{
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(incp, ++(*incp));
+}
+
+static ETHR_INLINE int
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_dec)(ethr_atomic_t *decp)
+{
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(decp, --(*decp));
+}
+
+static ETHR_INLINE int
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_and_old)(ethr_atomic_t *var,
+ long mask,
+ long *old)
+{
+ /*
+ * See "Extra memory barrier requirements" note at the top
+ * of the file.
+ */
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, *old = *var; *var &= mask);
+}
+
+static ETHR_INLINE int
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_or_old)(ethr_atomic_t *var,
+ long mask,
+ long *old)
+{
+ /*
+ * See "Extra memory barrier requirements" note at the top
+ * of the file.
+ */
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, *old = *var; *var |= mask);
+}
+
+static ETHR_INLINE int
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_xchg)(ethr_atomic_t *var,
+ long new,
+ long *old)
+{
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, *old = *var; *var = new);
+}
+
+/*
+ * If *var == expected, replace *var with new, else do nothing.
+ * In any case return the original value of *var in *old.
+ */
+static ETHR_INLINE int
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_cmpxchg)(ethr_atomic_t *var,
+ long new,
+ long expected,
+ long *old)
+{
+ /*
+ * See "Extra memory barrier requirements" note at the top
+ * of the file.
+ */
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(
+ var,
+ long old_val = *var;
+ *old = old_val;
+ if (__builtin_expect(old_val == expected, 1))
+ *var = new;
+ );
+ return 0;
+}
+
+#endif /* #ifdef ETHR_TRY_INLINE_FUNCS */
+#endif /* #ifndef ETHR_HAVE_OPTIMIZED_ATOMIC_OPS */
+
+/*
+ * Fallbacks for spin locks and rw spin locks, used in the absence of an
+ * optimized implementation.
+ */
+#ifndef ETHR_HAVE_OPTIMIZED_LOCKS
+
+#ifdef ETHR_TRY_INLINE_FUNCS
+
+static ETHR_INLINE int
+ETHR_INLINE_FUNC_NAME_(ethr_spinlock_init)(ethr_spinlock_t *lock)
+{
+#if defined(ETHR_HAVE_PTHREAD_SPIN_LOCK)
+ return pthread_spin_init(&lock->spnlck, 0);
+#else
+ return ethr_mutex_init(&lock->mtx);
+#endif
+}
+
+static ETHR_INLINE int
+ETHR_INLINE_FUNC_NAME_(ethr_spinlock_destroy)(ethr_spinlock_t *lock)
+{
+#if defined(ETHR_HAVE_PTHREAD_SPIN_LOCK)
+ return pthread_spin_destroy(&lock->spnlck);
+#else
+ return ethr_mutex_destroy(&lock->mtx);
+#endif
+}
+
+static ETHR_INLINE int
+ETHR_INLINE_FUNC_NAME_(ethr_spin_unlock)(ethr_spinlock_t *lock)
+{
+#if defined(ETHR_HAVE_PTHREAD_SPIN_LOCK)
+ return pthread_spin_unlock(&lock->spnlck);
+#else
+ return ETHR_INLINE_FUNC_NAME_(ethr_mutex_unlock)(&lock->mtx);
+#endif
+}
+
+static ETHR_INLINE int
+ETHR_INLINE_FUNC_NAME_(ethr_spin_lock)(ethr_spinlock_t *lock)
+{
+#if defined(ETHR_HAVE_PTHREAD_SPIN_LOCK)
+ return pthread_spin_lock(&lock->spnlck);
+#else
+ return ETHR_INLINE_FUNC_NAME_(ethr_mutex_lock)(&lock->mtx);
+#endif
+}
+
+#ifdef ETHR_USE_RWMTX_FALLBACK
+#define ETHR_RWLOCK_RWMTX_FALLBACK_NAME_(X) X
+#else
+#define ETHR_RWLOCK_RWMTX_FALLBACK_NAME_(X) ETHR_INLINE_FUNC_NAME_(X)
+#endif
+
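+/*
+ * When pthread spinlocks are available, the rwlock fallback keeps a reader
+ * count in lock->counter, protected by lock->spnlck. A writer sets the
+ * ETHR_RWLOCK_WRITERS bit (which blocks new readers), waits for the reader
+ * count to drain, and then keeps the spinlock held until ethr_write_unlock()
+ * clears the counter and releases it. Without pthread spinlocks an
+ * ethr_rwmutex is used instead.
+ */
+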
+static ETHR_INLINE int
+ETHR_INLINE_FUNC_NAME_(ethr_rwlock_init)(ethr_rwlock_t *lock)
+{
+#if defined(ETHR_HAVE_PTHREAD_SPIN_LOCK)
+ lock->counter = 0;
+ return pthread_spin_init(&lock->spnlck, 0);
+#else
+ return ethr_rwmutex_init(&lock->rwmtx);
+#endif
+}
+
+static ETHR_INLINE int
+ETHR_INLINE_FUNC_NAME_(ethr_rwlock_destroy)(ethr_rwlock_t *lock)
+{
+#if defined(ETHR_HAVE_PTHREAD_SPIN_LOCK)
+ return pthread_spin_destroy(&lock->spnlck);
+#else
+ return ethr_rwmutex_destroy(&lock->rwmtx);
+#endif
+}
+
+static ETHR_INLINE int
+ETHR_INLINE_FUNC_NAME_(ethr_read_unlock)(ethr_rwlock_t *lock)
+{
+#if defined(ETHR_HAVE_PTHREAD_SPIN_LOCK)
+ int res = pthread_spin_lock(&lock->spnlck);
+ if (res != 0)
+ return res;
+ lock->counter--;
+ return pthread_spin_unlock(&lock->spnlck);
+#else
+ return ETHR_RWLOCK_RWMTX_FALLBACK_NAME_(ethr_rwmutex_runlock)(&lock->rwmtx);
+#endif
+}
+
+static ETHR_INLINE int
+ETHR_INLINE_FUNC_NAME_(ethr_read_lock)(ethr_rwlock_t *lock)
+{
+#if defined(ETHR_HAVE_PTHREAD_SPIN_LOCK)
+ int locked = 0;
+ do {
+ int res = pthread_spin_lock(&lock->spnlck);
+ if (res != 0)
+ return res;
+ if ((lock->counter & ETHR_RWLOCK_WRITERS) == 0) {
+ lock->counter++;
+ locked = 1;
+ }
+ res = pthread_spin_unlock(&lock->spnlck);
+ if (res != 0)
+ return res;
+ } while (!locked);
+ return 0;
+#else
+ return ETHR_RWLOCK_RWMTX_FALLBACK_NAME_(ethr_rwmutex_rlock)(&lock->rwmtx);
+#endif
+}
+
+static ETHR_INLINE int
+ETHR_INLINE_FUNC_NAME_(ethr_write_unlock)(ethr_rwlock_t *lock)
+{
+#if defined(ETHR_HAVE_PTHREAD_SPIN_LOCK)
+ lock->counter = 0;
+ return pthread_spin_unlock(&lock->spnlck);
+#else
+ return ETHR_RWLOCK_RWMTX_FALLBACK_NAME_(ethr_rwmutex_rwunlock)(&lock->rwmtx);
+#endif
+}
+
+static ETHR_INLINE int
+ETHR_INLINE_FUNC_NAME_(ethr_write_lock)(ethr_rwlock_t *lock)
+{
+#if defined(ETHR_HAVE_PTHREAD_SPIN_LOCK)
+ while (1) {
+ int res = pthread_spin_lock(&lock->spnlck);
+ if (res != 0)
+ return res;
+ lock->counter |= ETHR_RWLOCK_WRITERS;
+ if (lock->counter == ETHR_RWLOCK_WRITERS)
+ return 0;
+ res = pthread_spin_unlock(&lock->spnlck);
+ if (res != 0)
+ return res;
+ }
+#else
+ return ETHR_RWLOCK_RWMTX_FALLBACK_NAME_(ethr_rwmutex_rwlock)(&lock->rwmtx);
+#endif
+}
+
+#endif /* #ifdef ETHR_TRY_INLINE_FUNCS */
+
+#endif /* ETHR_HAVE_OPTIMIZED_LOCKS */
+
+#if defined(ETHR_HAVE_OPTIMIZED_LOCKS) || defined(ETHR_HAVE_PTHREAD_SPIN_LOCK)
+# define ETHR_HAVE_OPTIMIZED_SPINLOCK
+#endif
+
+#endif /* #ifndef ETHREAD_H__ */
diff --git a/erts/include/internal/ethread.mk.in b/erts/include/internal/ethread.mk.in
new file mode 100644
index 0000000000..13071711e1
--- /dev/null
+++ b/erts/include/internal/ethread.mk.in
@@ -0,0 +1,39 @@
+#
+# %CopyrightBegin%
+#
+# Copyright Ericsson AB 2004-2009. All Rights Reserved.
+#
+# The contents of this file are subject to the Erlang Public License,
+# Version 1.1, (the "License"); you may not use this file except in
+# compliance with the License. You should have received a copy of the
+# Erlang Public License along with this software. If not, it can be
+# retrieved online at http://www.erlang.org/.
+#
+# Software distributed under the License is distributed on an "AS IS"
+# basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+# the License for the specific language governing rights and limitations
+# under the License.
+#
+# %CopyrightEnd%
+#
+
+# ----------------------------------------------------------------------
+
+
+# Name of the library where the ethread implementation is located
+ETHR_LIB_NAME=@ETHR_LIB_NAME@
+
+# Command-line defines to use when compiling
+ETHR_DEFS=@ETHR_DEFS@
+
+# Libraries to link with when linking
+ETHR_LIBS=@ETHR_LIBS@
+
+# Extra libraries to link with. The same as ETHR_LIBS except that the
+# ethread library itself is not included.
+ETHR_X_LIBS=@ETHR_X_LIBS@
+
+# The name of the thread library which the ethread library is based on.
+ETHR_THR_LIB_BASE=@ETHR_THR_LIB_BASE@
+
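+# As an illustration only, a Makefile that includes this file might use
+# the variables above along these (hypothetical) lines:
+#
+#   ETHR_CFLAGS = $(ETHR_DEFS)
+#   ETHR_LDLIBS = $(ETHR_LIBS)
+#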
+# ----------------------------------------------------------------------
diff --git a/erts/include/internal/ethread_header_config.h.in b/erts/include/internal/ethread_header_config.h.in
new file mode 100644
index 0000000000..e5b4946a53
--- /dev/null
+++ b/erts/include/internal/ethread_header_config.h.in
@@ -0,0 +1,55 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2004-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/* Define to the size of pointers */
+#undef ETHR_SIZEOF_PTR
+
+/* Define if you want to disable native ethread implementations */
+#undef ETHR_DISABLE_NATIVE_IMPLS
+
+/* Define if you have win32 threads */
+#undef ETHR_WIN32_THREADS
+
+/* Define if you have pthreads */
+#undef ETHR_PTHREADS
+
+/* Define if you have the <pthread.h> header file. */
+#undef ETHR_HAVE_PTHREAD_H
+
+/* Define if the pthread.h header file is in pthread/mit directory. */
+#undef ETHR_HAVE_MIT_PTHREAD_H
+
+/* Define if you have the pthread_mutexattr_settype function. */
+#undef ETHR_HAVE_PTHREAD_MUTEXATTR_SETTYPE
+
+/* Define if you have the pthread_mutexattr_setkind_np function. */
+#undef ETHR_HAVE_PTHREAD_MUTEXATTR_SETKIND_NP
+
+/* Define if you have the pthread_atfork function. */
+#undef ETHR_HAVE_PTHREAD_ATFORK
+
+/* Define if you have the pthread_spin_lock function. */
+#undef ETHR_HAVE_PTHREAD_SPIN_LOCK
+
+/* Define if you have a pthread_rwlock implementation that can be used */
+#undef ETHR_HAVE_PTHREAD_RWLOCK_INIT
+
+/* Define if you want to turn on extra sanity checking in the ethread library */
+#undef ETHR_XCHK
+
diff --git a/erts/include/internal/i386/atomic.h b/erts/include/internal/i386/atomic.h
new file mode 100644
index 0000000000..3291ad38e5
--- /dev/null
+++ b/erts/include/internal/i386/atomic.h
@@ -0,0 +1,155 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2005-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Native ethread atomics on x86/x86-64.
+ * Author: Mikael Pettersson.
+ *
+ * This code requires a 486 or newer processor.
+ */
+#ifndef ETHREAD_I386_ATOMIC_H
+#define ETHREAD_I386_ATOMIC_H
+
+/* An atomic is an aligned long accessed via locked operations.
+ */
+typedef struct {
+ volatile long counter;
+} ethr_native_atomic_t;
+
+#ifdef ETHR_TRY_INLINE_FUNCS
+
+#ifdef __x86_64__
+#define LONG_SUFFIX "q"
+#else
+#define LONG_SUFFIX "l"
+#endif
+
+static ETHR_INLINE void
+ethr_native_atomic_init(ethr_native_atomic_t *var, long i)
+{
+ var->counter = i;
+}
+#define ethr_native_atomic_set(v, i) ethr_native_atomic_init((v), (i))
+
+static ETHR_INLINE long
+ethr_native_atomic_read(ethr_native_atomic_t *var)
+{
+ return var->counter;
+}
+
+static ETHR_INLINE void
+ethr_native_atomic_add(ethr_native_atomic_t *var, long incr)
+{
+ __asm__ __volatile__(
+ "lock; add" LONG_SUFFIX " %1, %0"
+ : "=m"(var->counter)
+ : "ir"(incr), "m"(var->counter));
+}
+
+static ETHR_INLINE void
+ethr_native_atomic_inc(ethr_native_atomic_t *var)
+{
+ __asm__ __volatile__(
+ "lock; inc" LONG_SUFFIX " %0"
+ : "=m"(var->counter)
+ : "m"(var->counter));
+}
+
+static ETHR_INLINE void
+ethr_native_atomic_dec(ethr_native_atomic_t *var)
+{
+ __asm__ __volatile__(
+ "lock; dec" LONG_SUFFIX " %0"
+ : "=m"(var->counter)
+ : "m"(var->counter));
+}
+
+static ETHR_INLINE long
+ethr_native_atomic_add_return(ethr_native_atomic_t *var, long incr)
+{
+ long tmp;
+
+ tmp = incr;
+ __asm__ __volatile__(
+ "lock; xadd" LONG_SUFFIX " %0, %1" /* xadd didn't exist prior to the 486 */
+ : "=r"(tmp)
+ : "m"(var->counter), "0"(tmp));
+ /* now tmp is the atomic's previous value */
+ return tmp + incr;
+}
+
+#define ethr_native_atomic_inc_return(var) ethr_native_atomic_add_return((var), 1)
+#define ethr_native_atomic_dec_return(var) ethr_native_atomic_add_return((var), -1)
+
+static ETHR_INLINE long
+ethr_native_atomic_cmpxchg(ethr_native_atomic_t *var, long new, long old)
+{
+ __asm__ __volatile__(
+ "lock; cmpxchg" LONG_SUFFIX " %2, %3"
+ : "=a"(old), "=m"(var->counter)
+ : "r"(new), "m"(var->counter), "0"(old)
+ : "cc", "memory"); /* full memory clobber to make this a compiler barrier */
+ return old;
+}
+
+static ETHR_INLINE long
+ethr_native_atomic_and_retold(ethr_native_atomic_t *var, long mask)
+{
+ long tmp, old;
+
+ tmp = var->counter;
+ do {
+ old = tmp;
+ tmp = ethr_native_atomic_cmpxchg(var, tmp & mask, tmp);
+ } while (__builtin_expect(tmp != old, 0));
+ /* now tmp is the atomic's previous value */
+ return tmp;
+}
+
+static ETHR_INLINE long
+ethr_native_atomic_or_retold(ethr_native_atomic_t *var, long mask)
+{
+ long tmp, old;
+
+ tmp = var->counter;
+ do {
+ old = tmp;
+ tmp = ethr_native_atomic_cmpxchg(var, tmp | mask, tmp);
+ } while (__builtin_expect(tmp != old, 0));
+ /* now tmp is the atomic's previous value */
+ return tmp;
+}
+
+static ETHR_INLINE long
+ethr_native_atomic_xchg(ethr_native_atomic_t *var, long val)
+{
+ long tmp = val;
+ __asm__ __volatile__(
+ "xchg" LONG_SUFFIX " %0, %1"
+ : "=r"(tmp)
+ : "m"(var->counter), "0"(tmp));
+ /* now tmp is the atomic's previous value */
+ return tmp;
+}
+
+#undef LONG_SUFFIX
+
+#endif /* ETHR_TRY_INLINE_FUNCS */
+
+#endif /* ETHREAD_I386_ATOMIC_H */
diff --git a/erts/include/internal/i386/ethread.h b/erts/include/internal/i386/ethread.h
new file mode 100644
index 0000000000..fad8b108fa
--- /dev/null
+++ b/erts/include/internal/i386/ethread.h
@@ -0,0 +1,34 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2005-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Low-level ethread support on x86/x86-64.
+ * Author: Mikael Pettersson.
+ */
+#ifndef ETHREAD_I386_ETHREAD_H
+#define ETHREAD_I386_ETHREAD_H
+
+#include "atomic.h"
+#include "spinlock.h"
+#include "rwlock.h"
+
+#define ETHR_HAVE_NATIVE_ATOMICS 1
+#define ETHR_HAVE_NATIVE_LOCKS 1
+
+#endif /* ETHREAD_I386_ETHREAD_H */
diff --git a/erts/include/internal/i386/rwlock.h b/erts/include/internal/i386/rwlock.h
new file mode 100644
index 0000000000..c009be8ef1
--- /dev/null
+++ b/erts/include/internal/i386/rwlock.h
@@ -0,0 +1,134 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2005-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Native ethread rwlocks on x86/x86-64.
+ * Author: Mikael Pettersson.
+ *
+ * This code requires a 486 or newer processor.
+ */
+#ifndef ETHREAD_I386_RWLOCK_H
+#define ETHREAD_I386_RWLOCK_H
+
+/*
+ * The lock is a signed 32-bit counter; zero means unlocked. Readers
+ * add 1 with xadd and succeed if the previous value was >= 0. Writers
+ * subtract ETHR_RWLOCK_OFFSET (1 << 24) and succeed if the previous
+ * value was 0. Failed attempts are undone and the caller spins until
+ * the lock looks free again.
+ */
+typedef struct {
+ volatile int lock;
+} ethr_native_rwlock_t;
+
+#ifdef ETHR_TRY_INLINE_FUNCS
+
+#define ETHR_RWLOCK_OFFSET (1<<24)
+
+static ETHR_INLINE void
+ethr_native_rwlock_init(ethr_native_rwlock_t *lock)
+{
+ lock->lock = 0;
+}
+
+static ETHR_INLINE void
+ethr_native_read_unlock(ethr_native_rwlock_t *lock)
+{
+ __asm__ __volatile__(
+ "lock; decl %0"
+ : "=m"(lock->lock)
+ : "m"(lock->lock));
+}
+
+static ETHR_INLINE int
+ethr_native_read_trylock(ethr_native_rwlock_t *lock)
+{
+ int tmp;
+
+ tmp = 1;
+ __asm__ __volatile__(
+ "lock; xaddl %0, %1"
+ : "=r"(tmp)
+ : "m"(lock->lock), "0"(tmp));
+ /* tmp is now the lock's previous value */
+ if (__builtin_expect(tmp >= 0, 1))
+ return 1;
+ ethr_native_read_unlock(lock);
+ return 0;
+}
+
+static ETHR_INLINE int
+ethr_native_read_is_locked(ethr_native_rwlock_t *lock)
+{
+ return lock->lock < 0;
+}
+
+static ETHR_INLINE void
+ethr_native_read_lock(ethr_native_rwlock_t *lock)
+{
+ for(;;) {
+ if (__builtin_expect(ethr_native_read_trylock(lock) != 0, 1))
+ break;
+ do {
+ __asm__ __volatile__("rep;nop" : "=m"(lock->lock) : : "memory");
+ } while (ethr_native_read_is_locked(lock));
+ }
+}
+
+static ETHR_INLINE void
+ethr_native_write_unlock(ethr_native_rwlock_t *lock)
+{
+ __asm__ __volatile__(
+ "lock; addl %2,%0"
+ : "=m"(lock->lock)
+ : "m"(lock->lock), "i"(ETHR_RWLOCK_OFFSET));
+}
+
+static ETHR_INLINE int
+ethr_native_write_trylock(ethr_native_rwlock_t *lock)
+{
+ int tmp;
+
+ tmp = -ETHR_RWLOCK_OFFSET;
+ __asm__ __volatile__(
+ "lock; xaddl %0, %1"
+ : "=r"(tmp)
+ : "m"(lock->lock), "0"(tmp));
+ /* tmp is now the lock's previous value */
+ if (__builtin_expect(tmp == 0, 1))
+ return 1;
+ ethr_native_write_unlock(lock);
+ return 0;
+}
+
+static ETHR_INLINE int
+ethr_native_write_is_locked(ethr_native_rwlock_t *lock)
+{
+ return lock->lock != 0;
+}
+
+static ETHR_INLINE void
+ethr_native_write_lock(ethr_native_rwlock_t *lock)
+{
+ for(;;) {
+ if (__builtin_expect(ethr_native_write_trylock(lock) != 0, 1))
+ break;
+ do {
+ __asm__ __volatile__("rep;nop" : "=m"(lock->lock) : : "memory");
+ } while (ethr_native_write_is_locked(lock));
+ }
+}
+
+#endif /* ETHR_TRY_INLINE_FUNCS */
+
+#endif /* ETHREAD_I386_RWLOCK_H */
diff --git a/erts/include/internal/i386/spinlock.h b/erts/include/internal/i386/spinlock.h
new file mode 100644
index 0000000000..2b4832e26a
--- /dev/null
+++ b/erts/include/internal/i386/spinlock.h
@@ -0,0 +1,92 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2005-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Native ethread spinlocks on x86/x86-64.
+ * Author: Mikael Pettersson.
+ */
+#ifndef ETHREAD_I386_SPINLOCK_H
+#define ETHREAD_I386_SPINLOCK_H
+
+/* A spinlock is the low byte of an aligned 32-bit integer.
+ * A non-zero value means that the lock is locked.
+ */
+typedef struct {
+ volatile unsigned int lock;
+} ethr_native_spinlock_t;
+
+#ifdef ETHR_TRY_INLINE_FUNCS
+
+static ETHR_INLINE void
+ethr_native_spinlock_init(ethr_native_spinlock_t *lock)
+{
+ lock->lock = 0;
+}
+
+static ETHR_INLINE void
+ethr_native_spin_unlock(ethr_native_spinlock_t *lock)
+{
+ /* To unlock we move 0 to the lock.
+ * On i386 this needs to be a locked operation
+ * to avoid Pentium Pro errata 66 and 92.
+ */
+#if defined(__x86_64__)
+ __asm__ __volatile__("" : : : "memory");
+ *(unsigned char*)&lock->lock = 0;
+#else
+ char tmp = 0;
+ __asm__ __volatile__(
+ "xchgb %b0, %1"
+ : "=q"(tmp), "=m"(lock->lock)
+ : "0"(tmp) : "memory");
+#endif
+}
+
+static ETHR_INLINE int
+ethr_native_spin_trylock(ethr_native_spinlock_t *lock)
+{
+ char tmp = 1;
+ __asm__ __volatile__(
+ "xchgb %b0, %1"
+ : "=q"(tmp), "=m"(lock->lock)
+ : "0"(tmp) : "memory");
+ return tmp == 0;
+}
+
+static ETHR_INLINE int
+ethr_native_spin_is_locked(ethr_native_spinlock_t *lock)
+{
+ return *(volatile unsigned char*)&lock->lock != 0;
+}
+
+static ETHR_INLINE void
+ethr_native_spin_lock(ethr_native_spinlock_t *lock)
+{
+ for(;;) {
+ if (__builtin_expect(ethr_native_spin_trylock(lock) != 0, 1))
+ break;
+ do {
+ __asm__ __volatile__("rep;nop" : "=m"(lock->lock) : : "memory");
+ } while (ethr_native_spin_is_locked(lock));
+ }
+}
+
+#endif /* ETHR_TRY_INLINE_FUNCS */
+
+#endif /* ETHREAD_I386_SPINLOCK_H */
diff --git a/erts/include/internal/ppc32/atomic.h b/erts/include/internal/ppc32/atomic.h
new file mode 100644
index 0000000000..fa701c6a92
--- /dev/null
+++ b/erts/include/internal/ppc32/atomic.h
@@ -0,0 +1,209 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2005-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Native ethread atomics on PowerPC.
+ * Author: Mikael Pettersson.
+ *
+ * Based on the examples in Appendix E of Motorola's
+ * "Programming Environments Manual For 32-Bit Implementations
+ * of the PowerPC Architecture".
+ */
+#ifndef ETHREAD_PPC_ATOMIC_H
+#define ETHREAD_PPC_ATOMIC_H
+
+typedef struct {
+ volatile int counter;
+} ethr_native_atomic_t;
+
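+/*
+ * All read-modify-write operations below are lwarx/stwcx. reservation
+ * loops that retry until the conditional store succeeds. The leading
+ * eieio orders earlier stores before the operation, and the trailing
+ * isync keeps later instructions from starting until the operation has
+ * completed, giving the operations barrier semantics.
+ */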
+
+#ifdef ETHR_TRY_INLINE_FUNCS
+
+static ETHR_INLINE void
+ethr_native_atomic_init(ethr_native_atomic_t *var, int i)
+{
+ var->counter = i;
+}
+#define ethr_native_atomic_set(v, i) ethr_native_atomic_init((v), (i))
+
+static ETHR_INLINE int
+ethr_native_atomic_read(ethr_native_atomic_t *var)
+{
+ return var->counter;
+}
+
+static ETHR_INLINE int
+ethr_native_atomic_add_return(ethr_native_atomic_t *var, int incr)
+{
+ int tmp;
+
+ __asm__ __volatile__(
+ "eieio\n\t"
+ "1:\t"
+ "lwarx %0,0,%1\n\t"
+ "add %0,%2,%0\n\t"
+ "stwcx. %0,0,%1\n\t"
+ "bne- 1b\n\t"
+ "isync"
+ : "=&r"(tmp)
+ : "r"(&var->counter), "r"(incr)
+ : "cc", "memory");
+ return tmp;
+}
+
+static ETHR_INLINE void
+ethr_native_atomic_add(ethr_native_atomic_t *var, int incr)
+{
+ /* XXX: could use weaker version here w/o eieio+isync */
+ (void)ethr_native_atomic_add_return(var, incr);
+}
+
+static ETHR_INLINE int
+ethr_native_atomic_inc_return(ethr_native_atomic_t *var)
+{
+ int tmp;
+
+ __asm__ __volatile__(
+ "eieio\n\t"
+ "1:\t"
+ "lwarx %0,0,%1\n\t"
+ "addic %0,%0,1\n\t" /* due to addi's (rA|0) behaviour */
+ "stwcx. %0,0,%1\n\t"
+ "bne- 1b\n\t"
+ "isync"
+ : "=&r"(tmp)
+ : "r"(&var->counter)
+ : "cc", "memory");
+ return tmp;
+}
+
+static ETHR_INLINE void
+ethr_native_atomic_inc(ethr_native_atomic_t *var)
+{
+ /* XXX: could use weaker version here w/o eieio+isync */
+ (void)ethr_native_atomic_inc_return(var);
+}
+
+static ETHR_INLINE int
+ethr_native_atomic_dec_return(ethr_native_atomic_t *var)
+{
+ int tmp;
+
+ __asm__ __volatile__(
+ "eieio\n\t"
+ "1:\t"
+ "lwarx %0,0,%1\n\t"
+ "addic %0,%0,-1\n\t"
+ "stwcx. %0,0,%1\n\t"
+ "bne- 1b\n\t"
+ "isync"
+ : "=&r"(tmp)
+ : "r"(&var->counter)
+ : "cc", "memory");
+ return tmp;
+}
+
+static ETHR_INLINE void
+ethr_native_atomic_dec(ethr_native_atomic_t *var)
+{
+ /* XXX: could use weaker version here w/o eieio+isync */
+ (void)ethr_native_atomic_dec_return(var);
+}
+
+static ETHR_INLINE int
+ethr_native_atomic_and_retold(ethr_native_atomic_t *var, int mask)
+{
+ int old, new;
+
+ __asm__ __volatile__(
+ "eieio\n\t"
+ "1:\t"
+ "lwarx %0,0,%2\n\t"
+ "and %1,%0,%3\n\t"
+ "stwcx. %1,0,%2\n\t"
+ "bne- 1b\n\t"
+ "isync"
+ : "=&r"(old), "=&r"(new)
+ : "r"(&var->counter), "r"(mask)
+ : "cc", "memory");
+ return old;
+}
+
+static ETHR_INLINE int
+ethr_native_atomic_or_retold(ethr_native_atomic_t *var, int mask)
+{
+ int old, new;
+
+ __asm__ __volatile__(
+ "eieio\n\t"
+ "1:\t"
+ "lwarx %0,0,%2\n\t"
+ "or %1,%0,%3\n\t"
+ "stwcx. %1,0,%2\n\t"
+ "bne- 1b\n\t"
+ "isync"
+ : "=&r"(old), "=&r"(new)
+ : "r"(&var->counter), "r"(mask)
+ : "cc", "memory");
+ return old;
+}
+
+static ETHR_INLINE int
+ethr_native_atomic_xchg(ethr_native_atomic_t *var, int val)
+{
+ int tmp;
+
+ __asm__ __volatile__(
+ "eieio\n\t"
+ "1:\t"
+ "lwarx %0,0,%1\n\t"
+ "stwcx. %2,0,%1\n\t"
+ "bne- 1b\n\t"
+ "isync"
+ : "=&r"(tmp)
+ : "r"(&var->counter), "r"(val)
+ : "cc", "memory");
+ return tmp;
+}
+
+static ETHR_INLINE int
+ethr_native_atomic_cmpxchg(ethr_native_atomic_t *var, int new, int expected)
+{
+ int old;
+
+ __asm__ __volatile__(
+ "eieio\n\t"
+ "1:\t"
+ "lwarx %0,0,%2\n\t"
+ "cmpw 0,%0,%3\n\t"
+ "bne 2f\n\t"
+ "stwcx. %1,0,%2\n\t"
+ "bne- 1b\n\t"
+ "isync\n"
+ "2:"
+ : "=&r"(old)
+ : "r"(new), "r"(&var->counter), "r"(expected)
+ : "cc", "memory");
+
+ return old;
+}
+
+#endif /* ETHR_TRY_INLINE_FUNCS */
+
+#endif /* ETHREAD_PPC_ATOMIC_H */
diff --git a/erts/include/internal/ppc32/ethread.h b/erts/include/internal/ppc32/ethread.h
new file mode 100644
index 0000000000..d2a72c3dc1
--- /dev/null
+++ b/erts/include/internal/ppc32/ethread.h
@@ -0,0 +1,34 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2005-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Low-level ethread support on PowerPC.
+ * Author: Mikael Pettersson.
+ */
+#ifndef ETHREAD_PPC32_ETHREAD_H
+#define ETHREAD_PPC32_ETHREAD_H
+
+#include "atomic.h"
+#include "spinlock.h"
+#include "rwlock.h"
+
+#define ETHR_HAVE_NATIVE_ATOMICS 1
+#define ETHR_HAVE_NATIVE_LOCKS 1
+
+#endif /* ETHREAD_PPC32_ETHREAD_H */
diff --git a/erts/include/internal/ppc32/rwlock.h b/erts/include/internal/ppc32/rwlock.h
new file mode 100644
index 0000000000..9bdab12826
--- /dev/null
+++ b/erts/include/internal/ppc32/rwlock.h
@@ -0,0 +1,153 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2005-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Native ethread rwlocks on PowerPC.
+ * Author: Mikael Pettersson.
+ *
+ * Based on the examples in Appendix E of Motorola's
+ * "Programming Environments Manual For 32-Bit Implementations
+ * of the PowerPC Architecture". Uses eieio instead of sync
+ * in the unlock sequence, as suggested in the manual.
+ */
+#ifndef ETHREAD_PPC_RWLOCK_H
+#define ETHREAD_PPC_RWLOCK_H
+
+/* Unlocked if zero, read-locked if negative, write-locked if +1. */
+typedef struct {
+ volatile int lock;
+} ethr_native_rwlock_t;
+
+#ifdef ETHR_TRY_INLINE_FUNCS
+
+static ETHR_INLINE void
+ethr_native_rwlock_init(ethr_native_rwlock_t *lock)
+{
+ lock->lock = 0;
+}
+
+static ETHR_INLINE void
+ethr_native_read_unlock(ethr_native_rwlock_t *lock)
+{
+ int tmp;
+
+ /* this is eieio + ethr_native_atomic_inc() - isync */
+ __asm__ __volatile__(
+ "eieio\n\t"
+ "1:\t"
+ "lwarx %0,0,%1\n\t"
+ "addic %0,%0,1\n\t"
+ "stwcx. %0,0,%1\n\t"
+ "bne- 1b"
+ : "=&r"(tmp)
+ : "r"(&lock->lock)
+ : "cr0", "memory");
+}
+
+static ETHR_INLINE int
+ethr_native_read_trylock(ethr_native_rwlock_t *lock)
+{
+ int counter;
+
+ __asm__ __volatile__(
+ "1:\t"
+ "lwarx %0,0,%1\n\t" /* read lock to counter */
+ "addic. %0,%0,-1\n\t" /* decrement counter */
+ "bge- 2f\n\t" /* bail if >= 0 (write-locked) */
+ "stwcx. %0,0,%1\n\t" /* try to store decremented counter */
+ "bne- 1b\n\t" /* loop if lost reservation */
+ "isync\n\t" /* wait for previous insns to complete */
+ "2:"
+ : "=&r"(counter)
+ : "r"(&lock->lock)
+ : "cr0", "memory"
+#if __GNUC__ > 2
+ ,"xer"
+#endif
+ );
+ return counter < 0;
+}
+
+static ETHR_INLINE int
+ethr_native_read_is_locked(ethr_native_rwlock_t *lock)
+{
+ return lock->lock > 0;
+}
+
+static ETHR_INLINE void
+ethr_native_read_lock(ethr_native_rwlock_t *lock)
+{
+ for(;;) {
+ if (__builtin_expect(ethr_native_read_trylock(lock) != 0, 1))
+ break;
+ do {
+ __asm__ __volatile__("":::"memory");
+ } while (ethr_native_read_is_locked(lock));
+ }
+}
+
+static ETHR_INLINE void
+ethr_native_write_unlock(ethr_native_rwlock_t *lock)
+{
+ __asm__ __volatile__("eieio" : : : "memory");
+ lock->lock = 0;
+}
+
+static ETHR_INLINE int
+ethr_native_write_trylock(ethr_native_rwlock_t *lock)
+{
+ int prev;
+
+ /* identical to ethr_native_spin_trylock() */
+ __asm__ __volatile__(
+ "1:\t"
+ "lwarx %0,0,%1\n\t" /* read lock to prev */
+ "cmpwi 0,%0,0\n\t"
+ "bne- 2f\n\t" /* bail if non-zero (any lock) */
+ "stwcx. %2,0,%1\n\t" /* try to make the lock positive */
+ "bne- 1b\n\t" /* loop if lost reservation */
+ "isync\n\t" /* wait for previous insns to complete */
+ "2:"
+ : "=&r"(prev)
+ : "r"(&lock->lock), "r"(1)
+ : "cr0", "memory");
+ return prev == 0;
+}
+
+static ETHR_INLINE int
+ethr_native_write_is_locked(ethr_native_rwlock_t *lock)
+{
+ return lock->lock != 0;
+}
+
+static ETHR_INLINE void
+ethr_native_write_lock(ethr_native_rwlock_t *lock)
+{
+ for(;;) {
+ if (__builtin_expect(ethr_native_write_trylock(lock) != 0, 1))
+ break;
+ do {
+ __asm__ __volatile__("":::"memory");
+ } while (ethr_native_write_is_locked(lock));
+ }
+}
+
+#endif /* ETHR_TRY_INLINE_FUNCS */
+
+#endif /* ETHREAD_PPC_RWLOCK_H */
diff --git a/erts/include/internal/ppc32/spinlock.h b/erts/include/internal/ppc32/spinlock.h
new file mode 100644
index 0000000000..034c20c143
--- /dev/null
+++ b/erts/include/internal/ppc32/spinlock.h
@@ -0,0 +1,93 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2005-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Native ethread spinlocks on PowerPC.
+ * Author: Mikael Pettersson.
+ *
+ * Based on the examples in Appendix E of Motorola's
+ * "Programming Environments Manual For 32-Bit Implementations
+ * of the PowerPC Architecture". Uses eieio instead of sync
+ * in the unlock sequence, as suggested in the manual.
+ */
+#ifndef ETHREAD_PPC_SPINLOCK_H
+#define ETHREAD_PPC_SPINLOCK_H
+
+/* Unlocked if zero, locked if non-zero. */
+typedef struct {
+ volatile unsigned int lock;
+} ethr_native_spinlock_t;
+
+#ifdef ETHR_TRY_INLINE_FUNCS
+
+static ETHR_INLINE void
+ethr_native_spinlock_init(ethr_native_spinlock_t *lock)
+{
+ lock->lock = 0;
+}
+
+static ETHR_INLINE void
+ethr_native_spin_unlock(ethr_native_spinlock_t *lock)
+{
+ __asm__ __volatile__("eieio" : : : "memory");
+ lock->lock = 0;
+}
+
+static ETHR_INLINE int
+ethr_native_spin_trylock(ethr_native_spinlock_t *lock)
+{
+ unsigned int prev;
+
+ __asm__ __volatile__(
+ "1:\t"
+ "lwarx %0,0,%1\n\t" /* read lock to prev */
+ "cmpwi 0,%0,0\n\t"
+ "bne- 2f\n\t" /* bail if non-zero/locked */
+ "stwcx. %2,0,%1\n\t" /* try to make the lock non-zero */
+ "bne- 1b\n\t" /* loop if lost reservation */
+ "isync\n\t" /* wait for previous insns to complete */
+ "2:"
+ : "=&r"(prev)
+ : "r"(&lock->lock), "r"(1)
+ : "cr0", "memory");
+ return prev == 0;
+}
+
+static ETHR_INLINE int
+ethr_native_spin_is_locked(ethr_native_spinlock_t *lock)
+{
+ return lock->lock != 0;
+}
+
+static ETHR_INLINE void
+ethr_native_spin_lock(ethr_native_spinlock_t *lock)
+{
+ for(;;) {
+ if (__builtin_expect(ethr_native_spin_trylock(lock) != 0, 1))
+ break;
+ do {
+ __asm__ __volatile__("":::"memory");
+ } while (ethr_native_spin_is_locked(lock));
+ }
+}
+
+#endif /* ETHR_TRY_INLINE_FUNCS */
+
+#endif /* ETHREAD_PPC_SPINLOCK_H */
diff --git a/erts/include/internal/sparc32/atomic.h b/erts/include/internal/sparc32/atomic.h
new file mode 100644
index 0000000000..d6fdc6b2a4
--- /dev/null
+++ b/erts/include/internal/sparc32/atomic.h
@@ -0,0 +1,173 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2005-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Native ethread atomics on SPARC V9.
+ * Author: Mikael Pettersson.
+ */
+#ifndef ETHR_SPARC32_ATOMIC_H
+#define ETHR_SPARC32_ATOMIC_H
+
+typedef struct {
+ volatile long counter;
+} ethr_native_atomic_t;
+
+#ifdef ETHR_TRY_INLINE_FUNCS
+
+#if defined(__arch64__)
+#define CASX "casx"
+#else
+#define CASX "cas"
+#endif
+
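+/*
+ * All read-modify-write operations below compute the new value and retry
+ * a cas/casx (CASX expands to the proper one for 32/64-bit mode) until
+ * the value read matches the value seen by the compare-and-swap. The
+ * membar before and after each operation provides the required ordering.
+ */
+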
+static ETHR_INLINE void
+ethr_native_atomic_init(ethr_native_atomic_t *var, long i)
+{
+ var->counter = i;
+}
+#define ethr_native_atomic_set(v, i) ethr_native_atomic_init((v), (i))
+
+static ETHR_INLINE long
+ethr_native_atomic_read(ethr_native_atomic_t *var)
+{
+ return var->counter;
+}
+
+static ETHR_INLINE long
+ethr_native_atomic_add_return(ethr_native_atomic_t *var, long incr)
+{
+ long old, tmp;
+
+ __asm__ __volatile__("membar #LoadLoad|#StoreLoad\n");
+ do {
+ old = var->counter;
+ tmp = old+incr;
+ __asm__ __volatile__(
+ CASX " [%2], %1, %0"
+ : "=&r"(tmp)
+ : "r"(old), "r"(&var->counter), "0"(tmp)
+ : "memory");
+ } while (__builtin_expect(old != tmp, 0));
+ __asm__ __volatile__("membar #StoreLoad|#StoreStore");
+ return old+incr;
+}
+
+static ETHR_INLINE void
+ethr_native_atomic_add(ethr_native_atomic_t *var, long incr)
+{
+ (void)ethr_native_atomic_add_return(var, incr);
+}
+
+static ETHR_INLINE long
+ethr_native_atomic_inc_return(ethr_native_atomic_t *var)
+{
+ return ethr_native_atomic_add_return(var, 1);
+}
+
+static ETHR_INLINE void
+ethr_native_atomic_inc(ethr_native_atomic_t *var)
+{
+ (void)ethr_native_atomic_add_return(var, 1);
+}
+
+static ETHR_INLINE long
+ethr_native_atomic_dec_return(ethr_native_atomic_t *var)
+{
+ return ethr_native_atomic_add_return(var, -1);
+}
+
+static ETHR_INLINE void
+ethr_native_atomic_dec(ethr_native_atomic_t *var)
+{
+ (void)ethr_native_atomic_add_return(var, -1);
+}
+
+static ETHR_INLINE long
+ethr_native_atomic_and_retold(ethr_native_atomic_t *var, long mask)
+{
+ long old, tmp;
+
+ __asm__ __volatile__("membar #LoadLoad|#StoreLoad\n");
+ do {
+ old = var->counter;
+ tmp = old & mask;
+ __asm__ __volatile__(
+ CASX " [%2], %1, %0"
+ : "=&r"(tmp)
+ : "r"(old), "r"(&var->counter), "0"(tmp)
+ : "memory");
+ } while (__builtin_expect(old != tmp, 0));
+ __asm__ __volatile__("membar #StoreLoad|#StoreStore");
+ return old;
+}
+
+static ETHR_INLINE long
+ethr_native_atomic_or_retold(ethr_native_atomic_t *var, long mask)
+{
+ long old, tmp;
+
+ __asm__ __volatile__("membar #LoadLoad|#StoreLoad\n");
+ do {
+ old = var->counter;
+ tmp = old | mask;
+ __asm__ __volatile__(
+ CASX " [%2], %1, %0"
+ : "=&r"(tmp)
+ : "r"(old), "r"(&var->counter), "0"(tmp)
+ : "memory");
+ } while (__builtin_expect(old != tmp, 0));
+ __asm__ __volatile__("membar #StoreLoad|#StoreStore");
+ return old;
+}
+
+static ETHR_INLINE long
+ethr_native_atomic_xchg(ethr_native_atomic_t *var, long val)
+{
+ long old, new;
+
+ __asm__ __volatile__("membar #LoadLoad|#StoreLoad");
+ do {
+ old = var->counter;
+ new = val;
+ __asm__ __volatile__(
+ CASX " [%2], %1, %0"
+ : "=&r"(new)
+ : "r"(old), "r"(&var->counter), "0"(new)
+ : "memory");
+ } while (__builtin_expect(old != new, 0));
+ __asm__ __volatile__("membar #StoreLoad|#StoreStore");
+ return old;
+}
+
+static ETHR_INLINE long
+ethr_native_atomic_cmpxchg(ethr_native_atomic_t *var, long new, long old)
+{
+ __asm__ __volatile__("membar #LoadLoad|#StoreLoad\n");
+ __asm__ __volatile__(
+ CASX " [%2], %1, %0"
+ : "=&r"(new)
+ : "r"(old), "r"(&var->counter), "0"(new)
+ : "memory");
+ __asm__ __volatile__("membar #StoreLoad|#StoreStore");
+ return new;
+}
+
+#endif /* ETHR_TRY_INLINE_FUNCS */
+
+#endif /* ETHR_SPARC32_ATOMIC_H */
diff --git a/erts/include/internal/sparc32/ethread.h b/erts/include/internal/sparc32/ethread.h
new file mode 100644
index 0000000000..1d55399640
--- /dev/null
+++ b/erts/include/internal/sparc32/ethread.h
@@ -0,0 +1,34 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2005-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Low-level ethread support on SPARC V9.
+ * Author: Mikael Pettersson.
+ */
+#ifndef ETHREAD_SPARC32_ETHREAD_H
+#define ETHREAD_SPARC32_ETHREAD_H
+
+#include "atomic.h"
+#include "spinlock.h"
+#include "rwlock.h"
+
+#define ETHR_HAVE_NATIVE_ATOMICS 1
+#define ETHR_HAVE_NATIVE_LOCKS 1
+
+#endif /* ETHREAD_SPARC32_ETHREAD_H */
diff --git a/erts/include/internal/sparc32/rwlock.h b/erts/include/internal/sparc32/rwlock.h
new file mode 100644
index 0000000000..12448e0b06
--- /dev/null
+++ b/erts/include/internal/sparc32/rwlock.h
@@ -0,0 +1,142 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2005-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Native ethread rwlocks on SPARC V9.
+ * Author: Mikael Pettersson.
+ */
+#ifndef ETHREAD_SPARC32_RWLOCK_H
+#define ETHREAD_SPARC32_RWLOCK_H
+
+/* Unlocked if zero, read-locked if positive, write-locked if -1. */
+typedef struct {
+ volatile int lock;
+} ethr_native_rwlock_t;
+
+#ifdef ETHR_TRY_INLINE_FUNCS
+
+static ETHR_INLINE void
+ethr_native_rwlock_init(ethr_native_rwlock_t *lock)
+{
+ lock->lock = 0;
+}
+
+static ETHR_INLINE void
+ethr_native_read_unlock(ethr_native_rwlock_t *lock)
+{
+ unsigned int old, new;
+
+ __asm__ __volatile__("membar #LoadLoad|#StoreLoad");
+ do {
+ old = lock->lock;
+ new = old-1;
+ __asm__ __volatile__(
+ "cas [%2], %1, %0"
+ : "=&r"(new)
+ : "r"(old), "r"(&lock->lock), "0"(new)
+ : "memory");
+ } while (__builtin_expect(old != new, 0));
+}
+
+static ETHR_INLINE int
+ethr_native_read_trylock(ethr_native_rwlock_t *lock)
+{
+ int old, new;
+
+ do {
+ old = lock->lock;
+ if (__builtin_expect(old < 0, 0))
+ return 0;
+ new = old+1;
+ __asm__ __volatile__(
+ "cas [%2], %1, %0"
+ : "=&r"(new)
+ : "r"(old), "r"(&lock->lock), "0"(new)
+ : "memory");
+ } while (__builtin_expect(old != new, 0));
+ __asm__ __volatile__("membar #StoreLoad|#StoreStore");
+ return 1;
+}
+
+static ETHR_INLINE int
+ethr_native_read_is_locked(ethr_native_rwlock_t *lock)
+{
+ return lock->lock < 0;
+}
+
+static ETHR_INLINE void
+ethr_native_read_lock(ethr_native_rwlock_t *lock)
+{
+ for(;;) {
+ if (__builtin_expect(ethr_native_read_trylock(lock) != 0, 1))
+ break;
+ do {
+ __asm__ __volatile__("membar #LoadLoad");
+ } while (ethr_native_read_is_locked(lock));
+ }
+}
+
+static ETHR_INLINE void
+ethr_native_write_unlock(ethr_native_rwlock_t *lock)
+{
+ __asm__ __volatile__("membar #LoadStore|#StoreStore");
+ lock->lock = 0;
+}
+
+static ETHR_INLINE int
+ethr_native_write_trylock(ethr_native_rwlock_t *lock)
+{
+ unsigned int old, new;
+
+ do {
+ old = lock->lock;
+ if (__builtin_expect(old != 0, 0))
+ return 0;
+ new = -1;
+ __asm__ __volatile__(
+ "cas [%2], %1, %0"
+ : "=&r"(new)
+ : "r"(old), "r"(&lock->lock), "0"(new)
+ : "memory");
+ } while (__builtin_expect(old != new, 0));
+ __asm__ __volatile__("membar #StoreLoad|#StoreStore");
+ return 1;
+}
+
+static ETHR_INLINE int
+ethr_native_write_is_locked(ethr_native_rwlock_t *lock)
+{
+ return lock->lock != 0;
+}
+
+static ETHR_INLINE void
+ethr_native_write_lock(ethr_native_rwlock_t *lock)
+{
+ for(;;) {
+ if (__builtin_expect(ethr_native_write_trylock(lock) != 0, 1))
+ break;
+ do {
+ __asm__ __volatile__("membar #LoadLoad");
+ } while (ethr_native_write_is_locked(lock));
+ }
+}
+
+#endif /* ETHR_TRY_INLINE_FUNCS */
+
+#endif /* ETHREAD_SPARC32_RWLOCK_H */
diff --git a/erts/include/internal/sparc32/spinlock.h b/erts/include/internal/sparc32/spinlock.h
new file mode 100644
index 0000000000..b4fe48b714
--- /dev/null
+++ b/erts/include/internal/sparc32/spinlock.h
@@ -0,0 +1,81 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2005-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Native ethread spinlocks on SPARC V9.
+ * Author: Mikael Pettersson.
+ */
+#ifndef ETHR_SPARC32_SPINLOCK_H
+#define ETHR_SPARC32_SPINLOCK_H
+
+/* Locked with ldstub, so unlocked when 0 and locked when non-zero. */
+typedef struct {
+ volatile unsigned char lock;
+} ethr_native_spinlock_t;
+
+#ifdef ETHR_TRY_INLINE_FUNCS
+
+static ETHR_INLINE void
+ethr_native_spinlock_init(ethr_native_spinlock_t *lock)
+{
+ lock->lock = 0;
+}
+
+static ETHR_INLINE void
+ethr_native_spin_unlock(ethr_native_spinlock_t *lock)
+{
+ __asm__ __volatile__("membar #LoadStore|#StoreStore");
+ lock->lock = 0;
+}
+
+static ETHR_INLINE int
+ethr_native_spin_trylock(ethr_native_spinlock_t *lock)
+{
+ unsigned int prev;
+
+ __asm__ __volatile__(
+ "ldstub [%1], %0\n\t"
+ "membar #StoreLoad|#StoreStore"
+ : "=r"(prev)
+ : "r"(&lock->lock)
+ : "memory");
+ return prev == 0;
+}
+
+static ETHR_INLINE int
+ethr_native_spin_is_locked(ethr_native_spinlock_t *lock)
+{
+ return lock->lock != 0;
+}
+
+static ETHR_INLINE void
+ethr_native_spin_lock(ethr_native_spinlock_t *lock)
+{
+ for(;;) {
+ if (__builtin_expect(ethr_native_spin_trylock(lock) != 0, 1))
+ break;
+ do {
+ __asm__ __volatile__("membar #LoadLoad");
+ } while (ethr_native_spin_is_locked(lock));
+ }
+}
+
+#endif /* ETHR_TRY_INLINE_FUNCS */
+
+#endif /* ETHR_SPARC32_SPINLOCK_H */
diff --git a/erts/include/internal/sparc64/ethread.h b/erts/include/internal/sparc64/ethread.h
new file mode 100644
index 0000000000..65fd58d492
--- /dev/null
+++ b/erts/include/internal/sparc64/ethread.h
@@ -0,0 +1,20 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2007-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+#include "../sparc32/ethread.h"
diff --git a/erts/include/internal/tile/atomic.h b/erts/include/internal/tile/atomic.h
new file mode 100644
index 0000000000..0622b53729
--- /dev/null
+++ b/erts/include/internal/tile/atomic.h
@@ -0,0 +1,128 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2008-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Native ethread atomics on TILE64/TILEPro.
+ *
+ */
+#ifndef ETHREAD_TILE_ATOMIC_H
+#define ETHREAD_TILE_ATOMIC_H
+
+#include <atomic.h>
+
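+/*
+ * The atomic_*() calls used below come from the Tilera <atomic.h>
+ * included above, and __insn_mf() issues a memory fence instruction.
+ */
+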
+/* An atomic is an aligned long accessed via the Tilera atomic routines.
+ */
+typedef struct {
+ volatile long counter;
+} ethr_native_atomic_t;
+
+#ifdef ETHR_TRY_INLINE_FUNCS
+
+static ETHR_INLINE void
+ethr_native_atomic_init(ethr_native_atomic_t *var, long i)
+{
+ var->counter = i;
+}
+
+static ETHR_INLINE void
+ethr_native_atomic_set(ethr_native_atomic_t *var, long i)
+{
+ __insn_mf();
+ atomic_exchange_acq(&var->counter, i);
+}
+
+static ETHR_INLINE long
+ethr_native_atomic_read(ethr_native_atomic_t *var)
+{
+ return var->counter;
+}
+
+static ETHR_INLINE void
+ethr_native_atomic_add(ethr_native_atomic_t *var, long incr)
+{
+ __insn_mf();
+ atomic_add(&var->counter, incr);
+}
+
+static ETHR_INLINE void
+ethr_native_atomic_inc(ethr_native_atomic_t *var)
+{
+ __insn_mf();
+ atomic_increment(&var->counter);
+}
+
+static ETHR_INLINE void
+ethr_native_atomic_dec(ethr_native_atomic_t *var)
+{
+ __insn_mf();
+ atomic_decrement(&var->counter);
+}
+
+static ETHR_INLINE long
+ethr_native_atomic_add_return(ethr_native_atomic_t *var, long incr)
+{
+ __insn_mf();
+ return atomic_exchange_and_add(&var->counter, incr) + incr;
+}
+
+static ETHR_INLINE long
+ethr_native_atomic_inc_return(ethr_native_atomic_t *var)
+{
+    return ethr_native_atomic_add_return(var, 1);
+}
+
+static ETHR_INLINE long
+ethr_native_atomic_dec_return(ethr_native_atomic_t *var)
+{
+    return ethr_native_atomic_add_return(var, -1);
+}
+
+static ETHR_INLINE long
+ethr_native_atomic_and_retold(ethr_native_atomic_t *var, long mask)
+{
+ /* Implement a barrier suitable for a mutex unlock. */
+ __insn_mf();
+ return atomic_and_val(&var->counter, mask);
+}
+
+static ETHR_INLINE long
+ethr_native_atomic_or_retold(ethr_native_atomic_t *var, long mask)
+{
+ __insn_mf();
+ return atomic_or_val(&var->counter, mask);
+}
+
+static ETHR_INLINE long
+ethr_native_atomic_xchg(ethr_native_atomic_t *var, long val)
+{
+ __insn_mf();
+ return atomic_exchange_acq(&var->counter, val);
+}
+
+static ETHR_INLINE long
+ethr_native_atomic_cmpxchg(ethr_native_atomic_t *var, long new, long expected)
+{
+ /* Implement a barrier suitable for a mutex unlock. */
+ __insn_mf();
+ return atomic_compare_and_exchange_val_acq(&var->counter, new, expected);
+}
+
+#endif /* ETHR_TRY_INLINE_FUNCS */
+
+#endif /* ETHREAD_TILE_ATOMIC_H */
diff --git a/erts/include/internal/tile/ethread.h b/erts/include/internal/tile/ethread.h
new file mode 100644
index 0000000000..2de4d42bc6
--- /dev/null
+++ b/erts/include/internal/tile/ethread.h
@@ -0,0 +1,30 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2008-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Low-level ethread support on TILE64/TILEPro.
+ */
+#ifndef ETHREAD_TILE_ETHREAD_H
+#define ETHREAD_TILE_ETHREAD_H
+
+#include "atomic.h"
+
+#define ETHR_HAVE_NATIVE_ATOMICS 1
+
+#endif /* ETHREAD_TILE_ETHREAD_H */
diff --git a/erts/include/internal/x86_64/ethread.h b/erts/include/internal/x86_64/ethread.h
new file mode 100644
index 0000000000..59c3980535
--- /dev/null
+++ b/erts/include/internal/x86_64/ethread.h
@@ -0,0 +1,20 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2005-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+#include "../i386/ethread.h"