Diffstat (limited to 'erts/lib_src')
-rw-r--r--  erts/lib_src/Makefile                          |   22
-rw-r--r--  erts/lib_src/Makefile.in                       |  615
-rw-r--r--  erts/lib_src/common/erl_memory_trace_parser.c  | 1956
-rw-r--r--  erts/lib_src/common/erl_misc_utils.c           |  967
-rw-r--r--  erts/lib_src/common/erl_printf.c               |  427
-rw-r--r--  erts/lib_src/common/erl_printf_format.c        |  940
-rw-r--r--  erts/lib_src/common/ethread.c                  | 3346
7 files changed, 8273 insertions, 0 deletions
diff --git a/erts/lib_src/Makefile b/erts/lib_src/Makefile
new file mode 100644
index 0000000000..f94e47a856
--- /dev/null
+++ b/erts/lib_src/Makefile
@@ -0,0 +1,22 @@
+#
+# %CopyrightBegin%
+#
+# Copyright Ericsson AB 2004-2009. All Rights Reserved.
+#
+# The contents of this file are subject to the Erlang Public License,
+# Version 1.1, (the "License"); you may not use this file except in
+# compliance with the License. You should have received a copy of the
+# Erlang Public License along with this software. If not, it can be
+# retrieved online at http://www.erlang.org/.
+#
+# Software distributed under the License is distributed on an "AS IS"
+# basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+# the License for the specific language governing rights and limitations
+# under the License.
+#
+# %CopyrightEnd%
+#
+# Invoke with GNU make or clearmake -C gnu.
+#
+
+include $(ERL_TOP)/make/run_make.mk
diff --git a/erts/lib_src/Makefile.in b/erts/lib_src/Makefile.in
new file mode 100644
index 0000000000..ce5c846677
--- /dev/null
+++ b/erts/lib_src/Makefile.in
@@ -0,0 +1,615 @@
+#
+# %CopyrightBegin%
+#
+# Copyright Ericsson AB 2004-2009. All Rights Reserved.
+#
+# The contents of this file are subject to the Erlang Public License,
+# Version 1.1, (the "License"); you may not use this file except in
+# compliance with the License. You should have received a copy of the
+# Erlang Public License along with this software. If not, it can be
+# retrieved online at http://www.erlang.org/.
+#
+# Software distributed under the License is distributed on an "AS IS"
+# basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+# the License for the specific language governing rights and limitations
+# under the License.
+#
+# %CopyrightEnd%
+#
+
+include $(ERL_TOP)/make/target.mk
+include ../include/internal/$(TARGET)/ethread.mk
+
+OMIT_OMIT_FP=no
+
+
+CC=@CC@
+LD=@LD@
+AR=@AR@
+RANLIB=@RANLIB@
+RM=@RM@
+MKDIR=@MKDIR@
+INSTALL=@INSTALL@
+INSTALL_DIR=@INSTALL_DIR@
+INSTALL_DATA=@INSTALL_DATA@
+INSTALL_PROGRAM=@INSTALL_PROGRAM@
+
+ERLANG_OSTYPE=@ERLANG_OSTYPE@
+
+OMIT_FP=false
+CFLAGS=$(subst O2,O3, @CFLAGS@)
+
+ifeq ($(TYPE),debug)
+CFLAGS=@DEBUG_CFLAGS@ -DDEBUG
+TYPE_SUFFIX=.debug
+ifeq ($(USING_VC),yes)
+LD_FLAGS += -g
+endif
+PRE_LD=
+
+else
+
+ifeq ($(TYPE),purify)
+CFLAGS=@DEBUG_CFLAGS@ -DPURIFY
+TYPE_SUFFIX=.purify
+PRE_LD=purify $(PURIFY_BUILD_OPTIONS)
+else
+ifeq ($(TYPE),quantify)
+CFLAGS += -DQUANTIFY
+TYPE_SUFFIX=.quantify
+PRE_LD=quantify $(QUANTIFY_BUILD_OPTIONS)
+else
+ifeq ($(TYPE),purecov)
+CFLAGS=@DEBUG_CFLAGS@ -DPURECOV
+TYPE_SUFFIX=.purecov
+PRE_LD=purecov $(PURECOV_BUILD_OPTIONS)
+else
+ifeq ($(TYPE),gcov)
+CFLAGS=@DEBUG_CFLAGS@ -fprofile-arcs -ftest-coverage -O0
+TYPE_SUFFIX=.gcov
+PRE_LD=
+else
+ifeq ($(TYPE),valgrind)
+CFLAGS=@DEBUG_CFLAGS@ -DVALGRIND
+TYPE_SUFFIX=.valgrind
+PRE_LD=
+else
+ifeq ($(TYPE),gprof)
+CFLAGS += -DGPROF -pg
+TYPE_SUFFIX=.gprof
+PRE_LD=
+else
+ifeq ($(TYPE),lcnt)
+TYPE_SUFFIX = .lcnt
+CFLAGS += -DERTS_ENABLE_LOCK_COUNT
+OMIT_FP=true
+PRE_LD=
+else
+override TYPE=opt
+OMIT_FP=true
+TYPE_SUFFIX=
+PRE_LD=
+endif
+endif
+endif
+endif
+endif
+endif
+endif
+endif
+
+OPSYS=@OPSYS@
+sol2CFLAGS=
+linuxCFLAGS=
+darwinCFLAGS=-DDARWIN
+noopsysCFLAGS=
+OPSYSCFLAGS=$($(OPSYS)CFLAGS)
+ARCH=@ARCH@
+ultrasparcCFLAGS=-Wa,-xarch=v8plusa
+ARCHCFLAGS=$($(ARCH)CFLAGS)
+
+ifeq ($(OMIT_OMIT_FP),yes)
+OMIT_FP=false
+endif
+
+CREATE_DIRS=
+
+ifeq ($(CC)-$(OMIT_FP), gcc-true)
+CFLAGS += -fomit-frame-pointer
+endif
+
+CFLAGS += @WFLAGS@ @DEFS@ $(ARCHCFLAGS)
+
+ifeq ($(findstring -D_GNU_SOURCE,$(CFLAGS)),)
+THR_DEFS = $(ETHR_DEFS)
+else
+# Remove duplicate -D_GNU_SOURCE
+THR_DEFS = $(filter-out -D_GNU_SOURCE%, $(ETHR_DEFS))
+endif
+
+LIBS=@LIBS@
+
+TT_DIR=$(TARGET)/$(TYPE)
+
+ERTS_INCL=../include
+ERTS_INCL_INT=../include/internal
+
+INCLUDES=-I$(ERTS_INCL) -I$(ERTS_INCL)/$(TARGET) -I$(ERTS_INCL_INT) -I$(ERTS_INCL_INT)/$(TARGET)
+INCLUDES += -I../emulator/beam -I../emulator/sys/$(ERLANG_OSTYPE)
+
+USING_MINGW=@MIXED_CYGWIN_MINGW@
+USING_VC=@MIXED_CYGWIN_VC@
+
+ifeq ($(USING_VC),yes)
+LIB_SUFFIX=.lib
+LIB_PREFIX=
+else
+LIB_SUFFIX=.a
+LIB_PREFIX=lib
+endif
+
+OBJS_ROOT_DIR=obj
+OBJ_DIR=$(OBJS_ROOT_DIR)/$(TT_DIR)
+
+CREATE_DIRS += $(OBJ_DIR)
+
+ERTS_LIBS=
+
+ifeq ($(findstring -D_GNU_SOURCE,$(CFLAGS)),)
+THR_DEFS = $(ETHR_DEFS)
+else
+# Remove duplicate -D_GNU_SOURCE
+THR_DEFS = $(filter-out -D_GNU_SOURCE%, $(ETHR_DEFS))
+endif
+
+#
+# erts (public) library
+#
+
+ERTS_LIB_SRCS = common/erl_memory_trace_parser.c
+
+ERTS_LIB_DIR=../lib/$(TARGET)
+CREATE_DIRS += $(ERTS_LIB_DIR)
+
+ifeq ($(USING_VC),yes)
+# Windows obj dir
+MD_OBJ_DIR=$(OBJ_DIR)/MD
+MDd_OBJ_DIR=$(OBJ_DIR)/MDd
+MT_OBJ_DIR=$(OBJ_DIR)/MT
+MTd_OBJ_DIR=$(OBJ_DIR)/MTd
+
+CREATE_DIRS += $(MD_OBJ_DIR) \
+ $(MDd_OBJ_DIR) \
+ $(MT_OBJ_DIR) \
+ $(MTd_OBJ_DIR)
+
+ERTS_MD_LIB_OBJS=$(addprefix $(MD_OBJ_DIR)/,$(notdir $(ERTS_LIB_SRCS:.c=.o)))
+ERTS_MDd_LIB_OBJS=$(addprefix $(MDd_OBJ_DIR)/,$(notdir $(ERTS_LIB_SRCS:.c=.o)))
+ERTS_MT_LIB_OBJS=$(addprefix $(MT_OBJ_DIR)/,$(notdir $(ERTS_LIB_SRCS:.c=.o)))
+ERTS_MTd_LIB_OBJS=$(addprefix $(MTd_OBJ_DIR)/,$(notdir $(ERTS_LIB_SRCS:.c=.o)))
+
+else # --- Not windows ---
+
+# Reentrant obj dir
+ifneq ($(strip $(ETHR_LIB_NAME)),)
+r_OBJ_DIR = $(OBJ_DIR)/r
+CREATE_DIRS += $(r_OBJ_DIR)
+ERTS_r_LIB_OBJS=$(addprefix $(r_OBJ_DIR)/,$(notdir $(ERTS_LIB_SRCS:.c=.o)))
+endif
+ERTS_LIB_OBJS=$(addprefix $(OBJ_DIR)/,$(notdir $(ERTS_LIB_SRCS:.c=.o)))
+
+endif
+
+ifeq ($(USING_VC),yes)
+ERTS_MD_LIB=$(ERTS_LIB_DIR)/$(LIB_PREFIX)erts_MD$(TYPE_SUFFIX)$(LIB_SUFFIX)
+ERTS_MDd_LIB=$(ERTS_LIB_DIR)/$(LIB_PREFIX)erts_MDd$(TYPE_SUFFIX)$(LIB_SUFFIX)
+ERTS_MT_LIB=$(ERTS_LIB_DIR)/$(LIB_PREFIX)erts_MT$(TYPE_SUFFIX)$(LIB_SUFFIX)
+ERTS_MTd_LIB=$(ERTS_LIB_DIR)/$(LIB_PREFIX)erts_MTd$(TYPE_SUFFIX)$(LIB_SUFFIX)
+ERTS_LIBS += \
+ $(ERTS_MD_LIB) \
+ $(ERTS_MDd_LIB) \
+ $(ERTS_MT_LIB) \
+ $(ERTS_MTd_LIB)
+else
+
+ERTS_LIB = $(ERTS_LIB_DIR)/$(LIB_PREFIX)erts$(TYPE_SUFFIX)$(LIB_SUFFIX)
+ERTS_LIBS += $(ERTS_LIB)
+
+ifneq ($(strip $(ETHR_LIB_NAME)),)
+ERTS_r_LIB = $(ERTS_LIB_DIR)/$(LIB_PREFIX)erts_r$(TYPE_SUFFIX)$(LIB_SUFFIX)
+ERTS_LIBS += $(ERTS_r_LIB)
+endif
+
+endif
+
+#
+# erts_internal library
+#
+
+ERTS_LIB_INTERNAL_DIR=../lib/internal/$(TARGET)
+CREATE_DIRS += $(ERTS_LIB_INTERNAL_DIR)
+
+ERTS_INTERNAL_LIBS=
+
+ERTS_INTERNAL_LIB_SRCS = \
+ common/erl_printf_format.c \
+ common/erl_printf.c \
+ common/erl_misc_utils.c
+
+ERTS_INTERNAL_LIB_NAME=erts_internal$(TYPE_SUFFIX)
+
+ifeq ($(USING_VC),yes)
+ifeq ($(TYPE),debug)
+ERTS_INTERNAL_LIB_OBJS = \
+ $(addprefix $(MTd_OBJ_DIR)/,$(notdir $(ERTS_INTERNAL_LIB_SRCS:.c=.o)))
+else
+ERTS_INTERNAL_LIB_OBJS = \
+ $(addprefix $(MT_OBJ_DIR)/,$(notdir $(ERTS_INTERNAL_LIB_SRCS:.c=.o)))
+endif
+else
+ERTS_INTERNAL_LIB_OBJS = \
+ $(addprefix $(OBJ_DIR)/,$(notdir $(ERTS_INTERNAL_LIB_SRCS:.c=.o)))
+endif
+
+ERTS_INTERNAL_LIB=$(ERTS_LIB_INTERNAL_DIR)/$(LIB_PREFIX)$(ERTS_INTERNAL_LIB_NAME)$(LIB_SUFFIX)
+
+ERTS_INTERNAL_LIBS += $(ERTS_INTERNAL_LIB)
+
+ifneq ($(strip $(ETHR_LIB_NAME)),)
+ERTS_INTERNAL_r_LIB_NAME=erts_internal_r$(TYPE_SUFFIX)
+
+ifeq ($(USING_VC),yes)
+ifeq ($(TYPE),debug)
+ERTS_INTERNAL_r_LIB_OBJS = \
+ $(addprefix $(MDd_OBJ_DIR)/,$(notdir $(ERTS_INTERNAL_LIB_SRCS:.c=.o)))
+else
+ERTS_INTERNAL_r_LIB_OBJS = \
+ $(addprefix $(MD_OBJ_DIR)/,$(notdir $(ERTS_INTERNAL_LIB_SRCS:.c=.o)))
+endif
+else
+ERTS_INTERNAL_r_LIB_OBJS = \
+ $(addprefix $(r_OBJ_DIR)/,$(notdir $(ERTS_INTERNAL_LIB_SRCS:.c=.o)))
+endif
+
+ERTS_INTERNAL_r_LIB=$(ERTS_LIB_INTERNAL_DIR)/$(LIB_PREFIX)$(ERTS_INTERNAL_r_LIB_NAME)$(LIB_SUFFIX)
+
+ERTS_INTERNAL_LIBS += $(ERTS_INTERNAL_r_LIB)
+
+endif
+
+#
+# ethread library
+#
+ifneq ($(strip $(ETHR_LIB_NAME)),)
+ETHREAD_LIB_SRC=common/ethread.c
+ETHREAD_LIB_NAME=ethread$(TYPE_SUFFIX)
+
+ifeq ($(USING_VC),yes)
+ifeq ($(TYPE),debug)
+ETHREAD_LIB_OBJS = \
+ $(addprefix $(MDd_OBJ_DIR)/,$(notdir $(ETHREAD_LIB_SRC:.c=.o)))
+else
+ETHREAD_LIB_OBJS = \
+ $(addprefix $(MD_OBJ_DIR)/,$(notdir $(ETHREAD_LIB_SRC:.c=.o)))
+endif
+else
+ETHREAD_LIB_OBJS = \
+ $(addprefix $(r_OBJ_DIR)/,$(notdir $(ETHREAD_LIB_SRC:.c=.o)))
+endif
+
+ETHREAD_LIB=$(ERTS_LIB_INTERNAL_DIR)/$(LIB_PREFIX)$(ETHREAD_LIB_NAME)$(LIB_SUFFIX)
+
+else
+
+ETHREAD_LIB_SRC=
+ETHREAD_LIB_NAME=
+ETHREAD_LIB_OBJS=
+ETHREAD_LIB=
+
+endif
+
+#
+# Everything to build
+#
+all: $(CREATE_DIRS) $(ETHREAD_LIB) $(ERTS_LIBS) $(ERTS_INTERNAL_LIBS)
+ifeq ($(OMIT_OMIT_FP),yes)
+ @echo '* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *'
+ @echo '* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *'
+ @echo '* * * *'
+ @echo '* * NOTE: Omit frame pointer optimization has been omitted * *'
+ @echo '* * * *'
+ @echo '* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *'
+ @echo '* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *'
+endif
+#
+# The libs ...
+#
+ifeq ($(USING_VC),yes)
+AR_OUT=-out:
+AR_FLAGS=
+else
+AR_OUT=
+AR_FLAGS=rcv
+endif
+
+ifndef RANLIB
+RANLIB=true
+endif
+
+$(ETHREAD_LIB): $(ETHREAD_LIB_OBJS)
+ $(AR) $(AR_FLAGS) $(AR_OUT)$@ $(ETHREAD_LIB_OBJS)
+ $(RANLIB) $@
+
+$(ERTS_INTERNAL_LIB): $(ERTS_INTERNAL_LIB_OBJS)
+ $(AR) $(AR_FLAGS) $(AR_OUT)$@ $(ERTS_INTERNAL_LIB_OBJS)
+ $(RANLIB) $@
+
+$(ERTS_INTERNAL_r_LIB): $(ERTS_INTERNAL_r_LIB_OBJS)
+ $(AR) $(AR_FLAGS) $(AR_OUT)$@ $(ERTS_INTERNAL_r_LIB_OBJS)
+ $(RANLIB) $@
+
+$(ERTS_MD_LIB): $(ERTS_MD_LIB_OBJS)
+ $(AR) $(AR_FLAGS) $(AR_OUT)$@ $(ERTS_MD_LIB_OBJS)
+ $(RANLIB) $@
+
+$(ERTS_MDd_LIB): $(ERTS_MDd_LIB_OBJS)
+ $(AR) $(AR_FLAGS) $(AR_OUT)$@ $(ERTS_MDd_LIB_OBJS)
+ $(RANLIB) $@
+
+$(ERTS_MT_LIB): $(ERTS_MT_LIB_OBJS)
+ $(AR) $(AR_FLAGS) $(AR_OUT)$@ $(ERTS_MT_LIB_OBJS)
+ $(RANLIB) $@
+
+$(ERTS_MTd_LIB): $(ERTS_MTd_LIB_OBJS)
+ $(AR) $(AR_FLAGS) $(AR_OUT)$@ $(ERTS_MTd_LIB_OBJS)
+ $(RANLIB) $@
+
+$(ERTS_r_LIB): $(ERTS_r_LIB_OBJS)
+ $(AR) $(AR_FLAGS) $(AR_OUT)$@ $(ERTS_r_LIB_OBJS)
+ $(RANLIB) $@
+
+$(ERTS_LIB): $(ERTS_LIB_OBJS)
+ $(AR) $(AR_FLAGS) $(AR_OUT)$@ $(ERTS_LIB_OBJS)
+ $(RANLIB) $@
+
+#
+# Object files
+#
+
+$(r_OBJ_DIR)/%.o: common/%.c
+ $(CC) $(THR_DEFS) $(CFLAGS) $(INCLUDES) -c $< -o $@
+
+$(r_OBJ_DIR)/%.o: $(ERLANG_OSTYPE)/%.c
+ $(CC) $(THR_DEFS) $(CFLAGS) $(INCLUDES) -c $< -o $@
+
+$(OBJ_DIR)/%.o: common/%.c
+ $(CC) $(CFLAGS) $(INCLUDES) -c $< -o $@
+
+$(OBJ_DIR)/%.o: $(ERLANG_OSTYPE)/%.c
+ $(CC) $(CFLAGS) $(INCLUDES) -c $< -o $@
+
+# Win32 specific
+
+$(MD_OBJ_DIR)/%.o: common/%.c
+ $(CC) $(THR_DEFS) $(CFLAGS) -MD $(INCLUDES) -c $< -o $@
+
+$(MD_OBJ_DIR)/%.o: $(ERLANG_OSTYPE)/%.c
+ $(CC) $(THR_DEFS) $(CFLAGS) -MD $(INCLUDES) -c $< -o $@
+
+$(MDd_OBJ_DIR)/%.o: common/%.c
+ $(CC) $(THR_DEFS) $(CFLAGS) -MDd $(INCLUDES) -c $< -o $@
+
+$(MDd_OBJ_DIR)/%.o: $(ERLANG_OSTYPE)/%.c
+ $(CC) $(THR_DEFS) $(CFLAGS) -MDd $(INCLUDES) -c $< -o $@
+
+$(MT_OBJ_DIR)/%.o: common/%.c
+ $(CC) $(THR_DEFS) $(CFLAGS) -MT $(INCLUDES) -c $< -o $@
+
+$(MT_OBJ_DIR)/%.o: $(ERLANG_OSTYPE)/%.c
+ $(CC) $(THR_DEFS) $(CFLAGS) -MT $(INCLUDES) -c $< -o $@
+
+$(MTd_OBJ_DIR)/%.o: common/%.c
+ $(CC) $(THR_DEFS) $(CFLAGS) -MTd $(INCLUDES) -c $< -o $@
+
+$(MTd_OBJ_DIR)/%.o: $(ERLANG_OSTYPE)/%.c
+ $(CC) $(THR_DEFS) $(CFLAGS) -MTd $(INCLUDES) -c $< -o $@
+
+#
+# Create directories
+#
+
+$(CREATE_DIRS):
+ $(MKDIR) -p $@
+
+#
+# Install
+#
+
+include $(ERL_TOP)/make/otp_release_targets.mk
+include ../vsn.mk
+RELSYSDIR = $(RELEASE_PATH)/erts-$(VSN)
+
+RELEASE_INCLUDES= \
+ $(ERTS_INCL)/erl_memory_trace_parser.h \
+ $(ERTS_INCL)/$(TARGET)/erl_int_sizes_config.h \
+ $(ERTS_INCL)/erl_fixed_size_int_types.h
+RELEASE_LIBS=$(ERTS_LIBS)
+
+INTERNAL_RELEASE_INCLUDES= \
+ $(ERTS_INCL_INT)/README \
+ $(ERTS_INCL_INT)/ethread.h \
+ $(ERTS_INCL_INT)/$(TARGET)/ethread.mk \
+ $(ERTS_INCL_INT)/$(TARGET)/erts_internal.mk \
+ $(ERTS_INCL_INT)/$(TARGET)/ethread_header_config.h \
+ $(ERTS_INCL_INT)/erl_printf.h \
+ $(ERTS_INCL_INT)/erl_printf_format.h \
+ $(ERTS_INCL_INT)/erl_memory_trace_protocol.h \
+ $(ERTS_INCL_INT)/erl_misc_utils.h \
+ $(ERTS_INCL_INT)/erl_errno.h
+
+INTERNAL_X_RELEASE_INCLUDE_DIRS= i386 x86_64 ppc32 sparc32 sparc64 tile
+
+INTERNAL_RELEASE_LIBS= \
+ ../lib/internal/README \
+ $(ETHREAD_LIB) \
+ $(ERTS_INTERNAL_LIBS)
+
+release_spec: all
+ifneq ($(strip $(RELEASE_INCLUDES)),)
+ $(INSTALL_DIR) $(RELSYSDIR)/include
+ $(INSTALL_DIR) $(RELEASE_PATH)/usr/include
+ $(INSTALL_DATA) $(RELEASE_INCLUDES) $(RELSYSDIR)/include
+ $(INSTALL_DATA) $(RELEASE_INCLUDES) $(RELEASE_PATH)/usr/include
+endif
+ifneq ($(strip $(INTERNAL_RELEASE_INCLUDES)),)
+ $(INSTALL_DIR) $(RELSYSDIR)/include/internal
+ $(INSTALL_DATA) $(INTERNAL_RELEASE_INCLUDES) $(RELSYSDIR)/include/internal
+endif
+ifneq ($(strip $(INTERNAL_X_RELEASE_INCLUDE_DIRS)),)
+ for xdir in $(INTERNAL_X_RELEASE_INCLUDE_DIRS); do \
+ $(INSTALL_DIR) $(RELSYSDIR)/include/internal/$$xdir; \
+ $(INSTALL_DATA) $(ERTS_INCL_INT)/$$xdir/*.h \
+ $(RELSYSDIR)/include/internal/$$xdir; \
+ done
+endif
+ifneq ($(strip $(RELEASE_LIBS)),)
+ $(INSTALL_DIR) $(RELSYSDIR)/lib
+ $(INSTALL_DIR) $(RELEASE_PATH)/usr/lib
+ $(INSTALL_DATA) $(RELEASE_LIBS) $(RELSYSDIR)/lib
+ $(INSTALL_DATA) $(RELEASE_LIBS) $(RELEASE_PATH)/usr/lib
+endif
+ifneq ($(strip $(INTERNAL_RELEASE_LIBS)),)
+ $(INSTALL_DIR) $(RELSYSDIR)/lib/internal
+ $(INSTALL_DATA) $(INTERNAL_RELEASE_LIBS) $(RELSYSDIR)/lib/internal
+endif
+
+release_docs_spec:
+
+
+#
+# Cleanup
+#
+clean:
+ $(RM) -rf ../lib/internal/$(TARGET)/*
+ $(RM) -rf ../lib/$(TARGET)/*
+ $(RM) -rf obj/$(TARGET)/*
+ $(RM) -f $(TARGET)/depend.mk
+
+#
+# Make dependencies
+#
+
+ifeq ($(USING_VC),yes)
+# VC++ is used for compiling. We undefine __GNUC__ since, if __GNUC__ is
+# defined, other headers are included than when actually compiling, which
+# results in faulty dependencies.
+#DEP_CC=@EMU_CC@ -U__GNUC__
+DEP_CC=$(CC)
+else
+DEP_CC=$(CC)
+endif
+
+#SED_REPL_WIN_DRIVE=s|\([ ]\)\([A-Za-z]\):|\1/cygdrive/\2|g;s|^\([A-Za-z]\):|/cygdrive/\1|g
+SED_REPL_O=s|^\([^:]*\)\.o:|$$(OBJ_DIR)/\1.o:|g
+SED_REPL_r_O=s|^\([^:]*\)\.o:|$$(r_OBJ_DIR)/\1.o:|g
+SED_REPL_MD_O=s|^\([^:]*\)\.o:|$$(MD_OBJ_DIR)/\1.o:|g
+SED_REPL_MDd_O=s|^\([^:]*\)\.o:|$$(MDd_OBJ_DIR)/\1.o:|g
+SED_REPL_MT_O=s|^\([^:]*\)\.o:|$$(MT_OBJ_DIR)/\1.o:|g
+SED_REPL_MTd_O=s|^\([^:]*\)\.o:|$$(MTd_OBJ_DIR)/\1.o:|g
+SED_REPL_TT_DIR=s|$(TT_DIR)/|$$(TT_DIR)/|g
+SED_REPL_TARGET=s|$(TARGET)/|$$(TARGET)/|g
+
+ifeq ($(TARGET),win32)
+#SED_PREFIX=$(SED_REPL_WIN_DRIVE);
+SED_PREFIX=
+DEP_FLAGS=$(subst -O3,,$(subst -O2,,$(CFLAGS))) $(INCLUDES)
+else
+SED_PREFIX=
+DEP_FLAGS=$(CFLAGS) $(INCLUDES)
+endif
+
+SED_DEPEND=sed '$(SED_PREFIX)$(SED_REPL_O);$(SED_REPL_TT_DIR);$(SED_REPL_TARGET)'
+SED_r_DEPEND=sed '$(SED_PREFIX)$(SED_REPL_r_O);$(SED_REPL_TT_DIR);$(SED_REPL_TARGET)'
+SED_MD_DEPEND=sed '$(SED_PREFIX)$(SED_REPL_MD_O);$(SED_REPL_TT_DIR);$(SED_REPL_TARGET)'
+SED_MDd_DEPEND=sed '$(SED_PREFIX)$(SED_REPL_MDd_O);$(SED_REPL_TT_DIR);$(SED_REPL_TARGET)'
+SED_MT_DEPEND=sed '$(SED_PREFIX)$(SED_REPL_MT_O);$(SED_REPL_TT_DIR);$(SED_REPL_TARGET)'
+SED_MTd_DEPEND=sed '$(SED_PREFIX)$(SED_REPL_MTd_O);$(SED_REPL_TT_DIR);$(SED_REPL_TARGET)'
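# [Editor's note -- illustration, not part of the committed Makefile.in.]
# The SED_*_DEPEND filters rewrite the object targets emitted by
# "$(DEP_CC) -MM" so that they point into the per-variant object
# directories. A generated rule such as
#
#   erl_printf.o: common/erl_printf.c <prerequisites elided>
#
# becomes, after piping through $(SED_DEPEND),
#
#   $(OBJ_DIR)/erl_printf.o: common/erl_printf.c <prerequisites elided>
#
# while $(SED_r_DEPEND), $(SED_MD_DEPEND), etc. substitute the reentrant
# or Windows runtime-specific object directories instead.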
+
+DEPEND_MK=$(TARGET)/depend.mk
+
+depend:
+ @echo "Generating dependency file $(DEPEND_MK)..."
+ @echo "# Generated dependency rules" > $(DEPEND_MK);
+ @echo "# " >> $(DEPEND_MK);
+ifneq ($(strip $(ETHREAD_LIB_SRC)),)
+ @echo "# ethread lib objects..." >> $(DEPEND_MK);
+ifeq ($(USING_VC),yes)
+ $(DEP_CC) -MM $(THR_DEFS) $(DEP_FLAGS) $(ETHREAD_LIB_SRC) \
+ | $(SED_MD_DEPEND) >> $(DEPEND_MK)
+ $(DEP_CC) -MM $(THR_DEFS) $(DEP_FLAGS) $(ETHREAD_LIB_SRC) \
+ | $(SED_MDd_DEPEND) >> $(DEPEND_MK)
+else
+ $(DEP_CC) -MM $(THR_DEFS) $(DEP_FLAGS) $(ETHREAD_LIB_SRC) \
+ | $(SED_r_DEPEND) >> $(DEPEND_MK)
+endif
+endif
+ifneq ($(strip $(ERTS_INTERNAL_LIB_SRCS)),)
+ifneq ($(strip $(ETHREAD_LIB_SRC)),)
+ @echo "# erts_internal_r lib objects..." >> $(DEPEND_MK);
+ifeq ($(USING_VC),yes)
+ $(DEP_CC) -MM $(THR_DEFS) $(DEP_FLAGS) $(ERTS_INTERNAL_LIB_SRCS) \
+ | $(SED_MD_DEPEND) >> $(DEPEND_MK)
+ $(DEP_CC) -MM $(THR_DEFS) $(DEP_FLAGS) $(ERTS_INTERNAL_LIB_SRCS) \
+ | $(SED_MDd_DEPEND) >> $(DEPEND_MK)
+else
+ $(DEP_CC) -MM $(THR_DEFS) $(DEP_FLAGS) $(ERTS_INTERNAL_LIB_SRCS) \
+ | $(SED_r_DEPEND) >> $(DEPEND_MK)
+endif
+endif
+ @echo "# erts_internal lib objects..." >> $(DEPEND_MK);
+ifeq ($(USING_VC),yes)
+ $(DEP_CC) -MM $(DEP_FLAGS) $(ERTS_INTERNAL_LIB_SRCS) \
+ | $(SED_MD_DEPEND) >> $(DEPEND_MK)
+ $(DEP_CC) -MM $(DEP_FLAGS) $(ERTS_INTERNAL_LIB_SRCS) \
+ | $(SED_MDd_DEPEND) >> $(DEPEND_MK)
+else
+ $(DEP_CC) -MM $(DEP_FLAGS) $(ERTS_INTERNAL_LIB_SRCS) \
+ | $(SED_DEPEND) >> $(DEPEND_MK)
+endif
+endif
+ifneq ($(strip $(ERTS_LIB_SRCS)),)
+ifeq ($(USING_VC),yes)
+ @echo "# erts_MD lib objects..." >> $(DEPEND_MK);
+ $(DEP_CC) -MM $(THR_DEFS) $(DEP_FLAGS) $(ERTS_LIB_SRCS) \
+ | $(SED_MD_DEPEND) >> $(DEPEND_MK)
+ @echo "# erts_MDd lib objects..." >> $(DEPEND_MK);
+ $(DEP_CC) -MM $(THR_DEFS) $(DEP_FLAGS) $(ERTS_LIB_SRCS) \
+ | $(SED_MDd_DEPEND) >> $(DEPEND_MK)
+ @echo "# erts_MT lib objects..." >> $(DEPEND_MK);
+ $(DEP_CC) -MM $(THR_DEFS) $(DEP_FLAGS) $(ERTS_LIB_SRCS) \
+ | $(SED_MT_DEPEND) >> $(DEPEND_MK)
+ @echo "# erts_MTd lib objects..." >> $(DEPEND_MK);
+ $(DEP_CC) -MM $(THR_DEFS) $(DEP_FLAGS) $(ERTS_LIB_SRCS) \
+ | $(SED_MTd_DEPEND) >> $(DEPEND_MK)
+ @echo "# erts_internal_r lib objects..." >> $(DEPEND_MK);
+ $(DEP_CC) -MM $(THR_DEFS) $(DEP_FLAGS) $(ERTS_INTERNAL_LIB_SRCS) \
+ | $(SED_MD_DEPEND) >> $(DEPEND_MK)
+ @echo "# erts_internal_r.debug lib objects..." >> $(DEPEND_MK);
+ $(DEP_CC) -MM $(THR_DEFS) $(DEP_FLAGS) $(ERTS_INTERNAL_LIB_SRCS) \
+ | $(SED_MDd_DEPEND) >> $(DEPEND_MK)
+else
+ifneq ($(strip $(ETHREAD_LIB_SRC)),)
+ @echo "# erts_r lib objects..." >> $(DEPEND_MK);
+ $(DEP_CC) -MM $(THR_DEFS) $(DEP_FLAGS) $(ERTS_LIB_SRCS) \
+ | $(SED_r_DEPEND) >> $(DEPEND_MK)
+endif
+ @echo "# erts lib objects..." >> $(DEPEND_MK);
+ $(DEP_CC) -MM $(DEP_FLAGS) $(ERTS_LIB_SRCS) \
+ | $(SED_DEPEND) >> $(DEPEND_MK)
+endif
+endif
+ @echo "# EOF" >> $(DEPEND_MK);
+
+-include $(DEPEND_MK)
+
+# eof
diff --git a/erts/lib_src/common/erl_memory_trace_parser.c b/erts/lib_src/common/erl_memory_trace_parser.c
new file mode 100644
index 0000000000..54c3dfadec
--- /dev/null
+++ b/erts/lib_src/common/erl_memory_trace_parser.c
@@ -0,0 +1,1956 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2004-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+
+/*
+ * Description:
+ *
+ * Author: Rickard Green
+ */
+
+#include "erl_memory_trace_parser.h"
+#include "erl_memory_trace_protocol.h"
+#include <string.h> /* For memcpy */
+
+#ifdef DEBUG
+#include <assert.h>
+#define ASSERT assert
+#define PRINT_ERROR_ORIGIN 1
+#if PRINT_ERROR_ORIGIN
+#include <stdio.h>
+#endif
+#define PRINT_PARSED_OP 0
+#if PRINT_PARSED_OP
+#include <stdio.h>
+static void print_op(emtp_operation *op_p);
+#endif
+static void hexdump(void *start, void *end);
+#else
+#define PRINT_ERROR_ORIGIN 0
+#define PRINT_PARSED_OP 0
+#define ASSERT(B)
+#endif
+
+
+#if ERTS_MT_MAJOR_VSN != 2 || ERTS_MT_MINOR_VSN != 0
+#error trace version mismatch (expected version 2.0)
+/* Make sure that older versions are supported when implementing
+ support for newer versions! */
+#endif
+
+
+#if defined(__GNUC__)
+# define EMTP_CAN_INLINE 1
+# define EMTP_INLINE __inline__
+#elif defined(__WIN32__)
+# define EMTP_CAN_INLINE 1
+# define EMTP_INLINE __forceinline
+#else
+# define EMTP_CAN_INLINE 0
+# define EMTP_INLINE
+#endif
+
+
+#define UI8_SZ 1
+#define UI16_SZ 2
+#define UI32_SZ 4
+#define UI64_SZ 8
+
+#define MAX(X, Y) ((X) > (Y) ? (X) : (Y))
+#define MIN(X, Y) ((X) < (Y) ? (X) : (Y))
+
+#define DEFAULT_OVERFLOW_BUF_SZ 128
+
+#define UNKNOWN_BLOCK_TYPE_IX (-1)
+#define UNKNOWN_ALLOCATOR_IX (-1)
+
+#define INVALID_SIZE (((sgnd_int_32) 1) << 31)
+#define INVALID_RESULT ((int) INVALID_SIZE)
+
+typedef enum {
+ EMTP_PROGRESS_PARSE_HDR_VSN,
+ EMTP_PROGRESS_PARSE_HDR_PROLOG,
+ EMTP_PROGRESS_ALLOC_HDR_INFO,
+ EMTP_PROGRESS_PARSE_TAGGED_HDR,
+ EMTP_PROGRESS_PARSE_BODY,
+ EMTP_PROGRESS_ENDED
+} emtp_progress;
+
+struct emtp_state_ {
+
+ /* Trace version */
+ emtp_version version;
+
+ /* Flags */
+ usgnd_int_32 flags;
+
+ /* Progress */
+ emtp_progress progress;
+
+ /* Name, host, and pid as strings */
+ char nodename[256];
+ char hostname[256];
+ char pid[256];
+
+ /* Local time on the traced node when the node started */
+ struct {
+ usgnd_int_32 year;
+ usgnd_int_32 month;
+ usgnd_int_32 day;
+ usgnd_int_32 hour;
+ usgnd_int_32 minute;
+ usgnd_int_32 second;
+ usgnd_int_32 micro_second;
+ } start_time;
+
+ /* Function to parse body with */
+ int (*parse_body_func)(emtp_state *,
+ usgnd_int_8 **,
+ usgnd_int_8 *,
+ emtp_operation **,
+ emtp_operation *,
+ size_t);
+ /* Current time elapsed */
+ struct {
+ usgnd_int_32 secs;
+ usgnd_int_32 usecs;
+ } time;
+
+ /* */
+
+ int force_return;
+
+ /* Overflow buffer */
+ size_t overflow_size;
+ size_t overflow_buf_size;
+ usgnd_int_8 * overflow;
+ sgnd_int_32 fetch_size;
+ int known_need;
+
+ usgnd_int_16 segment_ix;
+ usgnd_int_16 max_allocator_ix;
+ emtp_allocator ** allocator;
+ usgnd_int_16 max_block_type_ix;
+ emtp_block_type ** block_type;
+
+ /* Memory allocation functions */
+ void * (*alloc)(size_t);
+ void * (*realloc)(void *, size_t);
+ void (*free)(void *);
+
+};
+
+static char unknown_allocator[] = "unknown_allocator";
+static char unknown_block_type[] = "unknown_block_type";
+
+const char *
+emtp_error_string(int res)
+{
+ switch (res) {
+ case EMTP_NO_TRACE_ERROR:
+ return "no trace error";
+ case EMTP_HEADER_TAG_IN_BODY_ERROR:
+ return "header tag in body error";
+ case EMTP_BODY_TAG_IN_HEADER_ERROR:
+ return "body tag in header error";
+ case EMTP_NOT_SUPPORTED_MTRACE_VERSION_ERROR:
+ return "not supported mtrace version error";
+ case EMTP_NOT_AN_ERL_MTRACE_ERROR:
+ return "not an erl mtrace error";
+ case EMTP_NO_MEMORY_ERROR:
+ return "no memory error";
+ case EMTP_BAD_OP_SIZE_ERROR:
+ return "bad op size error";
+ case EMTP_NO_OPERATIONS_ERROR:
+ return "no operations error";
+ case EMTP_NOT_SUPPORTED_64_BITS_TRACE_ERROR:
+ return "not supported 64 bits trace error";
+ case EMTP_PARSE_ERROR:
+ return "parse error";
+ case EMTP_UNKNOWN_TAG_ERROR:
+ return "unknown tag error";
+ case EMTP_END_OF_TRACE:
+ return "end of trace";
+ case EMTP_END_OF_TRACE_GARBAGE_FOLLOWS:
+ return "end of trace; garbage follows";
+ case EMTP_ALL_OPS_FILLED:
+ return "all operations filled";
+ case EMTP_NEED_MORE_TRACE:
+ return "need more trace";
+ case EMTP_HEADER_PARSED:
+ return "header parsed";
+ default:
+ return NULL;
+ }
+
+}
+
+int
+emtp_get_info(emtp_info *infop, size_t *info_szp, emtp_state *statep)
+{
+ if (!infop || !info_szp || *info_szp < sizeof(emtp_info))
+ return 0;
+
+ infop->version.parser.major = ERTS_MT_MAJOR_VSN;
+ infop->version.parser.minor = ERTS_MT_MINOR_VSN;
+
+ *info_szp = sizeof(emtp_version);
+
+ if (!statep || statep->version.major == 0)
+ return 1;
+
+ infop->version.trace.major = statep->version.major;
+ infop->version.trace.minor = statep->version.minor;
+
+ *info_szp = sizeof(emtp_versions);
+
+ if (statep->progress != EMTP_PROGRESS_PARSE_BODY
+ && statep->progress != EMTP_PROGRESS_ENDED)
+ return 1;
+
+ infop->bits = (statep->flags & ERTS_MT_64_BIT_FLAG
+ ? 64
+ : 32);
+
+ infop->nodename = statep->nodename;
+ infop->hostname = statep->hostname;
+ infop->pid = statep->pid;
+
+ infop->start_time.year = statep->start_time.year;
+ infop->start_time.month = statep->start_time.month;
+ infop->start_time.day = statep->start_time.day;
+ infop->start_time.hour = statep->start_time.hour;
+ infop->start_time.minute = statep->start_time.minute;
+ infop->start_time.second = statep->start_time.second;
+ infop->start_time.micro_second = statep->start_time.micro_second;
+
+ infop->have_carrier_info = statep->flags & ERTS_MT_CRR_INFO;
+ infop->have_segment_carrier_info = statep->flags & ERTS_MT_SEG_CRR_INFO;
+ infop->segment_ix = statep->segment_ix;
+ infop->max_allocator_ix = statep->max_allocator_ix;
+ infop->allocator = statep->allocator;
+ infop->max_block_type_ix = statep->max_block_type_ix;
+ infop->block_type = statep->block_type;
+
+ *info_szp = sizeof(emtp_info);
+
+ return 1;
+}
+
+emtp_state *
+emtp_state_new(void * (*alloc)(size_t),
+ void * (*realloc)(void *, size_t),
+ void (*free)(void *))
+{
+ emtp_state *statep;
+
+ if (!alloc || !realloc || !free)
+ return NULL;
+
+ statep = (emtp_state *) (*alloc)(sizeof(emtp_state));
+ if (!statep)
+ return NULL;
+
+ statep->version.major = 0;
+ statep->version.minor = 0;
+ statep->flags = 0;
+ statep->progress = EMTP_PROGRESS_PARSE_HDR_VSN;
+
+ statep->nodename[0] = '\0';
+ statep->hostname[0] = '\0';
+ statep->pid[0] = '\0';
+
+ statep->start_time.year = 0;
+ statep->start_time.month = 0;
+ statep->start_time.day = 0;
+ statep->start_time.hour = 0;
+ statep->start_time.minute = 0;
+ statep->start_time.second = 0;
+ statep->start_time.micro_second = 0;
+
+ statep->parse_body_func = NULL;
+ statep->time.secs = 0;
+ statep->time.usecs = 0;
+ statep->force_return = 0;
+ statep->overflow_size = 0;
+ statep->overflow_buf_size = DEFAULT_OVERFLOW_BUF_SZ;
+ statep->overflow =
+ (usgnd_int_8 *) (*alloc)(DEFAULT_OVERFLOW_BUF_SZ*sizeof(usgnd_int_8));
+ statep->fetch_size = 0;
+ statep->known_need = 0;
+ statep->segment_ix = 0;
+ statep->max_allocator_ix = 0;
+ statep->allocator = NULL;
+ statep->max_block_type_ix = 0;
+ statep->block_type = NULL;
+ statep->alloc = alloc;
+ statep->realloc = realloc;
+ statep->free = free;
+
+ return statep;
+}
+
+void
+emtp_state_destroy(emtp_state *statep)
+{
+ void (*freep)(void *);
+ int i;
+
+ if (!statep)
+ return;
+
+ freep = statep->free;
+
+ if (statep->overflow)
+ (*freep)((void *) statep->overflow);
+
+ if (statep->allocator) {
+ for (i = -1; i <= statep->max_allocator_ix; i++) {
+ if (statep->allocator[i]) {
+ if (statep->allocator[i]->name
+ && statep->allocator[i]->name != unknown_allocator)
+ (*freep)((void *) statep->allocator[i]->name);
+ if (statep->allocator[i]->carrier.provider)
+ (*freep)((void *) statep->allocator[i]->carrier.provider);
+ (*freep)((void *) statep->allocator[i]);
+ }
+ }
+ statep->allocator--;
+ (*freep)((void *) statep->allocator);
+ }
+
+ if (statep->block_type) {
+ for (i = -1; i <= statep->max_block_type_ix; i++) {
+ if (statep->block_type[i]) {
+ if (statep->block_type[i]->name
+ && statep->block_type[i]->name != unknown_block_type)
+ (*freep)((void *) statep->block_type[i]->name);
+ (*freep)((void *) statep->block_type[i]);
+ }
+ }
+ statep->block_type--;
+ (*freep)((void *) statep->block_type);
+ }
+
+ (*freep)((void *) statep);
+}
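/*
 * [Editor's note -- illustrative sketch, not part of the committed file.]
 * Typical call sequence for a trace reader, using the entry points
 * defined in this file (buffer handling and refill strategy are
 * assumptions, not taken from the original source):
 *
 *     emtp_state *state = emtp_state_new(malloc, realloc, free);
 *     ...
 *     res = emtp_parse(state, &tracep, &trace_len,
 *                      ops, sizeof(emtp_operation), &n_ops);
 *
 * emtp_parse() is called repeatedly with newly read trace bytes. It
 * returns EMTP_HEADER_PARSED once the trace header has been consumed,
 * EMTP_ALL_OPS_FILLED when the operation array is full,
 * EMTP_NEED_MORE_TRACE when the supplied bytes are exhausted (partial
 * entries are carried over in the state's overflow buffer), and finally
 * EMTP_END_OF_TRACE or an error code printable via emtp_error_string().
 * The state is released with emtp_state_destroy().
 */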
+
+/*
+ * The following macros are for use in emtp_parse(), parse_vX_body,
+ * and parse_header.
+ *
+ * Note that some of them depend on function local variable names
+ * and labels:
+ *
+ * Variables:
+ * * result -> the result to return
+ * * statep -> pointer to the state
+ *
+ * Labels:
+ * * restore_return -> restore then return result
+ */
+
+
+#define GET_UI8(UI, BP) ((UI) = *((BP)++))
+#define GET_UI16(UI, BP) \
+ do { \
+ (UI) = ((( (usgnd_int_16) (BP)[0]) << 8) \
+ | ((usgnd_int_16) (BP)[1])); \
+ (BP) += UI16_SZ; \
+} while(0)
+
+#define GET_UI32(UI, BP) \
+ do { \
+ (UI) = ((( (usgnd_int_32) (BP)[0]) << 24) \
+ | (((usgnd_int_32) (BP)[1]) << 16) \
+ | (((usgnd_int_32) (BP)[2]) << 8) \
+ | ( (usgnd_int_32) (BP)[3])); \
+ (BP) += UI32_SZ; \
+} while(0)
+
+#define GET_UI64(UI, BP) \
+ do { \
+ (UI) = ((( (usgnd_int_64) (BP)[0]) << 56) \
+ | (((usgnd_int_64) (BP)[1]) << 48) \
+ | (((usgnd_int_64) (BP)[2]) << 40) \
+ | (((usgnd_int_64) (BP)[3]) << 32) \
+ | (((usgnd_int_64) (BP)[4]) << 24) \
+ | (((usgnd_int_64) (BP)[5]) << 16) \
+ | (((usgnd_int_64) (BP)[6]) << 8) \
+ | ( (usgnd_int_64) (BP)[7])); \
+ (BP) += UI64_SZ; \
+} while(0)
+
+#define GET_VSZ_UI16(UI, BP, MSB) \
+do { \
+ usgnd_int_16 ui_ = 0; \
+ switch ((MSB)) { \
+ case 1: ui_ |= (usgnd_int_16) *((BP)++); ui_ <<= 8; \
+ case 0: ui_ |= (usgnd_int_16) *((BP)++); break; \
+ default: ERROR(EMTP_PARSE_ERROR); \
+ } \
+ (UI) = ui_; \
+} while (0)
+
+#define GET_VSZ_UI32(UI, BP, MSB) \
+do { \
+ usgnd_int_32 ui_ = 0; \
+ switch ((MSB)) { \
+ case 3: ui_ |= (usgnd_int_32) *((BP)++); ui_ <<= 8; \
+ case 2: ui_ |= (usgnd_int_32) *((BP)++); ui_ <<= 8; \
+ case 1: ui_ |= (usgnd_int_32) *((BP)++); ui_ <<= 8; \
+ case 0: ui_ |= (usgnd_int_32) *((BP)++); break; \
+ default: ERROR(EMTP_PARSE_ERROR); \
+ } \
+ (UI) = ui_; \
+} while (0)
+
+#define GET_VSZ_UI64(UI, BP, MSB) \
+do { \
+ usgnd_int_64 ui_ = 0; \
+ switch ((MSB)) { \
+ case 7: ui_ |= (usgnd_int_64) *((BP)++); ui_ <<= 8; \
+ case 6: ui_ |= (usgnd_int_64) *((BP)++); ui_ <<= 8; \
+ case 5: ui_ |= (usgnd_int_64) *((BP)++); ui_ <<= 8; \
+ case 4: ui_ |= (usgnd_int_64) *((BP)++); ui_ <<= 8; \
+ case 3: ui_ |= (usgnd_int_64) *((BP)++); ui_ <<= 8; \
+ case 2: ui_ |= (usgnd_int_64) *((BP)++); ui_ <<= 8; \
+ case 1: ui_ |= (usgnd_int_64) *((BP)++); ui_ <<= 8; \
+ case 0: ui_ |= (usgnd_int_64) *((BP)++); break; \
+ default: ERROR(EMTP_PARSE_ERROR); \
+ } \
+ (UI) = ui_; \
+} while (0)
+
+
+#if HAVE_INT_64
+#define GET_VSZ_UIMAX(UI, BP, MSB) \
+do { \
+ usgnd_int_64 ui64_; \
+ GET_VSZ_UI64(ui64_, (BP), (MSB)); \
+ (UI) = (usgnd_int_max) ui64_; \
+} while (0)
+#else
+#define GET_VSZ_UIMAX(UI, BP, MSB) \
+do { \
+ usgnd_int_32 ui32_; \
+ GET_VSZ_UI32(ui32_, (BP), (MSB)); \
+ (UI) = (usgnd_int_max) ui32_; \
+} while (0)
+#endif
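/*
 * [Editor's note -- illustration, not part of the committed file.]
 * The GET_VSZ_* macros decode variable-sized, big-endian integers: the
 * MSB field taken from the entry header gives the number of bytes that
 * follow *minus one*, so MSB = 0 means one byte and MSB = 3 means four
 * bytes. For example, with MSB = 1 and the next two trace bytes being
 * 0x01 0xF4, GET_VSZ_UI32 yields (0x01 << 8) | 0xF4 = 500 and advances
 * the byte pointer by two.
 */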
+
+
+
+#define INC_TIME(C_SECS, C_USECS, SECS, USECS) \
+do { \
+ if ((USECS) >= 1000000) \
+ ERROR(EMTP_PARSE_ERROR); \
+ (C_SECS) += (SECS); \
+ (C_USECS) += (USECS); \
+ if ((C_USECS) >= 1000000) { \
+ (C_USECS) -= 1000000; \
+ (C_SECS)++; \
+ } \
+} while (0)
+
+#if PRINT_ERROR_ORIGIN
+#include <stdio.h>
+#define ERROR(E) \
+do { \
+ result = (E); \
+ fprintf(stderr,"ERROR:%s:%d: result=%d\n",__FILE__,__LINE__,result);\
+ statep->force_return = 1; abort(); \
+ goto restore_return; \
+} while (0)
+#else
+#define ERROR(E) do { \
+ result = (E); \
+ statep->force_return = 1; \
+ goto restore_return; \
+} while (0)
+#endif
+
+#define NEED(NSZ, TSZ) \
+do { \
+ sgnd_int_32 need_ = (NSZ); \
+ if (need_ > (TSZ)) { \
+ statep->known_need = 1; \
+ statep->fetch_size = need_; \
+ result = EMTP_NEED_MORE_TRACE; \
+ goto restore_return; \
+ } \
+} while (0)
+
+#define NEED_AT_LEAST(NSZ, FSZ, TSZ) \
+do { \
+ sgnd_int_32 need_ = (NSZ); \
+ ASSERT(need_ <= (FSZ)); \
+ if (need_ > (TSZ)) { \
+ statep->known_need = 0; \
+ statep->fetch_size = (FSZ); \
+ result = EMTP_NEED_MORE_TRACE; \
+ goto restore_return; \
+ } \
+} while (0)
+
+
+#define SECS_PER_DAY (60*60*24)
+#define IS_LEAP_YEAR(X) (((X) % 4 == 0 && (X) % 100 != 0) || (X) % 400 == 0)
+
+static void
+set_start_time(emtp_state *state,
+ usgnd_int_32 giga_seconds,
+ usgnd_int_32 seconds,
+ usgnd_int_32 micro_seconds)
+{
+ /* Input is elapsed time since 1970-01-01 00:00.000000 (UTC) */
+
+ usgnd_int_32 year, days_of_this_year, days, secs, month;
+ usgnd_int_32 days_of_month[] = {0,31,28,31,30,31,30,31,31,30,31,30,31};
+
+ days = 1000000000 / SECS_PER_DAY;
+ secs = 1000000000 % SECS_PER_DAY;
+ days *= giga_seconds;
+ secs *= giga_seconds;
+ secs += seconds;
+ days += secs / SECS_PER_DAY;
+ secs %= SECS_PER_DAY;
+ days++;
+
+ year = 1969;
+ days_of_this_year = 0;
+ while (days > days_of_this_year) {
+ days -= days_of_this_year;
+ year++;
+ days_of_this_year = 365 + (IS_LEAP_YEAR(year) ? 1 : 0);
+ }
+
+ for (month = 1; month <= 12; month++) {
+ usgnd_int_32 days_of_this_month = days_of_month[month];
+ if (month == 2 && IS_LEAP_YEAR(year))
+ days_of_this_month++;
+ if (days <= days_of_this_month)
+ break;
+ days -= days_of_this_month;
+ }
+
+ state->start_time.year = year;
+ state->start_time.month = month;
+ state->start_time.day = days;
+ state->start_time.hour = secs / (60*60);
+ secs %= 60*60;
+ state->start_time.minute = secs / 60;
+ state->start_time.second = secs % 60;
+ state->start_time.micro_second = micro_seconds;
+}
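/*
 * [Editor's note -- worked example, not part of the committed file.]
 * With giga_seconds = 1, seconds = 100000000 and micro_seconds = 0
 * (i.e. 1,100,000,000 seconds after 1970-01-01 00:00:00 UTC), the code
 * above yields days = 12732 and secs = 41600, and therefore a start
 * time of 2004-11-09 11:33:20.000000 UTC.
 */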
+
+static int
+parse_v1_body(emtp_state *statep,
+ usgnd_int_8 **tracepp, usgnd_int_8 *trace_endp,
+ emtp_operation **op_pp, emtp_operation *op_endp, size_t op_size)
+{
+ /* "cache" some frequently used values */
+ register usgnd_int_8 *c_p = *tracepp;
+ register emtp_operation *op_p = *op_pp;
+ register usgnd_int_32 current_secs = statep->time.secs;
+ register usgnd_int_32 current_usecs = statep->time.usecs;
+
+ sgnd_int_32 trace_size = trace_endp - c_p;
+ usgnd_int_8 *tracep = c_p;
+ int result = 0;
+
+ usgnd_int_16 max_block_type = statep->max_block_type_ix;
+
+ while (trace_size >= UI16_SZ) {
+ usgnd_int_16 ehdr, tag;
+ unsigned time_inc_msb;
+
+ GET_UI16(ehdr, c_p);
+ tag = ehdr & ERTS_MT_TAG_EHDR_FLD_MSK;
+ switch (tag) {
+ case ERTS_MT_V1_ALLOC_TAG:
+
+ op_p->type = EMTP_ALLOC;
+
+ alloc_common: {
+ usgnd_int_16 block_type;
+ unsigned block_type_msb, new_ptr_msb, new_size_msb;
+
+ ehdr >>= ERTS_MT_TAG_EHDR_FLD_SZ;
+ block_type_msb = ehdr & ERTS_MT_UI16_MSB_EHDR_FLD_MSK;
+ ehdr >>= ERTS_MT_UI16_MSB_EHDR_FLD_SZ;
+ new_ptr_msb = ehdr & ERTS_MT_UI_MSB_EHDR_FLD_MSK;
+ ehdr >>= ERTS_MT_UI_MSB_EHDR_FLD_SZ;
+ new_size_msb = ehdr & ERTS_MT_UI_MSB_EHDR_FLD_MSK;
+ ehdr >>= ERTS_MT_UI_MSB_EHDR_FLD_SZ;
+ time_inc_msb = ehdr & ERTS_MT_UI32_MSB_EHDR_FLD_MSK;
+
+ NEED(UI16_SZ
+ + 4
+ + block_type_msb
+ + new_ptr_msb
+ + new_size_msb
+ + time_inc_msb,
+ trace_size);
+
+ GET_VSZ_UI16(block_type, c_p, block_type_msb);
+ if (block_type > max_block_type)
+ ERROR(EMTP_PARSE_ERROR);
+ op_p->u.block.type = (int) block_type;
+
+ GET_VSZ_UIMAX(op_p->u.block.new_ptr, c_p, new_ptr_msb);
+ GET_VSZ_UIMAX(op_p->u.block.new_size, c_p, new_size_msb);
+
+ op_p->u.block.prev_ptr = 0;
+ }
+
+ read_time_inc: {
+ usgnd_int_32 secs, usecs, time_inc;
+
+ GET_VSZ_UI32(time_inc, c_p, time_inc_msb);
+
+ secs = ((time_inc >> ERTS_MT_TIME_INC_SECS_SHIFT)
+ & ERTS_MT_TIME_INC_SECS_MASK);
+ usecs = ((time_inc >> ERTS_MT_TIME_INC_USECS_SHIFT)
+ & ERTS_MT_TIME_INC_USECS_MASK);
+
+ INC_TIME(current_secs, current_usecs, secs, usecs);
+
+ op_p->time.secs = current_secs;
+ op_p->time.usecs = current_usecs;
+
+#if PRINT_PARSED_OP
+ print_op(op_p);
+#endif
+
+ op_p = (emtp_operation *) (((char *) op_p) + op_size);
+ break;
+ }
+
+ case ERTS_MT_V1_REALLOC_NPB_TAG:
+ op_p->type = EMTP_REALLOC;
+ goto alloc_common;
+
+ case ERTS_MT_V1_REALLOC_MV_TAG: {
+ unsigned new_ptr_msb, prev_ptr_msb, new_size_msb;
+
+ op_p->type = EMTP_REALLOC;
+
+ ehdr >>= ERTS_MT_TAG_EHDR_FLD_SZ;
+ new_ptr_msb = ehdr & ERTS_MT_UI_MSB_EHDR_FLD_MSK;
+ ehdr >>= ERTS_MT_UI_MSB_EHDR_FLD_SZ;
+ prev_ptr_msb = ehdr & ERTS_MT_UI_MSB_EHDR_FLD_MSK;
+ ehdr >>= ERTS_MT_UI_MSB_EHDR_FLD_SZ;
+ new_size_msb = ehdr & ERTS_MT_UI_MSB_EHDR_FLD_MSK;
+ ehdr >>= ERTS_MT_UI_MSB_EHDR_FLD_SZ;
+ time_inc_msb = ehdr & ERTS_MT_UI32_MSB_EHDR_FLD_MSK;
+
+ NEED(UI16_SZ
+ + 4
+ + new_ptr_msb
+ + prev_ptr_msb
+ + new_size_msb
+ + time_inc_msb,
+ trace_size);
+
+ GET_VSZ_UIMAX(op_p->u.block.new_ptr, c_p, new_ptr_msb);
+ GET_VSZ_UIMAX(op_p->u.block.prev_ptr, c_p, prev_ptr_msb);
+ GET_VSZ_UIMAX(op_p->u.block.new_size, c_p, new_size_msb);
+
+ op_p->u.block.type = UNKNOWN_BLOCK_TYPE_IX;
+ goto read_time_inc;
+ }
+
+ case ERTS_MT_V1_REALLOC_NMV_TAG: {
+ usgnd_int_max new_ptr;
+ unsigned new_ptr_msb, new_size_msb;
+
+ op_p->type = EMTP_REALLOC;
+
+ ehdr >>= ERTS_MT_TAG_EHDR_FLD_SZ;
+ new_ptr_msb = ehdr & ERTS_MT_UI_MSB_EHDR_FLD_MSK;
+ ehdr >>= ERTS_MT_UI_MSB_EHDR_FLD_SZ;
+ new_size_msb = ehdr & ERTS_MT_UI_MSB_EHDR_FLD_MSK;
+ ehdr >>= ERTS_MT_UI_MSB_EHDR_FLD_SZ;
+ time_inc_msb = ehdr & ERTS_MT_UI32_MSB_EHDR_FLD_MSK;
+
+ NEED(UI16_SZ
+ + 3
+ + new_ptr_msb
+ + new_size_msb
+ + time_inc_msb,
+ trace_size);
+
+ GET_VSZ_UIMAX(new_ptr, c_p, new_ptr_msb);
+ GET_VSZ_UIMAX(op_p->u.block.new_size, c_p, new_size_msb);
+
+ op_p->u.block.new_ptr = new_ptr;
+ op_p->u.block.prev_ptr = new_ptr;
+
+ op_p->u.block.type = UNKNOWN_BLOCK_TYPE_IX;
+ goto read_time_inc;
+ }
+
+ case ERTS_MT_V1_FREE_TAG: {
+ unsigned prev_ptr_msb;
+
+ op_p->type = EMTP_FREE;
+
+ ehdr >>= ERTS_MT_TAG_EHDR_FLD_SZ;
+ prev_ptr_msb = ehdr & ERTS_MT_UI_MSB_EHDR_FLD_MSK;
+ ehdr >>= ERTS_MT_UI_MSB_EHDR_FLD_SZ;
+ time_inc_msb = ehdr & ERTS_MT_UI32_MSB_EHDR_FLD_MSK;
+
+ NEED(UI16_SZ
+ + 2
+ + prev_ptr_msb
+ + time_inc_msb,
+ trace_size);
+
+ GET_VSZ_UIMAX(op_p->u.block.prev_ptr, c_p, prev_ptr_msb);
+
+ op_p->u.block.new_ptr = 0;
+ op_p->u.block.new_size = 0;
+
+ op_p->u.block.type = UNKNOWN_BLOCK_TYPE_IX;
+ goto read_time_inc;
+ }
+
+ case ERTS_MT_V1_TIME_INC_TAG: {
+ unsigned secs_msb, usecs_msb;
+ usgnd_int_32 secs, usecs;
+
+ ehdr >>= ERTS_MT_TAG_EHDR_FLD_SZ;
+
+ secs_msb = ehdr & ERTS_MT_UI32_MSB_EHDR_FLD_MSK;
+ ehdr >>= ERTS_MT_UI32_MSB_EHDR_FLD_SZ;
+
+ usecs_msb = ehdr & ERTS_MT_UI32_MSB_EHDR_FLD_MSK;
+
+ NEED(UI16_SZ + 2 + secs_msb + usecs_msb, trace_size);
+
+ GET_VSZ_UI32(secs, c_p, secs_msb);
+ GET_VSZ_UI32(usecs, c_p, usecs_msb);
+
+ INC_TIME(current_secs, current_usecs, secs, usecs);
+
+ break;
+ }
+
+ case ERTS_MT_V1_STOP_TAG:
+
+ op_p->type = EMTP_STOP;
+
+ ehdr >>= ERTS_MT_TAG_EHDR_FLD_SZ;
+
+ time_inc_msb = ehdr & ERTS_MT_UI32_MSB_EHDR_FLD_MSK;
+
+ NEED(UI16_SZ + 1 + time_inc_msb, trace_size);
+
+ goto read_ending_time_inc;
+
+ case ERTS_MT_V1_EXIT_TAG: {
+ unsigned exit_status_msb;
+
+ op_p->type = EMTP_EXIT;
+
+ ehdr >>= ERTS_MT_TAG_EHDR_FLD_SZ;
+ exit_status_msb = ehdr & ERTS_MT_UI32_MSB_EHDR_FLD_MSK;
+ ehdr >>= ERTS_MT_UI32_MSB_EHDR_FLD_SZ;
+ time_inc_msb = ehdr & ERTS_MT_UI32_MSB_EHDR_FLD_MSK;
+
+ NEED(UI16_SZ + 2 + exit_status_msb + time_inc_msb,
+ trace_size);
+
+ GET_VSZ_UI32(op_p->u.exit_status, c_p, exit_status_msb);
+
+ read_ending_time_inc: {
+ usgnd_int_32 secs, usecs, time_inc;
+
+ GET_VSZ_UI32(time_inc, c_p, time_inc_msb);
+
+ secs = ((time_inc >> ERTS_MT_TIME_INC_SECS_SHIFT)
+ & ERTS_MT_TIME_INC_SECS_MASK);
+ usecs = ((time_inc >> ERTS_MT_TIME_INC_USECS_SHIFT)
+ & ERTS_MT_TIME_INC_USECS_MASK);
+
+ INC_TIME(current_secs, current_usecs, secs, usecs);
+
+ op_p->time.secs = current_secs;
+ op_p->time.usecs = current_usecs;
+
+#if PRINT_PARSED_OP
+ print_op(op_p);
+#endif
+
+ op_p = (emtp_operation *) (((char *) op_p) + op_size);
+ statep->force_return = 1;
+ statep->progress = EMTP_PROGRESS_ENDED;
+
+ tracep = c_p;
+ trace_size = trace_endp - tracep;
+ result = (trace_size
+ ? EMTP_END_OF_TRACE_GARBAGE_FOLLOWS
+ : EMTP_END_OF_TRACE);
+ goto restore_return;
+ }
+ }
+
+ case ERTS_MT_V1_ALLOCATOR_TAG:
+ case ERTS_MT_V1_BLOCK_TYPE_TAG:
+
+#ifdef DEBUG
+ hexdump(tracep, trace_endp);
+#endif
+ ERROR(EMTP_HEADER_TAG_IN_BODY_ERROR);
+
+ default:
+
+#ifdef DEBUG
+ hexdump(tracep, trace_endp);
+#endif
+ ERROR(EMTP_UNKNOWN_TAG_ERROR);
+ }
+
+ tracep = c_p;
+ trace_size = trace_endp - tracep;
+
+ if (op_p >= op_endp) {
+ statep->force_return = 1;
+ result = EMTP_ALL_OPS_FILLED;
+ goto restore_return;
+ }
+ }
+
+ statep->known_need = 0;
+ statep->fetch_size = ERTS_MT_MAX_V1_BODY_ENTRY_SIZE;
+
+ result = EMTP_NEED_MORE_TRACE;
+
+ restore_return:
+ *tracepp = tracep;
+ *op_pp = op_p;
+ statep->time.secs = current_secs;
+ statep->time.usecs = current_usecs;
+
+ return result;
+}
+
+#define GET_ALLOC_MSBS(EHDR, BT, NP, NS, TI) \
+do { \
+ (BT) = (EHDR) & ERTS_MT_UI16_MSB_EHDR_FLD_MSK; \
+ (EHDR) >>= ERTS_MT_UI16_MSB_EHDR_FLD_SZ; \
+ (NP) = (EHDR) & ERTS_MT_UI_MSB_EHDR_FLD_MSK; \
+ (EHDR) >>= ERTS_MT_UI_MSB_EHDR_FLD_SZ; \
+ (NS) = (EHDR) & ERTS_MT_UI_MSB_EHDR_FLD_MSK; \
+ (EHDR) >>= ERTS_MT_UI_MSB_EHDR_FLD_SZ; \
+ (TI) = (EHDR) & ERTS_MT_UI32_MSB_EHDR_FLD_MSK; \
+} while (0)
+
+
+static EMTP_INLINE int
+parse_v2_body(emtp_state *statep,
+ usgnd_int_8 **tracepp, usgnd_int_8 *trace_endp,
+ emtp_operation **op_pp, emtp_operation *op_endp, size_t op_size)
+{
+ /* "cache" some frequently used values */
+ register usgnd_int_8 *c_p = *tracepp;
+ register emtp_operation *op_p = *op_pp;
+ register usgnd_int_32 current_secs = statep->time.secs;
+ register usgnd_int_32 current_usecs = statep->time.usecs;
+
+ sgnd_int_32 trace_size = trace_endp - c_p;
+ usgnd_int_8 *tracep = c_p;
+ int result = 0;
+
+ while (trace_size >= UI8_SZ + UI16_SZ) {
+ usgnd_int_8 tag;
+ usgnd_int_16 ehdr;
+ unsigned time_inc_msb;
+
+ tag = *(c_p++);
+
+ GET_UI16(ehdr, c_p);
+
+ switch (tag) {
+
+ case ERTS_MT_CRR_ALLOC_BDY_TAG: {
+ usgnd_int_16 type;
+ unsigned carrier_bytes, carrier_type_msb, block_type_msb,
+ new_ptr_msb, new_size_msb;
+
+ op_p->type = EMTP_CARRIER_ALLOC;
+
+ carrier_type_msb = ehdr & ERTS_MT_UI16_MSB_EHDR_FLD_MSK;
+ ehdr >>= ERTS_MT_UI16_MSB_EHDR_FLD_SZ;
+
+ if (trace_size < ERTS_MT_MAX_CRR_ALLOC_SIZE)
+ NEED_AT_LEAST(UI8_SZ + UI16_SZ + 1 + carrier_type_msb,
+ ERTS_MT_MAX_CRR_ALLOC_SIZE,
+ trace_size);
+
+ GET_VSZ_UI16(type, c_p, carrier_type_msb);
+ op_p->u.block.carrier_type = (int) type;
+
+ carrier_bytes = carrier_type_msb + 1;
+ goto alloc_common;
+
+ case ERTS_MT_ALLOC_BDY_TAG:
+
+ op_p->type = EMTP_ALLOC;
+ carrier_bytes = 0;
+
+ alloc_common:
+ block_type_msb = ehdr & ERTS_MT_UI16_MSB_EHDR_FLD_MSK;
+ ehdr >>= ERTS_MT_UI16_MSB_EHDR_FLD_SZ;
+ new_ptr_msb = ehdr & ERTS_MT_UI_MSB_EHDR_FLD_MSK;
+ ehdr >>= ERTS_MT_UI_MSB_EHDR_FLD_SZ;
+ new_size_msb = ehdr & ERTS_MT_UI_MSB_EHDR_FLD_MSK;
+ ehdr >>= ERTS_MT_UI_MSB_EHDR_FLD_SZ;
+ time_inc_msb = ehdr & ERTS_MT_UI32_MSB_EHDR_FLD_MSK;
+
+ if (trace_size < ERTS_MT_MAX_CRR_ALLOC_SIZE)
+ NEED(UI8_SZ
+ + UI16_SZ
+ + 4
+ + carrier_bytes
+ + block_type_msb
+ + new_ptr_msb
+ + new_size_msb
+ + time_inc_msb,
+ trace_size);
+
+ GET_VSZ_UI16(type, c_p, block_type_msb);
+ op_p->u.block.type = (int) type;
+
+ GET_VSZ_UIMAX(op_p->u.block.new_ptr, c_p, new_ptr_msb);
+ GET_VSZ_UIMAX(op_p->u.block.new_size, c_p, new_size_msb);
+
+ op_p->u.block.prev_ptr = 0;
+ }
+
+ read_time_inc: {
+ usgnd_int_32 secs, usecs, time_inc;
+
+ GET_VSZ_UI32(time_inc, c_p, time_inc_msb);
+
+ secs = ((time_inc >> ERTS_MT_TIME_INC_SECS_SHIFT)
+ & ERTS_MT_TIME_INC_SECS_MASK);
+ usecs = ((time_inc >> ERTS_MT_TIME_INC_USECS_SHIFT)
+ & ERTS_MT_TIME_INC_USECS_MASK);
+
+ INC_TIME(current_secs, current_usecs, secs, usecs);
+
+ op_p->time.secs = current_secs;
+ op_p->time.usecs = current_usecs;
+
+#if PRINT_PARSED_OP
+ print_op(op_p);
+#endif
+
+ op_p = (emtp_operation *) (((char *) op_p) + op_size);
+ break;
+ }
+
+ case ERTS_MT_CRR_REALLOC_BDY_TAG: {
+ usgnd_int_16 type;
+ unsigned carrier_bytes, carrier_type_msb, block_type_msb,
+ new_ptr_msb, prev_ptr_msb, new_size_msb;
+
+ op_p->type = EMTP_CARRIER_REALLOC;
+
+ carrier_type_msb = ehdr & ERTS_MT_UI16_MSB_EHDR_FLD_MSK;
+ ehdr >>= ERTS_MT_UI16_MSB_EHDR_FLD_SZ;
+
+ if (trace_size < ERTS_MT_MAX_CRR_REALLOC_SIZE)
+ NEED_AT_LEAST(UI8_SZ + UI16_SZ + 1 + carrier_type_msb,
+ ERTS_MT_MAX_CRR_REALLOC_SIZE,
+ trace_size);
+
+ GET_VSZ_UI16(type, c_p, carrier_type_msb);
+ op_p->u.block.carrier_type = (int) type;
+
+ carrier_bytes = carrier_type_msb + 1;
+ goto realloc_common;
+
+ case ERTS_MT_REALLOC_BDY_TAG:
+
+ op_p->type = EMTP_REALLOC;
+ carrier_bytes = 0;
+
+ realloc_common:
+
+ block_type_msb = ehdr & ERTS_MT_UI16_MSB_EHDR_FLD_MSK;
+ ehdr >>= ERTS_MT_UI16_MSB_EHDR_FLD_SZ;
+ new_ptr_msb = ehdr & ERTS_MT_UI_MSB_EHDR_FLD_MSK;
+ ehdr >>= ERTS_MT_UI_MSB_EHDR_FLD_SZ;
+ prev_ptr_msb = ehdr & ERTS_MT_UI_MSB_EHDR_FLD_MSK;
+ ehdr >>= ERTS_MT_UI_MSB_EHDR_FLD_SZ;
+ new_size_msb = ehdr & ERTS_MT_UI_MSB_EHDR_FLD_MSK;
+ ehdr >>= ERTS_MT_UI_MSB_EHDR_FLD_SZ;
+ time_inc_msb = ehdr & ERTS_MT_UI32_MSB_EHDR_FLD_MSK;
+
+ if (trace_size < ERTS_MT_MAX_CRR_REALLOC_SIZE)
+ NEED(UI8_SZ
+ + UI16_SZ
+ + 5
+ + carrier_bytes
+ + block_type_msb
+ + new_ptr_msb
+ + prev_ptr_msb
+ + new_size_msb
+ + time_inc_msb,
+ trace_size);
+
+ GET_VSZ_UI16(op_p->u.block.type, c_p, block_type_msb);
+ GET_VSZ_UIMAX(op_p->u.block.new_ptr, c_p, new_ptr_msb);
+ GET_VSZ_UIMAX(op_p->u.block.prev_ptr, c_p, prev_ptr_msb);
+ GET_VSZ_UIMAX(op_p->u.block.new_size, c_p, new_size_msb);
+
+ goto read_time_inc;
+ }
+
+ case ERTS_MT_CRR_FREE_BDY_TAG: {
+ usgnd_int_16 type;
+ unsigned carrier_bytes, carrier_type_msb, block_type_msb,
+ prev_ptr_msb;
+
+ op_p->type = EMTP_CARRIER_FREE;
+
+ carrier_type_msb = ehdr & ERTS_MT_UI16_MSB_EHDR_FLD_MSK;
+ ehdr >>= ERTS_MT_UI16_MSB_EHDR_FLD_SZ;
+
+ if (trace_size < ERTS_MT_MAX_CRR_FREE_SIZE)
+ NEED_AT_LEAST(UI8_SZ + UI16_SZ + 1 + carrier_type_msb,
+ ERTS_MT_MAX_CRR_FREE_SIZE,
+ trace_size);
+
+ GET_VSZ_UI16(type, c_p, carrier_type_msb);
+ op_p->u.block.carrier_type = (int) type;
+
+ carrier_bytes = carrier_type_msb + 1;
+ goto free_common;
+
+ case ERTS_MT_FREE_BDY_TAG:
+
+ op_p->type = EMTP_FREE;
+ carrier_bytes = 0;
+
+ free_common:
+
+ block_type_msb = ehdr & ERTS_MT_UI16_MSB_EHDR_FLD_MSK;
+ ehdr >>= ERTS_MT_UI16_MSB_EHDR_FLD_SZ;
+ prev_ptr_msb = ehdr & ERTS_MT_UI_MSB_EHDR_FLD_MSK;
+ ehdr >>= ERTS_MT_UI_MSB_EHDR_FLD_SZ;
+ time_inc_msb = ehdr & ERTS_MT_UI32_MSB_EHDR_FLD_MSK;
+
+ if (trace_size < ERTS_MT_MAX_CRR_FREE_SIZE)
+ NEED(UI8_SZ
+ + UI16_SZ
+ + 3
+ + carrier_bytes
+ + block_type_msb
+ + prev_ptr_msb
+ + time_inc_msb,
+ trace_size);
+
+ GET_VSZ_UI16(op_p->u.block.type, c_p, block_type_msb);
+ GET_VSZ_UIMAX(op_p->u.block.prev_ptr, c_p, prev_ptr_msb);
+
+ op_p->u.block.new_ptr = 0;
+ op_p->u.block.new_size = 0;
+
+ goto read_time_inc;
+ }
+
+ case ERTS_MT_TIME_INC_BDY_TAG: {
+ unsigned secs_msb, usecs_msb;
+ usgnd_int_32 secs, usecs;
+
+ secs_msb = ehdr & ERTS_MT_UI32_MSB_EHDR_FLD_MSK;
+ ehdr >>= ERTS_MT_UI32_MSB_EHDR_FLD_SZ;
+ usecs_msb = ehdr & ERTS_MT_UI32_MSB_EHDR_FLD_MSK;
+
+ NEED(UI8_SZ + UI16_SZ + 2 + secs_msb + usecs_msb, trace_size);
+
+ GET_VSZ_UI32(secs, c_p, secs_msb);
+ GET_VSZ_UI32(usecs, c_p, usecs_msb);
+
+ INC_TIME(current_secs, current_usecs, secs, usecs);
+
+ break;
+ }
+
+ case ERTS_MT_STOP_BDY_TAG:
+
+ op_p->type = EMTP_STOP;
+
+ time_inc_msb = ehdr & ERTS_MT_UI32_MSB_EHDR_FLD_MSK;
+
+ NEED(UI16_SZ + 1 + time_inc_msb, trace_size);
+
+ goto read_ending_time_inc;
+
+ case ERTS_MT_EXIT_BDY_TAG: {
+ unsigned exit_status_msb;
+
+ op_p->type = EMTP_EXIT;
+
+ exit_status_msb = ehdr & ERTS_MT_UI32_MSB_EHDR_FLD_MSK;
+ ehdr >>= ERTS_MT_UI32_MSB_EHDR_FLD_SZ;
+ time_inc_msb = ehdr & ERTS_MT_UI32_MSB_EHDR_FLD_MSK;
+
+ NEED(UI16_SZ + 2 + exit_status_msb + time_inc_msb, trace_size);
+
+ GET_VSZ_UI32(op_p->u.exit_status, c_p, exit_status_msb);
+
+ read_ending_time_inc: {
+ usgnd_int_32 secs, usecs, time_inc;
+
+ GET_VSZ_UI32(time_inc, c_p, time_inc_msb);
+
+ secs = ((time_inc >> ERTS_MT_TIME_INC_SECS_SHIFT)
+ & ERTS_MT_TIME_INC_SECS_MASK);
+ usecs = ((time_inc >> ERTS_MT_TIME_INC_USECS_SHIFT)
+ & ERTS_MT_TIME_INC_USECS_MASK);
+
+ INC_TIME(current_secs, current_usecs, secs, usecs);
+
+ op_p->time.secs = current_secs;
+ op_p->time.usecs = current_usecs;
+
+#if PRINT_PARSED_OP
+ print_op(op_p);
+#endif
+
+ op_p = (emtp_operation *) (((char *) op_p) + op_size);
+ statep->force_return = 1;
+ statep->progress = EMTP_PROGRESS_ENDED;
+
+ tracep = c_p;
+ trace_size = trace_endp - tracep;
+ result = (trace_size
+ ? EMTP_END_OF_TRACE_GARBAGE_FOLLOWS
+ : EMTP_END_OF_TRACE);
+ goto restore_return;
+ }
+ }
+
+ case ERTS_MT_X_BDY_TAG: {
+ /* X for extension
+ * ehdr contains total size of entry
+ *
+ * Entry should at least consist of tag (1 byte),
+ * total size (2 bytes) and subtag (1 byte).
+ */
+ if (ehdr < UI8_SZ + UI16_SZ + UI8_SZ)
+ ERROR(EMTP_PARSE_ERROR);
+ NEED(ehdr, trace_size);
+ c_p = tracep + ehdr; /* No subtags known yet skip entry... */
+ break;
+ }
+
+ default:
+#ifdef DEBUG
+ hexdump(c_p-2, trace_endp);
+#endif
+ ERROR(EMTP_UNKNOWN_TAG_ERROR);
+ }
+
+ tracep = c_p;
+ trace_size = trace_endp - tracep;
+
+ if (op_p >= op_endp) {
+ statep->force_return = 1;
+ result = EMTP_ALL_OPS_FILLED;
+ goto restore_return;
+ }
+ }
+
+ statep->known_need = 0;
+ statep->fetch_size = ERTS_MT_MAX_BODY_ENTRY_SIZE;
+
+ result = EMTP_NEED_MORE_TRACE;
+
+ restore_return:
+ *tracepp = tracep;
+ *op_pp = op_p;
+ statep->time.secs = current_secs;
+ statep->time.usecs = current_usecs;
+
+ return result;
+}
+
+static void
+remove_unused_allocators(emtp_state *statep)
+{
+ emtp_allocator *allctr;
+ sgnd_int_32 i, j, k;
+ for (i = -1; i <= statep->max_block_type_ix; i++) {
+ if (statep->block_type[i]->valid) {
+ allctr = statep->allocator[statep->block_type[i]->allocator];
+ if (allctr->name != unknown_allocator)
+ allctr->valid = 1;
+ }
+ }
+ for (i = -1; i <= statep->max_allocator_ix; i++) {
+ allctr = statep->allocator[i];
+ if (allctr->valid && allctr->carrier.provider) {
+ for (j = 0; j < allctr->carrier.no_providers; j++) {
+ k = allctr->carrier.provider[j];
+ if (statep->allocator[k]->name != unknown_allocator)
+ statep->allocator[k]->valid = 1;
+ }
+ }
+ }
+ for (i = -1; i <= statep->max_allocator_ix; i++) {
+ allctr = statep->allocator[i];
+ if (!allctr->valid) {
+ allctr->flags = 0;
+ if (allctr->name != unknown_allocator) {
+ (*statep->free)((void *) allctr->name);
+ allctr->name = unknown_allocator;
+ }
+ allctr->carrier.no_providers = 0;
+ if (allctr->carrier.provider) {
+ (*statep->free)((void *) allctr->carrier.provider);
+ }
+ }
+ }
+}
+
+static int
+parse_header(emtp_state *statep,
+ usgnd_int_8 **tracepp, usgnd_int_8 *trace_endp)
+{
+ sgnd_int_32 trace_size;
+ usgnd_int_8 *tracep;
+ int i, result;
+
+ tracep = *tracepp;
+
+ switch (statep->progress) {
+ case EMTP_PROGRESS_PARSE_HDR_VSN: {
+ usgnd_int_32 start_word;
+
+ trace_size = trace_endp - tracep;
+ NEED(3*UI32_SZ, trace_size);
+
+ GET_UI32(start_word, tracep);
+ if (start_word != ERTS_MT_START_WORD)
+ return EMTP_NOT_AN_ERL_MTRACE_ERROR;
+
+ GET_UI32(statep->version.major, tracep);
+ GET_UI32(statep->version.minor, tracep);
+
+ statep->progress = EMTP_PROGRESS_PARSE_HDR_PROLOG;
+ }
+ case EMTP_PROGRESS_PARSE_HDR_PROLOG:
+
+ trace_size = trace_endp - tracep;
+
+ switch (statep->version.major) {
+ case 1: {
+ usgnd_int_32 hdr_sz;
+ NEED(2*UI32_SZ + 2*UI16_SZ, trace_size);
+
+ GET_UI32(statep->flags, tracep);
+ GET_UI32(hdr_sz, tracep); /* ignore this; may contain garbage! */
+ GET_UI16(statep->max_allocator_ix, tracep);
+ GET_UI16(statep->max_block_type_ix, tracep);
+
+ statep->parse_body_func = parse_v1_body;
+
+ break;
+ }
+ case 2: {
+ usgnd_int_32 giga_seconds;
+ usgnd_int_32 seconds;
+ usgnd_int_32 micro_seconds;
+ usgnd_int_8 len;
+ usgnd_int_8 *hdr_prolog_start;
+ usgnd_int_32 hdr_prolog_sz;
+ NEED(UI32_SZ, trace_size);
+ hdr_prolog_start = tracep;
+ GET_UI32(hdr_prolog_sz, tracep);
+ NEED(hdr_prolog_sz - UI32_SZ, trace_size);
+
+ GET_UI32(statep->flags, tracep);
+ GET_UI16(statep->segment_ix, tracep);
+ GET_UI16(statep->max_allocator_ix, tracep);
+ GET_UI16(statep->max_block_type_ix, tracep);
+
+ GET_UI32(giga_seconds, tracep);
+ GET_UI32(seconds, tracep);
+ GET_UI32(micro_seconds, tracep);
+
+ set_start_time(statep, giga_seconds, seconds, micro_seconds);
+
+ GET_UI8(len, tracep);
+ memcpy((void *) statep->nodename, (void *) tracep, (size_t) len);
+ statep->nodename[len] = '\0';
+ tracep += len;
+
+ GET_UI8(len, tracep);
+ memcpy((void *) statep->hostname, (void *) tracep, (size_t) len);
+ statep->hostname[len] = '\0';
+ tracep += len;
+
+ GET_UI8(len, tracep);
+ memcpy((void *) statep->pid, (void *) tracep, (size_t) len);
+ statep->pid[len] = '\0';
+ tracep += len;
+
+
+
+	    /* Skip things in the header prolog we don't know about */
+ tracep = hdr_prolog_start + hdr_prolog_sz;
+
+#if EMTP_CAN_INLINE
+ statep->parse_body_func = NULL;
+#else
+ statep->parse_body_func = parse_v2_body;
+#endif
+
+ break;
+ }
+ default:
+ return EMTP_NOT_SUPPORTED_MTRACE_VERSION_ERROR;
+ }
+
+ statep->progress = EMTP_PROGRESS_ALLOC_HDR_INFO;
+
+ case EMTP_PROGRESS_ALLOC_HDR_INFO:
+
+ /* Allocator info */
+ if (!statep->allocator) {
+ statep->allocator = (emtp_allocator **)
+ (*statep->alloc)((statep->max_allocator_ix + 2)
+ * sizeof(emtp_allocator *));
+ if (!statep->allocator)
+ ERROR(EMTP_NO_MEMORY_ERROR);
+ statep->allocator++;
+ for (i = -1; i <= statep->max_allocator_ix; i++)
+ statep->allocator[i] = NULL;
+ for (i = -1; i <= statep->max_allocator_ix; i++) {
+ statep->allocator[i] = (emtp_allocator *)
+ (*statep->alloc)(sizeof(emtp_allocator));
+ if (!statep->allocator[i])
+ ERROR(EMTP_NO_MEMORY_ERROR);
+ statep->allocator[i]->valid = 0;
+ statep->allocator[i]->flags = 0;
+ statep->allocator[i]->name = unknown_allocator;
+ statep->allocator[i]->carrier.no_providers = 0;
+ statep->allocator[i]->carrier.provider = NULL;
+ }
+
+ }
+
+ /* Block type info */
+ if (!statep->block_type) {
+ statep->block_type = (emtp_block_type **)
+ (*statep->alloc)((statep->max_block_type_ix + 2)
+ * sizeof(emtp_block_type *));
+ if (!statep->block_type)
+ ERROR(EMTP_NO_MEMORY_ERROR);
+ statep->block_type++;
+ for (i = -1; i <= statep->max_block_type_ix; i++)
+ statep->block_type[i] = NULL;
+ for (i = -1; i <= statep->max_block_type_ix; i++) {
+ statep->block_type[i] = (emtp_block_type *)
+ (*statep->alloc)(sizeof(emtp_block_type));
+ if (!statep->block_type[i])
+ ERROR(EMTP_NO_MEMORY_ERROR);
+ statep->block_type[i]->valid = 0;
+ statep->block_type[i]->flags = 0;
+ statep->block_type[i]->name = unknown_block_type;
+ statep->block_type[i]->allocator = UNKNOWN_ALLOCATOR_IX;
+ }
+
+ }
+
+ statep->progress = EMTP_PROGRESS_PARSE_TAGGED_HDR;
+
+ case EMTP_PROGRESS_PARSE_TAGGED_HDR: {
+ usgnd_int_8 *c_p = tracep;
+ trace_size = trace_endp - tracep;
+
+ switch (statep->version.major) {
+ case 1: /* Version 1.X ---------------------------------------------- */
+
+ while (trace_size >= UI16_SZ) {
+ size_t str_len;
+ usgnd_int_16 ehdr;
+
+ GET_UI16(ehdr, c_p);
+
+ switch (ehdr & ERTS_MT_TAG_EHDR_FLD_MSK) {
+ case ERTS_MT_V1_ALLOCATOR_TAG: {
+ usgnd_int_16 a_ix;
+
+ NEED_AT_LEAST(2*UI16_SZ + UI8_SZ,
+ ERTS_MT_MAX_HEADER_ENTRY_SIZE,
+ trace_size);
+
+ GET_UI16(a_ix, c_p);
+ if (a_ix > statep->max_allocator_ix)
+ ERROR(EMTP_PARSE_ERROR);
+
+ GET_UI8(str_len, c_p);
+
+ NEED(2*UI16_SZ + UI8_SZ + str_len, trace_size);
+
+ statep->allocator[a_ix]->name
+ = (char *) (*statep->alloc)(str_len + 1);
+ if (!statep->allocator[a_ix]->name)
+ ERROR(EMTP_NO_MEMORY_ERROR);
+
+ memcpy((void *) statep->allocator[a_ix]->name,
+ (void *) c_p,
+ str_len);
+ c_p += str_len;
+
+ statep->allocator[a_ix]->name[str_len] = '\0';
+ break;
+ }
+ case ERTS_MT_V1_BLOCK_TYPE_TAG: {
+ usgnd_int_16 bt_ix, a_ix;
+
+ NEED_AT_LEAST(2*UI16_SZ + UI8_SZ,
+ ERTS_MT_MAX_HEADER_ENTRY_SIZE,
+ trace_size);
+
+ GET_UI16(bt_ix, c_p);
+ if (bt_ix > statep->max_block_type_ix)
+ ERROR(EMTP_PARSE_ERROR);
+
+ GET_UI8(str_len, c_p);
+
+ NEED(2*UI16_SZ + UI8_SZ + str_len + UI16_SZ, trace_size);
+
+ statep->block_type[bt_ix]->name
+ = (char *) (*statep->alloc)(str_len + 1);
+
+ if (!statep->block_type[bt_ix]->name)
+ ERROR(EMTP_NO_MEMORY_ERROR);
+
+ memcpy((void *) statep->block_type[bt_ix]->name,
+ (void *) c_p,
+ str_len);
+ c_p += str_len;
+
+ statep->block_type[bt_ix]->name[str_len] = '\0';
+
+ GET_UI16(a_ix, c_p);
+
+ if (a_ix > statep->max_allocator_ix)
+ ERROR(EMTP_PARSE_ERROR);
+
+ statep->block_type[bt_ix]->allocator = (sgnd_int_32) a_ix;
+ statep->block_type[bt_ix]->valid = 1;
+ break;
+ }
+
+ case ERTS_MT_V1_ALLOC_TAG:
+ case ERTS_MT_V1_REALLOC_NPB_TAG:
+ case ERTS_MT_V1_REALLOC_MV_TAG:
+ case ERTS_MT_V1_REALLOC_NMV_TAG:
+ case ERTS_MT_V1_FREE_TAG:
+ case ERTS_MT_V1_TIME_INC_TAG:
+ case ERTS_MT_V1_STOP_TAG:
+ case ERTS_MT_V1_EXIT_TAG:
+ remove_unused_allocators(statep);
+ statep->progress = EMTP_PROGRESS_PARSE_BODY;
+ result = EMTP_HEADER_PARSED;
+ statep->force_return = 1;
+ goto restore_return;
+ default:
+ ERROR(EMTP_UNKNOWN_TAG_ERROR);
+ }
+
+ tracep = c_p;
+ trace_size = trace_endp - tracep;
+ }
+
+ statep->fetch_size = ERTS_MT_MAX_V1_HEADER_ENTRY_SIZE;
+ break;
+
+ case 2: /* Version 2.X ---------------------------------------------- */
+
+ while (trace_size >= UI8_SZ + UI16_SZ) {
+ usgnd_int_16 entry_sz;
+ size_t str_len;
+ usgnd_int_8 tag;
+
+ GET_UI8(tag, c_p);
+ GET_UI16(entry_sz, c_p);
+ NEED(entry_sz, trace_size);
+
+ switch (tag) {
+ case ERTS_MT_ALLOCATOR_HDR_TAG: {
+ usgnd_int_8 crr_prvds;
+ usgnd_int_16 a_ix, aflgs;
+
+ if (entry_sz
+ < UI8_SZ + 3*UI16_SZ + UI8_SZ + 0 + UI8_SZ)
+ ERROR(EMTP_PARSE_ERROR);
+
+ GET_UI16(aflgs, c_p);
+ GET_UI16(a_ix, c_p);
+ if (a_ix > statep->max_allocator_ix)
+ ERROR(EMTP_PARSE_ERROR);
+
+ if (aflgs & ERTS_MT_ALLCTR_USD_CRR_INFO)
+ statep->allocator[a_ix]->flags
+ |= EMTP_ALLOCATOR_FLAG_HAVE_USED_CARRIERS_INFO;
+
+ GET_UI8(str_len, c_p);
+
+ if (entry_sz
+ < UI8_SZ + 3*UI16_SZ + UI8_SZ + str_len + UI8_SZ)
+ ERROR(EMTP_PARSE_ERROR);
+
+ statep->allocator[a_ix]->name
+ = (char *) (*statep->alloc)(str_len + 1);
+ if (!statep->allocator[a_ix]->name)
+ ERROR(EMTP_NO_MEMORY_ERROR);
+
+ memcpy((void *) statep->allocator[a_ix]->name,
+ (void *) c_p,
+ str_len);
+ c_p += str_len;
+
+ statep->allocator[a_ix]->name[str_len] = '\0';
+
+ GET_UI8(crr_prvds, c_p);
+ if (entry_sz < (UI8_SZ
+ + 3*UI16_SZ
+ + UI8_SZ
+ + str_len
+ + UI8_SZ
+ + crr_prvds*UI16_SZ))
+ ERROR(EMTP_PARSE_ERROR);
+ statep->allocator[a_ix]->carrier.no_providers
+ = (usgnd_int_16) crr_prvds;
+ statep->allocator[a_ix]->carrier.provider = (usgnd_int_16 *)
+ (*statep->alloc)(crr_prvds*sizeof(usgnd_int_16));
+ if (!statep->allocator[a_ix]->carrier.provider)
+ ERROR(EMTP_NO_MEMORY_ERROR);
+ for (i = 0; i < crr_prvds; i++) {
+ usgnd_int_16 cp_ix;
+ GET_UI16(cp_ix, c_p);
+ if (cp_ix > statep->max_allocator_ix)
+ ERROR(EMTP_PARSE_ERROR);
+ statep->allocator[a_ix]->carrier.provider[i] = cp_ix;
+ }
+
+ break;
+ }
+
+ case ERTS_MT_BLOCK_TYPE_HDR_TAG: {
+ usgnd_int_16 bt_ix, a_ix, btflgs;
+
+ if (entry_sz
+ < UI8_SZ + 3*UI16_SZ + UI8_SZ + 0 + UI16_SZ)
+ ERROR(EMTP_PARSE_ERROR);
+
+ GET_UI16(btflgs, c_p);
+ GET_UI16(bt_ix, c_p);
+ if (bt_ix > statep->max_block_type_ix)
+ ERROR(EMTP_PARSE_ERROR);
+
+ GET_UI8(str_len, c_p);
+
+ if (entry_sz
+ < UI8_SZ + 3*UI16_SZ + UI8_SZ + str_len + UI16_SZ)
+ ERROR(EMTP_PARSE_ERROR);
+
+ statep->block_type[bt_ix]->name
+ = (char *) (*statep->alloc)(str_len + 1);
+
+ if (!statep->block_type[bt_ix]->name)
+ ERROR(EMTP_NO_MEMORY_ERROR);
+
+ memcpy((void *) statep->block_type[bt_ix]->name,
+ (void *) c_p,
+ str_len);
+ c_p += str_len;
+
+ statep->block_type[bt_ix]->name[str_len] = '\0';
+
+ GET_UI16(a_ix, c_p);
+
+ if (a_ix > statep->max_allocator_ix)
+ ERROR(EMTP_PARSE_ERROR);
+
+ statep->block_type[bt_ix]->allocator = (sgnd_int_32) a_ix;
+ statep->block_type[bt_ix]->valid = 1;
+ break;
+ }
+
+ case ERTS_MT_END_OF_HDR_TAG:
+ tracep = tracep + ((size_t) entry_sz);
+ remove_unused_allocators(statep);
+ statep->progress = EMTP_PROGRESS_PARSE_BODY;
+ result = EMTP_HEADER_PARSED;
+ statep->force_return = 1;
+ goto restore_return;
+
+ default:
+ /* Skip tags that we do not understand. */
+ break;
+ }
+
+ tracep = tracep + ((size_t) entry_sz);
+ ASSERT(c_p <= tracep);
+ c_p = tracep;
+ trace_size = trace_endp - tracep;
+ }
+
+ statep->fetch_size = UI8_SZ + UI16_SZ;
+ break;
+ default: /* Not supported version --------------------------------- */
+ ASSERT(0);
+ }
+
+ break;
+ }
+ default:
+ ASSERT(0);
+ }
+
+ statep->known_need = 0;
+ result = EMTP_NEED_MORE_TRACE;
+
+ restore_return:
+
+ *tracepp = tracep;
+
+ return result;
+
+}
+
+
+int
+emtp_parse(emtp_state *statep,
+ usgnd_int_8 **tracepp, size_t *trace_lenp,
+ emtp_operation *op_start, size_t op_size, size_t *op_lenp)
+{
+ int result, have_all_in_overflow;
+ usgnd_int_8 *tracep, *trace_endp;
+ emtp_operation *op_p, *op_endp;
+
+
+ have_all_in_overflow = 0;
+
+ op_p = op_start;
+
+ if (!statep)
+ return EMTP_NO_MEMORY_ERROR;
+
+ if (!tracepp || !trace_lenp)
+ return EMTP_NO_TRACE_ERROR;
+
+ if (*trace_lenp <= 0) {
+ if (op_lenp)
+ *op_lenp = 0;
+ return EMTP_NEED_MORE_TRACE;
+ }
+
+ statep->force_return = 0;
+
+    if (statep->overflow_size) { /* Overflow from previous parse */
+ sgnd_int_32 tsz;
+ sgnd_int_32 sz;
+
+ fetch_for_overflow:
+ sz = statep->fetch_size - statep->overflow_size;
+ ASSERT(sz > 0);
+
+ if (*trace_lenp <= sz) {
+ have_all_in_overflow = 1;
+ sz = *trace_lenp;
+ }
+
+ if (sz > statep->overflow_buf_size) {
+ size_t buf_sz = statep->overflow_size + sz;
+ void *buf = (*statep->realloc)((void *) statep->overflow, buf_sz);
+ if (!buf)
+ return EMTP_NO_MEMORY_ERROR;
+ statep->overflow_buf_size = buf_sz;
+ statep->overflow = (usgnd_int_8 *) buf;
+ }
+
+ memcpy((void *) (statep->overflow + statep->overflow_size),
+ (void *) *tracepp,
+ sz);
+
+ tsz = statep->overflow_size + sz;
+
+ tracep = statep->overflow;
+ trace_endp = statep->overflow + tsz;
+
+ if (tsz < statep->fetch_size && statep->known_need) {
+ ASSERT(have_all_in_overflow);
+ statep->overflow_size = tsz;
+ op_endp = NULL;
+ result = EMTP_NEED_MORE_TRACE;
+ goto restore_return;
+ }
+ }
+ else {
+ tracep = *tracepp;
+ trace_endp = tracep + *trace_lenp;
+ }
+
+ if (statep->progress == EMTP_PROGRESS_PARSE_BODY) {
+
+#if !HAVE_INT_64
+ if (statep->flags & ERTS_MT_64_BIT_FLAG)
+ return EMTP_NOT_SUPPORTED_64_BITS_TRACE_ERROR;
+#endif
+
+ if (op_size < sizeof(emtp_operation))
+ return EMTP_BAD_OP_SIZE_ERROR;
+ if (!op_start || !op_lenp || *op_lenp < 1)
+ return EMTP_NO_OPERATIONS_ERROR;
+ op_endp = (emtp_operation *) (((char *) op_start) + (*op_lenp)*op_size);
+
+ restart_parse_body:
+#if EMTP_CAN_INLINE
+ if (statep->parse_body_func)
+#endif
+ result = (*statep->parse_body_func)(statep,
+ &tracep, trace_endp,
+ &op_p, op_endp, op_size);
+#if EMTP_CAN_INLINE
+ else
+ result = parse_v2_body(statep,
+ &tracep, trace_endp,
+ &op_p, op_endp, op_size);
+#endif
+ }
+ else {
+ restart_parse_header:
+ op_endp = NULL;
+ if (statep->progress == EMTP_PROGRESS_ENDED) {
+ result = EMTP_END_OF_TRACE;
+ goto restore_return;
+ }
+ result = parse_header(statep, &tracep, trace_endp);
+ }
+
+ /* Check overflow */
+ if (statep->overflow_size) {
+ if (tracep == statep->overflow) {
+ /* Nothing parsed, i.e. less new input than 1 entry :( */
+ if (!have_all_in_overflow)
+ goto fetch_for_overflow;
+ statep->overflow_size = trace_endp - tracep;
+ trace_endp = tracep = *tracepp + *trace_lenp;
+ }
+ else {
+ size_t sz = tracep - (statep->overflow + statep->overflow_size);
+
+ ASSERT(sz > 0);
+
+ statep->overflow_size = 0;
+
+ tracep = *tracepp + sz;
+ trace_endp = *tracepp + *trace_lenp;
+ ASSERT(trace_endp >= tracep);
+ if (!statep->force_return && (trace_endp - tracep)) {
+ if (statep->progress == EMTP_PROGRESS_PARSE_BODY)
+ goto restart_parse_body;
+ else
+ goto restart_parse_header;
+ }
+ /* else: got it all in the overflow buffer */
+ }
+ }
+ else {
+ size_t sz = trace_endp - tracep;
+ if (!statep->force_return && sz) {
+ if (sz >= statep->fetch_size) {
+ ASSERT(0);
+ ERROR(EMTP_PARSE_ERROR);
+ }
+ if (sz > statep->overflow_buf_size) {
+ (*statep->free)((void *) statep->overflow);
+ statep->overflow = (usgnd_int_8 *) (*statep->alloc)(sz);
+ if (!statep->overflow) {
+ statep->overflow_buf_size = 0;
+ return EMTP_NO_MEMORY_ERROR;
+ }
+ statep->overflow_buf_size = sz;
+ }
+ memcpy((void *) statep->overflow, tracep, sz);
+ statep->overflow_size = sz;
+ ASSERT(tracep + sz == trace_endp);
+ tracep = trace_endp;
+ }
+ }
+
+ restore_return:
+ ASSERT(trace_endp >= tracep);
+
+ *tracepp = tracep;
+ *trace_lenp = trace_endp - tracep;
+
+ if (op_lenp && op_size > 0)
+ *op_lenp = (int) (((char *) op_p) - ((char *) op_start))/op_size;
+
+ return result;
+}
+
+#ifdef DEBUG
+static void
+hexdump(void *start, void *end)
+{
+ unsigned char *p = (unsigned char *) start;
+
+ fprintf(stderr, "hexdump: ");
+ while ((void *) p < end) {
+	fprintf(stderr, "%02x", (unsigned) *p);
+ p++;
+ }
+ fprintf(stderr, "\n");
+}
+
+#if PRINT_PARSED_OP
+static void
+print_op(emtp_operation *op_p)
+{
+ switch (op_p->type) {
+ case EMTP_ALLOC:
+ fprintf(stderr,
+ "alloc: "
+ "type=%" USGND_INT_16_FSTR ", "
+ "ptr=%" USGND_INT_MAX_FSTR ", "
+ "sz=%" USGND_INT_MAX_FSTR ", "
+ "(secs=%" USGND_INT_32_FSTR ", usecs=%" USGND_INT_32_FSTR ")"
+ "\n",
+ op_p->u.block.type,
+ op_p->u.block.new_ptr,
+ op_p->u.block.new_size,
+ op_p->time.secs,
+ op_p->time.usecs);
+ break;
+ case EMTP_REALLOC:
+ fprintf(stderr,
+ "realloc: "
+ "type=%" USGND_INT_16_FSTR ", "
+ "ptr=%" USGND_INT_MAX_FSTR ", "
+ "prev_ptr=%" USGND_INT_MAX_FSTR ", "
+ "sz=%" USGND_INT_MAX_FSTR ", "
+ "(secs=%" USGND_INT_32_FSTR ", usecs=%" USGND_INT_32_FSTR ")"
+ "\n",
+ op_p->u.block.type,
+ op_p->u.block.new_ptr,
+ op_p->u.block.prev_ptr,
+ op_p->u.block.new_size,
+ op_p->time.secs,
+ op_p->time.usecs);
+ break;
+ case EMTP_FREE:
+ fprintf(stderr,
+ "free: "
+ "type=%" USGND_INT_16_FSTR ", "
+ "ptr=%" USGND_INT_MAX_FSTR ", "
+ "(secs=%" USGND_INT_32_FSTR ", usecs=%" USGND_INT_32_FSTR ")"
+ "\n",
+ op_p->u.block.type,
+ op_p->u.block.prev_ptr,
+ op_p->time.secs,
+ op_p->time.usecs);
+ break;
+ case EMTP_CARRIER_ALLOC:
+ fprintf(stderr,
+ "carrier_alloc: "
+ "type=%" USGND_INT_16_FSTR ", "
+ "carrier_type=%" USGND_INT_16_FSTR ", "
+ "ptr=%" USGND_INT_MAX_FSTR ", "
+ "sz=%" USGND_INT_MAX_FSTR ", "
+ "(secs=%" USGND_INT_32_FSTR ", usecs=%" USGND_INT_32_FSTR ")"
+ "\n",
+ op_p->u.block.type,
+ op_p->u.block.carrier_type,
+ op_p->u.block.new_ptr,
+ op_p->u.block.new_size,
+ op_p->time.secs,
+ op_p->time.usecs);
+ break;
+ case EMTP_CARRIER_REALLOC:
+ fprintf(stderr,
+ "carrier_realloc: "
+ "type=%" USGND_INT_16_FSTR ", "
+ "carrier_type=%" USGND_INT_16_FSTR ", "
+ "ptr=%" USGND_INT_MAX_FSTR ", "
+ "prev_ptr=%" USGND_INT_MAX_FSTR ", "
+ "sz=%" USGND_INT_MAX_FSTR ", "
+ "(secs=%" USGND_INT_32_FSTR ", usecs=%" USGND_INT_32_FSTR ")"
+ "\n",
+ op_p->u.block.type,
+ op_p->u.block.carrier_type,
+ op_p->u.block.new_ptr,
+ op_p->u.block.prev_ptr,
+ op_p->u.block.new_size,
+ op_p->time.secs,
+ op_p->time.usecs);
+ break;
+ case EMTP_CARRIER_FREE:
+ fprintf(stderr,
+ "carrier_free: "
+ "type=%" USGND_INT_16_FSTR ", "
+ "carrier_type=%" USGND_INT_16_FSTR ", "
+ "ptr=%" USGND_INT_MAX_FSTR ", "
+ "(secs=%" USGND_INT_32_FSTR ", usecs=%" USGND_INT_32_FSTR ")"
+ "\n",
+ op_p->u.block.type,
+ op_p->u.block.carrier_type,
+ op_p->u.block.prev_ptr,
+ op_p->time.secs,
+ op_p->time.usecs);
+ break;
+ case EMTP_STOP:
+ fprintf(stderr,
+ "stop: "
+ "(secs=%" USGND_INT_32_FSTR ", usecs=%" USGND_INT_32_FSTR ")"
+ "\n",
+ op_p->time.secs,
+ op_p->time.usecs);
+ break;
+ case EMTP_EXIT:
+ fprintf(stderr,
+ "exit: "
+ "status=%" USGND_INT_32_FSTR ", "
+ "(secs=%" USGND_INT_32_FSTR ", usecs=%" USGND_INT_32_FSTR ")"
+ "\n",
+ op_p->u.exit_status,
+ op_p->time.secs,
+ op_p->time.usecs);
+ break;
+ default:
+ fprintf(stderr, "Unknown op type: %d\n", op_p->type);
+ break;
+ }
+}
+
+#endif
+#endif
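The emtp_parse() function above is a resumable, chunk-oriented parser: the caller repeatedly hands it an arbitrary slice of the trace stream, it returns EMTP_HEADER_PARSED once the header is complete, EMTP_NEED_MORE_TRACE when the slice has been consumed, and EMTP_END_OF_TRACE at the end, filling the supplied emtp_operation array as it goes. A minimal driver loop is sketched below; the emtp_state is assumed to come from the constructor declared in the parser's public header (not part of this diff), and the handling of the remaining result codes is an assumption, so read it as an illustration of the calling convention rather than documented usage.

    /* Illustrative sketch only: emtp_state construction and the full set
     * of result codes live in the parser's public header, which is not
     * part of this diff. */
    #include <stdio.h>

    static int drive_parser(emtp_state *state, FILE *trace_file)
    {
        usgnd_int_8 chunk[4096];
        emtp_operation ops[64];
        for (;;) {
            size_t left = fread(chunk, 1, sizeof(chunk), trace_file);
            usgnd_int_8 *tp = chunk;
            if (!left)
                return EMTP_END_OF_TRACE;       /* or report the read error */
            while (left) {
                size_t no_ops = sizeof(ops)/sizeof(ops[0]);
                int res = emtp_parse(state, &tp, &left,
                                     ops, sizeof(emtp_operation), &no_ops);
                /* consume_ops(ops, no_ops); -- caller-supplied processing */
                if (res == EMTP_NEED_MORE_TRACE)
                    break;                      /* slice consumed; refill */
                if (res != EMTP_HEADER_PARSED)
                    return res;                 /* end of trace, a filled
                                                 * operation buffer, or an
                                                 * error; a real driver would
                                                 * distinguish these */
            }
        }
    }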
diff --git a/erts/lib_src/common/erl_misc_utils.c b/erts/lib_src/common/erl_misc_utils.c
new file mode 100644
index 0000000000..9c25d33a3c
--- /dev/null
+++ b/erts/lib_src/common/erl_misc_utils.c
@@ -0,0 +1,967 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2006-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include "erl_misc_utils.h"
+
+#if defined(__WIN32__)
+# include <windows.h>
+#elif defined(VXWORKS)
+# include <selectLib.h>
+#else /* UNIX */
+# include <stdio.h>
+# include <sys/types.h>
+# include <sys/param.h>
+# include <limits.h>
+# include <dirent.h>
+# include <sys/stat.h>
+# include <fcntl.h>
+# ifdef SYS_SELECT_H
+# include <sys/select.h>
+# endif
+# if TIME_WITH_SYS_TIME
+# include <sys/time.h>
+# include <time.h>
+# else
+# if HAVE_SYS_TIME_H
+# include <sys/time.h>
+# else
+# include <time.h>
+# endif
+# endif
+# include <string.h>
+# ifdef HAVE_UNISTD_H
+# include <unistd.h>
+# endif
+# if (defined(NO_SYSCONF) || !defined(_SC_NPROCESSORS_CONF))
+# ifdef HAVE_SYS_SYSCTL_H
+# include <sys/sysctl.h>
+# endif
+# endif
+#endif
+
+#ifdef HAVE_SCHED_xETAFFINITY
+# include <sched.h>
+#endif
+#ifdef HAVE_PSET_INFO
+# include <sys/pset.h>
+#endif
+#ifdef HAVE_PROCESSOR_BIND
+# include <sys/processor.h>
+# include <sys/procset.h>
+#endif
+
+#include <stdlib.h>
+#ifdef HAVE_LIMITS_H
+#include <limits.h>
+#endif
+
+#ifdef __linux__
+# define ERTS_SYS_NODE_PATH "/sys/devices/system/node"
+# define ERTS_SYS_CPU_PATH "/sys/devices/system/cpu"
+#endif
+
+static int read_topology(erts_cpu_info_t *cpuinfo);
+
+int
+erts_milli_sleep(long ms)
+{
+ if (ms > 0) {
+#ifdef __WIN32__
+ Sleep((DWORD) ms);
+#else
+ struct timeval tv;
+ tv.tv_sec = ms / 1000;
+ tv.tv_usec = (ms % 1000) * 1000;
+ if (select(0, NULL, NULL, NULL, &tv) < 0)
+ return errno == EINTR ? 1 : -1;
+#endif
+ }
+ return 0;
+}
+
+struct erts_cpu_info_t_ {
+ int configured;
+ int online;
+ int available;
+ int topology_size;
+ erts_cpu_topology_t *topology;
+#if defined(HAVE_SCHED_xETAFFINITY)
+ char *affinity_str;
+ char affinity_str_buf[CPU_SETSIZE/4+2];
+ cpu_set_t cpuset;
+ pid_t pid;
+#elif defined(HAVE_PSET_INFO)
+ processorid_t *cpuids;
+#endif
+};
+
+erts_cpu_info_t *
+erts_cpu_info_create(void)
+{
+ erts_cpu_info_t *cpuinfo = malloc(sizeof(erts_cpu_info_t));
+ if (!cpuinfo)
+ return NULL;
+#if defined(HAVE_SCHED_xETAFFINITY)
+ cpuinfo->affinity_str = NULL;
+ cpuinfo->pid = getpid();
+#elif defined(HAVE_PSET_INFO)
+ cpuinfo->cpuids = NULL;
+#endif
+ cpuinfo->topology_size = 0;
+ cpuinfo->topology = NULL;
+ erts_cpu_info_update(cpuinfo);
+ return cpuinfo;
+}
+
+void
+erts_cpu_info_destroy(erts_cpu_info_t *cpuinfo)
+{
+ if (cpuinfo) {
+ cpuinfo->configured = 0;
+ cpuinfo->online = 0;
+ cpuinfo->available = 0;
+#ifdef HAVE_PSET_INFO
+ if (cpuinfo->cpuids)
+ free(cpuinfo->cpuids);
+#endif
+ cpuinfo->topology_size = 0;
+	if (cpuinfo->topology) {
+	    free(cpuinfo->topology);
+	    cpuinfo->topology = NULL;
+	}
+ free(cpuinfo);
+ }
+}
+
+void
+erts_cpu_info_update(erts_cpu_info_t *cpuinfo)
+{
+ cpuinfo->configured = 0;
+ cpuinfo->online = 0;
+ cpuinfo->available = 0;
+
+#ifdef __WIN32__
+ {
+ SYSTEM_INFO sys_info;
+ GetSystemInfo(&sys_info);
+ cpuinfo->configured = (int) sys_info.dwNumberOfProcessors;
+
+ }
+#elif !defined(NO_SYSCONF) && (defined(_SC_NPROCESSORS_CONF) \
+ || defined(_SC_NPROCESSORS_ONLN))
+#ifdef _SC_NPROCESSORS_CONF
+ cpuinfo->configured = (int) sysconf(_SC_NPROCESSORS_CONF);
+ if (cpuinfo->configured < 0)
+ cpuinfo->configured = 0;
+#endif
+#ifdef _SC_NPROCESSORS_ONLN
+ cpuinfo->online = (int) sysconf(_SC_NPROCESSORS_ONLN);
+ if (cpuinfo->online < 0)
+ cpuinfo->online = 0;
+#endif
+#elif defined(HAVE_SYS_SYSCTL_H) && defined(CTL_HW) && (defined(HW_NCPU) \
+ || defined(HW_AVAILCPU))
+ {
+ int mib[2];
+ size_t len;
+
+#ifdef HW_NCPU
+ len = sizeof(int);
+ mib[0] = CTL_HW;
+ mib[1] = HW_NCPU;
+ if (sysctl(&mib[0], 2, &cpuinfo->configured, &len, NULL, 0) < 0)
+ cpuinfo->configured = 0;
+#endif
+#ifdef HW_AVAILCPU
+ len = sizeof(int);
+ mib[0] = CTL_HW;
+ mib[1] = HW_AVAILCPU;
+ if (sysctl(&mib[0], 2, &cpuinfo->online, &len, NULL, 0) < 0)
+ cpuinfo->online = 0;
+#endif
+ }
+#endif
+
+ if (cpuinfo->online > cpuinfo->configured)
+ cpuinfo->online = cpuinfo->configured;
+
+#ifdef HAVE_SCHED_xETAFFINITY
+ if (sched_getaffinity(cpuinfo->pid, sizeof(cpu_set_t), &cpuinfo->cpuset) == 0) {
+ int i, c, cn, si;
+ c = cn = 0;
+ si = sizeof(cpuinfo->affinity_str_buf) - 1;
+ cpuinfo->affinity_str_buf[si] = '\0';
+ for (i = 0; i < CPU_SETSIZE; i++) {
+ if (CPU_ISSET(i, &cpuinfo->cpuset)) {
+ c |= 1 << cn;
+ cpuinfo->available++;
+ }
+ cn++;
+ if (cn == 4) {
+ cpuinfo->affinity_str_buf[--si] = (c < 10
+ ? '0' + c
+ : 'A' + c - 10);
+ c = cn = 0;
+ }
+ }
+ if (c)
+ cpuinfo->affinity_str_buf[--si] = (c < 10
+ ? '0' + c
+ : 'A' + c - 10);
+ while (cpuinfo->affinity_str_buf[si] == '0')
+ si++;
+ cpuinfo->affinity_str = &cpuinfo->affinity_str_buf[si];
+ }
+#elif defined(HAVE_PSET_INFO)
+ {
+ uint_t numcpus = cpuinfo->configured;
+ if (cpuinfo->cpuids)
+ free(cpuinfo->cpuids);
+ cpuinfo->cpuids = malloc(sizeof(processorid_t)*numcpus);
+ if (cpuinfo->cpuids) {
+ if (pset_info(PS_MYID, NULL, &numcpus, &cpuinfo->cpuids) == 0)
+ cpuinfo->available = (int) numcpus;
+ if (cpuinfo->available < 0) {
+		free(cpuinfo->cpuids);
+ cpuinfo->available = 0;
+ }
+ }
+ }
+#endif
+
+ if (cpuinfo->available > cpuinfo->online)
+ cpuinfo->available = cpuinfo->online;
+
+ read_topology(cpuinfo);
+
+}
+
+int
+erts_get_cpu_configured(erts_cpu_info_t *cpuinfo)
+{
+ if (!cpuinfo)
+ return -EINVAL;
+ if (cpuinfo->configured <= 0)
+ return -ENOTSUP;
+ return cpuinfo->configured;
+}
+
+int
+erts_get_cpu_online(erts_cpu_info_t *cpuinfo)
+{
+ if (!cpuinfo)
+ return -EINVAL;
+ if (cpuinfo->online <= 0)
+ return -ENOTSUP;
+ return cpuinfo->online;
+}
+
+int
+erts_get_cpu_available(erts_cpu_info_t *cpuinfo)
+{
+ if (!cpuinfo)
+ return -EINVAL;
+ if (cpuinfo->available <= 0)
+ return -ENOTSUP;
+ return cpuinfo->available;
+}
+
+char *
+erts_get_unbind_from_cpu_str(erts_cpu_info_t *cpuinfo)
+{
+#if defined(HAVE_SCHED_xETAFFINITY)
+ if (!cpuinfo)
+ return "false";
+ return cpuinfo->affinity_str;
+#else
+ return "true";
+#endif
+}
+
+int
+erts_get_available_cpu(erts_cpu_info_t *cpuinfo, int no)
+{
+ if (!cpuinfo || no < 1 || cpuinfo->available < no)
+ return -EINVAL;
+#ifdef HAVE_SCHED_xETAFFINITY
+ {
+ cpu_set_t *allowed = &cpuinfo->cpuset;
+ int ix, n;
+ for (ix = 0, n = 1; ix < CPU_SETSIZE; ix++) {
+ if (CPU_ISSET(ix, allowed)) {
+ if (no == n)
+ return ix;
+ n++;
+ }
+ }
+ }
+ return -EINVAL;
+#elif defined(HAVE_PROCESSOR_BIND)
+#if defined(HAVE_PSET_INFO)
+ return (int) cpuinfo->cpuids[no-1];
+#elif defined(HAVE_KSTAT)
+    if (cpuinfo->topology && no <= cpuinfo->online) {
+ /* May not be available, but this is the best we can do */
+ return cpuinfo->topology[no-1].logical;
+ }
+ return -EINVAL;
+#endif
+#else
+ return -ENOTSUP;
+#endif
+}
+
+int
+erts_is_cpu_available(erts_cpu_info_t *cpuinfo, int id)
+{
+ if (cpuinfo && 0 <= id) {
+#ifdef HAVE_SCHED_xETAFFINITY
+	if (id < CPU_SETSIZE)
+ return CPU_ISSET(id, &cpuinfo->cpuset);
+#elif defined(HAVE_PROCESSOR_BIND)
+ int no;
+#if defined(HAVE_PSET_INFO)
+ for (no = 0; no < cpuinfo->available; no++)
+ if (id == (int) cpuinfo->cpuids[no])
+ return 1;
+#elif defined(HAVE_KSTAT)
+ if (cpuinfo->topology) {
+ for (no = 0; no < cpuinfo->online; no++) {
+ if (id == (int) cpuinfo->topology[no].logical) {
+ /* May not be available, but this is the best we can do... */
+ return 1;
+ }
+ }
+ }
+#endif
+#endif
+ }
+ return 0;
+}
+
+int
+erts_get_cpu_topology_size(erts_cpu_info_t *cpuinfo)
+{
+ return cpuinfo->topology_size;
+}
+
+int
+erts_get_cpu_topology(erts_cpu_info_t *cpuinfo,
+ erts_cpu_topology_t *topology)
+{
+ if (!cpuinfo->topology)
+ return 0;
+ memcpy((void *) topology,
+ (void *) cpuinfo->topology,
+ cpuinfo->configured*sizeof(erts_cpu_topology_t));
+ return cpuinfo->configured;
+}
+
+int
+erts_bind_to_cpu(erts_cpu_info_t *cpuinfo, int cpu)
+{
+ /*
+ * Caller can test for available functionality by
+ * passing a negative cpu id. If functionality is
+ * available -EINVAL is returned; otherwise,
+ * -ENOTSUP.
+ */
+ if (!cpuinfo)
+ return -EINVAL;
+#ifdef HAVE_SCHED_xETAFFINITY
+ {
+ cpu_set_t bind_set;
+ if (cpu < 0)
+ return -EINVAL;
+ if (!CPU_ISSET(cpu, &cpuinfo->cpuset))
+ return -EINVAL;
+
+ CPU_ZERO(&bind_set);
+ CPU_SET(cpu, &bind_set);
+ if (sched_setaffinity(0, sizeof(cpu_set_t), &bind_set) != 0)
+ return -errno;
+ return 0;
+ }
+#elif defined(HAVE_PROCESSOR_BIND)
+ if (cpu < 0)
+ return -EINVAL;
+ if (processor_bind(P_LWPID, P_MYID, (processorid_t) cpu, NULL) != 0)
+ return -errno;
+ return 0;
+#else
+ return -ENOTSUP;
+#endif
+}
+
+int
+erts_unbind_from_cpu(erts_cpu_info_t *cpuinfo)
+{
+ if (!cpuinfo)
+ return -EINVAL;
+#if defined(HAVE_SCHED_xETAFFINITY)
+ if (sched_setaffinity(0, sizeof(cpu_set_t), &cpuinfo->cpuset) != 0)
+ return -errno;
+ return 0;
+#elif defined(HAVE_PROCESSOR_BIND)
+ if (processor_bind(P_LWPID, P_MYID, PBIND_NONE, NULL) != 0)
+ return -errno;
+ return 0;
+#else
+ return -ENOTSUP;
+#endif
+}
+
+int
+erts_unbind_from_cpu_str(char *str)
+{
+#if defined(HAVE_SCHED_xETAFFINITY)
+ char *c = str;
+ int cpus = 0;
+ int shft = 0;
+ cpu_set_t cpuset;
+
+ CPU_ZERO(&cpuset);
+
+ if (!c)
+ return -EINVAL;
+
+ while (*c)
+ c++;
+
+ while (c != str) {
+ int shft2;
+ int mask = 0;
+ c--;
+ switch (*c) {
+ case '0': mask = 0; break;
+ case '1': mask = 1; break;
+ case '2': mask = 2; break;
+ case '3': mask = 3; break;
+ case '4': mask = 4; break;
+ case '5': mask = 5; break;
+ case '6': mask = 6; break;
+ case '7': mask = 7; break;
+ case '8': mask = 8; break;
+ case '9': mask = 9; break;
+ case 'A': case 'a': mask = 10; break;
+ case 'B': case 'b': mask = 11; break;
+ case 'C': case 'c': mask = 12; break;
+ case 'D': case 'd': mask = 13; break;
+ case 'E': case 'e': mask = 14; break;
+ case 'F': case 'f': mask = 15; break;
+ default: return -EINVAL;
+ }
+ for (shft2 = 0; shft2 < 4; shft2++) {
+ if (mask & (1 << shft2)) {
+ int cpu = shft + shft2;
+ if (cpu >= CPU_SETSIZE)
+ return -EINVAL;
+ cpus++;
+ CPU_SET(cpu, &cpuset);
+ }
+ }
+ shft += 4;
+ }
+
+ if (!cpus)
+ return -EINVAL;
+
+ if (sched_setaffinity(0, sizeof(cpu_set_t), &cpuset) != 0)
+ return -errno;
+ return 0;
+#elif defined(HAVE_PROCESSOR_BIND)
+ if (processor_bind(P_LWPID, P_MYID, PBIND_NONE, NULL) != 0)
+ return -errno;
+ return 0;
+#else
+ return -ENOTSUP;
+#endif
+}
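erts_get_unbind_from_cpu_str() and erts_unbind_from_cpu_str() above round-trip the thread's CPU affinity as a hexadecimal mask string: the rightmost digit covers CPU ids 0-3, the next one ids 4-7, and so on, so "F" allows CPUs 0-3 and "30" allows CPUs 4 and 5. A minimal sketch of saving and restoring that mask on a sched_getaffinity()-style build follows; the buffer size and the intermediate bind are assumptions, not something the source prescribes.

    #include <errno.h>
    #include <string.h>
    #include "erl_misc_utils.h"

    /* Save the current affinity string, optionally bind to one CPU in
     * between, then restore the original mask.  Sketch only. */
    static int save_and_restore_affinity(erts_cpu_info_t *cpuinfo)
    {
        char saved[256];   /* plenty for typical masks; guarded below */
        char *cur = erts_get_unbind_from_cpu_str(cpuinfo);
        if (!cur || strlen(cur) >= sizeof(saved))
            return -EINVAL;
        strcpy(saved, cur);                      /* e.g. "F" on a 4-core box */
        /* ... erts_bind_to_cpu(cpuinfo, some_cpu) could go here ... */
        return erts_unbind_from_cpu_str(saved);  /* 0 on success, negative
                                                  * errno-style otherwise */
    }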
+
+
+static int
+pn_cmp(const void *vx, const void *vy)
+{
+ erts_cpu_topology_t *x = (erts_cpu_topology_t *) vx;
+ erts_cpu_topology_t *y = (erts_cpu_topology_t *) vy;
+
+ if (x->processor != y->processor)
+ return x->processor - y->processor;
+ if (x->node != y->node)
+ return x->node - y->node;
+ if (x->processor_node != y->processor_node)
+ return x->processor_node - y->processor_node;
+ if (x->core != y->core)
+ return x->core - y->core;
+ if (x->thread != y->thread)
+ return x->thread - y->thread;
+ if (x->logical != y->logical)
+ return x->logical - y->logical;
+ return 0;
+}
+
+static int
+cpu_cmp(const void *vx, const void *vy)
+{
+ erts_cpu_topology_t *x = (erts_cpu_topology_t *) vx;
+ erts_cpu_topology_t *y = (erts_cpu_topology_t *) vy;
+
+ if (x->node != y->node)
+ return x->node - y->node;
+ if (x->processor != y->processor)
+ return x->processor - y->processor;
+ if (x->processor_node != y->processor_node)
+ return x->processor_node - y->processor_node;
+ if (x->core != y->core)
+ return x->core - y->core;
+ if (x->thread != y->thread)
+ return x->thread - y->thread;
+ if (x->logical != y->logical)
+ return x->logical - y->logical;
+ return 0;
+}
+
+#ifdef __linux__
+
+static int
+read_file(char *path, char *buf, int size)
+{
+ int ix = 0;
+ ssize_t sz = size-1;
+ int fd = open(path, O_RDONLY);
+ if (fd < 0)
+ goto error;
+ while (size > ix) {
+ sz = read(fd, &buf[ix], size - ix);
+ if (sz <= 0) {
+ if (sz == 0)
+ break;
+ if (errno == EINTR)
+ continue;
+ goto error;
+ }
+ ix += sz;
+ }
+ buf[ix] = '\0';
+ close(fd);
+ return ix;
+
+ error: {
+ int saved_errno = errno;
+ if (fd >= 0)
+ close(fd);
+ if (saved_errno)
+ return -saved_errno;
+ else
+ return -EINVAL;
+ }
+}
+
+static int
+read_topology(erts_cpu_info_t *cpuinfo)
+{
+ char npath[MAXPATHLEN];
+ char cpath[MAXPATHLEN];
+ char tpath[MAXPATHLEN];
+ char fpath[MAXPATHLEN];
+ DIR *ndir = NULL;
+ DIR *cdir = NULL;
+ struct dirent *nde;
+ int ix;
+ int res = 0;
+ int got_nodes = 0;
+ int no_nodes = 0;
+
+ errno = 0;
+
+    if (cpuinfo->topology) {
+	free(cpuinfo->topology);
+	cpuinfo->topology = NULL;
+    }
+
+ if (cpuinfo->configured < 1)
+ goto error;
+
+ cpuinfo->topology = malloc(sizeof(erts_cpu_topology_t)
+ * cpuinfo->configured);
+    if (!cpuinfo->topology)
+ goto error;
+
+ for (ix = 0; ix < cpuinfo->configured; ix++) {
+ cpuinfo->topology[ix].node = -1;
+ cpuinfo->topology[ix].processor = -1;
+ cpuinfo->topology[ix].processor_node = -1;
+ cpuinfo->topology[ix].core = -1;
+ cpuinfo->topology[ix].thread = -1;
+ cpuinfo->topology[ix].logical = -1;
+ }
+
+ ix = -1;
+
+ if (realpath(ERTS_SYS_NODE_PATH, npath)) {
+ got_nodes = 1;
+ ndir = opendir(npath);
+ }
+
+ do {
+ int node_id = -1;
+
+ if (!got_nodes) {
+ if (!realpath(ERTS_SYS_CPU_PATH, cpath))
+ goto error;
+ }
+ else {
+
+ nde = readdir(ndir);
+
+ if (!nde)
+ break;
+
+ if (sscanf(nde->d_name, "node%d", &node_id) != 1)
+ continue;
+
+ no_nodes++;
+
+ sprintf(tpath, "%s/node%d", npath, node_id);
+
+ if (!realpath(tpath, cpath))
+ goto error;
+ }
+
+ cdir = opendir(cpath);
+ if (!cdir)
+ goto error;
+
+ while (1) {
+ int cpu_id;
+ struct dirent *cde = readdir(cdir);
+ if (!cde) {
+ closedir(cdir);
+ cdir = NULL;
+ break;
+ }
+ if (sscanf(cde->d_name, "cpu%d", &cpu_id) == 1) {
+ char buf[50]; /* Much more than enough for an integer */
+ int processor_id, core_id;
+ sprintf(tpath, "%s/cpu%d/topology/physical_package_id",
+ cpath, cpu_id);
+ if (!realpath(tpath, fpath))
+ continue;
+ if (read_file(fpath, buf, sizeof(buf)) <= 0)
+ continue;
+ if (sscanf(buf, "%d", &processor_id) != 1)
+ continue;
+ sprintf(tpath, "%s/cpu%d/topology/core_id",
+ cpath, cpu_id);
+ if (!realpath(tpath, fpath))
+ continue;
+ if (read_file(fpath, buf, sizeof(buf)) <= 0)
+ continue;
+ if (sscanf(buf, "%d", &core_id) != 1)
+ continue;
+
+ /*
+ * We now know node id, processor id, and
+ * core id of the logical processor with
+ * the cpu id 'cpu_id'.
+ */
+ ix++;
+ cpuinfo->topology[ix].node = node_id;
+ cpuinfo->topology[ix].processor = processor_id;
+ cpuinfo->topology[ix].processor_node = -1; /* Fixed later */
+ cpuinfo->topology[ix].core = core_id;
+		cpuinfo->topology[ix].thread = 0; /* we'll enumerate later */
+ cpuinfo->topology[ix].logical = cpu_id;
+ }
+ }
+ } while (got_nodes);
+
+ res = ix+1;
+
+ if (!res || res < cpuinfo->online)
+ res = 0;
+ else {
+ erts_cpu_topology_t *prev, *this, *last;
+
+ cpuinfo->topology_size = res;
+
+ if (cpuinfo->topology_size != cpuinfo->configured) {
+ void *t = realloc(cpuinfo->topology, (sizeof(erts_cpu_topology_t)
+ * cpuinfo->topology_size));
+ if (t)
+ cpuinfo->topology = t;
+ }
+
+ if (no_nodes > 1) {
+ int processor = -1;
+ int processor_node = 0;
+ int node = -1;
+
+ qsort(cpuinfo->topology,
+ cpuinfo->topology_size,
+ sizeof(erts_cpu_topology_t),
+ pn_cmp);
+
+ prev = NULL;
+ this = &cpuinfo->topology[0];
+ last = &cpuinfo->topology[cpuinfo->configured-1];
+ while (1) {
+ if (processor == this->processor) {
+ if (node != this->node)
+ processor_node = 1;
+ }
+ else {
+ if (processor_node) {
+ make_processor_node:
+ while (prev->processor == processor) {
+ prev->processor_node = prev->node;
+ prev->node = -1;
+ if (prev == &cpuinfo->topology[0])
+ break;
+ prev--;
+ }
+ processor_node = 0;
+ }
+ processor = this->processor;
+ node = this->node;
+ }
+ if (this == last) {
+ if (processor_node) {
+ prev = this;
+ goto make_processor_node;
+ }
+ break;
+ }
+ prev = this++;
+ }
+ }
+
+ qsort(cpuinfo->topology,
+ cpuinfo->topology_size,
+ sizeof(erts_cpu_topology_t),
+ cpu_cmp);
+
+ this = &cpuinfo->topology[0];
+ this->thread = 0;
+
+ if (res > 1) {
+ prev = this++;
+ last = &cpuinfo->topology[cpuinfo->configured-1];
+
+ while (1) {
+ this->thread = ((this->node == prev->node
+ && this->processor == prev->processor
+ && this->processor_node == prev->processor_node
+ && this->core == prev->core)
+ ? prev->thread + 1
+ : 0);
+ if (this == last)
+ break;
+ prev = this++;
+ }
+ }
+ }
+
+ error:
+
+ if (res == 0) {
+ cpuinfo->topology_size = 0;
+ if (cpuinfo->topology) {
+ free(cpuinfo->topology);
+ cpuinfo->topology = NULL;
+ }
+ if (errno)
+ res = -errno;
+ else
+ res = -EINVAL;
+ }
+
+ if (ndir)
+ closedir(ndir);
+ if (cdir)
+ closedir(cdir);
+
+ return res;
+}
+
+#elif defined(HAVE_KSTAT) /* SunOS kstat */
+
+#include <kstat.h>
+
+static int
+data_lookup_int(kstat_t *ks, char *what)
+{
+ int res;
+ kstat_named_t *ks_n;
+
+ ks_n = kstat_data_lookup(ks, what);
+ if (!ks_n)
+ return 0;
+
+ switch (ks_n->data_type) {
+ case KSTAT_DATA_CHAR:
+ res = atoi(ks_n->value.c);
+ break;
+ case KSTAT_DATA_INT32:
+ res = (int) ks_n->value.i32;
+ break;
+ case KSTAT_DATA_UINT32:
+ res = (int) ks_n->value.ui32;
+ break;
+ case KSTAT_DATA_INT64:
+ res = (int) ks_n->value.i64;
+ break;
+ case KSTAT_DATA_UINT64:
+ res = (int) ks_n->value.ui64;
+ break;
+ default:
+ res = 0;
+ break;
+ }
+ return res;
+}
+
+static int
+read_topology(erts_cpu_info_t *cpuinfo)
+{
+ int res = 0;
+ int ix;
+ kstat_ctl_t *ks_ctl;
+ kstat_t *ks;
+
+ errno = 0;
+
+    if (cpuinfo->topology) {
+	free(cpuinfo->topology);
+	cpuinfo->topology = NULL;
+    }
+
+ if (cpuinfo->configured < 1)
+ goto error;
+
+ cpuinfo->topology = malloc(sizeof(erts_cpu_topology_t)
+ * cpuinfo->configured);
+    if (!cpuinfo->topology)
+ goto error;
+
+ for (ix = 0; ix < cpuinfo->configured; ix++) {
+ cpuinfo->topology[ix].node = -1;
+ cpuinfo->topology[ix].processor = -1;
+ cpuinfo->topology[ix].processor_node = -1;
+ cpuinfo->topology[ix].core = -1;
+ cpuinfo->topology[ix].thread = -1;
+ cpuinfo->topology[ix].logical = -1;
+ }
+
+ ks_ctl = kstat_open();
+ if (!ks_ctl)
+ goto error;
+
+ ix = 0;
+ for (ks = ks_ctl->kc_chain; ks; ks = ks->ks_next) {
+ if (strcmp("cpu_info", ks->ks_module) == 0) {
+ kstat_read(ks_ctl, ks, NULL);
+ if (ks->ks_type == KSTAT_TYPE_NAMED) {
+		/*
+		 * Don't know how to figure out NUMA nodes;
+		 * hope there are none...
+		 */
+ cpuinfo->topology[ix].node = -1;
+ cpuinfo->topology[ix].processor = data_lookup_int(ks,"chip_id");
+ cpuinfo->topology[ix].processor_node = -1;
+ cpuinfo->topology[ix].core = data_lookup_int(ks, "core_id");
+		cpuinfo->topology[ix].thread = 0; /* we'll enumerate later */
+ cpuinfo->topology[ix].logical = ks->ks_instance;
+ if (++ix == cpuinfo->configured)
+ break;
+ }
+ }
+ }
+
+ kstat_close(ks_ctl);
+
+ res = ix;
+
+ if (!res || res < cpuinfo->online)
+ res = 0;
+ else {
+ erts_cpu_topology_t *prev, *this, *last;
+
+ cpuinfo->topology_size = res;
+
+ if (cpuinfo->topology_size != cpuinfo->configured) {
+ void *t = realloc(cpuinfo->topology, (sizeof(erts_cpu_topology_t)
+ * cpuinfo->topology_size));
+ if (t)
+ cpuinfo->topology = t;
+ }
+
+ qsort(cpuinfo->topology,
+ cpuinfo->topology_size,
+ sizeof(erts_cpu_topology_t),
+ cpu_cmp);
+
+ this = &cpuinfo->topology[0];
+ this->thread = 0;
+
+ if (res > 1) {
+ prev = this++;
+ last = &cpuinfo->topology[cpuinfo->configured-1];
+
+ while (1) {
+ this->thread = ((this->node == prev->node
+ && this->processor == prev->processor
+ && this->processor_node == prev->processor_node
+ && this->core == prev->core)
+ ? prev->thread + 1
+ : 0);
+ if (this == last)
+ break;
+ prev = this++;
+ }
+ }
+ }
+
+ error:
+
+ if (res == 0) {
+ cpuinfo->topology_size = 0;
+ if (cpuinfo->topology) {
+ free(cpuinfo->topology);
+ cpuinfo->topology = NULL;
+ }
+ if (errno)
+ res = -errno;
+ else
+ res = -EINVAL;
+ }
+
+ return res;
+
+}
+
+#else
+
+static int
+read_topology(erts_cpu_info_t *cpuinfo)
+{
+ return -ENOTSUP;
+}
+
+#endif
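The comment inside erts_bind_to_cpu() above documents a probing convention: calling it with a negative cpu id returns -EINVAL when binding is supported on the platform and -ENOTSUP when it is not. A hedged sketch of using that probe together with erts_get_available_cpu() follows; the helper name and the error handling are mine, not the source's.

    #include <errno.h>
    #include "erl_misc_utils.h"

    /* Bind the calling thread to the n:th available CPU (1-based),
     * if CPU binding is supported at all.  Illustrative sketch. */
    static int bind_to_nth_available_cpu(int n)
    {
        erts_cpu_info_t *ci = erts_cpu_info_create();
        int cpu, res;
        if (!ci)
            return -ENOMEM;
        if (erts_bind_to_cpu(ci, -1) == -ENOTSUP) {   /* probe, as documented */
            erts_cpu_info_destroy(ci);
            return -ENOTSUP;
        }
        cpu = erts_get_available_cpu(ci, n);
        res = (cpu < 0) ? cpu : erts_bind_to_cpu(ci, cpu);
        erts_cpu_info_destroy(ci);
        return res;                                   /* 0 on success */
    }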
diff --git a/erts/lib_src/common/erl_printf.c b/erts/lib_src/common/erl_printf.c
new file mode 100644
index 0000000000..72d18ab6f1
--- /dev/null
+++ b/erts/lib_src/common/erl_printf.c
@@ -0,0 +1,427 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2005-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/* Without this, variable argument lists break on VxWorks */
+#ifdef VXWORKS
+#include <vxWorks.h>
+#endif
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <string.h>
+#include "erl_errno.h"
+#ifdef __WIN32__
+# include <io.h>
+#else
+# include <unistd.h>
+#endif
+#include "erl_printf.h"
+#include "erl_printf_format.h"
+
+#ifdef DEBUG
+#include <assert.h>
+#define ASSERT(X) assert(X)
+#else
+#define ASSERT(X)
+#endif
+
+#if defined(__WIN32__) && !defined(__GNUC__)
+typedef int ssize_t;
+#endif
+
+int (*erts_printf_stdout_func)(char *, va_list) = NULL;
+int (*erts_printf_stderr_func)(char *, va_list) = NULL;
+
+int erts_printf_add_cr_to_stdout = 0;
+int erts_printf_add_cr_to_stderr = 0;
+
+int (*erts_printf_block_fpe)(void) = NULL;
+void (*erts_printf_unblock_fpe)(int) = NULL;
+
+#undef FLOCKFILE
+#undef FUNLOCKFILE
+#undef PUTC
+#undef FWRITE
+#undef PUTC_ON_SMALL_WRITES
+
+#if defined(USE_THREADS) && defined(HAVE_FLOCKFILE)
+# define FLOCKFILE(FP) flockfile(FP)
+# define FUNLOCKFILE(FP) funlockfile(FP)
+# ifdef HAVE_PUTC_UNLOCKED
+# define PUTC putc_unlocked
+# define PUTC_ON_SMALL_WRITES
+# endif
+# ifdef HAVE_FWRITE_UNLOCKED
+# define FWRITE fwrite_unlocked
+# endif
+#endif
+#if !defined(USE_THREADS) && defined(putc) && !defined(fwrite)
+# define PUTC_ON_SMALL_WRITES
+#endif
+#if !defined(FLOCKFILE) || !defined(FUNLOCKFILE)
+# define FLOCKFILE(FP)
+# define FUNLOCKFILE(FP)
+#endif
+#ifndef PUTC
+# define PUTC putc
+#endif
+#ifndef FWRITE
+# define FWRITE fwrite
+#endif
+
+static int
+get_error_result(void)
+{
+ int res = errno;
+ if (res <= 0)
+ res = EIO;
+ return -res;
+}
+
+
+static int
+write_f_add_cr(void *vfp, char* buf, size_t len)
+{
+ size_t i;
+ ASSERT(vfp);
+ for (i = 0; i < len; i++) {
+ if (buf[i] == '\n' && PUTC('\r', (FILE *) vfp) == EOF)
+ return get_error_result();
+ if (PUTC(buf[i], (FILE *) vfp) == EOF)
+ return get_error_result();
+ }
+ return 0;
+}
+
+static int
+write_f(void *vfp, char* buf, size_t len)
+{
+ ASSERT(vfp);
+#ifdef PUTC_ON_SMALL_WRITES
+ if (len <= 64) { /* Try to optimize writes of small bufs. */
+ int i;
+ for (i = 0; i < len; i++)
+ if (PUTC(buf[i], (FILE *) vfp) == EOF)
+ return get_error_result();
+ }
+ else
+#endif
+ if (FWRITE((void *) buf, sizeof(char), len, (FILE *) vfp) != len)
+ return get_error_result();
+ return 0;
+}
+
+static int
+write_fd(void *vfdp, char* buf, size_t len)
+{
+ ssize_t size;
+ ASSERT(vfdp);
+
+ while (len) {
+ size = write(*((int *) vfdp), (void *) buf, len);
+ if (size < 0) {
+#ifdef EINTR
+ if (errno == EINTR)
+ continue;
+#endif
+ return get_error_result();
+ }
+ if (size > len)
+ return -EIO;
+ len -= size;
+ }
+
+ return 0;
+}
+
+static int
+write_s(void *vwbufpp, char* bufp, size_t len)
+{
+ char **wbufpp = (char **) vwbufpp;
+ ASSERT(wbufpp && *wbufpp);
+ ASSERT(len > 0);
+ memcpy((void *) *wbufpp, (void *) bufp, len);
+ *wbufpp += len;
+ return 0;
+}
+
+
+typedef struct {
+ char *buf;
+ size_t len;
+} write_sn_arg_t;
+
+static int
+write_sn(void *vwsnap, char* buf, size_t len)
+{
+ write_sn_arg_t *wsnap = (write_sn_arg_t *) vwsnap;
+ ASSERT(wsnap);
+ ASSERT(len > 0);
+ if (wsnap->len > 0) {
+ size_t sz = len;
+ if (sz >= wsnap->len)
+ sz = wsnap->len;
+ memcpy((void *) wsnap->buf, (void *) buf, sz);
+ wsnap->buf += sz;
+ wsnap->len -= sz;
+ }
+ return 0;
+}
+
+static int
+write_ds(void *vdsbufp, char* buf, size_t len)
+{
+ erts_dsprintf_buf_t *dsbufp = (erts_dsprintf_buf_t *) vdsbufp;
+ size_t need_len = len + 1; /* Also trailing '\0' */
+ ASSERT(dsbufp);
+ ASSERT(len > 0);
+ ASSERT(dsbufp->str_len <= dsbufp->size);
+ if (need_len > dsbufp->size - dsbufp->str_len) {
+ dsbufp = (*dsbufp->grow)(dsbufp, need_len);
+ if (!dsbufp)
+ return -ENOMEM;
+ }
+ memcpy((void *) (dsbufp->str + dsbufp->str_len), (void *) buf, len);
+ dsbufp->str_len += len;
+ return 0;
+}
+
+int
+erts_printf(const char *format, ...)
+{
+ int res;
+ va_list arglist;
+ va_start(arglist, format);
+ errno = 0;
+ if (erts_printf_stdout_func)
+ res = (*erts_printf_stdout_func)((char *) format, arglist);
+ else {
+ FLOCKFILE(stdout);
+ res = erts_printf_format(erts_printf_add_cr_to_stdout
+ ? write_f_add_cr
+ : write_f,
+ (void *) stdout,
+ (char *) format,
+ arglist);
+ FUNLOCKFILE(stdout);
+ }
+ va_end(arglist);
+ return res;
+}
+
+int
+erts_fprintf(FILE *filep, const char *format, ...)
+{
+ int res;
+ va_list arglist;
+ va_start(arglist, format);
+ errno = 0;
+ if (erts_printf_stdout_func && filep == stdout)
+ res = (*erts_printf_stdout_func)((char *) format, arglist);
+ else if (erts_printf_stderr_func && filep == stderr)
+ res = (*erts_printf_stderr_func)((char *) format, arglist);
+ else {
+ int (*fmt_f)(void*, char*, size_t);
+ if (erts_printf_add_cr_to_stdout && filep == stdout)
+ fmt_f = write_f_add_cr;
+ else if (erts_printf_add_cr_to_stderr && filep == stderr)
+ fmt_f = write_f_add_cr;
+ else
+ fmt_f = write_f;
+ FLOCKFILE(filep);
+ res = erts_printf_format(fmt_f,(void *)filep,(char *)format,arglist);
+ FUNLOCKFILE(filep);
+ }
+ va_end(arglist);
+ return res;
+}
+
+int
+erts_fdprintf(int fd, const char *format, ...)
+{
+ int res;
+ va_list arglist;
+ va_start(arglist, format);
+ errno = 0;
+ res = erts_printf_format(write_fd,(void *)&fd,(char *)format,arglist);
+ va_end(arglist);
+ return res;
+}
+
+int
+erts_sprintf(char *buf, const char *format, ...)
+{
+ int res;
+ char *p = buf;
+ va_list arglist;
+ va_start(arglist, format);
+ errno = 0;
+ res = erts_printf_format(write_s, (void *) &p, (char *) format, arglist);
+ if (res < 0)
+ buf[0] = '\0';
+ else
+ buf[res] = '\0';
+ va_end(arglist);
+ return res;
+}
+
+int
+erts_snprintf(char *buf, size_t size, const char *format, ...)
+{
+ write_sn_arg_t wsnap;
+ int res;
+ va_list arglist;
+ if (size < 1)
+ return -EINVAL;
+ wsnap.buf = buf;
+ wsnap.len = size-1; /* Always need room for trailing '\0' */
+ va_start(arglist, format);
+ errno = 0;
+ res = erts_printf_format(write_sn, (void *)&wsnap, (char *)format, arglist);
+ if (res < 0)
+ buf[0] = '\0';
+ else if (res < size)
+ buf[res] = '\0';
+ else
+ buf[size-1] = '\0';
+ va_end(arglist);
+ return res;
+}
+
+int
+erts_dsprintf(erts_dsprintf_buf_t *dsbufp, const char *format, ...)
+{
+ int res;
+ va_list arglist;
+ if (!dsbufp)
+ return -EINVAL;
+ va_start(arglist, format);
+ errno = 0;
+ res = erts_printf_format(write_ds, (void *)dsbufp, (char *)format, arglist);
+ if (dsbufp->str) {
+ if (res < 0)
+ dsbufp->str[0] = '\0';
+ else
+ dsbufp->str[dsbufp->str_len] = '\0';
+ }
+ va_end(arglist);
+ return res;
+}
+
+int
+erts_vprintf(const char *format, va_list arglist)
+{
+ int res;
+ if (erts_printf_stdout_func)
+ res = (*erts_printf_stdout_func)((char *) format, arglist);
+ else {
+ errno = 0;
+ res = erts_printf_format(erts_printf_add_cr_to_stdout
+ ? write_f_add_cr
+ : write_f,
+ (void *) stdout,
+ (char *) format,
+ arglist);
+ }
+ return res;
+}
+
+int
+erts_vfprintf(FILE *filep, const char *format, va_list arglist)
+{
+ int res;
+ if (erts_printf_stdout_func && filep == stdout)
+ res = (*erts_printf_stdout_func)((char *) format, arglist);
+ else if (erts_printf_stderr_func && filep == stderr)
+ res = (*erts_printf_stderr_func)((char *) format, arglist);
+ else {
+ int (*fmt_f)(void*, char*, size_t);
+ errno = 0;
+ if (erts_printf_add_cr_to_stdout && filep == stdout)
+ fmt_f = write_f_add_cr;
+ else if (erts_printf_add_cr_to_stderr && filep == stderr)
+ fmt_f = write_f_add_cr;
+ else
+ fmt_f = write_f;
+ res = erts_printf_format(fmt_f,(void *)filep,(char *)format,arglist);
+ }
+ return res;
+}
+
+int
+erts_vfdprintf(int fd, const char *format, va_list arglist)
+{
+ int res;
+ errno = 0;
+ res = erts_printf_format(write_fd,(void *)&fd,(char *)format,arglist);
+ return res;
+}
+
+int
+erts_vsprintf(char *buf, const char *format, va_list arglist)
+{
+ int res;
+ char *p = buf;
+ errno = 0;
+ res = erts_printf_format(write_s, (void *) &p, (char *) format, arglist);
+ if (res < 0)
+ buf[0] = '\0';
+ else
+ buf[res] = '\0';
+ return res;
+}
+
+int
+erts_vsnprintf(char *buf, size_t size, const char *format, va_list arglist)
+{
+ write_sn_arg_t wsnap;
+ int res;
+ if (size < 1)
+ return -EINVAL;
+ wsnap.buf = buf;
+ wsnap.len = size-1; /* Always need room for trailing '\0' */
+ errno = 0;
+ res = erts_printf_format(write_sn, (void *)&wsnap, (char *)format, arglist);
+ if (res < 0)
+ buf[0] = '\0';
+ else if (res < size)
+ buf[res] = '\0';
+ else
+ buf[size-1] = '\0';
+ return res;
+}
+
+int
+erts_vdsprintf(erts_dsprintf_buf_t *dsbufp, const char *format, va_list arglist)
+{
+ int res;
+ if (!dsbufp)
+ return -EINVAL;
+ errno = 0;
+ res = erts_printf_format(write_ds, (void *)dsbufp, (char *)format, arglist);
+ if (dsbufp->str) {
+ if (res < 0)
+ dsbufp->str[0] = '\0';
+ else
+ dsbufp->str[dsbufp->str_len] = '\0';
+ }
+ return res;
+}
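Every erts_*printf() entry point above returns the number of characters produced (not counting the terminating '\0') or a negative errno-style value on failure, and erts_snprintf() always NUL-terminates the buffer as long as it is given at least one byte. A small usage sketch; the truncation policy is mine, not the library's.

    #include "erl_printf.h"

    /* Format into a fixed buffer; report truncation via the fd-based
     * variant.  Illustrative sketch only. */
    static void report(int fd, const char *who, unsigned long id)
    {
        char buf[32];
        int n = erts_snprintf(buf, sizeof(buf), "%s: id=%lx\n", who, id);
        if (n < 0)
            return;                               /* formatting failed */
        if ((size_t) n < sizeof(buf))
            erts_fdprintf(fd, "%s", buf);         /* fitted as-is */
        else
            erts_fdprintf(fd, "%s: id=%lx (%d chars, truncated)\n",
                          who, id, n);
    }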
diff --git a/erts/lib_src/common/erl_printf_format.c b/erts/lib_src/common/erl_printf_format.c
new file mode 100644
index 0000000000..bd3d38e649
--- /dev/null
+++ b/erts/lib_src/common/erl_printf_format.c
@@ -0,0 +1,940 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2005-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * fmt:
+ * '%' <flag>* [ <width> [.<precision>]][<length>]<conversion>
+ *
+ * flag: # | 0 | - | <sp> | + | ' | I
+ * width: [0-9]+ | '*'
+ * precision: [0-9]+ | '*'
+ * length: hh | h | l | ll | L | j | t | b<sz>
+ * conversion: d,i | o,u,x,X | e,E | f,F | g,G | a,A | c | s | T |
+ * p | n | %
+ * sz: 8 | 16 | 32 | 64 | p
+ */
+
+/* Without this, variable argument lists break on VxWorks */
+#ifdef VXWORKS
+#include <vxWorks.h>
+#endif
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#ifdef __WIN32__
+#undef WIN32_LEAN_AND_MEAN
+#define WIN32_LEAN_AND_MEAN
+#include <windows.h>
+#endif
+
+#include <ctype.h>
+#include <string.h>
+#include <math.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include "erl_errno.h"
+#include <limits.h>
+#include "erl_printf.h"
+#include "erl_printf_format.h"
+
+#ifdef DEBUG
+#include <assert.h>
+#define ASSERT(X) assert(X)
+#else
+#define ASSERT(X)
+#endif
+
+#ifdef __WIN32__
+#define long_long LONGLONG
+#define signed_long_long LONGLONG
+#define unsigned_long_long ULONGLONG
+#undef SIZEOF_LONG_LONG
+#define SIZEOF_LONG_LONG 8
+#else
+#if SIZEOF_LONG_LONG
+#define long_long long long
+#define signed_long_long signed long long
+#define unsigned_long_long unsigned long long
+#endif
+#endif
+
+#if defined(__GNUC__)
+# undef inline
+# define inline __inline__
+#elif defined(__WIN32__)
+# undef inline
+# define inline __forceinline
+#else
+# ifndef inline
+# define inline
+# endif
+#endif
+
+#define FMTC_d 0x0000
+#define FMTC_i 0x0001
+#define FMTC_o 0x0002
+#define FMTC_u 0x0003
+#define FMTC_x 0x0004
+#define FMTC_X 0x0005
+#define FMTC_e 0x0006
+#define FMTC_E 0x0007
+#define FMTC_f 0x0008
+#define FMTC_T 0x0009
+#define FMTC_g 0x000a
+#define FMTC_G 0x000b
+#define FMTC_c 0x000c
+#define FMTC_s 0x000d
+#define FMTC_p 0x000e
+#define FMTC_n 0x000f
+#define FMTC_MASK 0x000f
+
+#define FMTL_no 0x0000
+#define FMTL_hh 0x0010
+#define FMTL_h 0x0020
+#define FMTL_l 0x0030
+#define FMTL_ll 0x0040
+#define FMTL_L 0x0050
+#define FMTL_j 0x0060
+#define FMTL_t 0x0070
+#define FMTL_MASK 0x00f0
+
+#define FMTF_alt   0x0100	/* # alternate form, i.e. 0x */
+#define FMTF_pad 0x0200 /* 0 zero pad */
+#define FMTF_adj 0x0400 /* left adjust */
+#define FMTF_blk 0x0800 /* add blank */
+#define FMTF_sgn 0x1000 /* add sign */
+#define FMTF_cnv 0x2000 /* decimal conversion */
+#define FMTF_cnV 0x4000 /* alternate decimal conversion */
+#define FMTF_MASK 0x7f00
+
+
+static char zeros[] = "00000000000000000000000000000000";
+static char blanks[] = " ";
+static char hex[] = "0123456789abcdef";
+static char heX[] = "0123456789ABCDEF";
+
+#define FMT(fn,arg,buf,len,count) do { \
+ int res__ = (fn)((arg),(buf),(len)); \
+ if (res__ < 0) \
+ return res__; \
+ (count) += (len); \
+ } while(0)
+
+#define FILL(fn,arg,cs,len, count) do { \
+ int __i = (len); \
+ while(__i >= sizeof(cs)-1) { \
+ FMT((fn),(arg),(cs),sizeof(cs)-1,(count)); \
+ __i -= sizeof(cs)-1; \
+ } \
+ if (__i) FMT((fn),(arg),(cs),__i,(count)); \
+ } while(0)
+
+#define BLANKS(fn,arg,n,count) FILL((fn),(arg),blanks,(n),count)
+#define ZEROS(fn,arg,n,count) FILL((fn),(arg),zeros,(n),count)
+
+#define SIGN(X) ((X) > 0 ? 1 : ((X) < 0 ? -1 : 0))
+#define USIGN(X) ((X) == 0 ? 0 : 1)
+
+int (*erts_printf_eterm_func)(fmtfn_t, void*, unsigned long, long) = NULL;
+
+static int
+noop_fn(void *vfp, char* buf, size_t len)
+{
+ return 0;
+}
+
+static int fmt_fld(fmtfn_t fn,void* arg,
+ char* wbuf, int w, int sign,
+ int width,int precision,int fmt,int* count)
+{
+ char prefix[8];
+ char* pp = prefix;
+ int pw = 0;
+ int len;
+
+ /* format the prefix */
+ if ((sign || (fmt & (FMTF_sgn|FMTF_blk))) &&
+ (((fmt & FMTC_MASK) == FMTC_d) || ((fmt & FMTC_MASK) == FMTC_i))) {
+ if (sign < 0)
+ *pp++ = '-';
+ else if ((fmt & FMTF_sgn))
+ *pp++ = '+';
+ else if (fmt & FMTF_blk)
+ *pp++ = ' ';
+ }
+
+ if ((fmt & FMTF_alt)) {
+ switch((fmt & FMTC_MASK)) {
+ case FMTC_X: *pp++ = '0'; *pp++ = 'X'; break;
+ case FMTC_x: *pp++ = '0'; *pp++ = 'x'; break;
+ case FMTC_o: *pp++ = '0'; if (precision>1) precision--; break;
+ }
+ }
+
+ pw = pp-prefix;
+ len = ((w < precision) ? precision : w) + pw;
+
+ if (fmt & FMTF_adj) { /* left adjust */
+ if (pw)
+ FMT(fn,arg,prefix,pw,*count);
+ if (w < precision)
+ ZEROS(fn,arg,precision-w,*count);
+ FMT(fn,arg, wbuf, w, *count);
+ if (len < width)
+ BLANKS(fn,arg,width-len,*count);
+ }
+ else if ((fmt & FMTF_pad) && (precision<0)) { /* pad zeros */
+ if (pw)
+ FMT(fn,arg, prefix, pw, *count);
+ if (w < precision)
+ ZEROS(fn, arg, precision-w, *count);
+ if (len < width)
+ ZEROS(fn,arg,width-len,*count);
+ FMT(fn,arg,wbuf,w,*count);
+ }
+ else {
+ if (len < width)
+ BLANKS(fn,arg,width-len,*count);
+ if (pw)
+ FMT(fn,arg,prefix,pw,*count);
+ if (w < precision)
+ ZEROS(fn,arg,precision-w,*count);
+ FMT(fn,arg,wbuf,w,*count);
+ }
+ return 0;
+}
+
+static int fmt_long(fmtfn_t fn,void* arg,int sign,unsigned long uval,
+ int width,int precision,int fmt,int* count)
+{
+ char buf[32];
+ int base = 10;
+ int w = 0;
+ char* dc = hex;
+ char* p = buf+sizeof(buf);
+
+ switch(fmt & FMTC_MASK) {
+ case FMTC_d:
+ case FMTC_i:
+ case FMTC_u:
+ break;
+ case FMTC_o:
+ base = 8;
+ break;
+ case FMTC_X:
+ dc = heX;
+ case FMTC_x:
+ base = 16;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* format the unsigned value */
+ if (!sign && precision) {
+ *--p = '0';
+ w++;
+ }
+ else {
+ while(uval) {
+ *--p = dc[(uval % base)];
+ uval /= base;
+ w++;
+ }
+ }
+ return fmt_fld(fn, arg, p, w, sign, width, precision, fmt, count);
+}
+
+#if SIZEOF_LONG_LONG
+
+static inline int
+do_div(unsigned_long_long *n, unsigned_long_long base)
+{
+ unsigned_long_long q = *n/base;
+ int mod = (int) (*n - q*base);
+ *n = q;
+ return mod;
+}
+
+static int fmt_long_long(fmtfn_t fn,void* arg,int sign,
+ unsigned_long_long uval,
+ int width,int precision,int fmt,int* count)
+{
+ char buf[32];
+ int base = 10;
+ int w = 0;
+ char* dc = hex;
+ char* p = buf+sizeof(buf);
+
+ switch(fmt & FMTC_MASK) {
+ case FMTC_d:
+ case FMTC_i:
+ case FMTC_u:
+ break;
+ case FMTC_o:
+ base = 8;
+ break;
+ case FMTC_X:
+ dc = heX;
+ case FMTC_x:
+ base = 16;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* format the unsigned value */
+ if (!sign && precision) {
+ *--p = '0';
+ w++;
+ }
+ else {
+ while(uval) {
+ int m = do_div(&uval,base);
+ *--p = dc[m];
+ w++;
+ }
+ }
+ return fmt_fld(fn, arg, p, w, sign, width, precision, fmt, count);
+}
+
+#endif /* #if SIZEOF_LONG_LONG */
+
+static int fmt_double(fmtfn_t fn,void*arg,double val,
+ int width, int precision, int fmt,int* count)
+{
+ int res;
+ int fi = 0;
+ char format_str[7];
+ char sbuf[32];
+ char *bufp;
+ double dexp;
+ int exp;
+ size_t max_size = 1;
+ size_t size;
+ int new_fmt = fmt;
+ int fpe_was_unmasked;
+
+ fpe_was_unmasked = erts_printf_block_fpe ? (*erts_printf_block_fpe)() : 0;
+
+ if (val < 0.0)
+ dexp = log10(-val);
+ else if (val == 0.0)
+ dexp = 0.0;
+ else
+ dexp = log10(val);
+ exp = (int) dexp;
+
+ new_fmt &= ~FMTF_sgn;
+ new_fmt &= ~FMTF_blk;
+
+ format_str[fi++] = '%';
+ if (fmt & FMTF_alt)
+ format_str[fi++] = '#';
+ if (fmt & FMTF_sgn)
+ format_str[fi++] = '+';
+ else if (fmt & FMTF_blk)
+ format_str[fi++] = ' ';
+ format_str[fi++] = '0';
+ format_str[fi++] = '.';
+ format_str[fi++] = '*';
+
+ switch(fmt & FMTC_MASK) {
+ case FMTC_G:
+ format_str[fi] = 'E';
+ goto gG_common;
+ case FMTC_g:
+ format_str[fi] = 'e';
+ gG_common:
+ if (dexp < -4.0 || exp >= precision) {
+ fi++;
+ precision--;
+ if (precision < 1)
+ precision = 1;
+ goto eE_common;
+ }
+ /* fall through ... */
+ case FMTC_f:
+ format_str[fi++] = 'f';
+ max_size += exp > 0 ? exp : 1;
+ max_size++;
+ if (precision)
+ max_size += precision;
+	else if (fmt & FMTF_alt)
+ max_size++;
+ break;
+ case FMTC_E:
+ format_str[fi++] = 'E';
+ goto eE_common;
+ case FMTC_e:
+ format_str[fi++] = 'e';
+ eE_common: {
+ int aexp;
+
+ max_size += 4;
+ if (precision)
+ max_size += precision;
+	else if (fmt & FMTF_alt)
+ max_size++;
+ aexp = exp >= 0 ? exp : -exp;
+ if (aexp < 100)
+ max_size += 2;
+ else {
+ while (aexp) {
+ max_size++;
+ aexp /= 10;
+ }
+ }
+ break;
+ }
+ default:
+ res = -EINVAL;
+ goto out;
+ }
+
+ format_str[fi++] = '\0';
+ ASSERT(fi <= sizeof(format_str));
+
+ max_size++; /* '\0' */
+
+ if (max_size < sizeof(sbuf))
+ bufp = sbuf;
+ else {
+ bufp = (char *) malloc(sizeof(char)*max_size);
+ if (!bufp) {
+ res = -ENOMEM;
+ goto out;
+ }
+ }
+
+ size = sprintf(bufp, format_str, precision, val);
+ if (size < 0) {
+ if (errno > 0)
+ res = -errno;
+ else
+ res = -EIO;
+ goto out;
+ }
+
+ ASSERT(max_size >= size);
+
+ res = fmt_fld(fn, arg, bufp, size, 0, width, 0, new_fmt, count);
+
+ if (bufp != sbuf)
+ free((void *) bufp);
+
+ out:
+ if (erts_printf_unblock_fpe)
+ (*erts_printf_unblock_fpe)(fpe_was_unmasked);
+ return res;
+}
+
+int erts_printf_format(fmtfn_t fn, void* arg, char* fmt, va_list ap)
+{
+ char* ptr0 = fmt;
+ char* ptr = ptr0;
+ int count = 0;
+ int n;
+ int res = 0;
+
+ while(*ptr) {
+ unsigned long ul_val;
+ int fmt = 0;
+ int width = -1;
+ int precision = -1;
+
+ if (res < 0)
+ return res;
+
+ if (*ptr == '%') {
+ if ((n=ptr-ptr0))
+ FMT(fn,arg,ptr0,n,count);
+ ptr++;
+
+ do_flag:
+ switch(*ptr) {
+ case '#': fmt |= FMTF_alt; ptr++; goto do_flag;
+ case '0': fmt |= FMTF_pad; ptr++; goto do_flag;
+ case '-': fmt |= FMTF_adj; ptr++; goto do_flag;
+ case ' ': fmt |= FMTF_blk; ptr++; goto do_flag;
+ case '+': fmt |= FMTF_sgn; ptr++; goto do_flag;
+ case '\'': fmt |= FMTF_cnv; ptr++; goto do_flag;
+ case 'I': fmt |= FMTF_cnV; ptr++; goto do_flag;
+ }
+
+ /* width */
+ if (*ptr == '*') {
+ width = va_arg(ap, int);
+ ptr++;
+ }
+ else if (isdigit((int) *ptr)) {
+ width = *ptr++ - '0';
+ while(isdigit((int) *ptr))
+ width = 10*width + (*ptr++ - '0');
+ }
+
+ /* precision */
+ if (*ptr == '.') {
+ ptr++;
+ if (*ptr == '*') {
+ precision = va_arg(ap, int);
+ ptr++;
+ }
+ else if (isdigit((int) *ptr)) {
+ precision = *ptr++ - '0';
+ while(isdigit((int) *ptr))
+ precision = 10*precision + (*ptr++ - '0');
+ }
+ }
+
+ /* length modifier */
+ switch(*ptr) {
+ case 'b': {
+ ptr++;
+ if (*ptr == 'p') {
+ ptr++;
+#if SIZEOF_INT == SIZEOF_VOID_P
+#elif SIZEOF_LONG == SIZEOF_VOID_P
+ fmt |= FMTL_l;
+#elif SIZEOF_LONG_LONG == SIZEOF_VOID_P
+ fmt |= FMTL_ll;
+#else
+#error No integer datatype with the same size as 'void *' found
+#endif
+ }
+ else {
+ int bits = 0;
+ while(isdigit((int) *ptr))
+ bits = 10*bits + (*ptr++ - '0');
+ switch (bits) {
+ case 64:
+#if SIZEOF_INT == 8
+#elif SIZEOF_LONG == 8
+ fmt |= FMTL_l;
+#elif SIZEOF_LONG_LONG == 8
+ fmt |= FMTL_ll;
+#else
+#error No 64-bit integer datatype found
+#endif
+ break;
+ case 32:
+#if SIZEOF_INT == 4
+#elif SIZEOF_SHORT == 4
+ fmt |= FMTL_h;
+#elif SIZEOF_LONG == 4
+ fmt |= FMTL_l;
+#elif SIZEOF_LONG_LONG == 4
+ fmt |= FMTL_ll;
+#else
+#error No 32-bit integer datatype found
+#endif
+ break;
+ case 16:
+#if SIZEOF_INT == 2
+#elif SIZEOF_SHORT == 2
+ fmt |= FMTL_h;
+#elif SIZEOF_LONG == 2
+ fmt |= FMTL_l;
+#else
+#error No 16-bit integer datatype found
+#endif
+			break;
+		    case 8:
+#if SIZEOF_CHAR == 1
+ fmt |= FMTL_hh;
+#else
+#error Unexpected size of char
+#endif
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+ break;
+ }
+ case 'h':
+ ptr++;
+ if (*ptr == 'h') {
+ ptr++;
+ fmt |= FMTL_hh;
+ }
+ else
+ fmt |= FMTL_h;
+ break;
+ case 'l':
+ ptr++;
+ if (*ptr == 'l') {
+ ptr++;
+#if SIZEOF_LONG_LONG
+ fmt |= FMTL_ll;
+#else
+ fmt |= FMTL_l;
+#endif
+ }
+ else
+ fmt |= FMTL_l;
+ break;
+ case 'L': ptr++; fmt |= FMTL_L; break;
+ case 'j': ptr++; fmt |= FMTL_j; break;
+ case 't': ptr++; fmt |= FMTL_t; break;
+ }
+
+ /* specifier */
+ switch(*ptr) {
+ case 'd': ptr++; fmt |= FMTC_d; break;
+ case 'i': ptr++; fmt |= FMTC_i; break;
+ case 'o': ptr++; fmt |= FMTC_o; break;
+ case 'u': ptr++; fmt |= FMTC_u; break;
+ case 'x': ptr++; fmt |= FMTC_x; break;
+ case 'X': ptr++; fmt |= FMTC_X; break;
+ case 'e': ptr++; fmt |= FMTC_e; break;
+ case 'E': ptr++; fmt |= FMTC_E; break;
+ case 'f': ptr++; fmt |= FMTC_f; break;
+ case 'g': ptr++; fmt |= FMTC_g; break;
+ case 'G': ptr++; fmt |= FMTC_G; break;
+ case 'c': ptr++; fmt |= FMTC_c; break;
+ case 's': ptr++; fmt |= FMTC_s; break;
+ case 'p': ptr++; fmt |= FMTC_p; break;
+ case 'n': ptr++; fmt |= FMTC_n; break;
+ case 'T': ptr++; fmt |= FMTC_T; break;
+ case '%':
+ FMT(fn,arg,ptr,1,count);
+ ptr++;
+ ptr0 = ptr;
+ continue;
+ default:
+ /* ignore */
+ ptr0 = ptr;
+ continue;
+ }
+
+ switch(fmt & FMTC_MASK) {
+ case FMTC_d:
+ case FMTC_i:
+ switch(fmt & FMTL_MASK) {
+ case FMTL_hh: {
+ signed char tval = (signed char) va_arg(ap,int);
+ ul_val = (unsigned long) (tval < 0 ? (-tval) : tval);
+ res = fmt_long(fn,arg,SIGN(tval),ul_val,
+ width,precision,fmt,&count);
+ break;
+ }
+ case FMTL_h: {
+ signed short tval = (signed short) va_arg(ap,int);
+ ul_val = (unsigned long) (tval < 0 ? (-tval) : tval);
+ res = fmt_long(fn,arg,SIGN(tval),ul_val,
+ width,precision,fmt,&count);
+ break;
+ }
+ case FMTL_l: {
+ signed long tval = (signed long) va_arg(ap,long);
+ ul_val = (unsigned long) (tval < 0 ? (-tval) : tval);
+ res = fmt_long(fn,arg,SIGN(tval),ul_val,
+ width,precision,fmt,&count);
+ break;
+ }
+#if SIZEOF_LONG_LONG
+ case FMTL_ll: {
+ unsigned_long_long ull_val;
+ signed_long_long tval;
+ tval = (signed_long_long) va_arg(ap,long_long);
+ ull_val = (unsigned_long_long) (tval < 0 ? (-tval) : tval);
+ res = fmt_long_long(fn,arg,SIGN(tval),ull_val,
+ width,precision,fmt,&count);
+ break;
+ }
+#endif
+ default: {
+ signed int tval = (signed int) va_arg(ap,int);
+ ul_val = (unsigned long) (tval < 0 ? (-tval) : tval);
+ res = fmt_long(fn,arg,SIGN(tval),ul_val,
+ width,precision,fmt,&count);
+ break;
+ }
+ }
+ break;
+ case FMTC_o:
+ case FMTC_u:
+ case FMTC_x:
+ case FMTC_X:
+ switch(fmt & FMTL_MASK) {
+ case FMTL_hh: {
+ unsigned char tval = (unsigned char) va_arg(ap,int);
+ ul_val = (unsigned long) tval;
+ res = fmt_long(fn,arg,USIGN(tval),ul_val,
+ width,precision,fmt,&count);
+ break;
+ }
+ case FMTL_h: {
+ unsigned short tval = (unsigned short) va_arg(ap,int);
+ ul_val = (unsigned long) tval;
+ res = fmt_long(fn,arg,USIGN(tval),ul_val,
+ width,precision,fmt,&count);
+ break;
+ }
+ case FMTL_l: {
+ ul_val = (unsigned long) va_arg(ap,long);
+ res = fmt_long(fn,arg,USIGN(ul_val),ul_val,
+ width,precision,fmt,&count);
+ break;
+ }
+#if SIZEOF_LONG_LONG
+ case FMTL_ll: {
+ unsigned_long_long ull_val;
+ ull_val = (signed_long_long) va_arg(ap,long_long);
+ res = fmt_long_long(fn,arg,USIGN(ull_val),ull_val,
+ width,precision,fmt,&count);
+ break;
+ }
+#endif
+ default: {
+ unsigned int tval = (unsigned int) va_arg(ap,int);
+ ul_val = (unsigned long) tval;
+ res = fmt_long(fn,arg,USIGN(tval),ul_val,
+ width,precision,fmt,&count);
+ break;
+ }
+ }
+ break;
+ case FMTC_e:
+ case FMTC_E:
+ case FMTC_f:
+ case FMTC_g:
+ case FMTC_G:
+ if (precision < 0)
+ precision = 6;
+ switch(fmt & FMTL_MASK) {
+ case FMTL_L:
+ return -EINVAL;
+ break;
+ default:
+ res = fmt_double(fn,arg,va_arg(ap,double),
+ width,precision,fmt,&count);
+ break;
+ }
+ break;
+
+ case FMTC_c: {
+	    /* FIXME: add wide-char support (l modifier) */
+ char c = va_arg(ap,int);
+ int len = 1;
+ if (precision == 0)
+ len = 0;
+ if (width > 0 && !(fmt & FMTF_adj)) {
+ if (width > len)
+ BLANKS(fn, arg, width - len, count);
+ }
+ if (len)
+ FMT(fn,arg,&c,len,count);
+ if (width > len && fmt & FMTF_adj)
+ BLANKS(fn, arg, width - len, count);
+ break;
+ }
+
+ case FMTC_s: {
+ char* str = va_arg(ap,char*);
+ int len = strlen(str);
+ if (precision >= 0 && precision < len)
+ len = precision;
+ if (width > 0 && !(fmt & FMTF_adj)) {
+ if (width > len)
+ BLANKS(fn, arg, width - len, count);
+ }
+ if (len)
+ FMT(fn,arg,str,len,count);
+ if (width > len && fmt & FMTF_adj)
+ BLANKS(fn, arg, width - len, count);
+ break;
+ }
+
+ case FMTC_p: {
+ void* addr = va_arg(ap, void*);
+
+ res = fmt_long(fn,
+ arg,
+ USIGN((unsigned long) addr),
+ (unsigned long) addr,
+ width < 0 ? ((int) 2*sizeof(void *)) : width,
+ (precision < 0
+ ? ((int) 2*sizeof(void *))
+ : precision),
+ FMTC_x|FMTF_pad|FMTF_alt,
+ &count);
+ break;
+ }
+
+ case FMTC_n:
+ switch(fmt & FMTL_MASK) {
+ case FMTL_hh: *va_arg(ap,char*) = count; break;
+ case FMTL_h: *va_arg(ap,short*) = count; break;
+ case FMTL_l: *va_arg(ap,long*) = count; break;
+#if SIZEOF_LONG_LONG
+ case FMTL_ll: *va_arg(ap,long_long*) = count; break;
+#endif
+ default: *va_arg(ap,int*) = count; break;
+ }
+ break;
+ case FMTC_T: {
+ long prec;
+ unsigned long eterm;
+ if (!erts_printf_eterm_func)
+ return -EINVAL;
+ if (precision < 0)
+ prec = 100000;
+ else if (precision == INT_MAX)
+ prec = LONG_MAX;
+ else
+ prec = (long) precision;
+ eterm = va_arg(ap, unsigned long);
+ if (width > 0 && !(fmt & FMTF_adj)) {
+ res = (*erts_printf_eterm_func)(noop_fn, NULL, eterm, prec);
+ if (res < 0)
+ return res;
+ if (width > res)
+ BLANKS(fn, arg, width - res, count);
+ }
+ res = (*erts_printf_eterm_func)(fn, arg, eterm, prec);
+ if (res < 0)
+ return res;
+ count += res;
+ if (width > res && fmt & FMTF_adj)
+ BLANKS(fn, arg, width - res, count);
+ break;
+ }
+ default:
+ if ((n=ptr-ptr0))
+ FMT(fn,arg,ptr0,n,count);
+ }
+ ptr0 = ptr;
+ }
+ else
+ ptr++;
+ }
+
+ if ((n=ptr-ptr0))
+ FMT(fn,arg,ptr0,n,count);
+ return count;
+}
+
+
+int
+erts_printf_char(fmtfn_t fn, void *arg, char c)
+{
+ return (*fn)(arg, &c, 1);
+}
+
+int
+erts_printf_string(fmtfn_t fn, void *arg, char *str)
+{
+ size_t sz = strlen(str);
+ return (*fn)(arg, str, sz);
+}
+
+int
+erts_printf_buf(fmtfn_t fn, void *arg, char *buf, size_t sz)
+{
+ return (*fn)(arg, buf, sz);
+}
+
+int
+erts_printf_pointer(fmtfn_t fn, void *arg, void *ptr)
+{
+ int count = 0;
+ int res = fmt_long(fn, arg, USIGN((unsigned long) ptr),
+ (unsigned long) ptr, 2*sizeof(void *),
+ 2*sizeof(void *), FMTC_x|FMTF_pad|FMTF_alt, &count);
+ if (res < 0)
+ return res;
+ return count;
+}
+
+int
+erts_printf_ulong(fmtfn_t fn, void *arg, char conv, int pad, int width,
+ unsigned long val)
+{
+ int count = 0;
+ int res;
+ int fmt = 0;
+ int prec = -1;
+ switch (conv) {
+ case 'o': fmt |= FMTC_o; break;
+ case 'u': fmt |= FMTC_u; break;
+ case 'x': fmt |= FMTC_x; break;
+ case 'X': fmt |= FMTC_X; break;
+ case 'p': fmt |= FMTC_p; break;
+ default:
+ return -EINVAL;
+ }
+ if (pad)
+ prec = width;
+ res = fmt_long(fn, arg, USIGN(val), val, width, prec, fmt, &count);
+ if (res < 0)
+ return res;
+ return count;
+}
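+/*
+ * Illustrative usage sketch (not part of the original source; the exact
+ * output depends on how fmt_long() treats the precision field):
+ *
+ *     erts_printf_ulong(fn, arg, 'x', 1, 8, 0x1f2dUL);
+ *
+ * is expected to emit "00001f2d", since a non-zero 'pad' argument sets
+ * the precision to the field width before fmt_long() is called.
+ */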
+
+extern int
+erts_printf_slong(fmtfn_t fn, void *arg, char conv, int pad, int width,
+ signed long val)
+{
+ int count = 0;
+ int res;
+ int fmt = 0;
+ int prec = -1;
+ unsigned long ul_val;
+ switch (conv) {
+ case 'd': fmt |= FMTC_d; break;
+ case 'i': fmt |= FMTC_i; break;
+ case 'o': fmt |= FMTC_o; break;
+ case 'x': fmt |= FMTC_x; break;
+ case 'X': fmt |= FMTC_X; break;
+ default:
+ return -EINVAL;
+ }
+ if (pad)
+ prec = width;
+ ul_val = (unsigned long) (val < 0 ? -val : val);
+ res = fmt_long(fn, arg, SIGN(val), ul_val, width, prec, fmt, &count);
+ if (res < 0)
+ return res;
+ return count;
+}
+
+int
+erts_printf_double(fmtfn_t fn, void *arg, char conv, int precision, int width,
+ double val)
+{
+ int count = 0;
+ int res;
+ int fmt = 0;
+ switch (conv) {
+ case 'e': fmt |= FMTC_e; break;
+ case 'E': fmt |= FMTC_E; break;
+ case 'f': fmt |= FMTC_f; break;
+ case 'g': fmt |= FMTC_g; break;
+ case 'G': fmt |= FMTC_G; break;
+ default:
+ return -EINVAL;
+ }
+ res = fmt_double(fn, arg, val, width, precision, fmt, &count);
+ if (res < 0)
+ return res;
+ return count;
+}
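+
+/*
+ * Illustrative usage sketch (not part of the original source): with the
+ * 'f' conversion, a precision of 2 and no field width,
+ *
+ *     erts_printf_double(fn, arg, 'f', 2, 0, 3.14159);
+ *
+ * is expected to emit "3.14" via fmt_double().
+ */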
diff --git a/erts/lib_src/common/ethread.c b/erts/lib_src/common/ethread.c
new file mode 100644
index 0000000000..eb4d0cad20
--- /dev/null
+++ b/erts/lib_src/common/ethread.c
@@ -0,0 +1,3346 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2004-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Description: A Thread library for use in the ERTS and other OTP
+ * applications.
+ * Author: Rickard Green
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#undef ETHR_STACK_GUARD_SIZE
+
+#if defined(ETHR_PTHREADS)
+
+#ifdef ETHR_TIME_WITH_SYS_TIME
+# include <time.h>
+# include <sys/time.h>
+#else
+# ifdef ETHR_HAVE_SYS_TIME_H
+# include <sys/time.h>
+# else
+# include <time.h>
+# endif
+#endif
+#include <sys/types.h>
+#include <unistd.h>
+#include <signal.h>
+
+#ifdef ETHR_HAVE_PTHREAD_ATTR_SETGUARDSIZE
+# define ETHR_STACK_GUARD_SIZE (pagesize)
+#endif
+
+#elif defined(ETHR_WIN32_THREADS)
+
+#undef WIN32_LEAN_AND_MEAN
+#define WIN32_LEAN_AND_MEAN
+#include <windows.h>
+#include <process.h>
+#include <winerror.h>
+
+#else
+#error "Missing thread implementation"
+#endif
+
+#include <limits.h>
+
+#define ETHR_FORCE_INLINE_FUNCS
+#define ETHR_INLINE_FUNC_NAME_(X) X ## __
+#include "ethread.h"
+
+#ifndef ETHR_HAVE_ETHREAD_DEFINES
+#error Missing configure defines
+#endif
+
+/*
+ * ----------------------------------------------------------------------------
+ * Common stuff
+ * ----------------------------------------------------------------------------
+ */
+
+#define ETHR_MAX_THREADS 2048 /* Has to be an even power of 2 */
+
+static int ethr_not_inited = 1;
+
+#define ASSERT(A) ETHR_ASSERT((A))
+
+static void *(*allocp)(size_t) = malloc;
+static void *(*reallocp)(void *, size_t) = realloc;
+static void (*freep)(void *) = free;
+
+#ifndef ETHR_HAVE_OPTIMIZED_ATOMIC_OPS
+ethr_atomic_protection_t ethr_atomic_protection__[1 << ETHR_ATOMIC_ADDR_BITS];
+#endif
+
+void *(*thread_create_prepare_func)(void) = NULL;
+void (*thread_create_parent_func)(void *) = NULL;
+void (*thread_create_child_func)(void *) = NULL;
+
+typedef struct ethr_xhndl_list_ ethr_xhndl_list;
+struct ethr_xhndl_list_ {
+ ethr_xhndl_list *next;
+ void (*funcp)(void);
+};
+
+static size_t pagesize;
+#define ETHR_PAGE_ALIGN(SZ) (((((size_t) (SZ)) - 1)/pagesize + 1)*pagesize)
+static size_t min_stack_size; /* kilo words */
+static size_t max_stack_size; /* kilo words */
+#define ETHR_B2KW(B) ((((size_t) (B)) - 1)/(sizeof(void *)*1024) + 1)
+#define ETHR_KW2B(KW) (((size_t) (KW))*sizeof(void *)*1024)
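+/*
+ * Stack sizes are kept in "kilo words", i.e. units of 1024*sizeof(void *)
+ * bytes, and ETHR_B2KW rounds upwards. For example, on a 64-bit system
+ * ETHR_B2KW(8192) is 1, ETHR_B2KW(8193) is 2, and ETHR_KW2B(1) is 8192.
+ * ETHR_PAGE_ALIGN rounds a size up to the next multiple of the page size.
+ */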
+
+ethr_mutex xhndl_mtx;
+ethr_xhndl_list *xhndl_list;
+
+static int
+init_common(ethr_init_data *id)
+{
+ int res;
+ if (id) {
+ allocp = id->alloc;
+ reallocp = id->realloc;
+ freep = id->free;
+ thread_create_prepare_func = id->thread_create_prepare_func;
+ thread_create_parent_func = id->thread_create_parent_func;
+ thread_create_child_func = id->thread_create_child_func;
+ }
+ if (!allocp || !reallocp || !freep)
+ return EINVAL;
+
+#ifdef _SC_PAGESIZE
+ pagesize = (size_t) sysconf(_SC_PAGESIZE);
+#elif defined(HAVE_GETPAGESIZE)
+ pagesize = (size_t) getpagesize();
+#else
+ pagesize = (size_t) 4*1024; /* Guess 4 KB */
+#endif
+
+ /* User needs at least 4 KB */
+ min_stack_size = 4*1024;
+#if SIZEOF_VOID_P == 8
+ /* Double that on 64-bit archs */
+ min_stack_size *= 2;
+#endif
+    /* On some systems, up to about 4 KB of the stack is used by the system */
+ min_stack_size += 4*1024;
+ /* There should be room for signal handlers */
+#ifdef SIGSTKSZ
+ min_stack_size += SIGSTKSZ;
+#else
+ min_stack_size += pagesize;
+#endif
+ /* The system may think that we need more stack */
+#if defined(PTHREAD_STACK_MIN)
+ if (min_stack_size < PTHREAD_STACK_MIN)
+ min_stack_size = PTHREAD_STACK_MIN;
+#elif defined(_SC_THREAD_STACK_MIN)
+ {
+ size_t thr_min_stk_sz = (size_t) sysconf(_SC_THREAD_STACK_MIN);
+ if (min_stack_size < thr_min_stk_sz)
+ min_stack_size = thr_min_stk_sz;
+ }
+#endif
+    /* On at least some platforms, the guard is included in the stack size
+       passed when creating threads */
+#ifdef ETHR_STACK_GUARD_SIZE
+ min_stack_size += ETHR_STACK_GUARD_SIZE;
+#endif
+ min_stack_size = ETHR_PAGE_ALIGN(min_stack_size);
+
+ min_stack_size = ETHR_B2KW(min_stack_size);
+
+ max_stack_size = 32*1024*1024;
+#if SIZEOF_VOID_P == 8
+ max_stack_size *= 2;
+#endif
+ max_stack_size = ETHR_B2KW(max_stack_size);
+
+ xhndl_list = NULL;
+
+ res = ethr_mutex_init(&xhndl_mtx);
+ if (res != 0)
+ return res;
+
+ res = ethr_mutex_set_forksafe(&xhndl_mtx);
+ if (res != 0 && res != ENOTSUP)
+ return res;
+
+ return 0;
+}
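+
+/*
+ * Worked example of the minimum stack size computation above (the
+ * SIGSTKSZ and PTHREAD_STACK_MIN values are assumptions for a typical
+ * 64-bit Linux system, not taken from this source): 4 KB doubled to 8 KB,
+ * plus 4 KB of system use, plus SIGSTKSZ (8 KB) gives 20 KB, which already
+ * exceeds a PTHREAD_STACK_MIN of 16 KB; page aligning leaves 20480 bytes,
+ * and ETHR_B2KW(20480) rounds that up to 3 kilo words (24 KB).
+ */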
+
+int
+ethr_install_exit_handler(void (*funcp)(void))
+{
+ ethr_xhndl_list *xhp;
+ int res;
+
+#if ETHR_XCHK
+ if (ethr_not_inited) {
+ ASSERT(0);
+ return EACCES;
+ }
+#endif
+
+ if (!funcp)
+ return EINVAL;
+
+ xhp = (ethr_xhndl_list *) (*allocp)(sizeof(ethr_xhndl_list));
+ if (!xhp)
+ return ENOMEM;
+
+ res = ethr_mutex_lock__(&xhndl_mtx);
+ if (res != 0) {
+ (*freep)((void *) xhp);
+ return res;
+ }
+
+ xhp->funcp = funcp;
+ xhp->next = xhndl_list;
+ xhndl_list = xhp;
+
+ res = ethr_mutex_unlock__(&xhndl_mtx);
+ if (res != 0)
+ abort();
+
+ return res;
+}
+
+static void
+run_exit_handlers(void)
+{
+ int res;
+ ethr_xhndl_list *xhp;
+
+ res = ethr_mutex_lock__(&xhndl_mtx);
+ if (res != 0)
+ abort();
+
+ xhp = xhndl_list;
+
+ res = ethr_mutex_unlock__(&xhndl_mtx);
+ if (res != 0)
+ abort();
+
+ for (; xhp; xhp = xhp->next)
+ (*xhp->funcp)();
+}
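+
+/*
+ * Usage sketch for the exit handler facility (my_cleanup is a
+ * hypothetical application callback, not part of this file):
+ *
+ *     static void my_cleanup(void);
+ *     ...
+ *     int res = ethr_install_exit_handler(my_cleanup);
+ *     if (res != 0)
+ *         ... handle the error ...
+ *
+ * Installed handlers are run by thr_exit_cleanup(), most recently
+ * installed first, when an ethread terminates.
+ */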
+
+#if defined(ETHR_PTHREADS)
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\
+ * pthread implementation *
+\* */
+
+typedef struct {
+ pthread_mutex_t mtx;
+ pthread_cond_t cnd;
+ int initialized;
+ void *(*thr_func)(void *);
+ void *arg;
+ void *prep_func_res;
+} thr_wrap_data_;
+
+static int no_ethreads;
+static ethr_mutex no_ethrs_mtx;
+
+#ifndef ETHR_HAVE_PTHREAD_ATFORK
+#define ETHR_HAVE_PTHREAD_ATFORK 0
+#endif
+
+#if !ETHR_HAVE_PTHREAD_ATFORK
+#warning "Cannot enforce fork-safety"
+#endif
+
+/*
+ * ----------------------------------------------------------------------------
+ * Static functions
+ * ----------------------------------------------------------------------------
+ */
+
+/*
+ * Functions with the safe_ prefix abort on failure. They are used when
+ * we cannot recover from a failure.
+ */
+
+static ETHR_INLINE void
+safe_mutex_lock(pthread_mutex_t *mtxp)
+{
+ int res = pthread_mutex_lock(mtxp);
+ if (res != 0)
+ abort();
+}
+
+static ETHR_INLINE void
+safe_mutex_unlock(pthread_mutex_t *mtxp)
+{
+ int res = pthread_mutex_unlock(mtxp);
+ if (res != 0)
+ abort();
+}
+
+static ETHR_INLINE void
+safe_cond_signal(pthread_cond_t *cndp)
+{
+ int res = pthread_cond_signal(cndp);
+ if (res != 0)
+ abort();
+}
+
+#ifdef ETHR_HAVE_ETHR_REC_MUTEX_INIT
+
+static volatile int rec_mtx_attr_need_init = 1;
+static pthread_mutexattr_t rec_mtx_attr;
+
+static int init_rec_mtx_attr(void);
+
+#endif
+
+#if ETHR_HAVE_PTHREAD_ATFORK
+
+static ethr_mutex forksafe_mtx = ETHR_MUTEX_INITER;
+
+static void lock_mutexes(void)
+{
+ ethr_mutex *m = &forksafe_mtx;
+ do {
+
+ safe_mutex_lock(&m->pt_mtx);
+
+ m = m->next;
+
+ } while (m != &forksafe_mtx);
+}
+
+static void unlock_mutexes(void)
+{
+ ethr_mutex *m = forksafe_mtx.prev;
+ do {
+
+ safe_mutex_unlock(&m->pt_mtx);
+
+ m = m->prev;
+
+ } while (m->next != &forksafe_mtx);
+}
+
+#if ETHR_INIT_MUTEX_IN_CHILD_AT_FORK
+
+static void reinit_mutexes(void)
+{
+ ethr_mutex *m = forksafe_mtx.prev;
+ do {
+ pthread_mutexattr_t *attrp = NULL;
+
+#ifdef ETHR_HAVE_ETHR_REC_MUTEX_INIT
+ if (m->is_rec_mtx) {
+ if (rec_mtx_attr_need_init) {
+ int res = init_rec_mtx_attr();
+ if (res != 0)
+ abort();
+ }
+ attrp = &rec_mtx_attr;
+ }
+#endif
+ if (pthread_mutex_init(&m->pt_mtx, attrp) != 0)
+ abort();
+
+ m = m->prev;
+
+ } while (m->next != &forksafe_mtx);
+}
+
+#endif
+
+static int
+init_forksafe(void)
+{
+ static int init_done = 0;
+ int res = 0;
+
+ if (init_done)
+ return res;
+
+ forksafe_mtx.prev = &forksafe_mtx;
+ forksafe_mtx.next = &forksafe_mtx;
+
+ res = pthread_atfork(lock_mutexes,
+ unlock_mutexes,
+#if ETHR_INIT_MUTEX_IN_CHILD_AT_FORK
+ reinit_mutexes
+#else
+ unlock_mutexes
+#endif
+ );
+
+ init_done = 1;
+ return res;
+}
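+
+/*
+ * The pthread_atfork() handlers registered above implement the
+ * fork-safety scheme: the prepare handler locks every mutex on the
+ * forksafe list so that none of them is held in an inconsistent state
+ * when fork() copies the address space, and the parent and child
+ * handlers unlock them again. When ETHR_INIT_MUTEX_IN_CHILD_AT_FORK is
+ * set, the child instead reinitializes the mutexes, for platforms where
+ * a mutex copied by fork() cannot simply be unlocked.
+ */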
+
+#endif
+
+
+#ifdef ETHR_HAVE_ETHR_REC_MUTEX_INIT
+
+#if defined(ETHR_HAVE_PTHREAD_MUTEXATTR_SETTYPE)
+
+#define SET_REC_MUTEX_ATTR(AP) \
+ pthread_mutexattr_settype((AP), PTHREAD_MUTEX_RECURSIVE);
+
+#elif defined(ETHR_HAVE_PTHREAD_MUTEXATTR_SETKIND_NP)
+
+#define SET_REC_MUTEX_ATTR(AP) \
+ pthread_mutexattr_setkind_np((AP), PTHREAD_MUTEX_RECURSIVE_NP);
+
+#else
+
+#error "Don't know how to set recursive mutex attributes"
+
+#endif
+
+static int
+init_rec_mtx_attr(void)
+{
+ int res, mres;
+ static pthread_mutex_t attrinit_mtx = PTHREAD_MUTEX_INITIALIZER;
+
+ mres = pthread_mutex_lock(&attrinit_mtx);
+ if (mres != 0)
+ return mres;
+ /* Got here under race conditions; check again ... */
+ if (!rec_mtx_attr_need_init)
+ res = 0;
+ else {
+ res = pthread_mutexattr_init(&rec_mtx_attr);
+ if (res == 0) {
+ res = SET_REC_MUTEX_ATTR(&rec_mtx_attr);
+ if (res == 0)
+ rec_mtx_attr_need_init = 0;
+ else
+ (void) pthread_mutexattr_destroy(&rec_mtx_attr);
+ }
+ }
+
+ mres = pthread_mutex_unlock(&attrinit_mtx);
+ if (mres != 0)
+ return mres;
+ return res;
+}
+
+#endif /* #if ETHR_HAVE_ETHR_REC_MUTEX_INIT */
+
+static ETHR_INLINE void thr_exit_cleanup(void)
+{
+ run_exit_handlers();
+ safe_mutex_lock(&no_ethrs_mtx.pt_mtx);
+ ASSERT(no_ethreads > 0);
+ no_ethreads--;
+ safe_mutex_unlock(&no_ethrs_mtx.pt_mtx);
+}
+
+static void *thr_wrapper(void *vtwd)
+{
+ void *res;
+ thr_wrap_data_ *twd = (thr_wrap_data_ *) vtwd;
+ void *(*thr_func)(void *) = twd->thr_func;
+ void *arg = twd->arg;
+
+ safe_mutex_lock(&twd->mtx);
+
+ if (thread_create_child_func)
+ (*thread_create_child_func)(twd->prep_func_res);
+
+ twd->initialized = 1;
+
+ safe_cond_signal(&twd->cnd);
+ safe_mutex_unlock(&twd->mtx);
+
+ res = (*thr_func)(arg);
+ thr_exit_cleanup();
+ return res;
+}
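+
+/*
+ * Thread creation handshake: ethr_thr_create() locks twd.mtx, spawns the
+ * thread, and waits on twd.cnd; the child runs the
+ * thread_create_child_func callback (if any), sets twd.initialized and
+ * signals the condition before entering the user supplied thread
+ * function. Assuming the wait itself does not fail, the child callback
+ * has therefore completed before ethr_thr_create() returns.
+ */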
+
+
+/*
+ * ----------------------------------------------------------------------------
+ * Exported functions
+ * ----------------------------------------------------------------------------
+ */
+
+int
+ethr_init(ethr_init_data *id)
+{
+ int res;
+
+ if (!ethr_not_inited)
+ return EINVAL;
+
+ ethr_not_inited = 0;
+
+ res = init_common(id);
+ if (res != 0)
+ goto error;
+
+#if ETHR_HAVE_PTHREAD_ATFORK
+ init_forksafe();
+#endif
+
+ no_ethreads = 1;
+ res = ethr_mutex_init(&no_ethrs_mtx);
+ if (res != 0)
+ goto error;
+ res = ethr_mutex_set_forksafe(&no_ethrs_mtx);
+ if (res != 0 && res != ENOTSUP)
+ goto error;
+
+#ifndef ETHR_HAVE_OPTIMIZED_ATOMIC_OPS
+ {
+ int i;
+ for (i = 0; i < (1 << ETHR_ATOMIC_ADDR_BITS); i++) {
+#ifdef ETHR_HAVE_PTHREAD_SPIN_LOCK
+ res = pthread_spin_init(&ethr_atomic_protection__[i].u.spnlck, 0);
+#else
+ res = ethr_mutex_init(&ethr_atomic_protection__[i].u.mtx);
+#endif
+ if (res != 0)
+ goto error;
+ }
+ }
+#endif
+
+ return 0;
+
+ error:
+ ethr_not_inited = 1;
+ return res;
+
+}
+
+int
+ethr_thr_create(ethr_tid *tid, void * (*func)(void *), void *arg,
+ ethr_thr_opts *opts)
+{
+ thr_wrap_data_ twd;
+ pthread_attr_t attr;
+ int res, dres;
+ int use_stack_size = (opts && opts->suggested_stack_size >= 0
+ ? opts->suggested_stack_size
+ : -1 /* Use system default */);
+
+#ifdef ETHR_MODIFIED_DEFAULT_STACK_SIZE
+ if (use_stack_size < 0)
+ use_stack_size = ETHR_MODIFIED_DEFAULT_STACK_SIZE;
+#endif
+
+ twd.initialized = 0;
+ twd.thr_func = func;
+ twd.arg = arg;
+
+#if ETHR_XCHK
+ if (ethr_not_inited) {
+ ASSERT(0);
+ return EACCES;
+ }
+ if (!tid || !func) {
+ ASSERT(0);
+ return EINVAL;
+ }
+#endif
+
+    /* Call the prepare func if it exists */
+ if (thread_create_prepare_func)
+ twd.prep_func_res = (*thread_create_prepare_func)();
+ else
+ twd.prep_func_res = NULL;
+
+    /* Set some thread attributes */
+ res = pthread_attr_init(&attr);
+ if (res != 0)
+ goto cleanup_parent_func;
+ res = pthread_mutex_init(&twd.mtx, NULL);
+ if (res != 0)
+ goto cleanup_attr_destroy;
+ res = pthread_cond_init(&twd.cnd, NULL);
+ if (res != 0)
+ goto cleanup_mutex_destroy;
+
+ /* Schedule child thread in system scope (if possible) ... */
+ res = pthread_attr_setscope(&attr, PTHREAD_SCOPE_SYSTEM);
+ if (res != 0 && res != ENOTSUP)
+ goto cleanup_cond_destroy;
+
+ if (use_stack_size >= 0) {
+ size_t suggested_stack_size = (size_t) use_stack_size;
+ size_t stack_size;
+#ifdef DEBUG
+        suggested_stack_size /= 2; /* Make sure we have some margin */
+#endif
+#ifdef ETHR_STACK_GUARD_SIZE
+        /* On at least some platforms, the guard is included in the stack size
+           passed when creating threads */
+ suggested_stack_size += ETHR_B2KW(ETHR_STACK_GUARD_SIZE);
+#endif
+ if (suggested_stack_size < min_stack_size)
+ stack_size = ETHR_KW2B(min_stack_size);
+ else if (suggested_stack_size > max_stack_size)
+ stack_size = ETHR_KW2B(max_stack_size);
+ else
+ stack_size = ETHR_PAGE_ALIGN(ETHR_KW2B(suggested_stack_size));
+ (void) pthread_attr_setstacksize(&attr, stack_size);
+ }
+
+#ifdef ETHR_STACK_GUARD_SIZE
+ (void) pthread_attr_setguardsize(&attr, ETHR_STACK_GUARD_SIZE);
+#endif
+
+ /* Detached or joinable... */
+ res = pthread_attr_setdetachstate(&attr,
+ (opts && opts->detached
+ ? PTHREAD_CREATE_DETACHED
+ : PTHREAD_CREATE_JOINABLE));
+ if (res != 0)
+ goto cleanup_cond_destroy;
+
+ res = pthread_mutex_lock(&twd.mtx);
+
+ if (res != 0)
+ goto cleanup_cond_destroy;
+
+ safe_mutex_lock(&no_ethrs_mtx.pt_mtx);
+ if (no_ethreads < ETHR_MAX_THREADS) {
+ no_ethreads++;
+ safe_mutex_unlock(&no_ethrs_mtx.pt_mtx);
+ }
+ else {
+ res = EAGAIN;
+ safe_mutex_unlock(&no_ethrs_mtx.pt_mtx);
+ goto cleanup_mutex_unlock;
+ }
+
+ res = pthread_create((pthread_t *) tid, &attr, thr_wrapper, (void *) &twd);
+
+ if (res != 0) {
+ safe_mutex_lock(&no_ethrs_mtx.pt_mtx);
+ ASSERT(no_ethreads > 0);
+ no_ethreads--;
+ safe_mutex_unlock(&no_ethrs_mtx.pt_mtx);
+ }
+ else {
+
+ /* Wait for child to initialize... */
+ while (!twd.initialized) {
+ res = pthread_cond_wait(&twd.cnd, &twd.mtx);
+ if (res != 0 && res != EINTR)
+ break;
+ }
+
+ }
+
+ /* Cleanup... */
+ cleanup_mutex_unlock:
+ dres = pthread_mutex_unlock(&twd.mtx);
+ if (res == 0)
+ res = dres;
+ cleanup_cond_destroy:
+ dres = pthread_cond_destroy(&twd.cnd);
+ if (res == 0)
+ res = dres;
+ cleanup_mutex_destroy:
+ dres = pthread_mutex_destroy(&twd.mtx);
+ if (res == 0)
+ res = dres;
+ cleanup_attr_destroy:
+ dres = pthread_attr_destroy(&attr);
+ if (res == 0)
+ res = dres;
+ cleanup_parent_func:
+ if (thread_create_parent_func)
+ (*thread_create_parent_func)(twd.prep_func_res);
+
+ return res;
+}
+
+int
+ethr_thr_join(ethr_tid tid, void **res)
+{
+#if ETHR_XCHK
+ if (ethr_not_inited) {
+ ASSERT(0);
+ return EACCES;
+ }
+#endif
+ return pthread_join((pthread_t) tid, res);
+}
+
+int
+ethr_thr_detach(ethr_tid tid)
+{
+#if ETHR_XCHK
+ if (ethr_not_inited) {
+ ASSERT(0);
+ return EACCES;
+ }
+#endif
+ return pthread_detach((pthread_t) tid);
+}
+
+void
+ethr_thr_exit(void *res)
+{
+#if ETHR_XCHK
+ if (ethr_not_inited) {
+ ASSERT(0);
+ return;
+ }
+#endif
+ thr_exit_cleanup();
+ pthread_exit(res);
+}
+
+ethr_tid
+ethr_self(void)
+{
+ return (ethr_tid) pthread_self();
+}
+
+int
+ethr_equal_tids(ethr_tid tid1, ethr_tid tid2)
+{
+ return pthread_equal((pthread_t) tid1, (pthread_t) tid2);
+}
+
+
+/*
+ * Mutex functions
+ */
+
+
+int
+ethr_mutex_init(ethr_mutex *mtx)
+{
+#if ETHR_XCHK
+ if (ethr_not_inited) {
+ ASSERT(0);
+ return EACCES;
+ }
+ if (!mtx) {
+ ASSERT(0);
+ return EINVAL;
+ }
+ mtx->initialized = ETHR_MUTEX_INITIALIZED;
+#endif
+ mtx->prev = NULL;
+ mtx->next = NULL;
+ mtx->is_rec_mtx = 0;
+ return pthread_mutex_init(&mtx->pt_mtx, NULL);
+}
+
+#ifdef ETHR_HAVE_ETHR_REC_MUTEX_INIT
+
+int
+ethr_rec_mutex_init(ethr_mutex *mtx)
+{
+#if ETHR_XCHK
+ if (ethr_not_inited) {
+ ASSERT(0);
+ return EACCES;
+ }
+ if (!mtx) {
+ ASSERT(0);
+ return EINVAL;
+ }
+ mtx->initialized = ETHR_MUTEX_INITIALIZED;
+#endif
+ if (rec_mtx_attr_need_init)
+ init_rec_mtx_attr();
+
+ mtx->prev = NULL;
+ mtx->next = NULL;
+ mtx->is_rec_mtx = 1;
+ return pthread_mutex_init(&mtx->pt_mtx, &rec_mtx_attr);
+}
+
+#endif /* #if ETHR_HAVE_ETHR_REC_MUTEX_INIT */
+
+int
+ethr_mutex_destroy(ethr_mutex *mtx)
+{
+#if ETHR_XCHK
+ if (ethr_not_inited) {
+ ASSERT(0);
+ return EACCES;
+ }
+ if (!mtx || mtx->initialized != ETHR_MUTEX_INITIALIZED) {
+ ASSERT(0);
+ return EINVAL;
+ }
+#endif
+ if (mtx->next) {
+ ASSERT(mtx->prev);
+ ethr_mutex_unset_forksafe(mtx);
+ }
+#if ETHR_XCHK
+ mtx->initialized = 0;
+#endif
+ return pthread_mutex_destroy(&mtx->pt_mtx);
+}
+
+int ethr_mutex_set_forksafe(ethr_mutex *mtx)
+{
+ int res;
+#if ETHR_XCHK
+ if (ethr_not_inited) {
+ ASSERT(0);
+ return EACCES;
+ }
+ if (!mtx || mtx->initialized != ETHR_MUTEX_INITIALIZED) {
+ ASSERT(0);
+ return EINVAL;
+ }
+#endif
+#if ETHR_HAVE_PTHREAD_ATFORK
+ res = pthread_mutex_lock(&forksafe_mtx.pt_mtx);
+ if (res != 0)
+ return res;
+ if (!forksafe_mtx.next) {
+ ASSERT(!forksafe_mtx.prev);
+ init_forksafe();
+ }
+ if (mtx->next) {
+ /* forksafe already set for this mutex */
+ ASSERT(mtx->prev);
+ }
+ else {
+ mtx->next = forksafe_mtx.next;
+ mtx->prev = &forksafe_mtx;
+ forksafe_mtx.next->prev = mtx;
+ forksafe_mtx.next = mtx;
+ }
+
+ res = pthread_mutex_unlock(&forksafe_mtx.pt_mtx);
+
+#else /* #if ETHR_HAVE_PTHREAD_ATFORK */
+ res = ENOTSUP;
+#endif /* #if ETHR_HAVE_PTHREAD_ATFORK */
+ return res;
+}
+
+int ethr_mutex_unset_forksafe(ethr_mutex *mtx)
+{
+ int res;
+#if ETHR_XCHK
+ if (ethr_not_inited) {
+ ASSERT(0);
+ return EACCES;
+ }
+ if (!mtx || mtx->initialized != ETHR_MUTEX_INITIALIZED) {
+ ASSERT(0);
+ return EINVAL;
+ }
+#endif
+#if ETHR_HAVE_PTHREAD_ATFORK
+ res = pthread_mutex_lock(&forksafe_mtx.pt_mtx);
+ if (res != 0)
+ return res;
+ if (!forksafe_mtx.next) {
+ ASSERT(!forksafe_mtx.prev);
+ init_forksafe();
+ }
+ if (!mtx->next) {
+ /* forksafe already unset for this mutex */
+ ASSERT(!mtx->prev);
+ }
+ else {
+ mtx->prev->next = mtx->next;
+ mtx->next->prev = mtx->prev;
+ mtx->next = NULL;
+ mtx->prev = NULL;
+ }
+ res = pthread_mutex_unlock(&forksafe_mtx.pt_mtx);
+
+#else /* #if ETHR_HAVE_PTHREAD_ATFORK */
+ res = ENOTSUP;
+#endif /* #if ETHR_HAVE_PTHREAD_ATFORK */
+ return res;
+}
+
+int
+ethr_mutex_trylock(ethr_mutex *mtx)
+{
+#if ETHR_XCHK
+ if (ethr_not_inited) {
+ ASSERT(0);
+ return EACCES;
+ }
+ if (!mtx || mtx->initialized != ETHR_MUTEX_INITIALIZED) {
+ ASSERT(0);
+ return EINVAL;
+ }
+#endif
+ return ethr_mutex_trylock__(mtx);
+}
+
+int
+ethr_mutex_lock(ethr_mutex *mtx)
+{
+#if ETHR_XCHK
+ if (ethr_not_inited) {
+ ASSERT(0);
+ return EACCES;
+ }
+ if (!mtx || mtx->initialized != ETHR_MUTEX_INITIALIZED) {
+ ASSERT(0);
+ return EINVAL;
+ }
+#endif
+ return ethr_mutex_lock__(mtx);
+}
+
+int
+ethr_mutex_unlock(ethr_mutex *mtx)
+{
+#if ETHR_XCHK
+ if (ethr_not_inited) {
+ ASSERT(0);
+ return EACCES;
+ }
+ if (!mtx || mtx->initialized != ETHR_MUTEX_INITIALIZED) {
+ ASSERT(0);
+ return EINVAL;
+ }
+#endif
+ return ethr_mutex_unlock__(mtx);
+}
+
+/*
+ * Condition variable functions
+ */
+
+int
+ethr_cond_init(ethr_cond *cnd)
+{
+#if ETHR_XCHK
+ if (ethr_not_inited) {
+ ASSERT(0);
+ return EACCES;
+ }
+ if (!cnd) {
+ ASSERT(0);
+ return EINVAL;
+ }
+ cnd->initialized = ETHR_COND_INITIALIZED;
+#endif
+ return pthread_cond_init(&cnd->pt_cnd, NULL);
+}
+
+int
+ethr_cond_destroy(ethr_cond *cnd)
+{
+#if ETHR_XCHK
+ if (ethr_not_inited) {
+ ASSERT(0);
+ return EACCES;
+ }
+ if (!cnd || cnd->initialized != ETHR_COND_INITIALIZED) {
+ ASSERT(0);
+ return EINVAL;
+ }
+ cnd->initialized = 0;
+#endif
+ return pthread_cond_destroy(&cnd->pt_cnd);
+}
+
+int
+ethr_cond_signal(ethr_cond *cnd)
+{
+#if ETHR_XCHK
+ if (ethr_not_inited) {
+ ASSERT(0);
+ return EACCES;
+ }
+ if (!cnd || cnd->initialized != ETHR_COND_INITIALIZED) {
+ ASSERT(0);
+ return EINVAL;
+ }
+#endif
+ return pthread_cond_signal(&cnd->pt_cnd);
+}
+
+int
+ethr_cond_broadcast(ethr_cond *cnd)
+{
+#if ETHR_XCHK
+ if (ethr_not_inited) {
+ ASSERT(0);
+ return EACCES;
+ }
+ if (!cnd || cnd->initialized != ETHR_COND_INITIALIZED) {
+ ASSERT(0);
+ return EINVAL;
+ }
+#endif
+ return pthread_cond_broadcast(&cnd->pt_cnd);
+}
+
+int
+ethr_cond_wait(ethr_cond *cnd, ethr_mutex *mtx)
+{
+#if ETHR_XCHK
+ if (ethr_not_inited) {
+ ASSERT(0);
+ return EACCES;
+ }
+ if (!cnd
+ || cnd->initialized != ETHR_COND_INITIALIZED
+ || !mtx
+ || mtx->initialized != ETHR_MUTEX_INITIALIZED) {
+ ASSERT(0);
+ return EINVAL;
+ }
+#endif
+ return pthread_cond_wait(&cnd->pt_cnd, &mtx->pt_mtx);
+}
+
+int
+ethr_cond_timedwait(ethr_cond *cnd, ethr_mutex *mtx, ethr_timeval *timeout)
+{
+ struct timespec to;
+#if ETHR_XCHK
+ if (ethr_not_inited) {
+ ASSERT(0);
+ return EACCES;
+ }
+ if (!cnd
+ || cnd->initialized != ETHR_COND_INITIALIZED
+ || !mtx
+ || mtx->initialized != ETHR_MUTEX_INITIALIZED
+ || !timeout) {
+ ASSERT(0);
+ return EINVAL;
+ }
+#endif
+
+ to.tv_sec = timeout->tv_sec;
+ to.tv_nsec = timeout->tv_nsec;
+
+ return pthread_cond_timedwait(&cnd->pt_cnd, &mtx->pt_mtx, &to);
+}
+
+
+#ifdef ETHR_HAVE_PTHREAD_RWLOCK_INIT
+
+int
+ethr_rwmutex_init(ethr_rwmutex *rwmtx)
+{
+#if ETHR_XCHK
+ if (ethr_not_inited) {
+ ASSERT(0);
+ return EACCES;
+ }
+ if (!rwmtx) {
+ ASSERT(0);
+ return EINVAL;
+ }
+ rwmtx->initialized = ETHR_RWMUTEX_INITIALIZED;
+#endif
+ return pthread_rwlock_init(&rwmtx->pt_rwlock, NULL);
+}
+
+int
+ethr_rwmutex_destroy(ethr_rwmutex *rwmtx)
+{
+ int res;
+#if ETHR_XCHK
+ if (ethr_not_inited) {
+ ASSERT(0);
+ return EACCES;
+ }
+ if (!rwmtx || rwmtx->initialized != ETHR_RWMUTEX_INITIALIZED) {
+ ASSERT(0);
+ return EINVAL;
+ }
+#endif
+ res = pthread_rwlock_destroy(&rwmtx->pt_rwlock);
+#if ETHR_XCHK
+ rwmtx->initialized = 0;
+#endif
+ return res;
+}
+
+int
+ethr_rwmutex_tryrlock(ethr_rwmutex *rwmtx)
+{
+#if ETHR_XCHK
+ if (ethr_not_inited) {
+ ASSERT(0);
+ return EACCES;
+ }
+ if (!rwmtx || rwmtx->initialized != ETHR_RWMUTEX_INITIALIZED) {
+ ASSERT(0);
+ return EINVAL;
+ }
+#endif
+ return ethr_rwmutex_tryrlock__(rwmtx);
+}
+
+int
+ethr_rwmutex_rlock(ethr_rwmutex *rwmtx)
+{
+#if ETHR_XCHK
+ if (ethr_not_inited) {
+ ASSERT(0);
+ return EACCES;
+ }
+ if (!rwmtx || rwmtx->initialized != ETHR_RWMUTEX_INITIALIZED) {
+ ASSERT(0);
+ return EINVAL;
+ }
+#endif
+ return ethr_rwmutex_rlock__(rwmtx);
+}
+
+int
+ethr_rwmutex_runlock(ethr_rwmutex *rwmtx)
+{
+#if ETHR_XCHK
+ if (ethr_not_inited) {
+ ASSERT(0);
+ return EACCES;
+ }
+ if (!rwmtx || rwmtx->initialized != ETHR_RWMUTEX_INITIALIZED) {
+ ASSERT(0);
+ return EINVAL;
+ }
+#endif
+ return ethr_rwmutex_runlock__(rwmtx);
+}
+
+int
+ethr_rwmutex_tryrwlock(ethr_rwmutex *rwmtx)
+{
+#if ETHR_XCHK
+ if (ethr_not_inited) {
+ ASSERT(0);
+ return EACCES;
+ }
+ if (!rwmtx || rwmtx->initialized != ETHR_RWMUTEX_INITIALIZED) {
+ ASSERT(0);
+ return EINVAL;
+ }
+#endif
+ return ethr_rwmutex_tryrwlock__(rwmtx);
+}
+
+int
+ethr_rwmutex_rwlock(ethr_rwmutex *rwmtx)
+{
+#if ETHR_XCHK
+ if (ethr_not_inited) {
+ ASSERT(0);
+ return EACCES;
+ }
+ if (!rwmtx || rwmtx->initialized != ETHR_RWMUTEX_INITIALIZED) {
+ ASSERT(0);
+ return EINVAL;
+ }
+#endif
+ return ethr_rwmutex_rwlock__(rwmtx);
+}
+
+int
+ethr_rwmutex_rwunlock(ethr_rwmutex *rwmtx)
+{
+#if ETHR_XCHK
+ if (ethr_not_inited) {
+ ASSERT(0);
+ return EACCES;
+ }
+ if (!rwmtx || rwmtx->initialized != ETHR_RWMUTEX_INITIALIZED) {
+ ASSERT(0);
+ return EINVAL;
+ }
+#endif
+ return ethr_rwmutex_rwunlock__(rwmtx);
+}
+
+#endif /* #ifdef ETHR_HAVE_PTHREAD_RWLOCK_INIT */
+
+/*
+ * Current time
+ */
+
+int
+ethr_time_now(ethr_timeval *time)
+{
+ int res;
+ struct timeval tv;
+#if ETHR_XCHK
+ if (ethr_not_inited) {
+ ASSERT(0);
+ return EACCES;
+ }
+ if (!time) {
+ ASSERT(0);
+ return EINVAL;
+ }
+#endif
+
+ res = gettimeofday(&tv, NULL);
+ time->tv_sec = (long) tv.tv_sec;
+ time->tv_nsec = ((long) tv.tv_usec)*1000;
+ return res;
+}
+
+/*
+ * Thread specific data
+ */
+
+int
+ethr_tsd_key_create(ethr_tsd_key *keyp)
+{
+#if ETHR_XCHK
+ if (ethr_not_inited) {
+ ASSERT(0);
+ return EACCES;
+ }
+ if (!keyp) {
+ ASSERT(0);
+ return EINVAL;
+ }
+#endif
+ return pthread_key_create((pthread_key_t *) keyp, NULL);
+}
+
+int
+ethr_tsd_key_delete(ethr_tsd_key key)
+{
+#if ETHR_XCHK
+ if (ethr_not_inited) {
+ ASSERT(0);
+ return EACCES;
+ }
+#endif
+ return pthread_key_delete((pthread_key_t) key);
+}
+
+int
+ethr_tsd_set(ethr_tsd_key key, void *value)
+{
+#if ETHR_XCHK
+ if (ethr_not_inited) {
+ ASSERT(0);
+ return EACCES;
+ }
+#endif
+ return pthread_setspecific((pthread_key_t) key, value);
+}
+
+void *
+ethr_tsd_get(ethr_tsd_key key)
+{
+#if ETHR_XCHK
+ if (ethr_not_inited) {
+ ASSERT(0);
+ return NULL;
+ }
+#endif
+ return pthread_getspecific((pthread_key_t) key);
+}
+
+/*
+ * Signal functions
+ */
+
+#if ETHR_HAVE_ETHR_SIG_FUNCS
+
+int ethr_sigmask(int how, const sigset_t *set, sigset_t *oset)
+{
+#if ETHR_XCHK
+ if (ethr_not_inited) {
+ ASSERT(0);
+ return EACCES;
+ }
+ if (!set && !oset) {
+ ASSERT(0);
+ return EINVAL;
+ }
+#endif
+ return pthread_sigmask(how, set, oset);
+}
+
+int ethr_sigwait(const sigset_t *set, int *sig)
+{
+#if ETHR_XCHK
+ if (ethr_not_inited) {
+ ASSERT(0);
+ return EACCES;
+ }
+ if (!set || !sig) {
+ ASSERT(0);
+ return EINVAL;
+ }
+#endif
+ if (sigwait(set, sig) < 0)
+ return errno;
+ return 0;
+}
+
+#endif /* #if ETHR_HAVE_ETHR_SIG_FUNCS */
+
+#elif defined(ETHR_WIN32_THREADS)
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\
+ * Native win32 threads implementation *
+\* */
+
+#define INVALID_TID -1
+
+/* The spin count values are more or less taken out of the blue */
+#define ETHR_MUTEX_SPIN_COUNT 5000
+#define ETHR_COND_SPIN_COUNT 1000
+
+ethr_tid serial_shift; /* Bits to shift serial when constructing a tid */
+ethr_tid last_serial; /* Last thread table serial used */
+ethr_tid last_ix; /* Last thread table index used */
+ethr_tid thr_ix_mask; /* Mask used to mask out thread table index from a tid */
+
+/* Event used for condition variables. One per thread. */
+/*typedef struct cnd_wait_event__ cnd_wait_event_;*/
+struct cnd_wait_event__ {
+ HANDLE handle;
+ cnd_wait_event_ *prev;
+ cnd_wait_event_ *next;
+ int in_queue;
+};
+
+/* Thread specific data. Stored in the thread table */
+typedef struct {
+ ethr_tid thr_id;
+ HANDLE thr_handle;
+ ethr_tid joiner;
+ void *result;
+ cnd_wait_event_ wait_event;
+} thr_data_;
+
+/* Argument passed to thr_wrapper() */
+typedef struct {
+ void * (*func)(void *);
+ void * arg;
+ thr_data_ *ptd;
+ thr_data_ *td;
+ int res;
+ void *prep_func_res;
+} thr_wrap_data_;
+
+
+static CRITICAL_SECTION thr_table_cs; /* Critical section used to protect
+ the thread table from concurrent
+ accesses. */
+static CRITICAL_SECTION fake_static_init_cs; /* Critical section used to protect
+                                                initialization of 'statically
+                                                initialized' mutexes */
+static thr_data_ * thr_table[ETHR_MAX_THREADS]; /* The thread table */
+
+static DWORD tls_own_thr_data;
+
+static thr_data_ main_thr_data;
+
+#define THR_IX(TID) ((TID) & thr_ix_mask)
+#define OWN_THR_DATA ((thr_data_ *) TlsGetValue(tls_own_thr_data))
+
+/*
+ * ----------------------------------------------------------------------------
+ * Static functions
+ * ----------------------------------------------------------------------------
+ */
+
+static int
+get_errno(void)
+{
+ switch (GetLastError()) {
+ case ERROR_INVALID_FUNCTION: return EINVAL; /* 1 */
+ case ERROR_FILE_NOT_FOUND: return ENOENT; /* 2 */
+ case ERROR_PATH_NOT_FOUND: return ENOENT; /* 3 */
+ case ERROR_TOO_MANY_OPEN_FILES: return EMFILE; /* 4 */
+ case ERROR_ACCESS_DENIED: return EACCES; /* 5 */
+ case ERROR_INVALID_HANDLE: return EBADF; /* 6 */
+ case ERROR_ARENA_TRASHED: return ENOMEM; /* 7 */
+ case ERROR_NOT_ENOUGH_MEMORY: return ENOMEM; /* 8 */
+ case ERROR_INVALID_BLOCK: return ENOMEM; /* 9 */
+ case ERROR_BAD_ENVIRONMENT: return E2BIG; /* 10 */
+ case ERROR_BAD_FORMAT: return ENOEXEC; /* 11 */
+ case ERROR_INVALID_ACCESS: return EINVAL; /* 12 */
+ case ERROR_INVALID_DATA: return EINVAL; /* 13 */
+ case ERROR_OUTOFMEMORY: return ENOMEM; /* 14 */
+ case ERROR_INVALID_DRIVE: return ENOENT; /* 15 */
+ case ERROR_CURRENT_DIRECTORY: return EACCES; /* 16 */
+ case ERROR_NOT_SAME_DEVICE: return EXDEV; /* 17 */
+ case ERROR_NO_MORE_FILES: return ENOENT; /* 18 */
+ case ERROR_WRITE_PROTECT: return EACCES; /* 19 */
+ case ERROR_BAD_UNIT: return EACCES; /* 20 */
+ case ERROR_NOT_READY: return EACCES; /* 21 */
+ case ERROR_BAD_COMMAND: return EACCES; /* 22 */
+ case ERROR_CRC: return EACCES; /* 23 */
+ case ERROR_BAD_LENGTH: return EACCES; /* 24 */
+ case ERROR_SEEK: return EACCES; /* 25 */
+ case ERROR_NOT_DOS_DISK: return EACCES; /* 26 */
+ case ERROR_SECTOR_NOT_FOUND: return EACCES; /* 27 */
+ case ERROR_OUT_OF_PAPER: return EACCES; /* 28 */
+ case ERROR_WRITE_FAULT: return EACCES; /* 29 */
+ case ERROR_READ_FAULT: return EACCES; /* 30 */
+ case ERROR_GEN_FAILURE: return EACCES; /* 31 */
+ case ERROR_SHARING_VIOLATION: return EACCES; /* 32 */
+ case ERROR_LOCK_VIOLATION: return EACCES; /* 33 */
+ case ERROR_WRONG_DISK: return EACCES; /* 34 */
+ case ERROR_SHARING_BUFFER_EXCEEDED: return EACCES; /* 36 */
+ case ERROR_BAD_NETPATH: return ENOENT; /* 53 */
+ case ERROR_NETWORK_ACCESS_DENIED: return EACCES; /* 65 */
+ case ERROR_BAD_NET_NAME: return ENOENT; /* 67 */
+ case ERROR_FILE_EXISTS: return EEXIST; /* 80 */
+ case ERROR_CANNOT_MAKE: return EACCES; /* 82 */
+ case ERROR_FAIL_I24: return EACCES; /* 83 */
+ case ERROR_INVALID_PARAMETER: return EINVAL; /* 87 */
+ case ERROR_NO_PROC_SLOTS: return EAGAIN; /* 89 */
+ case ERROR_DRIVE_LOCKED: return EACCES; /* 108 */
+ case ERROR_BROKEN_PIPE: return EPIPE; /* 109 */
+ case ERROR_DISK_FULL: return ENOSPC; /* 112 */
+ case ERROR_INVALID_TARGET_HANDLE: return EBADF; /* 114 */
+ case ERROR_WAIT_NO_CHILDREN: return ECHILD; /* 128 */
+ case ERROR_CHILD_NOT_COMPLETE: return ECHILD; /* 129 */
+ case ERROR_DIRECT_ACCESS_HANDLE: return EBADF; /* 130 */
+ case ERROR_NEGATIVE_SEEK: return EINVAL; /* 131 */
+ case ERROR_SEEK_ON_DEVICE: return EACCES; /* 132 */
+ case ERROR_DIR_NOT_EMPTY: return ENOTEMPTY;/* 145 */
+ case ERROR_NOT_LOCKED: return EACCES; /* 158 */
+ case ERROR_BAD_PATHNAME: return ENOENT; /* 161 */
+ case ERROR_MAX_THRDS_REACHED: return EAGAIN; /* 164 */
+ case ERROR_LOCK_FAILED: return EACCES; /* 167 */
+ case ERROR_ALREADY_EXISTS: return EEXIST; /* 183 */
+ case ERROR_INVALID_STARTING_CODESEG: return ENOEXEC; /* 188 */
+ case ERROR_INVALID_STACKSEG: return ENOEXEC; /* 189 */
+ case ERROR_INVALID_MODULETYPE: return ENOEXEC; /* 190 */
+ case ERROR_INVALID_EXE_SIGNATURE: return ENOEXEC; /* 191 */
+ case ERROR_EXE_MARKED_INVALID: return ENOEXEC; /* 192 */
+ case ERROR_BAD_EXE_FORMAT: return ENOEXEC; /* 193 */
+ case ERROR_ITERATED_DATA_EXCEEDS_64k: return ENOEXEC; /* 194 */
+ case ERROR_INVALID_MINALLOCSIZE: return ENOEXEC; /* 195 */
+ case ERROR_DYNLINK_FROM_INVALID_RING: return ENOEXEC; /* 196 */
+ case ERROR_IOPL_NOT_ENABLED: return ENOEXEC; /* 197 */
+ case ERROR_INVALID_SEGDPL: return ENOEXEC; /* 198 */
+ case ERROR_AUTODATASEG_EXCEEDS_64k: return ENOEXEC; /* 199 */
+ case ERROR_RING2SEG_MUST_BE_MOVABLE: return ENOEXEC; /* 200 */
+ case ERROR_RELOC_CHAIN_XEEDS_SEGLIM: return ENOEXEC; /* 201 */
+ case ERROR_INFLOOP_IN_RELOC_CHAIN: return ENOEXEC; /* 202 */
+ case ERROR_FILENAME_EXCED_RANGE: return ENOENT; /* 206 */
+ case ERROR_NESTING_NOT_ALLOWED: return EAGAIN; /* 215 */
+ case ERROR_NOT_ENOUGH_QUOTA: return ENOMEM; /* 1816 */
+ default: return EINVAL;
+ }
+}
+
+static ETHR_INLINE thr_data_ *
+tid2thr(ethr_tid tid)
+{
+ ethr_tid ix;
+ thr_data_ *td;
+
+ if (tid < 0)
+ return NULL;
+ ix = THR_IX(tid);
+ if (ix >= ETHR_MAX_THREADS)
+ return NULL;
+ td = thr_table[ix];
+ if (!td)
+ return NULL;
+ if (td->thr_id != tid)
+ return NULL;
+ return td;
+}
+
+static ETHR_INLINE void
+new_tid(ethr_tid *new_tid, ethr_tid *new_serial, ethr_tid *new_ix)
+{
+ ethr_tid tmp_serial = last_serial;
+ ethr_tid tmp_ix = last_ix + 1;
+ ethr_tid start_ix = tmp_ix;
+
+
+ do {
+ if (tmp_ix >= ETHR_MAX_THREADS) {
+ tmp_serial++;
+ if ((tmp_serial << serial_shift) < 0)
+ tmp_serial = 0;
+ tmp_ix = 0;
+ }
+ if (!thr_table[tmp_ix]) {
+ *new_tid = (tmp_serial << serial_shift) | tmp_ix;
+ *new_serial = tmp_serial;
+ *new_ix = tmp_ix;
+ return;
+ }
+ tmp_ix++;
+ } while (tmp_ix != start_ix);
+
+ *new_tid = INVALID_TID;
+ *new_serial = INVALID_TID;
+ *new_ix = INVALID_TID;
+
+}
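+
+/*
+ * A tid packs a thread table index into the low bits and a reuse serial
+ * into the high bits: tid = (serial << serial_shift) | ix. With the
+ * default ETHR_MAX_THREADS of 2048, serial_shift becomes 11 and
+ * thr_ix_mask 0x7ff, so THR_IX() recovers the table index while the
+ * serial lets tid2thr() reject a stale tid whose slot has been reused.
+ */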
+
+
+static void thr_exit_cleanup(thr_data_ *td, void *res)
+{
+
+ ASSERT(td == OWN_THR_DATA);
+
+ run_exit_handlers();
+
+ EnterCriticalSection(&thr_table_cs);
+ CloseHandle(td->wait_event.handle);
+ if (td->thr_handle == INVALID_HANDLE_VALUE) {
+ /* We are detached; cleanup thread table */
+ ASSERT(td->joiner == INVALID_TID);
+ ASSERT(td == thr_table[THR_IX(td->thr_id)]);
+ thr_table[THR_IX(td->thr_id)] = NULL;
+ if (td != &main_thr_data)
+ (*freep)((void *) td);
+ }
+ else {
+ /* Save result and let joining thread cleanup */
+ td->result = res;
+ }
+ LeaveCriticalSection(&thr_table_cs);
+}
+
+static unsigned __stdcall thr_wrapper(LPVOID args)
+{
+ void *(*func)(void*) = ((thr_wrap_data_ *) args)->func;
+ void *arg = ((thr_wrap_data_ *) args)->arg;
+ thr_data_ *td = ((thr_wrap_data_ *) args)->td;
+
+ td->wait_event.handle = CreateEvent(NULL, FALSE, FALSE, NULL);
+ if (td->wait_event.handle == INVALID_HANDLE_VALUE
+ || !TlsSetValue(tls_own_thr_data, (LPVOID) td)) {
+ ((thr_wrap_data_ *) args)->res = get_errno();
+ if (td->wait_event.handle != INVALID_HANDLE_VALUE)
+ CloseHandle(td->wait_event.handle);
+ SetEvent(((thr_wrap_data_ *) args)->ptd->wait_event.handle);
+ _endthreadex((unsigned) 0);
+ ASSERT(0);
+ }
+
+ td->wait_event.prev = NULL;
+ td->wait_event.next = NULL;
+ td->wait_event.in_queue = 0;
+
+ if (thread_create_child_func)
+ (*thread_create_child_func)(((thr_wrap_data_ *) args)->prep_func_res);
+
+ ASSERT(td == OWN_THR_DATA);
+
+ ((thr_wrap_data_ *) args)->res = 0;
+ SetEvent(((thr_wrap_data_ *) args)->ptd->wait_event.handle);
+
+ thr_exit_cleanup(td, (*func)(arg));
+ return 0;
+}
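+
+/*
+ * Win32 thread creation handshake: ethr_thr_create() blocks on its own
+ * wait_event after _beginthreadex(); the child stores its status in
+ * twd->res and sets the parent's event once its thread data and the
+ * thread_create_child_func callback are done, so the parent can safely
+ * inspect twd.res when WaitForSingleObject() returns.
+ */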
+
+int
+ethr_fake_static_mutex_init(ethr_mutex *mtx)
+{
+ EnterCriticalSection((CRITICAL_SECTION *) &fake_static_init_cs);
+ /* Got here under race conditions; check again... */
+    if (!mtx->initialized) {
+        if (!InitializeCriticalSectionAndSpinCount(&mtx->cs,
+                                                   ETHR_MUTEX_SPIN_COUNT)) {
+            /* Do not return with fake_static_init_cs held */
+            int res = get_errno();
+            LeaveCriticalSection((CRITICAL_SECTION *) &fake_static_init_cs);
+            return res;
+        }
+        mtx->initialized = ETHR_MUTEX_INITIALIZED;
+    }
+ LeaveCriticalSection((CRITICAL_SECTION *) &fake_static_init_cs);
+ return 0;
+}
+
+static int
+fake_static_cond_init(ethr_cond *cnd)
+{
+ EnterCriticalSection((CRITICAL_SECTION *) &fake_static_init_cs);
+ /* Got here under race conditions; check again... */
+    if (!cnd->initialized) {
+        if (!InitializeCriticalSectionAndSpinCount(&cnd->cs,
+                                                   ETHR_COND_SPIN_COUNT)) {
+            /* Do not return with fake_static_init_cs held */
+            int res = get_errno();
+            LeaveCriticalSection((CRITICAL_SECTION *) &fake_static_init_cs);
+            return res;
+        }
+        cnd->queue = NULL;
+        cnd->queue_end = NULL;
+        cnd->initialized = ETHR_COND_INITIALIZED;
+    }
+ LeaveCriticalSection((CRITICAL_SECTION *) &fake_static_init_cs);
+ return 0;
+}
+
+#ifdef __GNUC__
+#define LL_LITERAL(X) X##LL
+#else
+#define LL_LITERAL(X) X##i64
+#endif
+
+#define EPOCH_JULIAN_DIFF LL_LITERAL(11644473600)
+
+static ETHR_INLINE void
+get_curr_time(long *sec, long *nsec)
+{
+ SYSTEMTIME t;
+ FILETIME ft;
+ LONGLONG lft;
+
+ GetSystemTime(&t);
+ SystemTimeToFileTime(&t, &ft);
+ memcpy(&lft, &ft, sizeof(lft));
+ *nsec = ((long) (lft % LL_LITERAL(10000000)))*100;
+ *sec = (long) ((lft / LL_LITERAL(10000000)) - EPOCH_JULIAN_DIFF);
+}
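+
+/*
+ * FILETIME counts 100-nanosecond intervals since 1601-01-01 (UTC).
+ * Dividing by 10^7 yields whole seconds since 1601; subtracting
+ * EPOCH_JULIAN_DIFF (11644473600, the number of seconds from 1601-01-01
+ * to 1970-01-01) converts that to the Unix epoch, and the remainder
+ * multiplied by 100 gives the nanosecond part.
+ */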
+
+static cnd_wait_event_ *cwe_freelist;
+static CRITICAL_SECTION cwe_cs;
+
+static int
+alloc_cwe(cnd_wait_event_ **cwe_res)
+{
+ cnd_wait_event_ *cwe;
+ EnterCriticalSection(&cwe_cs);
+ cwe = cwe_freelist;
+ if (cwe) {
+ cwe_freelist = cwe->next;
+ LeaveCriticalSection(&cwe_cs);
+ }
+ else {
+ LeaveCriticalSection(&cwe_cs);
+ cwe = (*allocp)(sizeof(cnd_wait_event_));
+ if (!cwe)
+ return ENOMEM;
+ cwe->handle = CreateEvent(NULL, FALSE, FALSE, NULL);
+ if (cwe->handle == INVALID_HANDLE_VALUE) {
+ int res = get_errno();
+ (*freep)(cwe);
+ return res;
+ }
+ }
+ *cwe_res = cwe;
+ return 0;
+}
+
+static void
+free_cwe(cnd_wait_event_ *cwe)
+{
+ EnterCriticalSection(&cwe_cs);
+ cwe->next = cwe_freelist;
+ cwe_freelist = cwe;
+ LeaveCriticalSection(&cwe_cs);
+}
+
+static ETHR_INLINE int
+condwait(ethr_cond *cnd,
+ ethr_mutex *mtx,
+ int with_timeout,
+ ethr_timeval *timeout)
+{
+ int res;
+ thr_data_ *td;
+ cnd_wait_event_ *cwe;
+ DWORD code;
+    long time; /* time until timeout in milliseconds */
+
+#if ETHR_XCHK
+ if (ethr_not_inited) {
+ ASSERT(0);
+ return EACCES;
+ }
+
+ if (!mtx
+ || mtx->initialized != ETHR_MUTEX_INITIALIZED
+ || !cnd
+ || (cnd->initialized && cnd->initialized != ETHR_COND_INITIALIZED)
+ || (with_timeout && !timeout)) {
+ ASSERT(0);
+ return EINVAL;
+ }
+#endif
+
+ td = OWN_THR_DATA;
+ if (td)
+ cwe = &td->wait_event;
+ else { /* A non-ethread thread */
+ res = alloc_cwe(&cwe);
+ if (res != 0)
+ return res;
+ }
+
+ if (!cnd->initialized)
+ fake_static_cond_init(cnd);
+ EnterCriticalSection(&cnd->cs);
+
+ ASSERT(!cwe->in_queue);
+ if (cnd->queue_end) {
+ ASSERT(cnd->queue);
+ cwe->prev = cnd->queue_end;
+ cwe->next = NULL;
+ cnd->queue_end->next = cwe;
+ cnd->queue_end = cwe;
+ }
+ else {
+ ASSERT(!cnd->queue);
+ cwe->prev = NULL;
+ cwe->next = NULL;
+ cnd->queue = cwe;
+ cnd->queue_end = cwe;
+ }
+ cwe->in_queue = 1;
+
+ LeaveCriticalSection(&cnd->cs);
+
+ LeaveCriticalSection(&mtx->cs);
+
+ if (!with_timeout)
+ time = INFINITE;
+ else {
+ long sec, nsec;
+ ASSERT(timeout);
+ get_curr_time(&sec, &nsec);
+ time = (timeout->tv_sec - sec)*1000;
+ time += (timeout->tv_nsec - nsec + 500)/1000000;
+ if (time < 0)
+ time = 0;
+ }
+
+ /* wait for event to signal */
+ code = WaitForSingleObject(cwe->handle, time);
+
+ EnterCriticalSection(&mtx->cs);
+
+ if (code == WAIT_OBJECT_0) {
+ /* We were woken by a signal or a broadcast ... */
+ res = 0;
+
+ /* ... no need to remove event from wait queue since this was
+ taken care of by the signal or broadcast */
+#ifdef DEBUG
+ EnterCriticalSection(&cnd->cs);
+ ASSERT(!cwe->in_queue);
+ LeaveCriticalSection(&cnd->cs);
+#endif
+
+ }
+ else {
+ /* We timed out... */
+ res = ETIMEDOUT;
+
+ /* ... probably have to remove event from wait queue ... */
+ EnterCriticalSection(&cnd->cs);
+
+ if (cwe->in_queue) { /* ... but we must check that we are in queue
+ since a signal or broadcast after timeout
+ may have removed us from the queue */
+ if (cwe->prev) {
+ cwe->prev->next = cwe->next;
+ }
+ else {
+ ASSERT(cnd->queue == cwe);
+ cnd->queue = cwe->next;
+ }
+
+ if (cwe->next) {
+ cwe->next->prev = cwe->prev;
+ }
+ else {
+ ASSERT(cnd->queue_end == cwe);
+ cnd->queue_end = cwe->prev;
+ }
+ cwe->in_queue = 0;
+ }
+
+ LeaveCriticalSection(&cnd->cs);
+
+ }
+
+ if (!td)
+ free_cwe(cwe);
+
+ return res;
+
+}
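+
+/*
+ * Design note on the emulation above: each waiter owns an auto-reset
+ * event (per-thread for ethreads, taken from the cwe freelist otherwise).
+ * The waiter enqueues its event on the condition's queue, drops the
+ * mutex and blocks in WaitForSingleObject(); ethr_cond_signal() and
+ * ethr_cond_broadcast() dequeue waiters and set their events. On timeout
+ * the waiter re-checks the queue under cnd->cs, since a signal delivered
+ * after the timeout may already have removed it.
+ */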
+
+
+/*
+ * ----------------------------------------------------------------------------
+ * Exported functions
+ * ----------------------------------------------------------------------------
+ */
+
+int
+ethr_init(ethr_init_data *id)
+{
+#ifdef _WIN32_WINNT
+ DWORD major = (_WIN32_WINNT >> 8) & 0xff;
+ DWORD minor = _WIN32_WINNT & 0xff;
+ OSVERSIONINFO os_version;
+#endif
+ int err = 0;
+ thr_data_ *td = &main_thr_data;
+ unsigned long i;
+
+ if (!ethr_not_inited)
+ return EINVAL;
+
+#ifdef _WIN32_WINNT
+ os_version.dwOSVersionInfoSize = sizeof(OSVERSIONINFO);
+ GetVersionEx(&os_version);
+ if (os_version.dwPlatformId != VER_PLATFORM_WIN32_NT
+ || os_version.dwMajorVersion < major
+ || (os_version.dwMajorVersion == major
+ && os_version.dwMinorVersion < minor))
+ return ENOTSUP;
+#endif
+
+ ASSERT(ETHR_MAX_THREADS > 0);
+ for (i = ETHR_MAX_THREADS - 1, serial_shift = 0;
+ i;
+ serial_shift++, i >>= 1);
+ thr_ix_mask = ~(~((ethr_tid) 0) << serial_shift);
+
+ tls_own_thr_data = TlsAlloc();
+ if (tls_own_thr_data == TLS_OUT_OF_INDEXES)
+ goto error;
+
+ last_serial = 0;
+ last_ix = 0;
+
+ td->thr_id = 0;
+ td->thr_handle = GetCurrentThread();
+ td->joiner = INVALID_TID;
+ td->result = NULL;
+ td->wait_event.handle = CreateEvent(NULL, FALSE, FALSE, NULL);
+ if (td->wait_event.handle == INVALID_HANDLE_VALUE)
+ goto error;
+ td->wait_event.prev = NULL;
+ td->wait_event.next = NULL;
+ td->wait_event.in_queue = 0;
+ thr_table[0] = td;
+
+ if (!TlsSetValue(tls_own_thr_data, (LPVOID) td))
+ goto error;
+
+ ASSERT(td == OWN_THR_DATA);
+
+
+ cwe_freelist = NULL;
+ if (!InitializeCriticalSectionAndSpinCount(&cwe_cs,
+ ETHR_MUTEX_SPIN_COUNT))
+ goto error;
+
+ for (i = 1; i < ETHR_MAX_THREADS; i++)
+ thr_table[i] = NULL;
+
+ if (!InitializeCriticalSectionAndSpinCount(&thr_table_cs,
+ ETHR_MUTEX_SPIN_COUNT))
+ goto error;
+ if (!InitializeCriticalSectionAndSpinCount(&fake_static_init_cs,
+ ETHR_MUTEX_SPIN_COUNT))
+ goto error;
+ ethr_not_inited = 0;
+
+ err = init_common(id);
+ if (err)
+ goto error;
+
+ return 0;
+
+ error:
+ ethr_not_inited = 1;
+ if (err == 0)
+ err = get_errno();
+ ASSERT(err != 0);
+ if (td->thr_handle != INVALID_HANDLE_VALUE)
+ CloseHandle(td->thr_handle);
+ if (td->wait_event.handle != INVALID_HANDLE_VALUE)
+ CloseHandle(td->wait_event.handle);
+ return err;
+}
+
+/*
+ * Thread functions.
+ */
+
+int
+ethr_thr_create(ethr_tid *tid, void * (*func)(void *), void *arg,
+ ethr_thr_opts *opts)
+{
+ int err = 0;
+ thr_wrap_data_ twd;
+ thr_data_ *my_td, *child_td = NULL;
+ ethr_tid child_tid, child_serial, child_ix;
+ DWORD code;
+ unsigned ID;
+ unsigned stack_size = 0; /* 0 = system default */
+ int use_stack_size = (opts && opts->suggested_stack_size >= 0
+ ? opts->suggested_stack_size
+ : -1 /* Use system default */);
+
+#ifdef ETHR_MODIFIED_DEFAULT_STACK_SIZE
+ if (use_stack_size < 0)
+ use_stack_size = ETHR_MODIFIED_DEFAULT_STACK_SIZE;
+#endif
+
+#if ETHR_XCHK
+ if (ethr_not_inited) {
+ ASSERT(0);
+ return EACCES;
+ }
+ if (!tid || !func) {
+ ASSERT(0);
+ return EINVAL;
+ }
+#endif
+
+ my_td = OWN_THR_DATA;
+ if (!my_td) {
+ /* Only ethreads are allowed to call this function */
+ ASSERT(0);
+ return EACCES;
+ }
+
+ if (use_stack_size >= 0) {
+ size_t suggested_stack_size = (size_t) use_stack_size;
+#ifdef DEBUG
+        suggested_stack_size /= 2; /* Make sure we have some margin */
+#endif
+ if (suggested_stack_size < min_stack_size)
+ stack_size = (unsigned) ETHR_KW2B(min_stack_size);
+ else if (suggested_stack_size > max_stack_size)
+ stack_size = (unsigned) ETHR_KW2B(max_stack_size);
+ else
+ stack_size =
+ (unsigned) ETHR_PAGE_ALIGN(ETHR_KW2B(suggested_stack_size));
+ }
+
+ EnterCriticalSection(&thr_table_cs);
+
+    /* Call the prepare func if it exists */
+ if (thread_create_prepare_func)
+ twd.prep_func_res = (*thread_create_prepare_func)();
+ else
+ twd.prep_func_res = NULL;
+
+ /* Find a new thread id to use */
+ new_tid(&child_tid, &child_serial, &child_ix);
+ if (child_tid == INVALID_TID) {
+ err = EAGAIN;
+ goto error;
+ }
+
+ ASSERT(child_ix == THR_IX(child_tid));
+
+ *tid = child_tid;
+
+ ASSERT(!thr_table[child_ix]);
+
+ /* Alloc thread data */
+ thr_table[child_ix] = child_td = (thr_data_ *) (*allocp)(sizeof(thr_data_));
+ if (!child_td) {
+ err = ENOMEM;
+ goto error;
+ }
+
+ /* Init thread data */
+
+ child_td->thr_id = child_tid;
+ child_td->thr_handle = INVALID_HANDLE_VALUE;
+ child_td->joiner = INVALID_TID;
+ child_td->result = NULL;
+ /* 'child_td->wait_event' is initialized by child thread */
+
+
+ /* Init thread wrapper data */
+
+ twd.func = func;
+ twd.arg = arg;
+ twd.ptd = my_td;
+ twd.td = child_td;
+ twd.res = 0;
+
+ ASSERT(!my_td->wait_event.in_queue);
+
+ /* spawn the thr_wrapper function */
+ child_td->thr_handle = (HANDLE) _beginthreadex(NULL,
+ stack_size,
+ thr_wrapper,
+ (LPVOID) &twd,
+ 0,
+ &ID);
+ if (child_td->thr_handle == (HANDLE) 0) {
+ child_td->thr_handle = INVALID_HANDLE_VALUE;
+ goto error;
+ }
+
+ ASSERT(child_td->thr_handle != INVALID_HANDLE_VALUE);
+
+ /* Wait for child to finish initialization */
+ code = WaitForSingleObject(my_td->wait_event.handle, INFINITE);
+ if (twd.res || code != WAIT_OBJECT_0) {
+ err = twd.res;
+ goto error;
+ }
+
+ if (opts && opts->detached) {
+ CloseHandle(child_td->thr_handle);
+ child_td->thr_handle = INVALID_HANDLE_VALUE;
+ }
+
+ last_serial = child_serial;
+ last_ix = child_ix;
+
+ ASSERT(thr_table[child_ix] == child_td);
+
+ if (thread_create_parent_func)
+ (*thread_create_parent_func)(twd.prep_func_res);
+
+ LeaveCriticalSection(&thr_table_cs);
+
+ return 0;
+
+ error:
+
+ if (err == 0)
+ err = get_errno();
+ ASSERT(err != 0);
+
+ if (thread_create_parent_func)
+ (*thread_create_parent_func)(twd.prep_func_res);
+
+ if (child_ix != INVALID_TID) {
+
+ if (child_td) {
+ ASSERT(thr_table[child_ix] == child_td);
+
+ if (child_td->thr_handle != INVALID_HANDLE_VALUE) {
+ WaitForSingleObject(child_td->thr_handle, INFINITE);
+ CloseHandle(child_td->thr_handle);
+ }
+
+ (*freep)((void *) child_td);
+ thr_table[child_ix] = NULL;
+ }
+ }
+
+ *tid = INVALID_TID;
+
+ LeaveCriticalSection(&thr_table_cs);
+ return err;
+}
+
+int ethr_thr_join(ethr_tid tid, void **res)
+{
+ int err = 0;
+ DWORD code;
+ thr_data_ *td;
+ thr_data_ *my_td;
+
+#if ETHR_XCHK
+ if (ethr_not_inited) {
+ ASSERT(0);
+ return EACCES;
+ }
+#endif
+
+ my_td = OWN_THR_DATA;
+
+ if (!my_td) {
+ /* Only ethreads are allowed to call this function */
+ ASSERT(0);
+ return EACCES;
+ }
+
+ EnterCriticalSection(&thr_table_cs);
+
+ td = tid2thr(tid);
+ if (!td)
+ err = ESRCH;
+ else if (td->thr_handle == INVALID_HANDLE_VALUE /* i.e. detached */
+ || td->joiner != INVALID_TID) /* i.e. someone else is joining */
+ err = EINVAL;
+ else if (my_td == td)
+ err = EDEADLK;
+ else
+ td->joiner = my_td->thr_id;
+
+ LeaveCriticalSection(&thr_table_cs);
+
+ if (err)
+ goto error;
+
+ /* Wait for thread to terminate */
+ code = WaitForSingleObject(td->thr_handle, INFINITE);
+ if (code != WAIT_OBJECT_0)
+ goto error;
+
+ EnterCriticalSection(&thr_table_cs);
+
+ ASSERT(td == tid2thr(tid));
+ ASSERT(td->thr_handle != INVALID_HANDLE_VALUE);
+ ASSERT(td->joiner == my_td->thr_id);
+
+ if (res)
+ *res = td->result;
+
+ CloseHandle(td->thr_handle);
+ ASSERT(td == thr_table[THR_IX(td->thr_id)]);
+ thr_table[THR_IX(td->thr_id)] = NULL;
+ if (td != &main_thr_data)
+ (*freep)((void *) td);
+
+ LeaveCriticalSection(&thr_table_cs);
+
+ return 0;
+
+ error:
+ if (err == 0)
+ err = get_errno();
+ ASSERT(err != 0);
+ return err;
+}
+
+
+int
+ethr_thr_detach(ethr_tid tid)
+{
+ int res;
+ DWORD code;
+ thr_data_ *td;
+
+#if ETHR_XCHK
+ if (ethr_not_inited) {
+ ASSERT(0);
+ return EACCES;
+ }
+#endif
+
+ if (!OWN_THR_DATA) {
+ /* Only ethreads are allowed to call this function */
+ ASSERT(0);
+ return EACCES;
+ }
+
+ EnterCriticalSection(&thr_table_cs);
+
+ td = tid2thr(tid);
+    if (!td)
+        res = ESRCH;
+    else if (td->thr_handle == INVALID_HANDLE_VALUE /* i.e. detached */
+             || td->joiner != INVALID_TID) /* i.e. someone is joining */
+        res = EINVAL;
+ else {
+ res = 0;
+ CloseHandle(td->thr_handle);
+ td->thr_handle = INVALID_HANDLE_VALUE;
+ }
+
+ LeaveCriticalSection(&thr_table_cs);
+
+ return res;
+}
+
+
+void
+ethr_thr_exit(void *res)
+{
+ thr_data_ *td;
+#if ETHR_XCHK
+ if (ethr_not_inited) {
+ ASSERT(0);
+ return;
+ }
+#endif
+ td = OWN_THR_DATA;
+ if (!td) {
+ /* Only ethreads are allowed to call this function */
+ ASSERT(0);
+ return;
+ }
+ thr_exit_cleanup(td, res);
+ _endthreadex((unsigned) 0);
+}
+
+ethr_tid
+ethr_self(void)
+{
+ thr_data_ *td;
+#if ETHR_XCHK
+ if (ethr_not_inited) {
+ ASSERT(0);
+ return INVALID_TID;
+ }
+#endif
+ /* It is okay for non-ethreads (i.e. native win32 threads) to call
+       ethr_self(). They will, however, get INVALID_TID returned. */
+ td = OWN_THR_DATA;
+ if (!td)
+ return INVALID_TID;
+ return td->thr_id;
+}
+
+int
+ethr_equal_tids(ethr_tid tid1, ethr_tid tid2)
+{
+    /* INVALID_TID does not equal any tid, not even another INVALID_TID */
+ return tid1 == tid2 && tid1 != INVALID_TID;
+}
+
+/*
+ * Mutex functions.
+ */
+
+int
+ethr_mutex_init(ethr_mutex *mtx)
+{
+#if ETHR_XCHK
+ if (ethr_not_inited) {
+ ASSERT(0);
+ return EACCES;
+ }
+ if (!mtx) {
+ ASSERT(0);
+ return EINVAL;
+ }
+#endif
+ if (!InitializeCriticalSectionAndSpinCount(&mtx->cs, ETHR_MUTEX_SPIN_COUNT))
+ return get_errno();
+ mtx->initialized = ETHR_MUTEX_INITIALIZED;
+#if ETHR_XCHK
+ mtx->is_rec_mtx = 0;
+#endif
+ return 0;
+}
+
+int
+ethr_rec_mutex_init(ethr_mutex *mtx)
+{
+ int res;
+ res = ethr_mutex_init(mtx);
+#if ETHR_XCHK
+ mtx->is_rec_mtx = 1;
+#endif
+ return res;
+}
+
+int
+ethr_mutex_destroy(ethr_mutex *mtx)
+{
+#if ETHR_XCHK
+ if (ethr_not_inited) {
+ ASSERT(0);
+ return EACCES;
+ }
+ if (!mtx || mtx->initialized != ETHR_MUTEX_INITIALIZED) {
+ ASSERT(0);
+ return EINVAL;
+ }
+#endif
+ DeleteCriticalSection(&mtx->cs);
+ mtx->initialized = 0;
+ return 0;
+}
+
+int ethr_mutex_set_forksafe(ethr_mutex *mtx)
+{
+#if ETHR_XCHK
+ if (ethr_not_inited) {
+ ASSERT(0);
+ return EACCES;
+ }
+#endif
+ return 0; /* No fork() */
+}
+
+int ethr_mutex_unset_forksafe(ethr_mutex *mtx)
+{
+#if ETHR_XCHK
+ if (ethr_not_inited) {
+ ASSERT(0);
+ return EACCES;
+ }
+#endif
+ return 0; /* No fork() */
+}
+
+int
+ethr_mutex_trylock(ethr_mutex *mtx)
+{
+#if ETHR_XCHK
+ if (ethr_not_inited) {
+ ASSERT(0);
+ return EACCES;
+ }
+ if (!mtx
+ || (mtx->initialized && mtx->initialized != ETHR_MUTEX_INITIALIZED)) {
+ ASSERT(0);
+ return EINVAL;
+ }
+#endif
+ if (!mtx->initialized) {
+ int res = ethr_fake_static_mutex_init(mtx);
+ if (res != 0)
+ return res;
+ }
+ return ethr_mutex_trylock__(mtx);
+}
+
+int
+ethr_mutex_lock(ethr_mutex *mtx)
+{
+#if ETHR_XCHK
+ if (ethr_not_inited) {
+ ASSERT(0);
+ return EACCES;
+ }
+ if (!mtx
+ || (mtx->initialized && mtx->initialized != ETHR_MUTEX_INITIALIZED)) {
+ ASSERT(0);
+ return EINVAL;
+ }
+#endif
+ return ethr_mutex_lock__(mtx);
+}
+
+int
+ethr_mutex_unlock(ethr_mutex *mtx)
+{
+#if ETHR_XCHK
+ if (ethr_not_inited) {
+ ASSERT(0);
+ return EACCES;
+ }
+ if (!mtx || mtx->initialized != ETHR_MUTEX_INITIALIZED) {
+ ASSERT(0);
+ return EINVAL;
+ }
+#endif
+ return ethr_mutex_unlock__(mtx);
+}
+
+/*
+ * Condition variable functions.
+ */
+
+int
+ethr_cond_init(ethr_cond *cnd)
+{
+#if ETHR_XCHK
+ if (ethr_not_inited) {
+ ASSERT(0);
+ return EACCES;
+ }
+ if (!cnd) {
+ ASSERT(0);
+ return EINVAL;
+ }
+#endif
+ if (!InitializeCriticalSectionAndSpinCount(&cnd->cs, ETHR_COND_SPIN_COUNT))
+ return get_errno();
+ cnd->queue = NULL;
+ cnd->queue_end = NULL;
+ cnd->initialized = ETHR_COND_INITIALIZED;
+ return 0;
+}
+
+int
+ethr_cond_destroy(ethr_cond *cnd)
+{
+#if ETHR_XCHK
+ if (ethr_not_inited) {
+ ASSERT(0);
+ return EACCES;
+ }
+ if (!cnd
+ || (cnd->initialized && cnd->initialized != ETHR_COND_INITIALIZED)
+ || cnd->queue) {
+ ASSERT(0);
+ return EINVAL;
+ }
+#endif
+ DeleteCriticalSection(&cnd->cs);
+ cnd->initialized = 0;
+ return 0;
+}
+
+int
+ethr_cond_signal(ethr_cond *cnd)
+{
+ cnd_wait_event_ *cwe;
+#if ETHR_XCHK
+ if (ethr_not_inited) {
+ ASSERT(0);
+ return EACCES;
+ }
+ if (!cnd
+ || (cnd->initialized && cnd->initialized != ETHR_COND_INITIALIZED)) {
+ ASSERT(0);
+ return EINVAL;
+ }
+#endif
+ if (!cnd->initialized) {
+ int res = fake_static_cond_init(cnd);
+ if (res != 0)
+ return res;
+ }
+ EnterCriticalSection(&cnd->cs);
+ cwe = cnd->queue;
+ if (cwe) {
+ ASSERT(cwe->in_queue);
+ SetEvent(cnd->queue->handle);
+ if (cwe->next)
+ cwe->next->prev = NULL;
+ else {
+ ASSERT(cnd->queue_end == cnd->queue);
+ cnd->queue_end = NULL;
+ }
+ cnd->queue = cwe->next;
+ cwe->in_queue = 0;
+ }
+ LeaveCriticalSection(&cnd->cs);
+ return 0;
+}
+
+int
+ethr_cond_broadcast(ethr_cond *cnd)
+{
+ cnd_wait_event_ *cwe;
+
+#if ETHR_XCHK
+ if (ethr_not_inited) {
+ ASSERT(0);
+ return EACCES;
+ }
+ if (!cnd
+ || (cnd->initialized && cnd->initialized != ETHR_COND_INITIALIZED)) {
+ ASSERT(0);
+ return EINVAL;
+ }
+#endif
+ if (!cnd->initialized) {
+ int res = fake_static_cond_init(cnd);
+ if (res != 0)
+ return res;
+ }
+ EnterCriticalSection(&cnd->cs);
+ for (cwe = cnd->queue; cwe; cwe = cwe->next) {
+ ASSERT(cwe->in_queue);
+ SetEvent(cwe->handle);
+ cwe->in_queue = 0;
+ }
+ cnd->queue = NULL;
+ cnd->queue_end = NULL;
+ LeaveCriticalSection(&cnd->cs);
+ return 0;
+
+}
+
+int
+ethr_cond_wait(ethr_cond *cnd, ethr_mutex *mtx)
+{
+ return condwait(cnd, mtx, 0, NULL);
+}
+
+int
+ethr_cond_timedwait(ethr_cond *cnd, ethr_mutex *mtx, ethr_timeval *timeout)
+{
+ return condwait(cnd, mtx, 1, timeout);
+}
+
+int
+ethr_time_now(ethr_timeval *time)
+{
+#if ETHR_XCHK
+ if (ethr_not_inited) {
+ ASSERT(0);
+ return EACCES;
+ }
+ if (!time) {
+ ASSERT(0);
+ return EINVAL;
+ }
+#endif
+ get_curr_time(&time->tv_sec, &time->tv_nsec);
+ return 0;
+}
+
+/*
+ * Thread specific data
+ */
+
+int
+ethr_tsd_key_create(ethr_tsd_key *keyp)
+{
+ DWORD key;
+#if ETHR_XCHK
+ if (ethr_not_inited) {
+ ASSERT(0);
+ return EACCES;
+ }
+ if (!keyp) {
+ ASSERT(0);
+ return EINVAL;
+ }
+#endif
+ key = TlsAlloc();
+ if (key == TLS_OUT_OF_INDEXES)
+ return get_errno();
+ *keyp = (ethr_tsd_key) key;
+ return 0;
+}
+
+int
+ethr_tsd_key_delete(ethr_tsd_key key)
+{
+#if ETHR_XCHK
+ if (ethr_not_inited) {
+ ASSERT(0);
+ return EACCES;
+ }
+#endif
+ if (!TlsFree((DWORD) key))
+ return get_errno();
+ return 0;
+}
+
+int
+ethr_tsd_set(ethr_tsd_key key, void *value)
+{
+#if ETHR_XCHK
+ if (ethr_not_inited) {
+ ASSERT(0);
+ return EACCES;
+ }
+#endif
+ if (!TlsSetValue((DWORD) key, (LPVOID) value))
+ return get_errno();
+ return 0;
+}
+
+void *
+ethr_tsd_get(ethr_tsd_key key)
+{
+#if ETHR_XCHK
+ if (ethr_not_inited) {
+ ASSERT(0);
+ return NULL;
+ }
+#endif
+ return (void *) TlsGetValue((DWORD) key);
+}
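+
+/*
+ * Usage sketch (illustrative only; "my_data" stands for any per-thread
+ * pointer): a key is created once, after which each thread stores and
+ * retrieves its own value independently.
+ *
+ *     ethr_tsd_key key;
+ *
+ *     if (ethr_tsd_key_create(&key) == 0) {
+ *         (void) ethr_tsd_set(key, my_data);      // this thread's value
+ *         void *val = ethr_tsd_get(key);          // NULL if never set
+ *         (void) ethr_tsd_key_delete(key);
+ *     }
+ */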
+
+/* Misc: critical-section based lock initialization, used when optimized locks are unavailable */
+
+#ifndef ETHR_HAVE_OPTIMIZED_LOCKS
+
+int
+ethr_do_spinlock_init(ethr_spinlock_t *lock)
+{
+#if ETHR_XCHK
+ if (ethr_not_inited) {
+ ASSERT(0);
+ return EACCES;
+ }
+ if (!lock) {
+ ASSERT(0);
+ return EINVAL;
+ }
+#endif
+ if (InitializeCriticalSectionAndSpinCount(&lock->cs, INT_MAX))
+ return 0;
+ else
+ return get_errno();
+}
+
+int
+ethr_do_rwlock_init(ethr_rwlock_t *lock)
+{
+#if ETHR_XCHK
+ if (ethr_not_inited) {
+ ASSERT(0);
+ return EACCES;
+ }
+ if (!lock) {
+ ASSERT(0);
+ return EINVAL;
+ }
+#endif
+ lock->counter = 0;
+ if (InitializeCriticalSectionAndSpinCount(&lock->cs, INT_MAX))
+ return 0;
+ else
+ return get_errno();
+}
+
+#endif /* #ifndef ETHR_HAVE_OPTIMIZED_LOCKS */
+
+#else
+#error "Missing thread implementation"
+#endif
+
+/* Atomics */
+
+int
+ethr_atomic_init(ethr_atomic_t *var, long i)
+{
+#if ETHR_XCHK
+ if (ethr_not_inited) {
+ ASSERT(0);
+ return EACCES;
+ }
+ if (!var) {
+ ASSERT(0);
+ return EINVAL;
+ }
+#endif
+ return ethr_atomic_init__(var, i);
+}
+
+int
+ethr_atomic_set(ethr_atomic_t *var, long i)
+{
+#if ETHR_XCHK
+ if (ethr_not_inited) {
+ ASSERT(0);
+ return EACCES;
+ }
+ if (!var) {
+ ASSERT(0);
+ return EINVAL;
+ }
+#endif
+ return ethr_atomic_set__(var, i);
+}
+
+int
+ethr_atomic_read(ethr_atomic_t *var, long *i)
+{
+#if ETHR_XCHK
+ if (ethr_not_inited) {
+ ASSERT(0);
+ return EACCES;
+ }
+ if (!var || !i) {
+ ASSERT(0);
+ return EINVAL;
+ }
+#endif
+ return ethr_atomic_read__(var, i);
+}
+
+
+int
+ethr_atomic_addtest(ethr_atomic_t *var, long incr, long *testp)
+{
+#if ETHR_XCHK
+ if (ethr_not_inited) {
+ ASSERT(0);
+ return EACCES;
+ }
+ if (!var || !testp) {
+ ASSERT(0);
+ return EINVAL;
+ }
+#endif
+ return ethr_atomic_addtest__(var, incr, testp);
+}
+
+int
+ethr_atomic_inctest(ethr_atomic_t *incp, long *testp)
+{
+#if ETHR_XCHK
+ if (ethr_not_inited) {
+ ASSERT(0);
+ return EACCES;
+ }
+ if (!incp || !testp) {
+ ASSERT(0);
+ return EINVAL;
+ }
+#endif
+ return ethr_atomic_inctest__(incp, testp);
+}
+
+int
+ethr_atomic_dectest(ethr_atomic_t *decp, long *testp)
+{
+#if ETHR_XCHK
+ if (ethr_not_inited) {
+ ASSERT(0);
+ return EACCES;
+ }
+ if (!decp || !testp) {
+ ASSERT(0);
+ return EINVAL;
+ }
+#endif
+ return ethr_atomic_dectest__(decp, testp);
+}
+
+int
+ethr_atomic_add(ethr_atomic_t *var, long incr)
+{
+#if ETHR_XCHK
+ if (ethr_not_inited) {
+ ASSERT(0);
+ return EACCES;
+ }
+ if (!var) {
+ ASSERT(0);
+ return EINVAL;
+ }
+#endif
+ return ethr_atomic_add__(var, incr);
+}
+
+int
+ethr_atomic_inc(ethr_atomic_t *incp)
+{
+#if ETHR_XCHK
+ if (ethr_not_inited) {
+ ASSERT(0);
+ return EACCES;
+ }
+ if (!incp) {
+ ASSERT(0);
+ return EINVAL;
+ }
+#endif
+ return ethr_atomic_inc__(incp);
+}
+
+int
+ethr_atomic_dec(ethr_atomic_t *decp)
+{
+#if ETHR_XCHK
+ if (ethr_not_inited) {
+ ASSERT(0);
+ return EACCES;
+ }
+ if (!decp) {
+ ASSERT(0);
+ return EINVAL;
+ }
+#endif
+ return ethr_atomic_dec__(decp);
+}
+
+int
+ethr_atomic_and_old(ethr_atomic_t *var, long mask, long *old)
+{
+#if ETHR_XCHK
+ if (ethr_not_inited) {
+ ASSERT(0);
+ return EACCES;
+ }
+ if (!var || !old) {
+ ASSERT(0);
+ return EINVAL;
+ }
+#endif
+ return ethr_atomic_and_old__(var, mask, old);
+}
+
+int
+ethr_atomic_or_old(ethr_atomic_t *var, long mask, long *old)
+{
+#if ETHR_XCHK
+ if (ethr_not_inited) {
+ ASSERT(0);
+ return EACCES;
+ }
+ if (!var || !old) {
+ ASSERT(0);
+ return EINVAL;
+ }
+#endif
+ return ethr_atomic_or_old__(var, mask, old);
+}
+
+int
+ethr_atomic_xchg(ethr_atomic_t *var, long new, long *old)
+{
+#if ETHR_XCHK
+ if (ethr_not_inited) {
+ ASSERT(0);
+ return EACCES;
+ }
+ if (!var || !old) {
+ ASSERT(0);
+ return EINVAL;
+ }
+#endif
+ return ethr_atomic_xchg__(var, new, old);
+}
+
+int
+ethr_atomic_cmpxchg(ethr_atomic_t *var, long new, long expected, long *old)
+{
+#if ETHR_XCHK
+ if (ethr_not_inited) {
+ ASSERT(0);
+ return EACCES;
+ }
+ if (!var || !old) {
+ ASSERT(0);
+ return EINVAL;
+ }
+#endif
+ return ethr_atomic_cmpxchg__(var, new, expected, old);
+}
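+
+/*
+ * Usage sketch (illustrative only): a shared counter plus a compare-and-swap
+ * retry loop built from the checked wrappers above.
+ *
+ *     ethr_atomic_t counter;
+ *     long val, old;
+ *
+ *     (void) ethr_atomic_init(&counter, 0);
+ *     (void) ethr_atomic_inc(&counter);
+ *     (void) ethr_atomic_read(&counter, &val);
+ *
+ *     // replace the current value with 10, retrying if it changed under us
+ *     do {
+ *         (void) ethr_atomic_read(&counter, &val);
+ *         (void) ethr_atomic_cmpxchg(&counter, 10, val, &old);
+ *     } while (old != val);
+ */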
+
+/* Spinlocks and rwspinlocks */
+
+int
+ethr_spinlock_init(ethr_spinlock_t *lock)
+{
+#if ETHR_XCHK
+ if (ethr_not_inited) {
+ ASSERT(0);
+ return EACCES;
+ }
+ if (!lock) {
+ ASSERT(0);
+ return EINVAL;
+ }
+#endif
+ return ethr_spinlock_init__(lock);
+}
+
+int
+ethr_spinlock_destroy(ethr_spinlock_t *lock)
+{
+#if ETHR_XCHK
+ if (ethr_not_inited) {
+ ASSERT(0);
+ return EACCES;
+ }
+ if (!lock) {
+ ASSERT(0);
+ return EINVAL;
+ }
+#endif
+ return ethr_spinlock_destroy__(lock);
+}
+
+
+int
+ethr_spin_unlock(ethr_spinlock_t *lock)
+{
+#if ETHR_XCHK
+ if (ethr_not_inited) {
+ ASSERT(0);
+ return EACCES;
+ }
+ if (!lock) {
+ ASSERT(0);
+ return EINVAL;
+ }
+#endif
+ return ethr_spin_unlock__(lock);
+}
+
+int
+ethr_spin_lock(ethr_spinlock_t *lock)
+{
+#if ETHR_XCHK
+ if (ethr_not_inited) {
+ ASSERT(0);
+ return EACCES;
+ }
+ if (!lock) {
+ ASSERT(0);
+ return EINVAL;
+ }
+#endif
+ return ethr_spin_lock__(lock);
+}
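+
+/*
+ * Usage sketch (illustrative only): a spinlock protecting a short critical
+ * section; spinlocks are meant for very short hold times.
+ *
+ *     ethr_spinlock_t slock;
+ *
+ *     if (ethr_spinlock_init(&slock) == 0) {
+ *         (void) ethr_spin_lock(&slock);
+ *         // ... touch shared data briefly ...
+ *         (void) ethr_spin_unlock(&slock);
+ *         (void) ethr_spinlock_destroy(&slock);
+ *     }
+ */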
+
+int
+ethr_rwlock_init(ethr_rwlock_t *lock)
+{
+#if ETHR_XCHK
+ if (ethr_not_inited) {
+ ASSERT(0);
+ return EACCES;
+ }
+ if (!lock) {
+ ASSERT(0);
+ return EINVAL;
+ }
+#endif
+ return ethr_rwlock_init__(lock);
+}
+
+int
+ethr_rwlock_destroy(ethr_rwlock_t *lock)
+{
+#if ETHR_XCHK
+ if (ethr_not_inited) {
+ ASSERT(0);
+ return EACCES;
+ }
+ if (!lock) {
+ ASSERT(0);
+ return EINVAL;
+ }
+#endif
+ return ethr_rwlock_destroy__(lock);
+}
+
+int
+ethr_read_unlock(ethr_rwlock_t *lock)
+{
+#if ETHR_XCHK
+ if (ethr_not_inited) {
+ ASSERT(0);
+ return EACCES;
+ }
+ if (!lock) {
+ ASSERT(0);
+ return EINVAL;
+ }
+#endif
+ return ethr_read_unlock__(lock);
+}
+
+int
+ethr_read_lock(ethr_rwlock_t *lock)
+{
+#if ETHR_XCHK
+ if (ethr_not_inited) {
+ ASSERT(0);
+ return EACCES;
+ }
+ if (!lock) {
+ ASSERT(0);
+ return EINVAL;
+ }
+#endif
+ return ethr_read_lock__(lock);
+}
+
+int
+ethr_write_unlock(ethr_rwlock_t *lock)
+{
+#if ETHR_XCHK
+ if (ethr_not_inited) {
+ ASSERT(0);
+ return EACCES;
+ }
+ if (!lock) {
+ ASSERT(0);
+ return EINVAL;
+ }
+#endif
+ return ethr_write_unlock__(lock);
+}
+
+int
+ethr_write_lock(ethr_rwlock_t *lock)
+{
+#if ETHR_XCHK
+ if (ethr_not_inited) {
+ ASSERT(0);
+ return EACCES;
+ }
+ if (!lock) {
+ ASSERT(0);
+ return EINVAL;
+ }
+#endif
+ return ethr_write_lock__(lock);
+}
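+
+/*
+ * Usage sketch (illustrative only): multiple readers may hold the read lock
+ * concurrently, while the write lock is exclusive.
+ *
+ *     ethr_rwlock_t rwlck;
+ *
+ *     if (ethr_rwlock_init(&rwlck) == 0) {
+ *         (void) ethr_read_lock(&rwlck);      // shared access
+ *         // ... read shared data ...
+ *         (void) ethr_read_unlock(&rwlck);
+ *
+ *         (void) ethr_write_lock(&rwlck);     // exclusive access
+ *         // ... modify shared data ...
+ *         (void) ethr_write_unlock(&rwlck);
+ *
+ *         (void) ethr_rwlock_destroy(&rwlck);
+ *     }
+ */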
+
+
+int
+ethr_gate_init(ethr_gate *gp)
+{
+ int res;
+#if ETHR_XCHK
+ if (ethr_not_inited) {
+ ASSERT(0);
+ return EACCES;
+ }
+ if (!gp) {
+ ASSERT(0);
+ return EINVAL;
+ }
+#endif
+ res = ethr_mutex_init(&gp->mtx);
+ if (res != 0)
+ return res;
+ res = ethr_cond_init(&gp->cnd);
+ if (res != 0) {
+ ethr_mutex_destroy(&gp->mtx);
+ return res;
+ }
+ gp->open = 0;
+ return 0;
+}
+
+int
+ethr_gate_destroy(ethr_gate *gp)
+{
+ int res, dres;
+#if ETHR_XCHK
+ if (ethr_not_inited) {
+ ASSERT(0);
+ return EACCES;
+ }
+ if (!gp) {
+ ASSERT(0);
+ return EINVAL;
+ }
+#endif
+ res = ethr_mutex_destroy(&gp->mtx);
+ dres = ethr_cond_destroy(&gp->cnd);
+ if (res == 0)
+ res = dres;
+ gp->open = 0;
+ return res;
+}
+
+int
+ethr_gate_close(ethr_gate *gp)
+{
+ int res;
+#if ETHR_XCHK
+ if (ethr_not_inited) {
+ ASSERT(0);
+ return EACCES;
+ }
+ if (!gp) {
+ ASSERT(0);
+ return EINVAL;
+ }
+#endif
+ res = ethr_mutex_lock__(&gp->mtx);
+ if (res != 0)
+ return res;
+ gp->open = 0;
+ res = ethr_mutex_unlock__(&gp->mtx);
+ return res;
+}
+
+int
+ethr_gate_let_through(ethr_gate *gp, unsigned no)
+{
+ int res, ures;
+#if ETHR_XCHK
+ if (ethr_not_inited) {
+ ASSERT(0);
+ return EACCES;
+ }
+ if (!gp) {
+ ASSERT(0);
+ return EINVAL;
+ }
+#endif
+ res = ethr_mutex_lock__(&gp->mtx);
+ if (res != 0)
+ return res;
+ gp->open += no;
+ res = (gp->open == 1
+ ? ethr_cond_signal(&gp->cnd)
+ : ethr_cond_broadcast(&gp->cnd));
+ ures = ethr_mutex_unlock__(&gp->mtx);
+    if (res == 0)
+ res = ures;
+ return res;
+}
+
+int
+ethr_gate_swait(ethr_gate *gp, int spincount)
+{
+ int res, ures, n;
+#if ETHR_XCHK
+ if (ethr_not_inited) {
+ ASSERT(0);
+ return EACCES;
+ }
+ if (!gp) {
+ ASSERT(0);
+ return EINVAL;
+ }
+#endif
+ n = spincount;
+ res = ethr_mutex_lock__(&gp->mtx);
+ if (res != 0)
+ return res;
+ while (n >= 0 && !gp->open) {
+ res = ethr_mutex_unlock__(&gp->mtx);
+ if (res != 0)
+ return res;
+ res = ethr_mutex_lock__(&gp->mtx);
+ if (res != 0)
+ return res;
+ n--;
+ }
+ while (!gp->open) {
+ res = ethr_cond_wait(&gp->cnd, &gp->mtx);
+ if (res != 0 && res != EINTR)
+ goto done;
+ }
+ gp->open--;
+ done:
+ ures = ethr_mutex_unlock__(&gp->mtx);
+ if (res == 0)
+ res = ures;
+ return res;
+}
+
+
+int
+ethr_gate_wait(ethr_gate *gp)
+{
+ return ethr_gate_swait(gp, 0);
+}
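+
+/*
+ * Usage sketch (illustrative only): a gate starts closed; threads block in
+ * ethr_gate_wait()/ethr_gate_swait() until some other thread lets a number
+ * of them through, each passing waiter consuming one slot.
+ *
+ *     ethr_gate gate;
+ *
+ *     if (ethr_gate_init(&gate) == 0) {
+ *         // waiting thread(s)
+ *         (void) ethr_gate_wait(&gate);           // blocks while closed
+ *
+ *         // controlling thread
+ *         (void) ethr_gate_let_through(&gate, 2); // let up to two waiters pass
+ *         (void) ethr_gate_close(&gate);          // shut the gate again
+ *         (void) ethr_gate_destroy(&gate);
+ *     }
+ */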
+
+
+/* rwmutex fallback */
+#ifdef ETHR_USE_RWMTX_FALLBACK
+
+int
+ethr_rwmutex_init(ethr_rwmutex *rwmtx)
+{
+ int res;
+#if ETHR_XCHK
+ if (!rwmtx) {
+ ASSERT(0);
+ return EINVAL;
+ }
+#endif
+ res = ethr_mutex_init(&rwmtx->mtx);
+ if (res != 0)
+ return res;
+    res = ethr_cond_init(&rwmtx->rcnd);
+ if (res != 0)
+ goto error_cleanup1;
+ res = ethr_cond_init(&rwmtx->wcnd);
+ if (res != 0)
+ goto error_cleanup2;
+ rwmtx->readers = 0;
+ rwmtx->waiting_readers = 0;
+ rwmtx->waiting_writers = 0;
+#if ETHR_XCHK
+ rwmtx->initialized = ETHR_RWMUTEX_INITIALIZED;
+#endif
+ return 0;
+ error_cleanup2:
+ ethr_cond_destroy(&rwmtx->rcnd);
+ error_cleanup1:
+ ethr_mutex_destroy(&rwmtx->mtx);
+ return res;
+}
+
+int
+ethr_rwmutex_destroy(ethr_rwmutex *rwmtx)
+{
+ int res, pres;
+#if ETHR_XCHK
+ if (!rwmtx || rwmtx->initialized != ETHR_RWMUTEX_INITIALIZED) {
+ ASSERT(0);
+ return EINVAL;
+ }
+ rwmtx->initialized = 0;
+#endif
+ res = ethr_mutex_destroy(&rwmtx->mtx);
+ pres = ethr_cond_destroy(&rwmtx->rcnd);
+ if (res == 0)
+ res = pres;
+ pres = ethr_cond_destroy(&rwmtx->wcnd);
+ if (res == 0)
+ res = pres;
+ return res;
+}
+
+int
+ethr_rwmutex_tryrlock(ethr_rwmutex *rwmtx)
+{
+ int res;
+#if ETHR_XCHK
+ if (!rwmtx || rwmtx->initialized != ETHR_RWMUTEX_INITIALIZED) {
+ ASSERT(0);
+ return EINVAL;
+ }
+#endif
+ res = ethr_mutex_trylock__(&rwmtx->mtx);
+ if (res != 0)
+ return res;
+    if (rwmtx->waiting_writers) {
+ res = ethr_mutex_unlock__(&rwmtx->mtx);
+ if (res == 0)
+ return EBUSY;
+ return res;
+ }
+ rwmtx->readers++;
+ return ethr_mutex_unlock__(&rwmtx->mtx);
+}
+
+int
+ethr_rwmutex_rlock(ethr_rwmutex *rwmtx)
+{
+ int res;
+#if ETHR_XCHK
+ if (!rwmtx || rwmtx->initialized != ETHR_RWMUTEX_INITIALIZED) {
+ ASSERT(0);
+ return EINVAL;
+ }
+#endif
+ res = ethr_mutex_lock__(&rwmtx->mtx);
+ if (res != 0)
+ return res;
+ while (rwmtx->waiting_writers) {
+ rwmtx->waiting_readers++;
+ res = ethr_cond_wait(&rwmtx->rcnd, &rwmtx->mtx);
+ rwmtx->waiting_readers--;
+ if (res != 0 && res != EINTR) {
+ (void) ethr_mutex_unlock__(&rwmtx->mtx);
+ return res;
+ }
+ }
+ rwmtx->readers++;
+ return ethr_mutex_unlock__(&rwmtx->mtx);
+}
+
+int
+ethr_rwmutex_runlock(ethr_rwmutex *rwmtx)
+{
+ int res, ures;
+#if ETHR_XCHK
+ if (!rwmtx || rwmtx->initialized != ETHR_RWMUTEX_INITIALIZED) {
+ ASSERT(0);
+ return EINVAL;
+ }
+#endif
+ res = ethr_mutex_lock__(&rwmtx->mtx);
+ if (res != 0)
+ return res;
+ rwmtx->readers--;
+ if (!rwmtx->readers && rwmtx->waiting_writers)
+ res = ethr_cond_signal(&rwmtx->wcnd);
+ ures = ethr_mutex_unlock__(&rwmtx->mtx);
+ if (res == 0)
+ res = ures;
+ return res;
+}
+
+int
+ethr_rwmutex_tryrwlock(ethr_rwmutex *rwmtx)
+{
+ int res;
+#if ETHR_XCHK
+ if (!rwmtx || rwmtx->initialized != ETHR_RWMUTEX_INITIALIZED) {
+ ASSERT(0);
+ return EINVAL;
+ }
+#endif
+ res = ethr_mutex_trylock__(&rwmtx->mtx);
+ if (res != 0)
+ return res;
+ if (!rwmtx->readers && !rwmtx->waiting_writers)
+ return 0;
+ else {
+ res = ethr_mutex_unlock__(&rwmtx->mtx);
+ if (res == 0)
+ return EBUSY;
+ return res;
+ }
+}
+
+int
+ethr_rwmutex_rwlock(ethr_rwmutex *rwmtx)
+{
+ int res;
+#if ETHR_XCHK
+ if (!rwmtx || rwmtx->initialized != ETHR_RWMUTEX_INITIALIZED) {
+ ASSERT(0);
+ return EINVAL;
+ }
+#endif
+ res = ethr_mutex_lock__(&rwmtx->mtx);
+ if (res != 0)
+ return res;
+ if (!rwmtx->readers && !rwmtx->waiting_writers)
+ return 0;
+
+ while (rwmtx->readers) {
+ rwmtx->waiting_writers++;
+ res = ethr_cond_wait(&rwmtx->wcnd, &rwmtx->mtx);
+ rwmtx->waiting_writers--;
+ if (res != 0 && res != EINTR) {
+ (void) ethr_rwmutex_rwunlock(rwmtx);
+ return res;
+ }
+ }
+ return 0;
+}
+
+int
+ethr_rwmutex_rwunlock(ethr_rwmutex *rwmtx)
+{
+ int res, ures;
+#if ETHR_XCHK
+ if (!rwmtx || rwmtx->initialized != ETHR_RWMUTEX_INITIALIZED) {
+ ASSERT(0);
+ return EINVAL;
+ }
+#endif
+ res = 0;
+ if (rwmtx->waiting_writers)
+ res = ethr_cond_signal(&rwmtx->wcnd);
+ else if (rwmtx->waiting_readers)
+ res = ethr_cond_broadcast(&rwmtx->rcnd);
+ ures = ethr_mutex_unlock__(&rwmtx->mtx);
+ if (res == 0)
+ res = ures;
+ return res;
+}
+
+#endif /* #ifdef ETHR_USE_RWMTX_FALLBACK */
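+
+/*
+ * Usage sketch (illustrative only; the same calls apply whether this fallback
+ * or a native rwmutex implementation is in use):
+ *
+ *     ethr_rwmutex rwmtx;
+ *
+ *     if (ethr_rwmutex_init(&rwmtx) == 0) {
+ *         (void) ethr_rwmutex_rlock(&rwmtx);      // shared (read) section
+ *         (void) ethr_rwmutex_runlock(&rwmtx);
+ *
+ *         (void) ethr_rwmutex_rwlock(&rwmtx);     // exclusive (write) section
+ *         (void) ethr_rwmutex_rwunlock(&rwmtx);
+ *
+ *         (void) ethr_rwmutex_destroy(&rwmtx);
+ *     }
+ */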
+
+void
+ethr_compiler_barrier(void)
+{
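+    /*
+     * Intentionally left empty: the call to this out-of-line function is
+     * itself intended to keep the compiler from reordering memory accesses
+     * across it.
+     */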
+
+}
+
+#ifdef DEBUG
+
+#include <stdio.h>
+int ethr_assert_failed(char *f, int l, char *a)
+{
+ fprintf(stderr, "%s:%d: Assertion failed: %s\n", f, l, a);
+ abort();
+ return 0;
+}
+
+#endif
+
+