Diffstat (limited to 'erts/emulator')
-rw-r--r--   erts/emulator/beam/erl_process.c                  28
-rw-r--r--   erts/emulator/test/Makefile                         1
-rw-r--r--   erts/emulator/test/mtx_SUITE.erl                  473
-rw-r--r--   erts/emulator/test/mtx_SUITE_data/Makefile.src     30
-rw-r--r--   erts/emulator/test/mtx_SUITE_data/mtx_SUITE.c     692
5 files changed, 1214 insertions(+), 10 deletions(-)
diff --git a/erts/emulator/beam/erl_process.c b/erts/emulator/beam/erl_process.c
index f252c2cbe2..fc950af8ce 100644
--- a/erts/emulator/beam/erl_process.c
+++ b/erts/emulator/beam/erl_process.c
@@ -1842,6 +1842,9 @@ do { \
static void
check_balance(ErtsRunQueue *c_rq)
{
+#if ERTS_MAX_PROCESSES >= (1 << 27)
+# error check_balance() assumes ERTS_MAX_PROCESS < (1 << 27)
+#endif
ErtsRunQueueBalance avg = {0};
Sint64 scheds_reds, full_scheds_reds;
int forced, active, current_active, oowc, half_full_scheds, full_scheds,
@@ -1965,12 +1968,14 @@ check_balance(ErtsRunQueue *c_rq)
run_queue_info[qix].prio[pix].avail = 0;
}
else {
- int xreds = 0;
- int procreds = treds;
- procreds -= run_queue_info[qix].prio[ERTS_PORT_PRIO_LEVEL].reds;
+ Sint64 xreds = 0;
+ Sint64 procreds = treds;
+ procreds -=
+ ((Sint64)
+ run_queue_info[qix].prio[ERTS_PORT_PRIO_LEVEL].reds);
for (pix = 0; pix < ERTS_NO_PROC_PRIO_LEVELS; pix++) {
- int av;
+ Sint64 av;
if (xreds == 0)
av = 100;
@@ -1981,9 +1986,10 @@ check_balance(ErtsRunQueue *c_rq)
if (av == 0)
av = 1;
}
- run_queue_info[qix].prio[pix].avail = av;
+ run_queue_info[qix].prio[pix].avail = (int) av;
+ ASSERT(run_queue_info[qix].prio[pix].avail >= 0);
if (pix < PRIORITY_NORMAL) /* ie., max or high */
- xreds += run_queue_info[qix].prio[pix].reds;
+ xreds += (Sint64) run_queue_info[qix].prio[pix].reds;
}
run_queue_info[qix].prio[ERTS_PORT_PRIO_LEVEL].avail = 100;
}
@@ -2088,7 +2094,8 @@ check_balance(ErtsRunQueue *c_rq)
if (max_len != 0) {
int avail = avg.prio[pix].avail;
if (avail != 0) {
- max_len = ((100*max_len - 1) / avail) + 1;
+ max_len = (int) ((100*((Sint64) max_len) - 1)
+ / ((Sint64) avail)) + 1;
avg.prio[pix].max_len = max_len;
ASSERT(max_len >= 0);
}
@@ -2105,9 +2112,10 @@ check_balance(ErtsRunQueue *c_rq)
|| run_queue_info[qix].prio[pix].avail == 0)
limit = 0;
else
- limit = (((avg.prio[pix].max_len
- * run_queue_info[qix].prio[pix].avail) - 1)
- / 100 + 1);
+ limit = (int) (((((Sint64) avg.prio[pix].max_len)
+ * ((Sint64) run_queue_info[qix].prio[pix].avail))
+ - 1)
+ / 100 + 1);
run_queue_info[qix].prio[pix].migration_limit = limit;
}
}
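
Why the widening in this hunk matters, in round numbers: the new #error guard pins run-queue lengths below 2^27, and the balancing code multiplies such lengths (and per-priority reduction counts) by 100. 100 * 2^27 is roughly 1.3 * 10^10, which overflows a 32-bit int (INT_MAX is about 2.1 * 10^9) but fits comfortably in a 64-bit Sint64. A minimal standalone sketch of the same widening, using plain C99 types instead of the emulator's Sint64 (illustration only, not part of the patch):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* worst-case queue length permitted by the #error guard above */
    int32_t max_len = (INT32_C(1) << 27) - 1;
    /* widen to 64 bits before multiplying, as the patched code does */
    int64_t widened = 100 * (int64_t) max_len;
    printf("100 * max_len = %lld, INT32_MAX = %ld\n",
           (long long) widened, (long) INT32_MAX);
    return 0;
}
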
diff --git a/erts/emulator/test/Makefile b/erts/emulator/test/Makefile
index a4c02da626..7259e1b84d 100644
--- a/erts/emulator/test/Makefile
+++ b/erts/emulator/test/Makefile
@@ -83,6 +83,7 @@ MODULES= \
receive_SUITE \
ref_SUITE \
register_SUITE \
+ mtx_SUITE \
save_calls_SUITE \
send_term_SUITE \
sensitive_SUITE \
diff --git a/erts/emulator/test/mtx_SUITE.erl b/erts/emulator/test/mtx_SUITE.erl
new file mode 100644
index 0000000000..ae77fe4d89
--- /dev/null
+++ b/erts/emulator/test/mtx_SUITE.erl
@@ -0,0 +1,473 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2010. All Rights Reserved.
+%%
+%% The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved online at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% %CopyrightEnd%
+%%
+
+%%
+%% Stress tests of rwmutex implementation.
+%%
+%% Author: Rickard Green
+%%
+-module(mtx_SUITE).
+
+%%-define(line_trace,true).
+
+-include("test_server.hrl").
+
+-export([all/1, init_per_suite/1, end_per_suite/1, init_per_testcase/2, fin_per_testcase/2]).
+
+-export([long_rwlock/1,
+ hammer_ets_rwlock/1,
+ hammer_rwlock/1,
+ hammer_rwlock_check/1,
+ hammer_tryrwlock/1,
+ hammer_tryrwlock_check/1,
+ hammer_sched_long_rwlock/1,
+ hammer_sched_long_rwlock_check/1,
+ hammer_sched_long_freqread_rwlock/1,
+ hammer_sched_long_freqread_rwlock_check/1,
+ hammer_sched_long_tryrwlock/1,
+ hammer_sched_long_tryrwlock_check/1,
+ hammer_sched_long_freqread_tryrwlock/1,
+ hammer_sched_long_freqread_tryrwlock_check/1,
+ hammer_sched_rwlock/1,
+ hammer_sched_rwlock_check/1,
+ hammer_sched_freqread_rwlock/1,
+ hammer_sched_freqread_rwlock_check/1,
+ hammer_sched_tryrwlock/1,
+ hammer_sched_tryrwlock_check/1,
+ hammer_sched_freqread_tryrwlock/1,
+ hammer_sched_freqread_tryrwlock_check/1]).
+
+init_per_suite(Config) when is_list(Config) ->
+ DataDir = ?config(data_dir, Config),
+ Lib = filename:join([DataDir, atom_to_list(?MODULE)]),
+ ok = erlang:load_nif(Lib, none),
+ Config.
+
+end_per_suite(Config) when is_list(Config) ->
+ Config.
+
+init_per_testcase(_Case, Config) ->
+ Dog = ?t:timetrap(?t:minutes(15)),
+ [{watchdog, Dog}|Config].
+
+fin_per_testcase(_Func, Config) ->
+ Dog = ?config(watchdog, Config),
+ ?t:timetrap_cancel(Dog).
+
+all(suite) ->
+ [
+ long_rwlock,
+ hammer_rwlock_check,
+ hammer_rwlock,
+ hammer_tryrwlock_check,
+ hammer_tryrwlock,
+ hammer_ets_rwlock,
+ hammer_sched_long_rwlock_check,
+ hammer_sched_long_rwlock,
+ hammer_sched_long_freqread_rwlock_check,
+ hammer_sched_long_freqread_rwlock,
+ hammer_sched_long_tryrwlock_check,
+ hammer_sched_long_tryrwlock,
+ hammer_sched_long_freqread_tryrwlock_check,
+ hammer_sched_long_freqread_tryrwlock,
+ hammer_sched_rwlock_check,
+ hammer_sched_rwlock,
+ hammer_sched_freqread_rwlock_check,
+ hammer_sched_freqread_rwlock,
+ hammer_sched_tryrwlock_check,
+ hammer_sched_tryrwlock,
+ hammer_sched_freqread_tryrwlock_check,
+ hammer_sched_freqread_tryrwlock
+ ].
+
+long_rwlock(Config) when is_list(Config) ->
+ statistics(runtime),
+ LLRes = long_rw_test(),
+ {_, RunTime} = statistics(runtime),
+ %% A very short run time is expected, since
+ %% threads in the test mostly wait
+ ?t:format("RunTime=~p~n", [RunTime]),
+ ?line true = RunTime < 100,
+ ?line RunTimeStr = "Run-time during test was "++integer_to_list(RunTime)++" ms.",
+ case LLRes of
+ ok ->
+ {comment, RunTimeStr};
+ {comment, Comment} ->
+ {comment, Comment ++ " " ++ RunTimeStr}
+ end.
+
+hammer_rwlock(Config) when is_list(Config) ->
+ hammer_rw_test(false).
+
+hammer_rwlock_check(Config) when is_list(Config) ->
+ hammer_rw_test(true).
+
+hammer_tryrwlock(Config) when is_list(Config) ->
+ hammer_tryrw_test(false).
+
+hammer_tryrwlock_check(Config) when is_list(Config) ->
+ hammer_tryrw_test(true).
+
+hammer_sched_rwlock(Config) when is_list(Config) ->
+ hammer_sched_rwlock_test(false, false, true, 0, 0).
+
+hammer_sched_rwlock_check(Config) when is_list(Config) ->
+ hammer_sched_rwlock_test(false, true, true, 0, 0).
+
+hammer_sched_freqread_rwlock(Config) when is_list(Config) ->
+ hammer_sched_rwlock_test(true, false, true, 0, 0).
+
+hammer_sched_freqread_rwlock_check(Config) when is_list(Config) ->
+ hammer_sched_rwlock_test(true, true, true, 0, 0).
+
+hammer_sched_tryrwlock(Config) when is_list(Config) ->
+ hammer_sched_rwlock_test(false, false, false, 0, 100).
+
+hammer_sched_tryrwlock_check(Config) when is_list(Config) ->
+ hammer_sched_rwlock_test(false, true, false, 0, 100).
+
+hammer_sched_freqread_tryrwlock(Config) when is_list(Config) ->
+ hammer_sched_rwlock_test(true, false, false, 0, 100).
+
+hammer_sched_freqread_tryrwlock_check(Config) when is_list(Config) ->
+ hammer_sched_rwlock_test(true, true, false, 0, 100).
+
+hammer_sched_long_rwlock(Config) when is_list(Config) ->
+ hammer_sched_rwlock_test(false, false, true, 100, 0).
+
+hammer_sched_long_rwlock_check(Config) when is_list(Config) ->
+ hammer_sched_rwlock_test(false, true, true, 100, 0).
+
+hammer_sched_long_freqread_rwlock(Config) when is_list(Config) ->
+ hammer_sched_rwlock_test(true, false, true, 100, 0).
+
+hammer_sched_long_freqread_rwlock_check(Config) when is_list(Config) ->
+ hammer_sched_rwlock_test(true, true, true, 100, 0).
+
+hammer_sched_long_tryrwlock(Config) when is_list(Config) ->
+ hammer_sched_rwlock_test(false, false, false, 100, 100).
+
+hammer_sched_long_tryrwlock_check(Config) when is_list(Config) ->
+ hammer_sched_rwlock_test(false, true, false, 100, 100).
+
+hammer_sched_long_freqread_tryrwlock(Config) when is_list(Config) ->
+ hammer_sched_rwlock_test(true, false, false, 100, 100).
+
+hammer_sched_long_freqread_tryrwlock_check(Config) when is_list(Config) ->
+ hammer_sched_rwlock_test(true, true, false, 100, 100).
+
+hammer_sched_rwlock_test(FreqRead, LockCheck, Blocking, WaitLocked, WaitUnlocked) ->
+ case create_rwlock(FreqRead, LockCheck) of
+ enotsup ->
+ {skipped, "Not supported."};
+ RWLock ->
+ Onln = erlang:system_info(schedulers_online),
+ NWPs = case Onln div 3 of
+ 1 -> case Onln < 4 of
+ true -> 1;
+ false -> 2
+ end;
+ X -> X
+ end,
+ NRPs = Onln - NWPs,
+ NoLockOps = ((((50000000 div Onln)
+ div case {Blocking, WaitLocked} of
+ {false, 0} -> 1;
+ _ -> 10
+ end)
+ div (case WaitLocked == 0 of
+ true -> 1;
+ false -> WaitLocked*250
+ end))
+ div handicap()),
+ ?t:format("NoLockOps=~p~n", [NoLockOps]),
+ Sleep = case Blocking of
+ true -> NoLockOps;
+ false -> NoLockOps div 10
+ end,
+ WPs = lists:map(
+ fun (Sched) ->
+ spawn_opt(
+ fun () ->
+ io:format("Writer on scheduler ~p.~n",
+ [Sched]),
+ Sched = erlang:system_info(scheduler_id),
+ receive go -> gone end,
+ hammer_sched_rwlock_proc(RWLock,
+ Blocking,
+ true,
+ WaitLocked,
+ WaitUnlocked,
+ NoLockOps,
+ Sleep),
+ Sched = erlang:system_info(scheduler_id)
+ end,
+ [link, {scheduler, Sched}])
+ end,
+ lists:seq(1, NWPs)),
+ RPs = lists:map(
+ fun (Sched) ->
+ spawn_opt(
+ fun () ->
+ io:format("Reader on scheduler ~p.~n",
+ [Sched]),
+ Sched = erlang:system_info(scheduler_id),
+ receive go -> gone end,
+ hammer_sched_rwlock_proc(RWLock,
+ Blocking,
+ false,
+ WaitLocked,
+ WaitUnlocked,
+ NoLockOps,
+ Sleep),
+ Sched = erlang:system_info(scheduler_id)
+ end,
+ [link, {scheduler, Sched}])
+ end,
+ lists:seq(NWPs + 1, NWPs + NRPs)),
+ Procs = WPs ++ RPs,
+ case {Blocking, WaitLocked} of
+ {_, 0} -> ok;
+ {false, _} -> ok;
+ _ -> statistics(runtime)
+ end,
+ lists:foreach(fun (P) -> P ! go end, Procs),
+ lists:foreach(fun (P) ->
+ M = erlang:monitor(process, P),
+ receive
+ {'DOWN', M, process, P, _} ->
+ ok
+ end
+ end,
+ Procs),
+ case {Blocking, WaitLocked} of
+ {_, 0} -> ok;
+ {false, _} -> ok;
+ _ ->
+ {_, RunTime} = statistics(runtime),
+ ?t:format("RunTime=~p~n", [RunTime]),
+ ?line true = RunTime < 500,
+ {comment,
+ "Run-time during test was "
+ ++ integer_to_list(RunTime)
+ ++ " ms."}
+ end
+ end.
+
+hammer_sched_rwlock_proc(_RWLock,
+ _Blocking,
+ _WriteOp,
+ _WaitLocked,
+ _WaitUnlocked,
+ 0,
+ _Sleep) ->
+ ok;
+hammer_sched_rwlock_proc(RWLock,
+ Blocking,
+ WriteOp,
+ WaitLocked,
+ WaitUnlocked,
+ Times,
+ Sleep) when Times rem Sleep == 0 ->
+ rwlock_op(RWLock, Blocking, WriteOp, WaitLocked, WaitUnlocked),
+ hammer_sched_rwlock_proc(RWLock,
+ Blocking,
+ WriteOp,
+ WaitLocked,
+ WaitUnlocked,
+ Times - 1,
+ Sleep);
+hammer_sched_rwlock_proc(RWLock,
+ Blocking,
+ WriteOp,
+ WaitLocked,
+ WaitUnlocked,
+ Times,
+ Sleep) ->
+ rwlock_op(RWLock, Blocking, WriteOp, WaitLocked, 0),
+ hammer_sched_rwlock_proc(RWLock,
+ Blocking,
+ WriteOp,
+ WaitLocked,
+ WaitUnlocked,
+ Times - 1,
+ Sleep).
+
+-define(HAMMER_ETS_RWLOCK_REPEAT_TIMES, 1).
+-define(HAMMER_ETS_RWLOCK_TSIZE, 500).
+
+hammer_ets_rwlock(Config) when is_list(Config) ->
+ {Ops, Procs} = case handicap() of
+ 1 -> {20000, 500};
+ 2 -> {20000, 50};
+ 3 -> {2000, 50};
+ _ -> {200, 50}
+ end,
+ ?t:format("Procs=~p~nOps=~p~n", [Procs, Ops]),
+ lists:foreach(fun (XOpts) ->
+ ?t:format("Running with extra opts: ~p", [XOpts]),
+ hammer_ets_rwlock_test(XOpts, true, 2, Ops,
+ Procs, false)
+ end,
+ [[],
+ [{read_concurrency, true}],
+ [{write_concurrency, true}],
+ [{read_concurrency, true},{write_concurrency, true}]]),
+ ok.
+
+%% Aux funcs
+
+long_rw_test() ->
+ exit(no_nif_implementation).
+
+hammer_rw_test(_Arg) ->
+ exit(no_nif_implementation).
+
+hammer_tryrw_test(_Arg) ->
+ exit(no_nif_implementation).
+
+create_rwlock(_FreqRead, _LockCheck) ->
+ exit(no_nif_implementation).
+
+rwlock_op(_RWLock, _Blocking, _WriteOp, _WaitLocked, _WaitUnlocked) ->
+ exit(no_nif_implementation).
+
+hammer_ets_rwlock_put_data() ->
+ put(?MODULE, {"here are some", data, "to store", make_ref()}).
+
+hammer_ets_rwlock_get_data() ->
+ get(?MODULE).
+
+hammer_ets_rwlock_ops(_T, _UW, _N, _C, _SC, 0) ->
+ ok;
+hammer_ets_rwlock_ops(T, UW, N, C, SC, Tot) when N >= ?HAMMER_ETS_RWLOCK_TSIZE ->
+ hammer_ets_rwlock_ops(T, UW, 0, C, SC, Tot);
+hammer_ets_rwlock_ops(T, UW, N, 0, SC, Tot) ->
+ case UW of
+ true ->
+ true = ets:insert(T, {N, Tot, hammer_ets_rwlock_get_data()});
+ false ->
+ [{N, _, _}] = ets:lookup(T, N)
+ end,
+ hammer_ets_rwlock_ops(T, UW, N+1, SC, SC, Tot-1);
+hammer_ets_rwlock_ops(T, UW, N, C, SC, Tot) ->
+ case UW of
+ false ->
+ true = ets:insert(T, {N, Tot, hammer_ets_rwlock_get_data()});
+ true ->
+ [{N, _, _}] = ets:lookup(T, N)
+ end,
+ hammer_ets_rwlock_ops(T, UW, N+1, C-1, SC, Tot-1).
+
+hammer_ets_rwlock_init(T, N) when N < ?HAMMER_ETS_RWLOCK_TSIZE ->
+ ets:insert(T, {N, N, N}),
+ hammer_ets_rwlock_init(T, N+1);
+hammer_ets_rwlock_init(_T, _N) ->
+ ok.
+
+hammer_ets_rwlock_test(XOpts, UW, C, N, NP, SC) ->
+ receive after 100 -> ok end,
+ {TP, TM} = spawn_monitor(
+ fun () ->
+ _L = repeat_list(
+ fun () ->
+ Caller = self(),
+ T = fun () ->
+ Parent = self(),
+ hammer_ets_rwlock_put_data(),
+ T=ets:new(x, [public | XOpts]),
+ hammer_ets_rwlock_init(T, 0),
+ Ps0 = repeat_list(
+ fun () ->
+ spawn_link(
+ fun () ->
+ hammer_ets_rwlock_put_data(),
+ receive go -> ok end,
+ hammer_ets_rwlock_ops(T, UW, N, C, C, N),
+ Parent ! {done, self()},
+ receive after infinity -> ok end
+ end)
+ end,
+ NP - case SC of
+ false -> 0;
+ _ -> 1
+ end),
+ Ps = case SC of
+ false -> Ps0;
+ _ -> [spawn_link(fun () ->
+ hammer_ets_rwlock_put_data(),
+ receive go -> ok end,
+ hammer_ets_rwlock_ops(T, UW, N, SC, SC, N),
+ Parent ! {done, self()},
+ receive after infinity -> ok end
+ end) | Ps0]
+ end,
+ Start = now(),
+ lists:foreach(fun (P) -> P ! go end, Ps),
+ lists:foreach(fun (P) -> receive {done, P} -> ok end end, Ps),
+ Stop = now(),
+ lists:foreach(fun (P) ->
+ unlink(P),
+ exit(P, bang),
+ M = erlang:monitor(process, P),
+ receive
+ {'DOWN', M, process, P, _} -> ok
+ end
+ end, Ps),
+ Res = timer:now_diff(Stop, Start)/1000000,
+ Caller ! {?MODULE, self(), Res}
+ end,
+ TP = spawn_link(T),
+ receive
+ {?MODULE, TP, Res} ->
+ Res
+ end
+ end,
+ ?HAMMER_ETS_RWLOCK_REPEAT_TIMES)
+ end),
+ receive
+ {'DOWN', TM, process, TP, _} -> ok
+ end.
+
+repeat_list(Fun, N) ->
+ repeat_list(Fun, N, []).
+
+repeat_list(_Fun, 0, Acc) ->
+ Acc;
+repeat_list(Fun, N, Acc) ->
+ repeat_list(Fun, N-1, [Fun()|Acc]).
+
+
+handicap() ->
+ X0 = case catch (erlang:system_info(logical_processors_available) >=
+ erlang:system_info(schedulers_online)) of
+ true -> 1;
+ _ -> 2
+ end,
+ case erlang:system_info(build_type) of
+ opt ->
+ X0;
+ ReallySlow when ReallySlow == debug;
+ ReallySlow == valgrind;
+ ReallySlow == purify ->
+ X0*3;
+ _Slow ->
+ X0*2
+ end.
+
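For a rough sense of the load these cases generate: with, say, 4 schedulers online on an opt build with enough logical processors (handicap() = 1), the plain hammer_sched_rwlock case (Blocking = true, WaitLocked = 0) computes NoLockOps = 50000000 div 4 div 10 div 1 div 1 = 1250000 lock operations per process, while the try-lock and long-hold variants scale that figure down further through the WaitLocked*250 and handicap divisors.
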
diff --git a/erts/emulator/test/mtx_SUITE_data/Makefile.src b/erts/emulator/test/mtx_SUITE_data/Makefile.src
new file mode 100644
index 0000000000..b6c843269c
--- /dev/null
+++ b/erts/emulator/test/mtx_SUITE_data/Makefile.src
@@ -0,0 +1,30 @@
+#
+# %CopyrightBegin%
+#
+# Copyright Ericsson AB 2010. All Rights Reserved.
+#
+# The contents of this file are subject to the Erlang Public License,
+# Version 1.1, (the "License"); you may not use this file except in
+# compliance with the License. You should have received a copy of the
+# Erlang Public License along with this software. If not, it can be
+# retrieved online at http://www.erlang.org/.
+#
+# Software distributed under the License is distributed on an "AS IS"
+# basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+# the License for the specific language governing rights and limitations
+# under the License.
+#
+# %CopyrightEnd%
+#
+
+include @erts_lib_include_internal_generated@@DS@ethread.mk
+include @erts_lib_include_internal_generated@@DS@erts_internal.mk
+
+NIF_LIBS = mtx_SUITE@dll@
+
+SHLIB_EXTRA_CFLAGS = $(ETHR_DEFS) -I@erts_lib_include_internal@ -I@erts_lib_include_internal_generated@
+LIBS = @ERTS_LIBS@
+
+all: $(NIF_LIBS)
+
+@SHLIB_RULES@
diff --git a/erts/emulator/test/mtx_SUITE_data/mtx_SUITE.c b/erts/emulator/test/mtx_SUITE_data/mtx_SUITE.c
new file mode 100644
index 0000000000..818023211c
--- /dev/null
+++ b/erts/emulator/test/mtx_SUITE_data/mtx_SUITE.c
@@ -0,0 +1,692 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2010. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Stress tests of rwmutex implementation.
+ *
+ * Author: Rickard Green
+ */
+
+#include "erl_nif.h"
+
+#ifdef __WIN32__
+# ifndef WIN32_LEAN_AND_MEAN
+# define WIN32_LEAN_AND_MEAN
+# endif
+# include <windows.h>
+#else
+# include "ethread.h"
+# include "erl_misc_utils.h"
+# include <unistd.h>
+#endif
+
+#include <errno.h>
+#include <stdio.h>
+
+static int
+fail(const char *file, int line, const char *function, const char *assertion);
+
+#undef ASSERT
+#define ASSERT(X) ((void) ((X) ? 1 : fail(__FILE__, __LINE__, __func__, #X)))
+
+#ifdef __WIN32__
+/*
+ * We cannot access the ethread symbols directly; test
+ * what we got in the nif api instead...
+ */
+#define HAVE_FREQREAD_SUPPORT 0
+#define RWMUTEX_T ErlNifRWLock
+#define RWMUTEX_CREATE(FR) enif_rwlock_create("dummy")
+#define RWMUTEX_DESTROY enif_rwlock_destroy
+#define RWMUTEX_WLOCK enif_rwlock_rwlock
+#define RWMUTEX_TRYWLOCK enif_rwlock_tryrwlock
+#define RWMUTEX_WUNLOCK enif_rwlock_rwunlock
+#define RWMUTEX_TRYRLOCK enif_rwlock_tryrlock
+#define RWMUTEX_RLOCK enif_rwlock_rlock
+#define RWMUTEX_RUNLOCK enif_rwlock_runlock
+#define THR_ID ErlNifTid
+#define THR_CREATE(A, B, C, D) enif_thread_create("dummy", (A), (B), (C), (D))
+#define THR_JOIN enif_thread_join
+#define ATOMIC_T volatile LONG
+#define ATOMIC_INIT(VarP, Val) (*(VarP) = (Val))
+#define ATOMIC_SET(VarP, Val) (*(VarP) = (Val))
+#define ATOMIC_READ(VarP) (*(VarP))
+#define ATOMIC_INC InterlockedIncrement
+#define ATOMIC_DEC InterlockedDecrement
+
+#else
+
+#ifdef ETHR_USE_OWN_RWMTX_IMPL__
+# define HAVE_FREQREAD_SUPPORT 1
+#else
+# define HAVE_FREQREAD_SUPPORT 0
+#endif
+
+#define RWMUTEX_T ethr_rwmutex
+static ethr_rwmutex *
+RWMUTEX_CREATE(int freqread)
+{
+ ethr_rwmutex *rwmtx = enif_alloc(sizeof(ethr_rwmutex));
+ ethr_rwmutex_opt rwmtx_opt = ETHR_RWMUTEX_OPT_DEFAULT_INITER;
+ if (freqread)
+ rwmtx_opt.type = ETHR_RWMUTEX_TYPE_FREQUENT_READ;
+ ASSERT(rwmtx);
+ ASSERT(ethr_rwmutex_init_opt(rwmtx, &rwmtx_opt) == 0);
+ return rwmtx;
+}
+static void
+RWMUTEX_DESTROY(ethr_rwmutex *rwmtx)
+{
+ ASSERT(ethr_rwmutex_destroy(rwmtx) == 0);
+ enif_free(rwmtx);
+}
+#define RWMUTEX_TRYWLOCK ethr_rwmutex_tryrwlock
+#define RWMUTEX_WLOCK ethr_rwmutex_rwlock
+#define RWMUTEX_WUNLOCK ethr_rwmutex_rwunlock
+#define RWMUTEX_TRYRLOCK ethr_rwmutex_tryrlock
+#define RWMUTEX_RLOCK ethr_rwmutex_rlock
+#define RWMUTEX_RUNLOCK ethr_rwmutex_runlock
+#define THR_ID ethr_tid
+#define THR_CREATE ethr_thr_create
+#define THR_JOIN ethr_thr_join
+#define ATOMIC_T ethr_atomic_t
+#define ATOMIC_INIT ethr_atomic_init
+#define ATOMIC_SET ethr_atomic_set
+#define ATOMIC_READ ethr_atomic_read
+#define ATOMIC_INC ethr_atomic_inc
+#define ATOMIC_DEC ethr_atomic_dec
+
+#endif
+
+
+#if !defined(__func__)
+# if !defined(__STDC_VERSION__) || __STDC_VERSION__ < 199901L
+# if !defined(__GNUC__) || __GNUC__ < 2
+# define __func__ "[unknown_function]"
+# else
+# define __func__ __FUNCTION__
+# endif
+# endif
+#endif
+
+static void milli_sleep(int ms);
+static int get_bool(ErlNifEnv* env, ERL_NIF_TERM term);
+
+/*
+ * Long rwlock testcase
+ */
+
+#define LONG_RW_NO_W_THREADS 6
+#define LONG_RW_NO_THREADS 20
+#define LONG_RW_NO_WLOCK_COUNT 100
+
+typedef struct {
+ RWMUTEX_T *rwlock;
+ ATOMIC_T *is_wlocked;
+ ATOMIC_T *is_rlocked;
+ int *stop;
+ int *count;
+ int sleep;
+} long_rw_t;
+
+static void *
+long_rw_w(void *varg)
+{
+ long_rw_t *arg = varg;
+ int stop = 0;
+ do {
+ RWMUTEX_WLOCK(arg->rwlock);
+ ASSERT(!ATOMIC_READ(arg->is_wlocked));
+ ATOMIC_SET(arg->is_wlocked, 1);
+ ASSERT(!ATOMIC_READ(arg->is_rlocked));
+ milli_sleep(arg->sleep);
+ if (++(*arg->count) > LONG_RW_NO_WLOCK_COUNT)
+ stop = *arg->stop = 1;
+ ATOMIC_SET(arg->is_wlocked, 0);
+ ASSERT(!ATOMIC_READ(arg->is_rlocked));
+ RWMUTEX_WUNLOCK(arg->rwlock);
+ } while (!stop);
+ return NULL;
+}
+
+static void *
+long_rw_r(void *varg)
+{
+ long_rw_t *arg = varg;
+ int stop;
+ do {
+ RWMUTEX_RLOCK(arg->rwlock);
+ ASSERT(!ATOMIC_READ(arg->is_wlocked));
+ ATOMIC_INC(arg->is_rlocked);
+ milli_sleep(arg->sleep);
+ stop = *arg->stop;
+ ATOMIC_DEC(arg->is_rlocked);
+ ASSERT(!ATOMIC_READ(arg->is_wlocked));
+ RWMUTEX_RUNLOCK(arg->rwlock);
+ } while (!stop);
+ return NULL;
+}
+
+
+static ERL_NIF_TERM long_rw_test(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[])
+{
+ int res, freqread, i, count, stop;
+ ATOMIC_T is_wlocked, is_rlocked;
+ THR_ID tid[LONG_RW_NO_THREADS];
+ long_rw_t arg;
+ long_rw_t targ[LONG_RW_NO_THREADS];
+
+ ATOMIC_INIT(&is_wlocked, 0);
+ ATOMIC_INIT(&is_rlocked, 0);
+
+ freqread = 0;
+
+ arg.is_wlocked = &is_wlocked;
+ arg.is_rlocked = &is_rlocked;
+ arg.count = &count;
+ arg.stop = &stop;
+
+ restart:
+
+ stop = 0;
+ count = 0;
+
+ arg.rwlock = RWMUTEX_CREATE(freqread);
+
+ ASSERT(arg.rwlock);
+
+ for (i = 0; i < LONG_RW_NO_W_THREADS; i++) {
+ targ[i] = arg;
+ targ[i].sleep = 100 + i*10;
+ ASSERT(THR_CREATE(&tid[i], long_rw_w, &targ[i], NULL) == 0);
+ }
+ for (; i < LONG_RW_NO_THREADS; i++) {
+ targ[i] = arg;
+ targ[i].sleep = 100;
+ ASSERT(THR_CREATE(&tid[i], long_rw_r, &targ[i], NULL) == 0);
+ }
+ for (i = 0; i < LONG_RW_NO_THREADS; i++)
+ ASSERT(THR_JOIN(tid[i], NULL) == 0);
+
+ ASSERT(!ATOMIC_READ(arg.is_wlocked));
+ ASSERT(!ATOMIC_READ(arg.is_rlocked));
+
+ RWMUTEX_DESTROY(arg.rwlock);
+
+ if (HAVE_FREQREAD_SUPPORT && !freqread) {
+ freqread = 1;
+ goto restart;
+ }
+
+ if (freqread)
+ return enif_make_atom(env, "ok");
+ else
+ return enif_make_tuple2(env,
+ enif_make_atom(env,
+ "comment"),
+ enif_make_string(env,
+ "No frequent read test made.",
+ ERL_NIF_LATIN1));
+}
+
+/*
+ * Hammer rwlock testcase
+ */
+
+#define HAMMER_RW_NO_W_THREADS 6
+#define HAMMER_RW_NO_THREADS 20
+#define HAMMER_RW_NO_WLOCK_COUNT 1000000
+
+typedef struct {
+ RWMUTEX_T *rwlock;
+ ATOMIC_T is_locked;
+ int lock_check;
+ int stop;
+ int count;
+} hammer_rw_t;
+
+static void *
+hammer_rw_w(void *varg)
+{
+ hammer_rw_t *arg = varg;
+ int stop = 0;
+ do {
+ RWMUTEX_WLOCK(arg->rwlock);
+ if (arg->lock_check) {
+ ASSERT(!ATOMIC_READ(&arg->is_locked));
+ ATOMIC_SET(&arg->is_locked, -1);
+ }
+ if (++arg->count > HAMMER_RW_NO_WLOCK_COUNT)
+ stop = arg->stop = 1;
+ if (arg->lock_check) {
+ ASSERT(ATOMIC_READ(&arg->is_locked) == -1);
+ ATOMIC_SET(&arg->is_locked, 0);
+ }
+ RWMUTEX_WUNLOCK(arg->rwlock);
+ } while (!stop);
+ return NULL;
+}
+
+static void *
+hammer_rw_r(void *varg)
+{
+ hammer_rw_t *arg = varg;
+ int stop;
+ do {
+ RWMUTEX_RLOCK(arg->rwlock);
+ if (arg->lock_check) {
+ ASSERT(ATOMIC_READ(&arg->is_locked) >= 0);
+ ATOMIC_INC(&arg->is_locked);
+ }
+ stop = arg->stop;
+ if (arg->lock_check) {
+ ASSERT(ATOMIC_READ(&arg->is_locked) > 0);
+ ATOMIC_DEC(&arg->is_locked);
+ }
+ RWMUTEX_RUNLOCK(arg->rwlock);
+ } while (!stop);
+ return NULL;
+}
+
+
+static ERL_NIF_TERM hammer_rw_test(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[])
+{
+ hammer_rw_t arg;
+ char buf[10];
+ int res, freqread, i;
+ THR_ID tid[HAMMER_RW_NO_THREADS];
+
+ if (argc != 1)
+ goto badarg;
+
+ arg.lock_check = get_bool(env, argv[0]);
+ if (arg.lock_check < 0)
+ goto badarg;
+
+ ATOMIC_INIT(&arg.is_locked, 0);
+
+ freqread = 0;
+
+ restart:
+ arg.stop = 0;
+ arg.count = 0;
+
+ arg.rwlock = RWMUTEX_CREATE(freqread);
+
+ ASSERT(arg.rwlock);
+
+ for (i = 0; i < HAMMER_RW_NO_W_THREADS; i++)
+ ASSERT(THR_CREATE(&tid[i], hammer_rw_w, &arg, NULL) == 0);
+ for (; i < HAMMER_RW_NO_THREADS; i++)
+ ASSERT(THR_CREATE(&tid[i], hammer_rw_r, &arg, NULL) == 0);
+ for (i = 0; i < HAMMER_RW_NO_THREADS; i++)
+ ASSERT(THR_JOIN(tid[i], NULL) == 0);
+
+ ASSERT(!ATOMIC_READ(&arg.is_locked));
+
+ RWMUTEX_DESTROY(arg.rwlock);
+
+ if (HAVE_FREQREAD_SUPPORT && !freqread) {
+ freqread = 1;
+ goto restart;
+ }
+
+ if (freqread)
+ return enif_make_atom(env, "ok");
+ else
+ return enif_make_tuple2(env,
+ enif_make_atom(env,
+ "comment"),
+ enif_make_string(env,
+ "No frequent read test made.",
+ ERL_NIF_LATIN1));
+ badarg:
+ return enif_make_badarg(env);
+}
+
+/*
+ * Hammer try rwlock testcase
+ */
+
+#define HAMMER_TRYRW_NO_W_THREADS 10
+#define HAMMER_TRYRW_NO_THREADS 20
+#define HAMMER_TRYRW_NO_WLOCK_COUNT 10000000
+#define HAMMER_TRYRW_NO_RLOCK_COUNT 10000000
+#define HAMMER_TRYRW_NO_WLOCK_WAIT_COUNT ((10*HAMMER_TRYRW_NO_WLOCK_COUNT)/8)
+#define HAMMER_TRYRW_NO_RLOCK_WAIT_COUNT ((10*HAMMER_TRYRW_NO_RLOCK_COUNT)/8)
+
+typedef struct {
+ RWMUTEX_T *rwlock;
+ ATOMIC_T is_locked;
+ int lock_check;
+ int w_count;
+ ATOMIC_T r_count;
+} hammer_tryrw_t;
+
+static void *
+hammer_tryrw_w(void *varg)
+{
+ hammer_tryrw_t *arg = varg;
+ int stop = 0;
+ int wait = 0;
+ do {
+ while (EBUSY == RWMUTEX_TRYWLOCK(arg->rwlock));
+ if (arg->lock_check) {
+ ASSERT(!ATOMIC_READ(&arg->is_locked));
+ ATOMIC_SET(&arg->is_locked, -1);
+ }
+ if (++arg->w_count > HAMMER_TRYRW_NO_WLOCK_COUNT)
+ stop = 1;
+ else if (arg->w_count > HAMMER_TRYRW_NO_RLOCK_WAIT_COUNT)
+ wait = 1;
+ if (arg->lock_check) {
+ ASSERT(ATOMIC_READ(&arg->is_locked) == -1);
+ ATOMIC_SET(&arg->is_locked, 0);
+ }
+ RWMUTEX_WUNLOCK(arg->rwlock);
+ if (wait)
+ milli_sleep(1);
+ } while (!stop);
+ return NULL;
+}
+
+static void *
+hammer_tryrw_r(void *varg)
+{
+ hammer_tryrw_t *arg = varg;
+ long r_count;
+ int stop = 0;
+ int wait = 0;
+ do {
+ while (EBUSY == RWMUTEX_TRYRLOCK(arg->rwlock));
+ if (arg->lock_check) {
+ ASSERT(ATOMIC_READ(&arg->is_locked) >= 0);
+ ATOMIC_INC(&arg->is_locked);
+ }
+ ATOMIC_INC(&arg->r_count);
+ r_count = ATOMIC_READ(&arg->r_count);
+ if (r_count > HAMMER_TRYRW_NO_RLOCK_COUNT)
+ stop = 1;
+ else if (r_count > HAMMER_TRYRW_NO_RLOCK_WAIT_COUNT)
+ wait = 1;
+ if (arg->lock_check) {
+ ASSERT(ATOMIC_READ(&arg->is_locked) > 0);
+ ATOMIC_DEC(&arg->is_locked);
+ }
+ RWMUTEX_RUNLOCK(arg->rwlock);
+ if (wait)
+ milli_sleep(1);
+ } while (!stop);
+ return NULL;
+}
+
+
+static ERL_NIF_TERM hammer_tryrw_test(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[])
+{
+ hammer_tryrw_t arg;
+ char buf[10];
+ int res, freqread, i;
+ THR_ID tid[HAMMER_TRYRW_NO_THREADS];
+
+ if (argc != 1)
+ goto badarg;
+
+ arg.lock_check = get_bool(env, argv[0]);
+ if (arg.lock_check < 0)
+ goto badarg;
+
+ ATOMIC_INIT(&arg.is_locked, 0);
+ freqread = 0;
+
+ restart:
+
+ arg.w_count = 0;
+ ATOMIC_INIT(&arg.r_count, 0);
+
+ arg.rwlock = RWMUTEX_CREATE(freqread);
+
+ ASSERT(arg.rwlock);
+
+ for (i = 0; i < HAMMER_TRYRW_NO_W_THREADS; i++)
+ ASSERT(THR_CREATE(&tid[i], hammer_tryrw_w, &arg, NULL) == 0);
+ for (; i < HAMMER_TRYRW_NO_THREADS; i++)
+ ASSERT(THR_CREATE(&tid[i], hammer_tryrw_r, &arg, NULL) == 0);
+ for (i = 0; i < HAMMER_TRYRW_NO_THREADS; i++)
+ ASSERT(THR_JOIN(tid[i], NULL) == 0);
+
+ ASSERT(!ATOMIC_READ(&arg.is_locked));
+
+ RWMUTEX_DESTROY(arg.rwlock);
+
+ if (HAVE_FREQREAD_SUPPORT && !freqread) {
+ freqread = 1;
+ goto restart;
+ }
+
+ if (freqread)
+ return enif_make_atom(env, "ok");
+ else
+ return enif_make_tuple2(env,
+ enif_make_atom(env,
+ "comment"),
+ enif_make_string(env,
+ "No frequent read test made.",
+ ERL_NIF_LATIN1));
+ badarg:
+ return enif_make_badarg(env);
+}
+
+typedef struct {
+ int lock_check;
+ ATOMIC_T is_locked;
+ RWMUTEX_T *rwlock;
+} rwlock_resource_t;
+
+static void
+rwlock_destructor(ErlNifEnv* env, void* obj)
+{
+ rwlock_resource_t *rwlr = obj;
+ if (rwlr->lock_check)
+ ASSERT(!ATOMIC_READ(&rwlr->is_locked));
+ RWMUTEX_DESTROY(rwlr->rwlock);
+}
+
+/*
+ * create_rwlock(FreqRead, LockCheck)
+ */
+
+static ERL_NIF_TERM
+create_rwlock(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[])
+{
+ int lock_check, freqread;
+ ERL_NIF_TERM rwlock_term;
+ rwlock_resource_t *rwlr;
+ char buf[100];
+
+ if (argc != 2)
+ goto badarg;
+
+ freqread = get_bool(env, argv[0]);
+ if (freqread < 0)
+ goto badarg;
+
+ if (!HAVE_FREQREAD_SUPPORT && freqread)
+ return enif_make_atom(env, "enotsup");
+
+ lock_check = get_bool(env, argv[1]);
+ if (lock_check < 0)
+ goto badarg;
+
+ rwlr = enif_alloc_resource(enif_priv_data(env), sizeof(rwlock_resource_t));
+ rwlr->lock_check = lock_check;
+ ATOMIC_INIT(&rwlr->is_locked, 0);
+ rwlr->rwlock = RWMUTEX_CREATE(freqread);
+ rwlock_term = enif_make_resource(env, rwlr);
+ enif_release_resource(rwlr);
+ return rwlock_term;
+
+ badarg:
+ return enif_make_badarg(env);
+}
+
+/*
+ * rwlock_op(RWLock, Blocking, WriteOp, WaitTime)
+ */
+
+static ERL_NIF_TERM
+rwlock_op(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[])
+{
+ rwlock_resource_t *rwlr;
+ int blocking, write, wait_locked, wait_unlocked;
+
+ if (argc != 5)
+ goto badarg;
+
+ if (!enif_get_resource(env, argv[0], enif_priv_data(env), (void **) &rwlr))
+ goto badarg;
+
+ blocking = get_bool(env, argv[1]);
+ if (blocking < 0)
+ goto badarg;
+
+ write = get_bool(env, argv[2]);
+ if (write < 0)
+ goto badarg;
+
+ if (!enif_get_int(env, argv[3], &wait_locked))
+ goto badarg;
+ if (wait_locked < 0)
+ goto badarg;
+
+ if (!enif_get_int(env, argv[4], &wait_unlocked))
+ goto badarg;
+ if (wait_unlocked < 0)
+ goto badarg;
+
+ if (write) {
+ if (blocking)
+ RWMUTEX_WLOCK(rwlr->rwlock);
+ else
+ while (EBUSY == RWMUTEX_TRYWLOCK(rwlr->rwlock));
+ if (rwlr->lock_check) {
+ ASSERT(!ATOMIC_READ(&rwlr->is_locked));
+ ATOMIC_SET(&rwlr->is_locked, -1);
+ }
+ }
+ else {
+ if (blocking)
+ RWMUTEX_RLOCK(rwlr->rwlock);
+ else
+ while (EBUSY == RWMUTEX_TRYRLOCK(rwlr->rwlock));
+ if (rwlr->lock_check) {
+ ASSERT(ATOMIC_READ(&rwlr->is_locked) >= 0);
+ ATOMIC_INC(&rwlr->is_locked);
+ }
+ }
+
+ if (wait_locked)
+ milli_sleep(wait_locked);
+
+ if (write) {
+ if (rwlr->lock_check) {
+ ASSERT(ATOMIC_READ(&rwlr->is_locked) == -1);
+ ATOMIC_SET(&rwlr->is_locked, 0);
+ }
+ RWMUTEX_WUNLOCK(rwlr->rwlock);
+ }
+ else {
+ if (rwlr->lock_check) {
+ ASSERT(ATOMIC_READ(&rwlr->is_locked) > 0);
+ ATOMIC_DEC(&rwlr->is_locked);
+ }
+ RWMUTEX_RUNLOCK(rwlr->rwlock);
+ }
+
+ if (wait_unlocked)
+ milli_sleep(wait_unlocked);
+
+ return enif_make_atom(env, "ok");
+ badarg:
+ return enif_make_badarg(env);
+}
+
+static int load_nif_lib(ErlNifEnv* env, void** priv_data, ERL_NIF_TERM load_info)
+{
+ *priv_data = enif_open_resource_type(env,
+ NULL,
+ "rwlock_resource",
+ rwlock_destructor,
+ ERL_NIF_RT_CREATE,
+ NULL);
+ if (*priv_data)
+ return 0;
+ else
+ return -1;
+}
+
+/*
+ * 0 -> false
+ * >0 -> true
+ * <0 -> error
+ */
+
+static int
+get_bool(ErlNifEnv* env, ERL_NIF_TERM term)
+{
+ int res;
+ char buf[10];
+
+ res = enif_get_atom(env, term, buf, sizeof(buf), ERL_NIF_LATIN1);
+ if (res == 0)
+ return -1;
+ if (strcmp("false", buf) == 0)
+ return 0;
+ else if (strcmp("true", buf) == 0)
+ return 1;
+ else
+ return -1;
+}
+
+static int
+fail(const char *file, int line, const char *function, const char *assertion)
+{
+ fprintf(stderr, "%s:%d: Assertion failed in %s(): %s\n",
+ file, line, function, assertion);
+ abort();
+}
+
+static void
+milli_sleep(int ms)
+{
+#ifdef __WIN32__
+ Sleep(ms);
+#else
+ while (erts_milli_sleep(ms) != 0);
+#endif
+}
+
+static ErlNifFunc nif_funcs[] = {
+ {"long_rw_test", 0, long_rw_test},
+ {"hammer_rw_test", 1, hammer_rw_test},
+ {"hammer_tryrw_test", 1, hammer_tryrw_test},
+ {"create_rwlock", 2, create_rwlock},
+ {"rwlock_op", 5, rwlock_op}
+};
+
+ERL_NIF_INIT(mtx_SUITE, nif_funcs, load_nif_lib, NULL, NULL, NULL)
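
The lock_check ASSERTs throughout this file all encode the same invariant on the shadow counter: is_locked is -1 while exactly one writer holds the lock, n while n readers hold it, and 0 when it is free; any other value observed inside a critical section means the rwlock admitted conflicting holders. A self-contained illustration of that invariant using POSIX primitives (a pthread rwlock plus a C11 atomic, purely for explanation; the suite itself uses the ethread/NIF macros defined above):

#include <assert.h>
#include <pthread.h>
#include <stdatomic.h>

static pthread_rwlock_t lock = PTHREAD_RWLOCK_INITIALIZER;
static atomic_int is_locked;            /* -1: writer, >0: readers, 0: free */

static void write_section(void)
{
    pthread_rwlock_wrlock(&lock);
    assert(atomic_load(&is_locked) == 0);   /* nobody may be inside */
    atomic_store(&is_locked, -1);
    /* ... exclusive work ... */
    assert(atomic_load(&is_locked) == -1);
    atomic_store(&is_locked, 0);
    pthread_rwlock_unlock(&lock);
}

static void read_section(void)
{
    pthread_rwlock_rdlock(&lock);
    assert(atomic_load(&is_locked) >= 0);   /* no writer may be inside */
    atomic_fetch_add(&is_locked, 1);
    /* ... shared work ... */
    assert(atomic_load(&is_locked) > 0);
    atomic_fetch_sub(&is_locked, 1);
    pthread_rwlock_unlock(&lock);
}

int main(void)
{
    write_section();
    read_section();
    return 0;
}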