-rw-r--r--  erts/emulator/beam/beam_emu.c            |  31
-rw-r--r--  erts/emulator/beam/erl_alloc.c           |  27
-rw-r--r--  erts/emulator/beam/erl_alloc.h           |   5
-rw-r--r--  erts/emulator/beam/erl_alloc_util.c      |  32
-rw-r--r--  erts/emulator/beam/erl_alloc_util.h      |   3
-rw-r--r--  erts/emulator/beam/erl_cpu_topology.c    |  20
-rw-r--r--  erts/emulator/beam/erl_process.c         |  16
-rw-r--r--  erts/emulator/beam/erl_process.h         |  25
-rw-r--r--  erts/emulator/beam/erl_threads.h         |  67
-rw-r--r--  erts/emulator/beam/sys.h                 |  15
-rw-r--r--  erts/emulator/sys/common/erl_poll.c      | 284
-rw-r--r--  erts/emulator/sys/unix/sys.c             |  24
-rw-r--r--  erts/emulator/sys/win32/erl_poll.c       | 382
-rw-r--r--  erts/emulator/sys/win32/sys_interrupt.c  |   8
-rw-r--r--  erts/emulator/utils/loaded               |  44
-rw-r--r--  lib/compiler/src/v3_kernel_pp.erl        |  37
-rw-r--r--  lib/kernel/src/inet6_tcp_dist.erl        |   6
-rw-r--r--  lib/stdlib/test/supervisor_SUITE.erl     |   6
18 files changed, 624 insertions, 408 deletions
diff --git a/erts/emulator/beam/beam_emu.c b/erts/emulator/beam/beam_emu.c
index 12a8022861..af7ed064d3 100644
--- a/erts/emulator/beam/beam_emu.c
+++ b/erts/emulator/beam/beam_emu.c
@@ -321,6 +321,7 @@ extern int count_instructions;
# define POST_BIF_GC_SWAPIN_0(_p, _res) \
ERTS_SMP_REQ_PROC_MAIN_LOCK((_p)); \
PROCESS_MAIN_CHK_LOCKS((_p)); \
+ ERTS_VERIFY_UNUSED_TEMP_ALLOC((_p)); \
if (((_p)->mbuf) || (MSO(_p).overhead >= BIN_VHEAP_SZ(_p)) ) { \
_res = erts_gc_after_bif_call((_p), (_res), NULL, 0); \
E = (_p)->stop; \
@@ -328,6 +329,7 @@ extern int count_instructions;
HTOP = HEAP_TOP((_p))
# define POST_BIF_GC_SWAPIN(_p, _res, _regs, _arity) \
+ ERTS_VERIFY_UNUSED_TEMP_ALLOC((_p)); \
ERTS_SMP_REQ_PROC_MAIN_LOCK((_p)); \
PROCESS_MAIN_CHK_LOCKS((_p)); \
if (((_p)->mbuf) || (MSO(_p).overhead >= BIN_VHEAP_SZ(_p)) ) { \
@@ -367,6 +369,7 @@ extern int count_instructions;
reg[0] = r(0); \
PROCESS_MAIN_CHK_LOCKS(c_p); \
FCALLS -= erts_garbage_collect(c_p, needed + (HeapNeed), reg, (M)); \
+ ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p); \
PROCESS_MAIN_CHK_LOCKS(c_p); \
r(0) = reg[0]; \
SWAPIN; \
@@ -420,6 +423,7 @@ extern int count_instructions;
reg[0] = r(0); \
PROCESS_MAIN_CHK_LOCKS(c_p); \
FCALLS -= erts_garbage_collect(c_p, need, reg, (Live)); \
+ ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p); \
PROCESS_MAIN_CHK_LOCKS(c_p); \
r(0) = reg[0]; \
SWAPIN; \
@@ -442,6 +446,7 @@ extern int count_instructions;
reg[0] = r(0); \
PROCESS_MAIN_CHK_LOCKS(c_p); \
FCALLS -= erts_garbage_collect(c_p, need, reg, (Live)); \
+ ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p); \
PROCESS_MAIN_CHK_LOCKS(c_p); \
r(0) = reg[0]; \
SWAPIN; \
@@ -464,6 +469,7 @@ extern int count_instructions;
reg[Live] = Extra; \
PROCESS_MAIN_CHK_LOCKS(c_p); \
FCALLS -= erts_garbage_collect(c_p, need, reg, (Live)+1); \
+ ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p); \
PROCESS_MAIN_CHK_LOCKS(c_p); \
if (Live > 0) { \
r(0) = reg[0]; \
@@ -1214,7 +1220,9 @@ void process_main(void)
do_schedule1:
PROCESS_MAIN_CHK_LOCKS(c_p);
ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p);
+ ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
c_p = schedule(c_p, reds_used);
+ ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
#ifdef DEBUG
pid = c_p->id;
#endif
@@ -1664,6 +1672,7 @@ void process_main(void)
SWAPOUT;
PROCESS_MAIN_CHK_LOCKS(c_p);
FCALLS -= erts_garbage_collect(c_p, 3, reg+2, 1);
+ ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
PROCESS_MAIN_CHK_LOCKS(c_p);
SWAPIN;
}
@@ -1782,6 +1791,7 @@ void process_main(void)
PROCESS_MAIN_CHK_LOCKS(c_p);
},
{
+ ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
PROCESS_MAIN_CHK_LOCKS(c_p);
r(0) = reg[0];
SWAPIN;
@@ -1840,6 +1850,7 @@ void process_main(void)
CANCEL_TIMER(c_p);
free_message(msgp);
+ ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
PROCESS_MAIN_CHK_LOCKS(c_p);
NextPF(0, next);
@@ -2230,6 +2241,7 @@ void process_main(void)
ASSERT(!ERTS_PROC_IS_EXITING(c_p));
result = (*bf)(c_p, arg);
ASSERT(!ERTS_PROC_IS_EXITING(c_p) || is_non_value(result));
+ ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
PROCESS_MAIN_CHK_LOCKS(c_p);
ERTS_HOLE_CHECK(c_p);
FCALLS = c_p->fcalls;
@@ -2258,6 +2270,7 @@ void process_main(void)
ASSERT(!ERTS_PROC_IS_EXITING(c_p));
result = (*bf)(c_p, arg);
ASSERT(!ERTS_PROC_IS_EXITING(c_p) || is_non_value(result));
+ ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
PROCESS_MAIN_CHK_LOCKS(c_p);
ERTS_HOLE_CHECK(c_p);
FCALLS = c_p->fcalls;
@@ -2287,6 +2300,7 @@ void process_main(void)
PROCESS_MAIN_CHK_LOCKS(c_p);
ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p);
result = (*bf)(c_p, reg, live);
+ ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p);
PROCESS_MAIN_CHK_LOCKS(c_p);
SWAPIN;
@@ -2322,6 +2336,7 @@ void process_main(void)
PROCESS_MAIN_CHK_LOCKS(c_p);
ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p);
result = (*bf)(c_p, reg, live);
+ ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p);
PROCESS_MAIN_CHK_LOCKS(c_p);
SWAPIN;
@@ -2360,6 +2375,7 @@ void process_main(void)
PROCESS_MAIN_CHK_LOCKS(c_p);
ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p);
result = (*bf)(c_p, reg, live);
+ ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p);
PROCESS_MAIN_CHK_LOCKS(c_p);
SWAPIN;
@@ -2394,6 +2410,7 @@ void process_main(void)
ASSERT(!ERTS_PROC_IS_EXITING(c_p));
result = (*bf)(c_p, tmp_arg1, tmp_arg2);
ASSERT(!ERTS_PROC_IS_EXITING(c_p) || is_non_value(result));
+ ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
PROCESS_MAIN_CHK_LOCKS(c_p);
ERTS_HOLE_CHECK(c_p);
FCALLS = c_p->fcalls;
@@ -2417,6 +2434,7 @@ void process_main(void)
ASSERT(!ERTS_PROC_IS_EXITING(c_p));
result = (*bf)(c_p, tmp_arg1, tmp_arg2);
ASSERT(!ERTS_PROC_IS_EXITING(c_p) || is_non_value(result));
+ ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
PROCESS_MAIN_CHK_LOCKS(c_p);
ERTS_HOLE_CHECK(c_p);
if (is_value(result)) {
@@ -3287,6 +3305,7 @@ void process_main(void)
PROCESS_MAIN_CHK_LOCKS(c_p);
bif_nif_arity = I[-1];
ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p);
+ ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
ASSERT(!ERTS_PROC_IS_EXITING(c_p));
{
@@ -3300,6 +3319,7 @@ void process_main(void)
}
ASSERT(!ERTS_PROC_IS_EXITING(c_p) || is_non_value(nif_bif_result));
PROCESS_MAIN_CHK_LOCKS(c_p);
+ ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
goto apply_bif_or_nif_epilogue;
OpCase(apply_bif):
@@ -3326,6 +3346,7 @@ void process_main(void)
bif_nif_arity = I[-1];
ASSERT(bif_nif_arity <= 3);
ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p);
+ ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
switch (bif_nif_arity) {
case 3:
{
@@ -3334,6 +3355,7 @@ void process_main(void)
nif_bif_result = (*bf)(c_p, r(0), x(1), x(2), I);
ASSERT(!ERTS_PROC_IS_EXITING(c_p) ||
is_non_value(nif_bif_result));
+ ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
PROCESS_MAIN_CHK_LOCKS(c_p);
}
break;
@@ -3344,6 +3366,7 @@ void process_main(void)
nif_bif_result = (*bf)(c_p, r(0), x(1), I);
ASSERT(!ERTS_PROC_IS_EXITING(c_p) ||
is_non_value(nif_bif_result));
+ ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
PROCESS_MAIN_CHK_LOCKS(c_p);
}
break;
@@ -3354,6 +3377,7 @@ void process_main(void)
nif_bif_result = (*bf)(c_p, r(0), I);
ASSERT(!ERTS_PROC_IS_EXITING(c_p) ||
is_non_value(nif_bif_result));
+ ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
PROCESS_MAIN_CHK_LOCKS(c_p);
}
break;
@@ -3364,6 +3388,7 @@ void process_main(void)
nif_bif_result = (*bf)(c_p, I);
ASSERT(!ERTS_PROC_IS_EXITING(c_p) ||
is_non_value(nif_bif_result));
+ ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
PROCESS_MAIN_CHK_LOCKS(c_p);
break;
}
@@ -4590,6 +4615,7 @@ void process_main(void)
ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p);
flags = erts_call_trace(c_p, ep->code, ep->match_prog_set, reg,
0, &c_p->tracer_proc);
+ ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p);
PROCESS_MAIN_CHK_LOCKS(c_p);
ASSERT(!ERTS_PROC_IS_EXITING(c_p));
@@ -4601,6 +4627,7 @@ void process_main(void)
/* SWAPOUT, SWAPIN was done and r(0) was saved above */
PROCESS_MAIN_CHK_LOCKS(c_p);
FCALLS -= erts_garbage_collect(c_p, 3, reg, ep->code[2]);
+ ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
PROCESS_MAIN_CHK_LOCKS(c_p);
r(0) = reg[0];
SWAPIN;
@@ -4690,6 +4717,7 @@ void process_main(void)
reg[0] = r(0);
PROCESS_MAIN_CHK_LOCKS(c_p);
FCALLS -= erts_garbage_collect(c_p, 2, reg, I[-1]);
+ ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
PROCESS_MAIN_CHK_LOCKS(c_p);
r(0) = reg[0];
}
@@ -4793,6 +4821,7 @@ void process_main(void)
/* SWAPOUT was done and r(0) was saved above */
PROCESS_MAIN_CHK_LOCKS(c_p);
FCALLS -= erts_garbage_collect(c_p, need, reg, I[-1]);
+ ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
PROCESS_MAIN_CHK_LOCKS(c_p);
r(0) = reg[0];
SWAPIN;
@@ -6239,6 +6268,7 @@ hibernate(Process* c_p, Eterm module, Eterm function, Eterm args, Eterm* reg)
c_p->fvalue = NIL;
PROCESS_MAIN_CHK_LOCKS(c_p);
erts_garbage_collect_hibernate(c_p);
+ ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
PROCESS_MAIN_CHK_LOCKS(c_p);
erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MSGQ|ERTS_PROC_LOCK_STATUS);
ASSERT(!ERTS_PROC_IS_EXITING(c_p));
@@ -6484,6 +6514,7 @@ new_fun(Process* p, Eterm* reg, ErlFunEntry* fe, int num_free)
if (HEAP_LIMIT(p) - HEAP_TOP(p) <= needed) {
PROCESS_MAIN_CHK_LOCKS(p);
erts_garbage_collect(p, needed, reg, num_free);
+ ERTS_VERIFY_UNUSED_TEMP_ALLOC(p);
PROCESS_MAIN_CHK_LOCKS(p);
}
hp = p->htop;
diff --git a/erts/emulator/beam/erl_alloc.c b/erts/emulator/beam/erl_alloc.c
index e85e2d7e3f..323c422c6d 100644
--- a/erts/emulator/beam/erl_alloc.c
+++ b/erts/emulator/beam/erl_alloc.c
@@ -1411,6 +1411,33 @@ void erts_alloc_reg_scheduler_id(Uint id)
erts_tsd_set(thr_ix_key, (void *)(long) ix);
}
+static void
+no_verify(Allctr_t *allctr)
+{
+
+}
+
+erts_alloc_verify_func_t
+erts_alloc_get_verify_unused_temp_alloc(Allctr_t **allctr)
+{
+ if (erts_allctrs_info[ERTS_ALC_A_TEMPORARY].alloc_util
+ && erts_allctrs_info[ERTS_ALC_A_TEMPORARY].thr_spec) {
+ ErtsAllocatorThrSpec_t *tspec;
+ tspec = &erts_allctr_thr_spec[ERTS_ALC_A_TEMPORARY];
+ if (!tspec->all_thr_safe) {
+ int ix = erts_alc_get_thr_ix();
+
+ if (ix < tspec->size) {
+ *allctr = tspec->allctr[ix];
+ return erts_alcu_verify_unused;
+ }
+ }
+ }
+
+ *allctr = NULL;
+ return no_verify;
+}
+
__decl_noreturn void
erts_alc_fatal_error(int error, int func, ErtsAlcType_t n, ...)
{
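
Illustrative sketch (not part of the diff) of how erts_alloc_get_verify_unused_temp_alloc() is meant to be consumed, mirroring the scheduler initialization added in erl_process.c further down; the local names are placeholders:

    erts_alloc_verify_func_t verify_func;
    Allctr_t *verify_arg;

    /* Resolve the calling thread's temporary allocator; falls back to the
     * no-op no_verify() with a NULL argument when the temporary allocator
     * is not thread-specific. */
    verify_func = erts_alloc_get_verify_unused_temp_alloc(&verify_arg);

    /* ... later, at a point where all temporary blocks should be freed ... */
    (*verify_func)(verify_arg);
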
diff --git a/erts/emulator/beam/erl_alloc.h b/erts/emulator/beam/erl_alloc.h
index 3e96c76dbf..dd4cc22171 100644
--- a/erts/emulator/beam/erl_alloc.h
+++ b/erts/emulator/beam/erl_alloc.h
@@ -236,6 +236,11 @@ void *erts_realloc_fnf(ErtsAlcType_t type, void *ptr, Uint size)
#endif /* #if ERTS_ALC_DO_INLINE || defined(ERTS_ALC_INTERNAL__) */
+typedef void (*erts_alloc_verify_func_t)(Allctr_t *);
+
+erts_alloc_verify_func_t
+erts_alloc_get_verify_unused_temp_alloc(Allctr_t **allctr);
+
#ifndef ERTS_CACHE_LINE_SIZE
/* Assume a cache line size of 64 bytes */
# define ERTS_CACHE_LINE_SIZE ((UWord) 64)
diff --git a/erts/emulator/beam/erl_alloc_util.c b/erts/emulator/beam/erl_alloc_util.c
index 8b184899c9..c09f0bbd77 100644
--- a/erts/emulator/beam/erl_alloc_util.c
+++ b/erts/emulator/beam/erl_alloc_util.c
@@ -3368,6 +3368,38 @@ erts_alcu_test(unsigned long op, unsigned long a1, unsigned long a2)
 * Debug functions                                                           *
\*                                                                           */
+void
+erts_alcu_verify_unused(Allctr_t *allctr)
+{
+ UWord no;
+
+ no = allctr->sbcs.curr_mseg.no;
+ no += allctr->sbcs.curr_sys_alloc.no;
+ no += allctr->mbcs.blocks.curr.no;
+
+ if (no) {
+ UWord sz = allctr->sbcs.blocks.curr.size;
+ sz += allctr->mbcs.blocks.curr.size;
+ erl_exit(ERTS_ABORT_EXIT,
+ "%salloc() used when expected to be unused!\n"
+ "Total amount of blocks allocated: %bpu\n"
+ "Total amount of bytes allocated: %bpu\n",
+ allctr->name_prefix, no, sz);
+ }
+}
+
+void
+erts_alcu_verify_unused_ts(Allctr_t *allctr)
+{
+#ifdef USE_THREADS
+ erts_mtx_lock(&allctr->mutex);
+#endif
+ erts_alcu_verify_unused(allctr);
+#ifdef USE_THREADS
+ erts_mtx_unlock(&allctr->mutex);
+#endif
+}
+
#ifdef ERTS_ALLOC_UTIL_HARD_DEBUG
static void
diff --git a/erts/emulator/beam/erl_alloc_util.h b/erts/emulator/beam/erl_alloc_util.h
index f2b951bca6..5a8db5e29e 100644
--- a/erts/emulator/beam/erl_alloc_util.h
+++ b/erts/emulator/beam/erl_alloc_util.h
@@ -333,6 +333,9 @@ struct Allctr_t_ {
int erts_alcu_start(Allctr_t *, AllctrInit_t *);
void erts_alcu_stop(Allctr_t *);
+void erts_alcu_verify_unused(Allctr_t *);
+void erts_alcu_verify_unused_ts(Allctr_t *allctr);
+
unsigned long erts_alcu_test(unsigned long, unsigned long, unsigned long);
diff --git a/erts/emulator/beam/erl_cpu_topology.c b/erts/emulator/beam/erl_cpu_topology.c
index 8a6b4d8d6c..bcf8bcf270 100644
--- a/erts/emulator/beam/erl_cpu_topology.c
+++ b/erts/emulator/beam/erl_cpu_topology.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2010. All Rights Reserved.
+ * Copyright Ericsson AB 2010-2011. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -630,13 +630,13 @@ write_schedulers_bind_change(erts_cpu_topology_t *cpudata, int size)
int
erts_init_scheduler_bind_type_string(char *how)
{
- if (erts_bind_to_cpu(cpuinfo, -1) == -ENOTSUP)
+ if (sys_strcmp(how, "u") == 0)
+ cpu_bind_order = ERTS_CPU_BIND_NONE;
+ else if (erts_bind_to_cpu(cpuinfo, -1) == -ENOTSUP)
return ERTS_INIT_SCHED_BIND_TYPE_NOT_SUPPORTED;
-
- if (!system_cpudata && !user_cpudata)
+ else if (!system_cpudata && !user_cpudata)
return ERTS_INIT_SCHED_BIND_TYPE_ERROR_NO_CPU_TOPOLOGY;
-
- if (sys_strcmp(how, "db") == 0)
+ else if (sys_strcmp(how, "db") == 0)
cpu_bind_order = ERTS_CPU_BIND_DEFAULT_BIND;
else if (sys_strcmp(how, "s") == 0)
cpu_bind_order = ERTS_CPU_BIND_SPREAD;
@@ -652,11 +652,8 @@ erts_init_scheduler_bind_type_string(char *how)
cpu_bind_order = ERTS_CPU_BIND_NO_NODE_THREAD_SPREAD;
else if (sys_strcmp(how, "ns") == 0)
cpu_bind_order = ERTS_CPU_BIND_NO_SPREAD;
- else if (sys_strcmp(how, "u") == 0)
- cpu_bind_order = ERTS_CPU_BIND_NONE;
else
return ERTS_INIT_SCHED_BIND_TYPE_ERROR_NO_BAD_TYPE;
-
return ERTS_INIT_SCHED_BIND_TYPE_SUCCESS;
}
@@ -724,6 +721,11 @@ erts_bind_schedulers(Process *c_p, Eterm how)
erts_smp_rwmtx_rwlock(&cpuinfo_rwmtx);
if (erts_bind_to_cpu(cpuinfo, -1) == -ENOTSUP) {
+ if (cpu_bind_order == ERTS_CPU_BIND_NONE
+ && ERTS_IS_ATOM_STR("unbound", how)) {
+ res = bound_schedulers_term(ERTS_CPU_BIND_NONE);
+ goto done;
+ }
ERTS_BIF_PREP_ERROR(res, c_p, EXC_NOTSUP);
}
else {
diff --git a/erts/emulator/beam/erl_process.c b/erts/emulator/beam/erl_process.c
index ddfc27a93f..e4a871fd7e 100644
--- a/erts/emulator/beam/erl_process.c
+++ b/erts/emulator/beam/erl_process.c
@@ -2752,6 +2752,15 @@ erts_init_scheduling(int mrq, int no_schedulers, int no_schedulers_online)
/* init port tasks */
erts_port_task_init();
+
+#ifndef ERTS_SMP
+#ifdef ERTS_DO_VERIFY_UNUSED_TEMP_ALLOC
+ erts_scheduler_data->verify_unused_temp_alloc
+ = erts_alloc_get_verify_unused_temp_alloc(
+ &erts_scheduler_data->verify_unused_temp_alloc_data);
+ ERTS_VERIFY_UNUSED_TEMP_ALLOC(NULL);
+#endif
+#endif
}
ErtsRunQueue *
@@ -3711,6 +3720,13 @@ sched_thread_func(void *vesdp)
}
erts_smp_mtx_unlock(&schdlr_sspnd.mtx);
+#ifdef ERTS_DO_VERIFY_UNUSED_TEMP_ALLOC
+ ((ErtsSchedulerData *) vesdp)->verify_unused_temp_alloc
+ = erts_alloc_get_verify_unused_temp_alloc(
+ &((ErtsSchedulerData *) vesdp)->verify_unused_temp_alloc_data);
+ ERTS_VERIFY_UNUSED_TEMP_ALLOC(NULL);
+#endif
+
process_main();
/* No schedulers should *ever* terminate */
erl_exit(ERTS_ABORT_EXIT, "Scheduler thread number %bpu terminated\n",
diff --git a/erts/emulator/beam/erl_process.h b/erts/emulator/beam/erl_process.h
index 8479e56710..e871a9834a 100644
--- a/erts/emulator/beam/erl_process.h
+++ b/erts/emulator/beam/erl_process.h
@@ -28,6 +28,12 @@
#define ERTS_INCLUDE_SCHEDULER_INTERNALS
#endif
+/* #define ERTS_DO_VERIFY_UNUSED_TEMP_ALLOC */
+
+#if !defined(ERTS_DO_VERIFY_UNUSED_TEMP_ALLOC) && defined(DEBUG)
+# define ERTS_DO_VERIFY_UNUSED_TEMP_ALLOC
+#endif
+
typedef struct process Process;
#include "sys.h"
@@ -421,6 +427,11 @@ struct ErtsSchedulerData_ {
/* NOTE: These fields are modified under held mutexes by other threads */
erts_smp_atomic32_t chk_cpu_bind; /* Only used when common run queue */
#endif
+
+#ifdef ERTS_DO_VERIFY_UNUSED_TEMP_ALLOC
+ erts_alloc_verify_func_t verify_unused_temp_alloc;
+ Allctr_t *verify_unused_temp_alloc_data;
+#endif
};
typedef union {
@@ -1145,6 +1156,20 @@ Uint erts_debug_nbalance(void);
# define ERTS_PROC_GET_SCHDATA(PROC) (erts_scheduler_data)
#endif
+#ifdef ERTS_DO_VERIFY_UNUSED_TEMP_ALLOC
+# define ERTS_VERIFY_UNUSED_TEMP_ALLOC(P) \
+do { \
+ ErtsSchedulerData *esdp__ = ((P) \
+ ? ERTS_PROC_GET_SCHDATA((Process *) (P)) \
+ : erts_get_scheduler_data()); \
+ if (esdp__) \
+ esdp__->verify_unused_temp_alloc( \
+ esdp__->verify_unused_temp_alloc_data); \
+} while (0)
+#else
+# define ERTS_VERIFY_UNUSED_TEMP_ALLOC(ESDP)
+#endif
+
#if defined(ERTS_SMP) || defined(USE_THREADS)
ErtsSchedulerData *erts_get_scheduler_data(void);
#else
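
For orientation, a hedged, hypothetical call site showing what the macro asserts at the places instrumented in beam_emu.c above; c_p is the current process as in beam_emu.c, and tmp and size are placeholders:

    byte *tmp = (byte *) erts_alloc(ERTS_ALC_T_TMP, size);
    /* ... work with tmp ... */
    erts_free(ERTS_ALC_T_TMP, tmp);

    /* With ERTS_DO_VERIFY_UNUSED_TEMP_ALLOC defined (automatic in DEBUG
     * builds), this calls the scheduler's cached verify function, and
     * erts_alcu_verify_unused() aborts via erl_exit() if any block in the
     * temporary allocator is still allocated; otherwise it expands to
     * nothing. */
    ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
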
diff --git a/erts/emulator/beam/erl_threads.h b/erts/emulator/beam/erl_threads.h
index 84a20b51f2..8c9cace0c5 100644
--- a/erts/emulator/beam/erl_threads.h
+++ b/erts/emulator/beam/erl_threads.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2001-2010. All Rights Reserved.
+ * Copyright Ericsson AB 2001-2011. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -36,6 +36,16 @@
#include "erl_lock_count.h"
#include "erl_term.h"
+#if defined(__GLIBC__) && (__GLIBC__ << 16) + __GLIBC_MINOR__ < (2 << 16) + 4
+/*
+ * pthread_mutex_destroy() may return EBUSY when it shouldn't :( We have
+ * only seen this bug in glibc versions before 2.4. Note that condition
+ * variables, rwmutexes, spinlocks, and rwspinlocks also may be effected by
+ * this bug since these implementations may use mutexes internally.
+ */
+# define ERTS_THR_HAVE_BUSY_DESTROY_BUG
+#endif
+
#define ERTS_THR_MEMORY_BARRIER ETHR_MEMORY_BARRIER
#ifdef ERTS_ENABLE_LOCK_COUNT
@@ -175,6 +185,9 @@ typedef struct { int gcc_is_buggy; } erts_rwlock_t;
#endif /* #ifdef USE_THREADS */
#define ERTS_AINT_T_MAX (~(((erts_aint_t) 1) << (sizeof(erts_aint_t)*8-1)))
+#define ERTS_AINT_T_MIN ((((erts_aint_t) 1) << (sizeof(erts_aint_t)*8-1)))
+#define ERTS_AINT32_T_MAX (~(((erts_aint32_t) 1) << (sizeof(erts_aint32_t)*8-1)))
+#define ERTS_AINT32_T_MIN ((((erts_aint32_t) 1) << (sizeof(erts_aint32_t)*8-1)))
ERTS_GLB_INLINE void erts_thr_init(erts_thr_init_data_t *id);
ERTS_GLB_INLINE void erts_thr_late_init(erts_thr_late_init_data_t *id);
@@ -551,8 +564,16 @@ erts_mtx_destroy(erts_mtx_t *mtx)
erts_lcnt_destroy_lock(&mtx->lcnt);
#endif
res = ethr_mutex_destroy(&mtx->mtx);
- if (res)
+ if (res != 0) {
+#ifdef ERTS_THR_HAVE_BUSY_DESTROY_BUG
+ if (res == EBUSY) {
+ char *warn = "Ignoring busy mutex destroy. "
+ "Most likely a bug in pthread implementation.";
+ erts_send_warning_to_logger_str_nogl(warn);
+ }
+#endif
erts_thr_fatal_error(res, "destroy mutex");
+ }
#endif
}
@@ -647,8 +668,16 @@ erts_cnd_destroy(erts_cnd_t *cnd)
{
#ifdef USE_THREADS
int res = ethr_cond_destroy(cnd);
- if (res)
+ if (res != 0) {
+#ifdef ERTS_THR_HAVE_BUSY_DESTROY_BUG
+ if (res == EBUSY) {
+ char *warn = "Ignoring busy cond destroy. "
+ "Most likely a bug in pthread implementation.";
+ erts_send_warning_to_logger_str_nogl(warn);
+ }
+#endif
erts_thr_fatal_error(res, "destroy condition variable");
+ }
#endif
}
@@ -774,8 +803,16 @@ erts_rwmtx_destroy(erts_rwmtx_t *rwmtx)
erts_lcnt_destroy_lock(&rwmtx->lcnt);
#endif
res = ethr_rwmutex_destroy(&rwmtx->rwmtx);
- if (res != 0)
+ if (res != 0) {
+#ifdef ERTS_THR_HAVE_BUSY_DESTROY_BUG
+ if (res == EBUSY) {
+ char *warn = "Ignoring busy rwmutex destroy. "
+ "Most likely a bug in pthread implementation.";
+ erts_send_warning_to_logger_str_nogl(warn);
+ }
+#endif
erts_thr_fatal_error(res, "destroy rwmutex");
+ }
#endif
}
@@ -1452,8 +1489,16 @@ erts_spinlock_destroy(erts_spinlock_t *lock)
erts_lcnt_destroy_lock(&lock->lcnt);
#endif
res = ethr_spinlock_destroy(&lock->slck);
- if (res)
- erts_thr_fatal_error(res, "destroy spinlock");
+ if (res != 0) {
+#ifdef ERTS_THR_HAVE_BUSY_DESTROY_BUG
+ if (res == EBUSY) {
+ char *warn = "Ignoring busy spinlock destroy. "
+ "Most likely a bug in pthread implementation.";
+ erts_send_warning_to_logger_str_nogl(warn);
+ }
+#endif
+ erts_thr_fatal_error(res, "destroy rwlock");
+ }
#else
(void)lock;
#endif
@@ -1562,8 +1607,16 @@ erts_rwlock_destroy(erts_rwlock_t *lock)
erts_lcnt_destroy_lock(&lock->lcnt);
#endif
res = ethr_rwlock_destroy(&lock->rwlck);
- if (res)
+ if (res != 0) {
+#ifdef ERTS_THR_HAVE_BUSY_DESTROY_BUG
+ if (res == EBUSY) {
+ char *warn = "Ignoring busy rwlock destroy. "
+ "Most likely a bug in pthread implementation.";
+ erts_send_warning_to_logger_str_nogl(warn);
+ }
+#endif
erts_thr_fatal_error(res, "destroy rwlock");
+ }
#else
(void)lock;
#endif
diff --git a/erts/emulator/beam/sys.h b/erts/emulator/beam/sys.h
index b1cd683b3b..ff828ae889 100644
--- a/erts/emulator/beam/sys.h
+++ b/erts/emulator/beam/sys.h
@@ -331,12 +331,16 @@ typedef unsigned char byte;
(((size_t) 8) - (((size_t) (X)) & ((size_t) 7)))
#include "erl_lock_check.h"
+
+/* needed by erl_smp.h */
+int erts_send_warning_to_logger_str_nogl(char *);
+
#include "erl_smp.h"
#ifdef ERTS_WANT_BREAK_HANDLING
# ifdef ERTS_SMP
-extern erts_smp_atomic_t erts_break_requested;
-# define ERTS_BREAK_REQUESTED ((int) erts_smp_atomic_read(&erts_break_requested))
+extern erts_smp_atomic32_t erts_break_requested;
+# define ERTS_BREAK_REQUESTED ((int) erts_smp_atomic32_read(&erts_break_requested))
# else
extern volatile int erts_break_requested;
# define ERTS_BREAK_REQUESTED erts_break_requested
@@ -349,8 +353,8 @@ void erts_do_break_handling(void);
# define ERTS_GOT_SIGUSR1 0
# else
# ifdef ERTS_SMP
-extern erts_smp_atomic_t erts_got_sigusr1;
-# define ERTS_GOT_SIGUSR1 ((int) erts_smp_atomic_read(&erts_got_sigusr1))
+extern erts_smp_atomic32_t erts_got_sigusr1;
+# define ERTS_GOT_SIGUSR1 ((int) erts_smp_atomic32_read(&erts_got_sigusr1))
# else
extern volatile int erts_got_sigusr1;
# define ERTS_GOT_SIGUSR1 erts_got_sigusr1
@@ -517,7 +521,8 @@ int erts_send_info_to_logger_nogl(erts_dsprintf_buf_t *);
int erts_send_warning_to_logger_nogl(erts_dsprintf_buf_t *);
int erts_send_error_to_logger_nogl(erts_dsprintf_buf_t *);
int erts_send_info_to_logger_str_nogl(char *);
-int erts_send_warning_to_logger_str_nogl(char *);
+/* needed by erl_smp.h (declared above)
+ int erts_send_warning_to_logger_str_nogl(char *); */
int erts_send_error_to_logger_str_nogl(char *);
typedef struct preload {
diff --git a/erts/emulator/sys/common/erl_poll.c b/erts/emulator/sys/common/erl_poll.c
index 4d0ca97889..77ac2de5f6 100644
--- a/erts/emulator/sys/common/erl_poll.c
+++ b/erts/emulator/sys/common/erl_poll.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2006-2010. All Rights Reserved.
+ * Copyright Ericsson AB 2006-2011. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -124,25 +124,11 @@
erts_smp_mtx_unlock(&(PS)->mtx)
#define ERTS_POLLSET_SET_POLLED_CHK(PS) \
- ((int) erts_smp_atomic_xchg(&(PS)->polled, (erts_aint_t) 1))
+ ((int) erts_atomic32_xchg(&(PS)->polled, (erts_aint32_t) 1))
#define ERTS_POLLSET_UNSET_POLLED(PS) \
- erts_smp_atomic_set(&(PS)->polled, (erts_aint_t) 0)
+ erts_atomic32_set(&(PS)->polled, (erts_aint32_t) 0)
#define ERTS_POLLSET_IS_POLLED(PS) \
- ((int) erts_smp_atomic_read(&(PS)->polled))
-
-#define ERTS_POLLSET_SET_POLLER_WOKEN_CHK(PS) set_poller_woken_chk((PS))
-#define ERTS_POLLSET_SET_POLLER_WOKEN(PS) \
-do { \
- ERTS_THR_MEMORY_BARRIER; \
- erts_smp_atomic_set(&(PS)->woken, (erts_aint_t) 1); \
-} while (0)
-#define ERTS_POLLSET_UNSET_POLLER_WOKEN(PS) \
-do { \
- erts_smp_atomic_set(&(PS)->woken, (erts_aint_t) 0); \
- ERTS_THR_MEMORY_BARRIER; \
-} while (0)
-#define ERTS_POLLSET_IS_POLLER_WOKEN(PS) \
- ((int) erts_smp_atomic_read(&(PS)->woken))
+ ((int) erts_atomic32_read(&(PS)->polled))
#else
@@ -152,69 +138,21 @@ do { \
#define ERTS_POLLSET_UNSET_POLLED(PS)
#define ERTS_POLLSET_IS_POLLED(PS) 0
-#if ERTS_POLL_ASYNC_INTERRUPT_SUPPORT
-
-/*
- * Ideally, the ERTS_POLLSET_SET_POLLER_WOKEN_CHK(PS) operation would
- * be atomic. This operation isn't, but we will do okay anyway. The
- * "woken check" is only an optimization. The only requirement we have:
- * If (PS)->woken is set to a value != 0 when interrupting, we have to
- * write on the the wakeup pipe at least once. Multiple writes are okay.
- */
-#define ERTS_POLLSET_SET_POLLER_WOKEN_CHK(PS) ((PS)->woken++)
-#define ERTS_POLLSET_SET_POLLER_WOKEN(PS) ((PS)->woken = 1, (void) 0)
-#define ERTS_POLLSET_UNSET_POLLER_WOKEN(PS) ((PS)->woken = 0, (void) 0)
-#define ERTS_POLLSET_IS_POLLER_WOKEN(PS) ((PS)->woken)
-
-#else
-
-#define ERTS_POLLSET_SET_POLLER_WOKEN_CHK(PS) 1
-#define ERTS_POLLSET_SET_POLLER_WOKEN(PS)
-#define ERTS_POLLSET_UNSET_POLLER_WOKEN(PS)
-#define ERTS_POLLSET_IS_POLLER_WOKEN(PS) 1
-
-#endif
-
#endif
#if ERTS_POLL_USE_UPDATE_REQUESTS_QUEUE
#define ERTS_POLLSET_SET_HAVE_UPDATE_REQUESTS(PS) \
- erts_smp_atomic_set(&(PS)->have_update_requests, (erts_aint_t) 1)
+ erts_smp_atomic32_set(&(PS)->have_update_requests, (erts_aint32_t) 1)
#define ERTS_POLLSET_UNSET_HAVE_UPDATE_REQUESTS(PS) \
- erts_smp_atomic_set(&(PS)->have_update_requests, (erts_aint_t) 0)
+ erts_smp_atomic32_set(&(PS)->have_update_requests, (erts_aint32_t) 0)
#define ERTS_POLLSET_HAVE_UPDATE_REQUESTS(PS) \
- ((int) erts_smp_atomic_read(&(PS)->have_update_requests))
+ ((int) erts_smp_atomic32_read(&(PS)->have_update_requests))
#else
#define ERTS_POLLSET_SET_HAVE_UPDATE_REQUESTS(PS)
#define ERTS_POLLSET_UNSET_HAVE_UPDATE_REQUESTS(PS)
#define ERTS_POLLSET_HAVE_UPDATE_REQUESTS(PS) 0
#endif
-#if ERTS_POLL_ASYNC_INTERRUPT_SUPPORT && !defined(ERTS_SMP)
-
-#define ERTS_POLLSET_UNSET_INTERRUPTED_CHK(PS) unset_interrupted_chk((PS))
-#define ERTS_POLLSET_UNSET_INTERRUPTED(PS) ((PS)->interrupt = 0, (void) 0)
-#define ERTS_POLLSET_SET_INTERRUPTED(PS) ((PS)->interrupt = 1, (void) 0)
-#define ERTS_POLLSET_IS_INTERRUPTED(PS) ((PS)->interrupt)
-
-#else
-
-#define ERTS_POLLSET_UNSET_INTERRUPTED_CHK(PS) unset_interrupted_chk((PS))
-#define ERTS_POLLSET_UNSET_INTERRUPTED(PS) \
-do { \
- erts_smp_atomic_set(&(PS)->interrupt, (erts_aint_t) 0); \
- ERTS_THR_MEMORY_BARRIER; \
-} while (0)
-#define ERTS_POLLSET_SET_INTERRUPTED(PS) \
-do { \
- ERTS_THR_MEMORY_BARRIER; \
- erts_smp_atomic_set(&(PS)->interrupt, (erts_aint_t) 1); \
-} while (0)
-#define ERTS_POLLSET_IS_INTERRUPTED(PS) \
- ((int) erts_smp_atomic_read(&(PS)->interrupt))
-
-#endif
-
#if ERTS_POLL_USE_FALLBACK
# if ERTS_POLL_USE_POLL
# define ERTS_POLL_NEED_FALLBACK(PS) ((PS)->no_poll_fds > 1)
@@ -318,14 +256,12 @@ struct ErtsPollSet_ {
#if ERTS_POLL_USE_UPDATE_REQUESTS_QUEUE
ErtsPollSetUpdateRequestsBlock update_requests;
ErtsPollSetUpdateRequestsBlock *curr_upd_req_block;
- erts_smp_atomic_t have_update_requests;
+ erts_smp_atomic32_t have_update_requests;
#endif
#ifdef ERTS_SMP
- erts_smp_atomic_t polled;
- erts_smp_atomic_t woken;
+ erts_atomic32_t polled;
erts_smp_mtx_t mtx;
#elif ERTS_POLL_ASYNC_INTERRUPT_SUPPORT
- volatile int woken;
#endif
#if ERTS_POLL_USE_WAKEUP_PIPE
int wake_fds[2];
@@ -333,12 +269,12 @@ struct ErtsPollSet_ {
#if ERTS_POLL_USE_FALLBACK
int fallback_used;
#endif
-#if ERTS_POLL_ASYNC_INTERRUPT_SUPPORT && !defined(ERTS_SMP)
- volatile int interrupt;
-#else
- erts_smp_atomic_t interrupt;
+#ifdef ERTS_SMP
+ erts_atomic32_t wakeup_state;
+#elif ERTS_POLL_ASYNC_INTERRUPT_SUPPORT
+ volatile int wakeup_state;
#endif
- erts_smp_atomic_t timeout;
+ erts_smp_atomic32_t timeout;
#ifdef ERTS_POLL_COUNT_AVOIDED_WAKEUPS
erts_smp_atomic_t no_avoided_wakeups;
erts_smp_atomic_t no_avoided_interrupts;
@@ -346,34 +282,6 @@ struct ErtsPollSet_ {
#endif
};
-static ERTS_INLINE int
-unset_interrupted_chk(ErtsPollSet ps)
-{
- int res;
-#if ERTS_POLL_ASYNC_INTERRUPT_SUPPORT && !defined(ERTS_SMP)
- /* This operation isn't atomic, but we have no need at all for an
- atomic operation here... */
- res = ps->interrupt;
- ps->interrupt = 0;
-#else
- res = (int) erts_smp_atomic_xchg(&ps->interrupt, (erts_aint_t) 0);
- ERTS_THR_MEMORY_BARRIER;
-#endif
- return res;
-
-}
-
-#ifdef ERTS_SMP
-
-static ERTS_INLINE int
-set_poller_woken_chk(ErtsPollSet ps)
-{
- ERTS_THR_MEMORY_BARRIER;
- return (int) erts_smp_atomic_xchg(&ps->woken, (erts_aint_t) 1);
-}
-
-#endif
-
void erts_silence_warn_unused_result(long unused);
static void fatal_error(char *format, ...);
static void fatal_error_async_signal_safe(char *error_str);
@@ -430,6 +338,63 @@ static void check_poll_status(ErtsPollSet ps);
static void print_misc_debug_info(void);
#endif
+#define ERTS_POLL_NOT_WOKEN 0
+#define ERTS_POLL_WOKEN -1
+#define ERTS_POLL_WOKEN_INTR 1
+
+static ERTS_INLINE void
+reset_wakeup_state(ErtsPollSet ps)
+{
+#ifdef ERTS_SMP
+ erts_atomic32_set(&ps->wakeup_state, ERTS_POLL_NOT_WOKEN);
+#elif ERTS_POLL_ASYNC_INTERRUPT_SUPPORT
+ ps->wakeup_state = 0;
+#endif
+}
+
+static ERTS_INLINE int
+is_woken(ErtsPollSet ps)
+{
+#ifdef ERTS_SMP
+ return erts_atomic32_read_acqb(&ps->wakeup_state) != ERTS_POLL_NOT_WOKEN;
+#elif ERTS_POLL_ASYNC_INTERRUPT_SUPPORT
+ return ps->wakeup_state != ERTS_POLL_NOT_WOKEN;
+#else
+ return 0;
+#endif
+}
+
+static ERTS_INLINE int
+is_interrupted_reset(ErtsPollSet ps)
+{
+#ifdef ERTS_SMP
+ return (erts_atomic32_xchg(&ps->wakeup_state, ERTS_POLL_NOT_WOKEN)
+ == ERTS_POLL_WOKEN_INTR);
+#elif ERTS_POLL_ASYNC_INTERRUPT_SUPPORT
+ int res = ps->wakeup_state == ERTS_POLL_WOKEN_INTR;
+ ps->wakeup_state = ERTS_POLL_NOT_WOKEN;
+ return res;
+#else
+ return 0;
+#endif
+}
+
+static ERTS_INLINE void
+woke_up(ErtsPollSet ps)
+{
+#ifdef ERTS_SMP
+ erts_aint32_t wakeup_state = erts_atomic32_read(&ps->wakeup_state);
+ if (wakeup_state == ERTS_POLL_NOT_WOKEN)
+ (void) erts_atomic32_cmpxchg(&ps->wakeup_state,
+ ERTS_POLL_WOKEN,
+ ERTS_POLL_NOT_WOKEN);
+ ASSERT(erts_atomic32_read(&ps->wakeup_state) != ERTS_POLL_NOT_WOKEN);
+#elif ERTS_POLL_ASYNC_INTERRUPT_SUPPORT
+ if (ps->wakeup_state == ERTS_POLL_NOT_WOKEN)
+ ps->wakeup_state = ERTS_POLL_WOKEN;
+#endif
+}
+
/*
* --- Wakeup pipe -----------------------------------------------------------
*/
@@ -437,14 +402,34 @@ static void print_misc_debug_info(void);
#if ERTS_POLL_USE_WAKEUP_PIPE
static ERTS_INLINE void
-wake_poller(ErtsPollSet ps)
+wake_poller(ErtsPollSet ps, int interrupted)
{
+ int wake;
+#ifdef ERTS_SMP
+ erts_aint32_t wakeup_state;
+ if (!interrupted)
+ wakeup_state = erts_atomic32_cmpxchg_relb(&ps->wakeup_state,
+ ERTS_POLL_WOKEN,
+ ERTS_POLL_NOT_WOKEN);
+ else {
+ /*
+ * We might unnecessarily write to the pipe, however,
+ * that isn't problematic.
+ */
+ wakeup_state = erts_atomic32_read(&ps->wakeup_state);
+ erts_atomic32_set_relb(&ps->wakeup_state, ERTS_POLL_WOKEN_INTR);
+ }
+ wake = wakeup_state == ERTS_POLL_NOT_WOKEN;
+#elif ERTS_POLL_ASYNC_INTERRUPT_SUPPORT
+ wake = ps->wakeup_state == ERTS_POLL_NOT_WOKEN;
+ ps->wakeup_state = interrupted ? ERTS_POLL_WOKEN_INTR : ERTS_POLL_NOT_WOKEN;
+#endif
/*
* NOTE: This function might be called from signal handlers in the
* non-smp case; therefore, it has to be async-signal safe in
* the non-smp case.
*/
- if (!ERTS_POLLSET_SET_POLLER_WOKEN_CHK(ps)) {
+ if (wake) {
ssize_t res;
if (ps->wake_fds[1] < 0)
return; /* Not initialized yet */
@@ -1387,9 +1372,7 @@ handle_update_requests(ErtsPollSet ps)
#endif /* ERTS_POLL_USE_UPDATE_REQUESTS_QUEUE */
static ERTS_INLINE ErtsPollEvents
-poll_control(ErtsPollSet ps, int fd, ErtsPollEvents events, int on,
- int *have_set_have_update_requests,
- int *do_wake)
+poll_control(ErtsPollSet ps, int fd, ErtsPollEvents events, int on, int *do_wake)
{
ErtsPollEvents new_events;
@@ -1493,7 +1476,6 @@ ERTS_POLL_EXPORT(erts_poll_controlv)(ErtsPollSet ps,
int len)
{
int i;
- int hshur = 0;
int do_wake;
int final_do_wake = 0;
@@ -1505,17 +1487,17 @@ ERTS_POLL_EXPORT(erts_poll_controlv)(ErtsPollSet ps,
pcev[i].fd,
pcev[i].events,
pcev[i].on,
- &hshur,
&do_wake);
final_do_wake |= do_wake;
}
+ ERTS_POLLSET_UNLOCK(ps);
+
#ifdef ERTS_SMP
if (final_do_wake)
- wake_poller(ps);
+ wake_poller(ps, 0);
#endif /* ERTS_SMP */
- ERTS_POLLSET_UNLOCK(ps);
}
ErtsPollEvents
@@ -1526,20 +1508,20 @@ ERTS_POLL_EXPORT(erts_poll_control)(ErtsPollSet ps,
int* do_wake) /* In: Wake up polling thread */
/* Out: Poller is woken */
{
- int hshur = 0;
ErtsPollEvents res;
ERTS_POLLSET_LOCK(ps);
- res = poll_control(ps, fd, events, on, &hshur, do_wake);
+ res = poll_control(ps, fd, events, on, do_wake);
+
+ ERTS_POLLSET_UNLOCK(ps);
#ifdef ERTS_SMP
if (*do_wake) {
- wake_poller(ps);
+ wake_poller(ps, 0);
}
#endif /* ERTS_SMP */
- ERTS_POLLSET_UNLOCK(ps);
return res;
}
@@ -1918,9 +1900,11 @@ check_fd_events(ErtsPollSet ps, SysTimeval *tv, int max_res, int *ps_locked)
return 0;
}
else {
- erts_aint_t timeout = tv->tv_sec*1000 + tv->tv_usec/1000;
+ long timeout = tv->tv_sec*1000 + tv->tv_usec/1000;
+ if (timeout > ERTS_AINT32_T_MAX)
+ timeout = ERTS_AINT32_T_MAX;
ASSERT(timeout >= 0);
- erts_smp_atomic_set(&ps->timeout, timeout);
+ erts_smp_atomic32_set_relb(&ps->timeout, (erts_aint32_t) timeout);
#if ERTS_POLL_USE_FALLBACK
if (!(ps->fallback_used = ERTS_POLL_NEED_FALLBACK(ps))) {
@@ -2042,15 +2026,14 @@ ERTS_POLL_EXPORT(erts_poll_wait)(ErtsPollSet ps,
(int) tv->tv_sec*1000 + tv->tv_usec/1000);
#endif
- ERTS_POLLSET_UNSET_POLLER_WOKEN(ps);
if (ERTS_POLLSET_SET_POLLED_CHK(ps)) {
res = EINVAL; /* Another thread is in erts_poll_wait()
on this pollset... */
goto done;
}
- if (ERTS_POLLSET_IS_INTERRUPTED(ps)) {
- /* Interrupt use zero timeout */
+ if (is_woken(ps)) {
+ /* Use zero timeout */
itv.tv_sec = 0;
itv.tv_usec = 0;
tvp = &itv;
@@ -2067,7 +2050,7 @@ ERTS_POLL_EXPORT(erts_poll_wait)(ErtsPollSet ps,
ps_locked = 0;
res = check_fd_events(ps, tvp, no_fds, &ps_locked);
- ERTS_POLLSET_SET_POLLER_WOKEN(ps);
+ woke_up(ps);
if (res == 0) {
res = ETIMEDOUT;
@@ -2099,9 +2082,7 @@ ERTS_POLL_EXPORT(erts_poll_wait)(ErtsPollSet ps,
check_poll_result(pr, no_fds);
#endif
- res = (no_fds == 0
- ? (ERTS_POLLSET_UNSET_INTERRUPTED_CHK(ps) ? EINTR : EAGAIN)
- : 0);
+ res = (no_fds == 0 ? (is_interrupted_reset(ps) ? EINTR : EAGAIN) : 0);
*len = no_fds;
}
@@ -2112,7 +2093,7 @@ ERTS_POLL_EXPORT(erts_poll_wait)(ErtsPollSet ps,
#endif
done:
- erts_smp_atomic_set(&ps->timeout, ERTS_AINT_T_MAX);
+ erts_smp_atomic32_set_relb(&ps->timeout, ERTS_AINT32_T_MAX);
#ifdef ERTS_POLL_DEBUG_PRINT
erts_printf("Leaving %s = erts_poll_wait()\n",
res == 0 ? "0" : erl_errno_id(res));
@@ -2128,20 +2109,17 @@ ERTS_POLL_EXPORT(erts_poll_wait)(ErtsPollSet ps,
void
ERTS_POLL_EXPORT(erts_poll_interrupt)(ErtsPollSet ps, int set)
{
+#if ERTS_POLL_ASYNC_INTERRUPT_SUPPORT || defined(ERTS_SMP)
/*
* NOTE: This function might be called from signal handlers in the
* non-smp case; therefore, it has to be async-signal safe in
* the non-smp case.
*/
- if (set) {
- ERTS_POLLSET_SET_INTERRUPTED(ps);
-#if ERTS_POLL_ASYNC_INTERRUPT_SUPPORT || defined(ERTS_SMP)
- wake_poller(ps);
+ if (!set)
+ reset_wakeup_state(ps);
+ else
+ wake_poller(ps, 1);
#endif
- }
- else {
- ERTS_POLLSET_UNSET_INTERRUPTED(ps);
- }
}
/*
@@ -2154,13 +2132,12 @@ ERTS_POLL_EXPORT(erts_poll_interrupt_timed)(ErtsPollSet ps,
int set,
long msec)
{
- if (set) {
- if (erts_smp_atomic_read(&ps->timeout) > (erts_aint_t) msec) {
- ERTS_POLLSET_SET_INTERRUPTED(ps);
#if ERTS_POLL_ASYNC_INTERRUPT_SUPPORT || defined(ERTS_SMP)
- wake_poller(ps);
-#endif
- }
+ if (!set)
+ reset_wakeup_state(ps);
+ else {
+ if (erts_smp_atomic32_read_acqb(&ps->timeout) > (erts_aint32_t) msec)
+ wake_poller(ps, 1);
#ifdef ERTS_POLL_COUNT_AVOIDED_WAKEUPS
else {
if (ERTS_POLLSET_IS_POLLED(ps))
@@ -2170,9 +2147,7 @@ ERTS_POLL_EXPORT(erts_poll_interrupt_timed)(ErtsPollSet ps,
erts_smp_atomic_inc(&ps->no_interrupt_timed);
#endif
}
- else {
- ERTS_POLLSET_UNSET_INTERRUPTED(ps);
- }
+#endif
}
int
@@ -2283,14 +2258,16 @@ ERTS_POLL_EXPORT(erts_poll_create_pollset)(void)
ps->update_requests.next = NULL;
ps->update_requests.len = 0;
ps->curr_upd_req_block = &ps->update_requests;
- erts_smp_atomic_init(&ps->have_update_requests, 0);
+ erts_smp_atomic32_init(&ps->have_update_requests, 0);
#endif
#ifdef ERTS_SMP
- erts_smp_atomic_init(&ps->polled, 0);
- erts_smp_atomic_init(&ps->woken, 0);
+ erts_atomic32_init(&ps->polled, 0);
erts_smp_mtx_init(&ps->mtx, "pollset");
+#endif
+#ifdef ERTS_SMP
+ erts_atomic32_init(&ps->wakeup_state, (erts_aint32_t) 0);
#elif ERTS_POLL_ASYNC_INTERRUPT_SUPPORT
- ps->woken = 0;
+ ps->wakeup_state = 0;
#endif
#if ERTS_POLL_USE_WAKEUP_PIPE
create_wakeup_pipe(ps);
@@ -2312,12 +2289,7 @@ ERTS_POLL_EXPORT(erts_poll_create_pollset)(void)
ps->internal_fd_limit = kp_fd + 1;
ps->kp_fd = kp_fd;
#endif
-#if ERTS_POLL_ASYNC_INTERRUPT_SUPPORT && !defined(ERTS_SMP)
- ps->interrupt = 0;
-#else
- erts_smp_atomic_init(&ps->interrupt, 0);
-#endif
- erts_smp_atomic_init(&ps->timeout, ERTS_AINT_T_MAX);
+ erts_smp_atomic32_init(&ps->timeout, ERTS_AINT32_T_MAX);
#ifdef ERTS_POLL_COUNT_AVOIDED_WAKEUPS
erts_smp_atomic_init(&ps->no_avoided_wakeups, 0);
erts_smp_atomic_init(&ps->no_avoided_interrupts, 0);
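
A sketch of the wakeup protocol that replaces the separate woken/interrupt flags in the SMP case, as implemented by wake_poller(), woke_up() and is_interrupted_reset() above:

    /*
     * wakeup_state transitions (SMP case):
     *
     *   ERTS_POLL_NOT_WOKEN --wake_poller(ps, 0)--> ERTS_POLL_WOKEN       (wakeup pipe written)
     *   ERTS_POLL_NOT_WOKEN --wake_poller(ps, 1)--> ERTS_POLL_WOKEN_INTR  (wakeup pipe written)
     *   ERTS_POLL_WOKEN     --wake_poller(ps, 1)--> ERTS_POLL_WOKEN_INTR  (no write needed)
     *   ERTS_POLL_NOT_WOKEN --woke_up()-----------> ERTS_POLL_WOKEN       (poller returned on its own)
     *
     * Neither wake_poller(ps, 0) nor woke_up() ever overwrites
     * ERTS_POLL_WOKEN_INTR, so an interrupt cannot be lost;
     * is_interrupted_reset() swaps the state back to ERTS_POLL_NOT_WOKEN
     * and returns true only if it read ERTS_POLL_WOKEN_INTR, which
     * erts_poll_wait() maps to EINTR when no descriptors were ready.
     */
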
diff --git a/erts/emulator/sys/unix/sys.c b/erts/emulator/sys/unix/sys.c
index bfc04faa45..bc940d2084 100644
--- a/erts/emulator/sys/unix/sys.c
+++ b/erts/emulator/sys/unix/sys.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1996-2010. All Rights Reserved.
+ * Copyright Ericsson AB 1996-2011. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -160,14 +160,14 @@ static int debug_log = 0;
#endif
#ifdef ERTS_SMP
-erts_smp_atomic_t erts_got_sigusr1;
+erts_smp_atomic32_t erts_got_sigusr1;
#define ERTS_SET_GOT_SIGUSR1 \
- erts_smp_atomic_set(&erts_got_sigusr1, 1)
+ erts_smp_atomic32_set(&erts_got_sigusr1, 1)
#define ERTS_UNSET_GOT_SIGUSR1 \
- erts_smp_atomic_set(&erts_got_sigusr1, 0)
-static erts_smp_atomic_t have_prepared_crash_dump;
+ erts_smp_atomic32_set(&erts_got_sigusr1, 0)
+static erts_smp_atomic32_t have_prepared_crash_dump;
#define ERTS_PREPARED_CRASH_DUMP \
- ((int) erts_smp_atomic_xchg(&have_prepared_crash_dump, 1))
+ ((int) erts_smp_atomic32_xchg(&have_prepared_crash_dump, 1))
#else
volatile int erts_got_sigusr1;
#define ERTS_SET_GOT_SIGUSR1 (erts_got_sigusr1 = 1)
@@ -235,11 +235,11 @@ static int max_files = -1;
* a few variables used by the break handler
*/
#ifdef ERTS_SMP
-erts_smp_atomic_t erts_break_requested;
+erts_smp_atomic32_t erts_break_requested;
#define ERTS_SET_BREAK_REQUESTED \
- erts_smp_atomic_set(&erts_break_requested, (erts_aint_t) 1)
+ erts_smp_atomic32_set(&erts_break_requested, (erts_aint32_t) 1)
#define ERTS_UNSET_BREAK_REQUESTED \
- erts_smp_atomic_set(&erts_break_requested, (erts_aint_t) 0)
+ erts_smp_atomic32_set(&erts_break_requested, (erts_aint32_t) 0)
#else
volatile int erts_break_requested = 0;
#define ERTS_SET_BREAK_REQUESTED (erts_break_requested = 1)
@@ -504,9 +504,9 @@ erts_sys_pre_init(void)
#endif
}
#ifdef ERTS_SMP
- erts_smp_atomic_init(&erts_break_requested, 0);
- erts_smp_atomic_init(&erts_got_sigusr1, 0);
- erts_smp_atomic_init(&have_prepared_crash_dump, 0);
+ erts_smp_atomic32_init(&erts_break_requested, 0);
+ erts_smp_atomic32_init(&erts_got_sigusr1, 0);
+ erts_smp_atomic32_init(&have_prepared_crash_dump, 0);
#else
erts_break_requested = 0;
erts_got_sigusr1 = 0;
diff --git a/erts/emulator/sys/win32/erl_poll.c b/erts/emulator/sys/win32/erl_poll.c
index d84ae2ede2..1f2877b682 100644
--- a/erts/emulator/sys/win32/erl_poll.c
+++ b/erts/emulator/sys/win32/erl_poll.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2007-2010. All Rights Reserved.
+ * Copyright Ericsson AB 2007-2011. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -274,7 +274,6 @@ struct ErtsPollSet_ {
Waiter** waiter;
int allocated_waiters; /* Size ow waiter array */
int num_waiters; /* Number of waiter threads. */
- erts_atomic_t sys_io_ready; /* Tells us there is I/O ready (already). */
int restore_events; /* Tells us to restore waiters events
next time around */
HANDLE event_io_ready; /* To be used when waiting for io */
@@ -282,12 +281,11 @@ struct ErtsPollSet_ {
volatile int standby_wait_counter; /* Number of threads to wait for */
CRITICAL_SECTION standby_crit; /* CS to guard the counter */
HANDLE standby_wait_event; /* Event signalled when counte == 0 */
+ erts_atomic32_t wakeup_state;
#ifdef ERTS_SMP
- erts_smp_atomic_t woken;
erts_smp_mtx_t mtx;
- erts_smp_atomic_t interrupt;
#endif
- erts_smp_atomic_t timeout;
+ erts_smp_atomic32_t timeout;
};
#ifdef ERTS_SMP
@@ -296,126 +294,24 @@ struct ErtsPollSet_ {
erts_smp_mtx_lock(&(PS)->mtx)
#define ERTS_POLLSET_UNLOCK(PS) \
erts_smp_mtx_unlock(&(PS)->mtx)
-#define ERTS_POLLSET_SET_POLLED_CHK(PS) \
- ((int) erts_smp_atomic_xchg(&(PS)->polled, (erts_aint_t) 1))
-#define ERTS_POLLSET_SET_POLLED(PS) \
- erts_smp_atomic_set(&(PS)->polled, (erts_aint_t) 1)
-#define ERTS_POLLSET_UNSET_POLLED(PS) \
- erts_smp_atomic_set(&(PS)->polled, (erts_aint_t) 0)
-#define ERTS_POLLSET_IS_POLLED(PS) \
- ((int) erts_smp_atomic_read(&(PS)->polled))
-
-#define ERTS_POLLSET_SET_POLLER_WOKEN_CHK(PS) set_poller_woken_chk((PS))
-#define ERTS_POLLSET_SET_POLLER_WOKEN(PS) \
-do { \
- ERTS_THR_MEMORY_BARRIER; \
- erts_smp_atomic_set(&(PS)->woken, (erts_aint_t) 1); \
-} while (0)
-#define ERTS_POLLSET_UNSET_POLLER_WOKEN(PS) \
-do { \
- erts_smp_atomic_set(&(PS)->woken, (erts_aint_t) 0); \
- ERTS_THR_MEMORY_BARRIER; \
-} while (0)
-#define ERTS_POLLSET_IS_POLLER_WOKEN(PS) \
- ((int) erts_smp_atomic_read(&(PS)->woken))
-
-#define ERTS_POLLSET_UNSET_INTERRUPTED_CHK(PS) unset_interrupted_chk((PS))
-#define ERTS_POLLSET_UNSET_INTERRUPTED(PS) \
-do { \
- erts_smp_atomic_set(&(PS)->interrupt, (erts_aint_t) 0); \
- ERTS_THR_MEMORY_BARRIER; \
-} while (0)
-#define ERTS_POLLSET_SET_INTERRUPTED(PS) \
-do { \
- ERTS_THR_MEMORY_BARRIER; \
- erts_smp_atomic_set(&(PS)->interrupt, (erts_aint_t) 1); \
-} while (0)
-#define ERTS_POLLSET_IS_INTERRUPTED(PS) \
- ((int) erts_smp_atomic_read(&(PS)->interrupt))
-
-static ERTS_INLINE int
-unset_interrupted_chk(ErtsPollSet ps)
-{
- int res = (int) erts_smp_atomic_xchg(&ps->interrupt, (erts_aint_t) 0);
- ERTS_THR_MEMORY_BARRIER;
- return res;
-
-}
-
-static ERTS_INLINE int
-set_poller_woken_chk(ErtsPollSet ps)
-{
- ERTS_THR_MEMORY_BARRIER;
- return (int) erts_smp_atomic_xchg(&ps->woken, (erts_aint_t) 1);
-}
#else
#define ERTS_POLLSET_LOCK(PS)
#define ERTS_POLLSET_UNLOCK(PS)
-#define ERTS_POLLSET_SET_POLLED_CHK(PS) 0
-#define ERTS_POLLSET_UNSET_POLLED(PS)
-#define ERTS_POLLSET_IS_POLLED(PS) 0
-#define ERTS_POLLSET_SET_POLLER_WOKEN_CHK(PS) 1
-#define ERTS_POLLSET_SET_POLLER_WOKEN(PS)
-#define ERTS_POLLSET_UNSET_POLLER_WOKEN(PS)
-#define ERTS_POLLSET_IS_POLLER_WOKEN(PS) 1
-
#endif
/*
- * While atomics are not yet implemented for windows in the common library...
- *
- * MSDN doc states that SMP machines and old compilers require
- * InterLockedExchange to properly read and write interlocked
- * variables, otherwise the processors might reschedule
- * the access and order of atomics access is destroyed...
- * While they only mention it in white-papers, the problem
- * in VS2003 is due to the IA64 arch, so we can still count
- * on the CPU not rescheduling the access to volatile in X86 arch using
- * even the slightly older compiler...
- *
- * So here's (hopefully) a subset of the generally working atomic
- * variable access...
- */
-
-#if defined(__GNUC__)
-# if defined(__i386__) || defined(__x86_64__)
-# define VOLATILE_IN_SEQUENCE 1
-# else
-# define VOLATILE_IN_SEQUENCE 0
-# endif
-#elif defined(_MSC_VER)
-# if _MSC_VER < 1300
-# define VOLATILE_IN_SEQUENCE 0 /* Dont trust really old compilers */
-# else
-# if defined(_M_IX86)
-# define VOLATILE_IN_SEQUENCE 1
-# else /* I.e. IA64 */
-# if _MSC_VER >= 1400
-# define VOLATILE_IN_SEQUENCE 1
-# else
-# define VOLATILE_IN_SEQUENCE 0
-# endif
-# endif
-# endif
-#else
-# define VOLATILE_IN_SEQUENCE 0
-#endif
-
-
-
-/*
* Communication with sys_interrupt
*/
#ifdef ERTS_SMP
-extern erts_smp_atomic_t erts_break_requested;
+extern erts_smp_atomic32_t erts_break_requested;
#define ERTS_SET_BREAK_REQUESTED \
- erts_smp_atomic_set(&erts_break_requested, (erts_aint_t) 1)
+ erts_smp_atomic32_set(&erts_break_requested, (erts_aint32_t) 1)
#define ERTS_UNSET_BREAK_REQUESTED \
- erts_smp_atomic_set(&erts_break_requested, (erts_aint_t) 0)
+ erts_smp_atomic32_set(&erts_break_requested, (erts_aint32_t) 0)
#else
extern volatile int erts_break_requested;
#define ERTS_SET_BREAK_REQUESTED (erts_break_requested = 1)
@@ -424,7 +320,7 @@ extern volatile int erts_break_requested;
static erts_mtx_t break_waiter_lock;
static HANDLE break_happened_event;
-static erts_atomic_t break_waiter_state;
+static erts_atomic32_t break_waiter_state;
#define BREAK_WAITER_GOT_BREAK 1
#define BREAK_WAITER_GOT_HALT 2
@@ -467,29 +363,168 @@ do { \
wait_standby(PS); \
} while(0)
-#if ERTS_POLL_ASYNC_INTERRUPT_SUPPORT && !defined(ERTS_SMP)
+#define ERTS_POLL_NOT_WOKEN ((erts_aint32_t) 0)
+#define ERTS_POLL_WOKEN_IO_READY ((erts_aint32_t) 1)
+#define ERTS_POLL_WOKEN_INTR ((erts_aint32_t) 2)
+#define ERTS_POLL_WOKEN_TIMEDOUT ((erts_aint32_t) 3)
static ERTS_INLINE int
-unset_interrupted_chk(ErtsPollSet ps)
+is_io_ready(ErtsPollSet ps)
{
- /* This operation isn't atomic, but we have no need at all for an
- atomic operation here... */
- int res = ps->interrupt;
- ps->interrupt = 0;
- return res;
+ return erts_atomic32_read(&ps->wakeup_state) == ERTS_POLL_WOKEN_IO_READY;
}
+static ERTS_INLINE void
+woke_up(ErtsPollSet ps)
+{
+ if (erts_atomic32_read(&ps->wakeup_state) == ERTS_POLL_NOT_WOKEN)
+ erts_atomic32_cmpxchg(&ps->wakeup_state,
+ ERTS_POLL_WOKEN_TIMEDOUT,
+ ERTS_POLL_NOT_WOKEN);
+#ifdef DEBUG
+ {
+ erts_aint32_t wakeup_state = erts_atomic32_read(&ps->wakeup_state);
+ switch (wakeup_state) {
+ case ERTS_POLL_WOKEN_IO_READY:
+ case ERTS_POLL_WOKEN_INTR:
+ case ERTS_POLL_WOKEN_TIMEDOUT:
+ break;
+ default:
+ ASSERT(0);
+ break;
+ }
+ }
#endif
+}
+
+static ERTS_INLINE int
+wakeup_cause(ErtsPollSet ps)
+{
+ int res;
+ erts_aint32_t wakeup_state = erts_atomic32_read(&ps->wakeup_state);
+ switch (wakeup_state) {
+ case ERTS_POLL_WOKEN_IO_READY:
+ res = 0;
+ break;
+ case ERTS_POLL_WOKEN_INTR:
+ res = EINTR;
+ break;
+ case ERTS_POLL_WOKEN_TIMEDOUT:
+ res = ETIMEDOUT;
+ break;
+ default:
+ res = 0;
+ erl_exit(ERTS_ABORT_EXIT,
+ "%s:%d: Internal error: Invalid wakeup_state=%d\n",
+ __FILE__, __LINE__, (int) wakeup_state);
+ }
+ return res;
+}
+
+static ERTS_INLINE DWORD
+poll_wait_timeout(ErtsPollSet ps, SysTimeval *tvp)
+{
+ time_t timeout = tvp->tv_sec * 1000 + tvp->tv_usec / 1000;
+
+ if (timeout <= 0) {
+ woke_up(ps);
+ return (DWORD) 0;
+ }
+
+ ResetEvent(ps->event_io_ready);
+ /*
+ * Since we don't know the internals of ResetEvent() we issue
+ * a memory barrier as a safety precaution ensuring that
+ * the load of wakeup_state wont be reordered with stores made
+ * by ResetEvent().
+ */
+ ERTS_THR_MEMORY_BARRIER;
+ if (erts_atomic32_read(&ps->wakeup_state) != ERTS_POLL_NOT_WOKEN)
+ return (DWORD) 0;
+
+ if (timeout > ERTS_AINT32_T_MAX) /* Also prevents DWORD overflow */
+ timeout = ERTS_AINT32_T_MAX;
+
+ erts_smp_atomic32_set_relb(&ps->timeout, (erts_aint32_t) timeout);
+ return (DWORD) timeout;
+}
-#ifdef ERTS_SMP
static ERTS_INLINE void
-wake_poller(ErtsPollSet ps)
+wake_poller(ErtsPollSet ps, int io_ready)
{
- if (!ERTS_POLLSET_SET_POLLER_WOKEN_CHK(ps)) {
+ erts_aint32_t wakeup_state = erts_atomic32_read(&ps->wakeup_state);
+ if (io_ready) {
+ /* We may set the event multiple times. This is, however, harmless. */
+ erts_atomic32_set(&ps->wakeup_state, ERTS_POLL_WOKEN_IO_READY);
+ }
+ else {
+ while (wakeup_state != ERTS_POLL_WOKEN_IO_READY
+ && wakeup_state != ERTS_POLL_WOKEN_INTR) {
+ erts_aint32_t act = erts_atomic32_cmpxchg(&ps->wakeup_state,
+ ERTS_POLL_WOKEN_INTR,
+ wakeup_state);
+ if (act == wakeup_state) {
+ wakeup_state = act;
+ break;
+ }
+ wakeup_state = act;
+ }
+ }
+ if (wakeup_state == ERTS_POLL_NOT_WOKEN) {
+ /*
+ * Since we don't know the internals of SetEvent() we issue
+ * a memory barrier as a safety precaution ensuring that
+ * the store we just made to wakeup_state wont be reordered
+ * with loads in SetEvent().
+ */
+ ERTS_THR_MEMORY_BARRIER;
SetEvent(ps->event_io_ready);
}
}
-#endif
+
+static ERTS_INLINE void
+reset_io_ready(ErtsPollSet ps)
+{
+ erts_atomic32_set(&ps->wakeup_state, ERTS_POLL_NOT_WOKEN);
+}
+
+static ERTS_INLINE void
+restore_io_ready(ErtsPollSet ps)
+{
+ erts_atomic32_set(&ps->wakeup_state, ERTS_POLL_WOKEN_IO_READY);
+}
+
+/*
+ * notify_io_ready() is used by threads waiting for events, when
+ * notifying a poller thread about I/O ready.
+ */
+static ERTS_INLINE void
+notify_io_ready(ErtsPollSet ps)
+{
+ wake_poller(ps, 1);
+}
+
+static ERTS_INLINE void
+reset_interrupt(ErtsPollSet ps)
+{
+ /* We need to keep io-ready if set */
+ erts_aint32_t wakeup_state = erts_atomic32_read(&ps->wakeup_state);
+ while (wakeup_state != ERTS_POLL_WOKEN_IO_READY
+ && wakeup_state != ERTS_POLL_NOT_WOKEN) {
+ erts_aint32_t act = erts_atomic32_cmpxchg(&ps->wakeup_state,
+ ERTS_POLL_NOT_WOKEN,
+ wakeup_state);
+ if (wakeup_state == act)
+ break;
+ wakeup_state = act;
+ }
+}
+
+static ERTS_INLINE void
+set_interrupt(ErtsPollSet ps)
+{
+ wake_poller(ps, 0);
+}
static void setup_standby_wait(ErtsPollSet ps, int num_threads)
{
@@ -653,14 +688,14 @@ static void *break_waiter(void *param)
case WAIT_OBJECT_0:
ResetEvent(harr[0]);
erts_mtx_lock(&break_waiter_lock);
- erts_atomic_set(&break_waiter_state,BREAK_WAITER_GOT_BREAK);
+ erts_atomic32_set(&break_waiter_state,BREAK_WAITER_GOT_BREAK);
SetEvent(break_happened_event);
erts_mtx_unlock(&break_waiter_lock);
break;
case (WAIT_OBJECT_0+1):
ResetEvent(harr[1]);
erts_mtx_lock(&break_waiter_lock);
- erts_atomic_set(&break_waiter_state,BREAK_WAITER_GOT_HALT);
+ erts_atomic32_set(&break_waiter_state,BREAK_WAITER_GOT_HALT);
SetEvent(break_happened_event);
erts_mtx_unlock(&break_waiter_lock);
break;
@@ -767,12 +802,7 @@ event_happened:
consistency_check(w);
#endif
ASSERT(WAIT_OBJECT_0 < i && i < WAIT_OBJECT_0+w->active_events);
- if (!erts_atomic_xchg(&ps->sys_io_ready,1)) {
- HARDDEBUGF(("SET EventIoReady (%d)",erts_atomic_read(&ps->sys_io_ready)));
- SetEvent(ps->event_io_ready);
- } else {
- HARDDEBUGF(("DONT SET EventIoReady"));
- }
+ notify_io_ready(ps);
/*
* The main thread wont start working on our arrays untill we're
@@ -967,15 +997,10 @@ static int cancel_driver_select(ErtsPollSet ps, HANDLE event)
void erts_poll_interrupt(ErtsPollSet ps, int set /* bool */)
{
HARDTRACEF(("In erts_poll_interrupt(%d)",set));
-#ifdef ERTS_SMP
- if (set) {
- ERTS_POLLSET_SET_INTERRUPTED(ps);
- wake_poller(ps);
- }
- else {
- ERTS_POLLSET_UNSET_INTERRUPTED(ps);
- }
-#endif
+ if (!set)
+ reset_interrupt(ps);
+ else
+ set_interrupt(ps);
HARDTRACEF(("Out erts_poll_interrupt(%d)",set));
}
@@ -984,17 +1009,10 @@ void erts_poll_interrupt_timed(ErtsPollSet ps,
long msec)
{
HARDTRACEF(("In erts_poll_interrupt_timed(%d,%ld)",set,msec));
-#ifdef ERTS_SMP
- if (set) {
- if (erts_smp_atomic_read(&ps->timeout) > (erts_aint_t) msec) {
- ERTS_POLLSET_SET_INTERRUPTED(ps);
- wake_poller(ps);
- }
- }
- else {
- ERTS_POLLSET_UNSET_INTERRUPTED(ps);
- }
-#endif
+ if (!set)
+ reset_interrupt(ps);
+ else if (erts_smp_atomic32_read_acqb(&ps->timeout) > (erts_aint32_t) msec)
+ set_interrupt(ps);
HARDTRACEF(("Out erts_poll_interrupt_timed"));
}
@@ -1068,10 +1086,8 @@ void erts_poll_controlv(ErtsPollSet ps,
int erts_poll_wait(ErtsPollSet ps,
ErtsPollResFd pr[],
int *len,
- SysTimeval *utvp)
+ SysTimeval *tvp)
{
- SysTimeval *tvp = utvp;
- SysTimeval itv;
int no_fds;
DWORD timeout;
EventData* ev;
@@ -1084,7 +1100,7 @@ int erts_poll_wait(ErtsPollSet ps,
HARDTRACEF(("In erts_poll_wait"));
ERTS_POLLSET_LOCK(ps);
- if (!erts_atomic_read(&ps->sys_io_ready) && ps->restore_events) {
+ if (!is_io_ready(ps) && ps->restore_events) {
HARDDEBUGF(("Restore events: %d",ps->num_waiters));
ps->restore_events = 0;
for (i = 0; i < ps->num_waiters; ++i) {
@@ -1102,7 +1118,7 @@ int erts_poll_wait(ErtsPollSet ps,
if (w->highwater != w->active_events) {
HARDDEBUGF(("Oups!"));
/* Oups, got signalled before we took the lock, can't reset */
- if(erts_atomic_read(&ps->sys_io_ready) == 0) {
+ if(!is_io_ready(ps)) {
erl_exit(1,"Internal error: "
"Inconsistent io structures in erl_poll.\n");
}
@@ -1127,39 +1143,27 @@ int erts_poll_wait(ErtsPollSet ps,
no_fds = ERTS_POLL_MAX_RES;
#endif
+ timeout = poll_wait_timeout(ps, tvp);
- ResetEvent(ps->event_io_ready);
- ERTS_POLLSET_UNSET_POLLER_WOKEN(ps);
-
-#ifdef ERTS_SMP
- if (ERTS_POLLSET_IS_INTERRUPTED(ps)) {
- /* Interrupt use zero timeout */
- itv.tv_sec = 0;
- itv.tv_usec = 0;
- tvp = &itv;
- }
-#endif
-
- timeout = tvp->tv_sec * 1000 + tvp->tv_usec / 1000;
/*HARDDEBUGF(("timeout = %ld",(long) timeout));*/
- erts_smp_atomic_set(&ps->timeout, timeout);
- if (timeout > 0 && ! erts_atomic_read(&ps->sys_io_ready) && ! erts_atomic_read(&break_waiter_state)) {
+ if (timeout > 0 && !erts_atomic32_read(&break_waiter_state)) {
HANDLE harr[2] = {ps->event_io_ready, break_happened_event};
int num_h = 2;
- HARDDEBUGF(("Start waiting %d [%d]",num_h, (long) timeout));
+ HARDDEBUGF(("Start waiting %d [%d]",num_h, (int) timeout));
ERTS_POLLSET_UNLOCK(ps);
WaitForMultipleObjects(num_h, harr, FALSE, timeout);
ERTS_POLLSET_LOCK(ps);
- HARDDEBUGF(("Stop waiting %d [%d]",num_h, (long) timeout));
+ HARDDEBUGF(("Stop waiting %d [%d]",num_h, (int) timeout));
+ woke_up(ps);
}
ERTS_UNSET_BREAK_REQUESTED;
- if(erts_atomic_read(&break_waiter_state)) {
+ if(erts_atomic32_read(&break_waiter_state)) {
erts_mtx_lock(&break_waiter_lock);
- break_state = erts_atomic_read(&break_waiter_state);
- erts_atomic_set(&break_waiter_state,0);
+ break_state = erts_atomic32_read(&break_waiter_state);
+ erts_atomic32_set(&break_waiter_state,0);
ResetEvent(break_happened_event);
erts_mtx_unlock(&break_waiter_lock);
switch (break_state) {
@@ -1174,15 +1178,13 @@ int erts_poll_wait(ErtsPollSet ps,
}
}
- ERTS_POLLSET_SET_POLLER_WOKEN(ps);
-
- if (!erts_atomic_read(&ps->sys_io_ready)) {
- res = EINTR;
- HARDDEBUGF(("EINTR!"));
- goto done;
+ res = wakeup_cause(ps);
+ if (res != 0) {
+ HARDDEBUGF(("%s!", res == EINTR ? "EINTR" : "ETIMEDOUT"));
+ goto done;
}
- erts_atomic_set(&ps->sys_io_ready,0);
+ reset_io_ready(ps);
n = ps->num_waiters;
@@ -1204,9 +1206,9 @@ int erts_poll_wait(ErtsPollSet ps,
if (num >= no_fds) {
w->highwater=j+1;
erts_mtx_unlock(&w->mtx);
- /* This might mean we still have data to report, set
- back the global flag! */
- erts_atomic_set(&ps->sys_io_ready,1);
+ /* This might mean we still have data to report,
+ restore flag indicating I/O ready! */
+ restore_io_ready(ps);
HARDDEBUGF(("To many FD's to report!"));
goto done;
}
@@ -1228,7 +1230,7 @@ int erts_poll_wait(ErtsPollSet ps,
erts_mtx_unlock(&w->mtx);
}
done:
- erts_smp_atomic_set(&ps->timeout, ERTS_AINT_T_MAX);
+ erts_smp_atomic32_set(&ps->timeout, ERTS_AINT32_T_MAX);
*len = num;
ERTS_POLLSET_UNLOCK(ps);
HARDTRACEF(("Out erts_poll_wait"));
@@ -1306,15 +1308,13 @@ ErtsPollSet erts_poll_create_pollset(void)
ps->standby_wait_counter = 0;
ps->event_io_ready = CreateManualEvent(FALSE);
ps->standby_wait_event = CreateManualEvent(FALSE);
- erts_atomic_init(&ps->sys_io_ready,0);
ps->restore_events = 0;
+ erts_atomic32_init(&ps->wakeup_state, ERTS_POLL_NOT_WOKEN);
#ifdef ERTS_SMP
- erts_smp_atomic_init(&ps->woken, 0);
erts_smp_mtx_init(&ps->mtx, "pollset");
- erts_smp_atomic_init(&ps->interrupt, 0);
#endif
- erts_smp_atomic_init(&ps->timeout, ERTS_AINT_T_MAX);
+ erts_smp_atomic32_init(&ps->timeout, ERTS_AINT32_T_MAX);
HARDTRACEF(("Out erts_poll_create_pollset"));
return ps;
@@ -1366,7 +1366,7 @@ void erts_poll_init(void)
erts_mtx_init(&break_waiter_lock,"break_waiter_lock");
break_happened_event = CreateManualEvent(FALSE);
- erts_atomic_init(&break_waiter_state, 0);
+ erts_atomic32_init(&break_waiter_state, 0);
erts_thr_create(&thread, &break_waiter, NULL, NULL);
ERTS_UNSET_BREAK_REQUESTED;
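
The win32 pollset applies the same idea with one extra state, since the event object is also used to signal I/O readiness; a sketch of the states handled by wake_poller(), woke_up() and wakeup_cause() above:

    /*
     * wakeup_state on win32:
     *
     *   ERTS_POLL_NOT_WOKEN       - nothing has happened yet
     *   ERTS_POLL_WOKEN_IO_READY  - a waiter thread called notify_io_ready();
     *                               erts_poll_wait() goes on to collect events
     *   ERTS_POLL_WOKEN_INTR      - erts_poll_interrupt() fired; the wait returns EINTR
     *   ERTS_POLL_WOKEN_TIMEDOUT  - the wait timed out; the wait returns ETIMEDOUT
     *
     * wake_poller() calls SetEvent(ps->event_io_ready) only when the state it
     * replaced was ERTS_POLL_NOT_WOKEN, and reset_interrupt() clears the INTR
     * and TIMEDOUT states while preserving IO_READY, so pending I/O is never
     * dropped.
     */
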
diff --git a/erts/emulator/sys/win32/sys_interrupt.c b/erts/emulator/sys/win32/sys_interrupt.c
index 262f84babc..943c338794 100644
--- a/erts/emulator/sys/win32/sys_interrupt.c
+++ b/erts/emulator/sys/win32/sys_interrupt.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1997-2010. All Rights Reserved.
+ * Copyright Ericsson AB 1997-2011. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -31,11 +31,11 @@
#endif
#ifdef ERTS_SMP
-erts_smp_atomic_t erts_break_requested;
+erts_smp_atomic32_t erts_break_requested;
#define ERTS_SET_BREAK_REQUESTED \
- erts_smp_atomic_set(&erts_break_requested, (erts_aint_t) 1)
+ erts_smp_atomic32_set(&erts_break_requested, (erts_aint32_t) 1)
#define ERTS_UNSET_BREAK_REQUESTED \
- erts_smp_atomic_set(&erts_break_requested, (erts_aint_t) 0)
+ erts_smp_atomic32_set(&erts_break_requested, (erts_aint32_t) 0)
#else
volatile int erts_break_requested = 0;
#define ERTS_SET_BREAK_REQUESTED (erts_break_requested = 1)
diff --git a/erts/emulator/utils/loaded b/erts/emulator/utils/loaded
new file mode 100644
index 0000000000..99a66e7fdb
--- /dev/null
+++ b/erts/emulator/utils/loaded
@@ -0,0 +1,44 @@
+%% -*- erlang -*-
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 1998-2010. All Rights Reserved.
+%%
+%% The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved online at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% %CopyrightEnd%
+%%
+
+%% Run like:
+%% $ERL_TOP/bin/escript erts/emulator/utils/loaded
+
+-mode(compile).
+
+main(_) ->
+ LibDir = code:lib_dir(),
+ io:format("Library root is ~s\n", [LibDir]),
+ Wc = filename:join(LibDir, "*/ebin/*.beam"),
+ Beams = filelib:wildcard(Wc),
+ BeamFileSize = lists:sum([filelib:file_size(Beam) || Beam <- Beams]),
+ io:format("~w BEAM files containing ~w bytes\n",
+ [length(Beams),BeamFileSize]),
+ Ms = [list_to_atom(filename:rootname(filename:basename(Beam))) ||
+ Beam <- Beams],
+ [{module,_} = code:ensure_loaded(M) || M <- Ms],
+ <<"Current code: ",T/binary>> = erlang:system_info(loaded),
+ Digits = grab_digits(T),
+ io:format("~w modules comprising ~s words when loaded\n",
+ [length(Ms),Digits]).
+
+grab_digits(<<H,T/binary>>) when $0 =< H, H =< $9 ->
+ [H|grab_digits(T)];
+grab_digits(<<$\n,_/binary>>) -> [].
diff --git a/lib/compiler/src/v3_kernel_pp.erl b/lib/compiler/src/v3_kernel_pp.erl
index 9bd13f7032..e363a5387a 100644
--- a/lib/compiler/src/v3_kernel_pp.erl
+++ b/lib/compiler/src/v3_kernel_pp.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1999-2010. All Rights Reserved.
+%% Copyright Ericsson AB 1999-2011. All Rights Reserved.
%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
@@ -20,10 +20,12 @@
-module(v3_kernel_pp).
--include("v3_kernel.hrl").
-
-export([format/1]).
+%%-define(INCLUDE_ANNOTATIONS, 1).
+
+-include("v3_kernel.hrl").
+
%% These are "internal" structures in sys_kernel which are here for
%% debugging purposes.
-record(iset, {anno=[],vars,arg,body}).
@@ -62,22 +64,21 @@ format(Node, Ctxt) ->
end)
end.
-format_anno(Anno, Ctxt0, ObjFun) ->
- case annotations_enabled() of
- true ->
- Ctxt1 = ctxt_bump_indent(Ctxt0, 1),
- ["( ",
- ObjFun(Ctxt0),
- nl_indent(Ctxt1),
- "-| ",io_lib:write(Anno),
- " )"];
- false ->
- ObjFun(Ctxt0)
- end.
-%% By default, don't show annotations since they clutter up the output.
-annotations_enabled() ->
- false.
+-ifndef(INCLUDE_ANNOTATIONS).
+%% Don't include annotations (for readability).
+format_anno(_Anno, Ctxt, ObjFun) ->
+ ObjFun(Ctxt).
+-else.
+%% Include annotations (for debugging of annotations).
+format_anno(Anno, Ctxt0, ObjFun) ->
+ Ctxt1 = ctxt_bump_indent(Ctxt0, 1),
+ ["( ",
+ ObjFun(Ctxt0),
+ nl_indent(Ctxt1),
+ "-| ",io_lib:write(Anno),
+ " )"].
+-endif.
%% format_1(Kexpr, Context) -> string().
diff --git a/lib/kernel/src/inet6_tcp_dist.erl b/lib/kernel/src/inet6_tcp_dist.erl
index fab00bbb9f..b9c4fa607c 100644
--- a/lib/kernel/src/inet6_tcp_dist.erl
+++ b/lib/kernel/src/inet6_tcp_dist.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1997-2010. All Rights Reserved.
+%% Copyright Ericsson AB 1997-2011. All Rights Reserved.
%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
@@ -162,8 +162,8 @@ do_accept(Kernel, AcceptPid, Socket, MyNode, Allowed, SetupTime) ->
inet:getll(S)
end,
f_address = fun get_remote_id/2,
- mf_tick = {?MODULE, tick},
- mf_getstat = {?MODULE,getstat}
+ mf_tick = fun ?MODULE:tick/1,
+ mf_getstat = fun ?MODULE:getstat/1
},
dist_util:handshake_other_started(HSData);
{false,IP} ->
diff --git a/lib/stdlib/test/supervisor_SUITE.erl b/lib/stdlib/test/supervisor_SUITE.erl
index 8aed93ce12..6e927da2ab 100644
--- a/lib/stdlib/test/supervisor_SUITE.erl
+++ b/lib/stdlib/test/supervisor_SUITE.erl
@@ -1310,7 +1310,7 @@ count_children_memory(Config) when is_list(Config) ->
%% count_children consumes memory using an accumulator function,
%% but the space can be reclaimed incrementally,
- %% which_children may generate garbage that will reclaimed later.
+ %% which_children may generate garbage that will be reclaimed later.
case (Size5 =< Size4) of
true -> ok;
false ->
@@ -1338,8 +1338,8 @@ count_children_allocator_test(MemoryState) ->
lists:all(fun(State) -> State == {e, true} end, AllocStates).
%-------------------------------------------------------------------------
do_not_save_start_parameters_for_temporary_children(doc) ->
- ["Temporary children shall not be restarted so they should not"
- "save start parameters, as it potentially can"
+ ["Temporary children shall not be restarted so they should not "
+ "save start parameters, as it potentially can "
"take up a huge amount of memory for no purpose."];
do_not_save_start_parameters_for_temporary_children(suite) ->
[];