Diffstat:
 -rw-r--r--  erts/doc/src/erlang.xml                                |   8
 -rw-r--r--  erts/doc/src/notes.xml                                 |   7
 -rw-r--r--  erts/emulator/beam/beam_emu.c                          | 230
 -rw-r--r--  erts/emulator/beam/erl_bif_info.c                      |   4
 -rw-r--r--  erts/emulator/beam/erl_gc.c                            |  39
 -rw-r--r--  erts/emulator/beam/erl_hl_timer.c                      |   2
 -rw-r--r--  erts/emulator/beam/erl_lock_check.c                    |   1
 -rw-r--r--  erts/emulator/beam/erl_process.c                       | 229
 -rw-r--r--  erts/emulator/beam/erl_process.h                       |   2
 -rw-r--r--  erts/emulator/test/bif_SUITE.erl                       | 173
 -rw-r--r--  erts/emulator/test/call_trace_SUITE.erl                |   6
 -rw-r--r--  erts/emulator/test/process_SUITE.erl                   |  11
 -rw-r--r--  erts/etc/common/heart.c                                |  40
 -rw-r--r--  erts/etc/unix/etp-commands.in                          |  12
 -rw-r--r--  lib/inets/src/http_client/httpc.erl                    |  92
 -rw-r--r--  lib/inets/src/http_client/httpc_handler.erl            | 674
 -rw-r--r--  lib/inets/test/httpc_SUITE.erl                         |  58
 -rw-r--r--  lib/kernel/doc/src/heart.xml                           |  17
 -rw-r--r--  lib/kernel/src/heart.erl                               |  15
 -rw-r--r--  lib/observer/src/crashdump_viewer.erl                  |  21
 -rw-r--r--  lib/observer/src/etop.erl                              |  24
 -rw-r--r--  lib/observer/src/etop_txt.erl                          |  24
 -rw-r--r--  lib/observer/src/observer_lib.erl                      |  14
 -rw-r--r--  lib/observer/src/observer_pro_wx.erl                   |   8
 -rw-r--r--  lib/runtime_tools/src/observer_backend.erl             |   7
 -rwxr-xr-x  lib/sasl/test/release_handler_SUITE_data/start         |   3
 -rwxr-xr-x  lib/sasl/test/release_handler_SUITE_data/start_client  |   3
 -rw-r--r--  lib/stdlib/test/shell_SUITE.erl                        |   2
 -rw-r--r--  system/doc/efficiency_guide/advanced.xml               |  12
 -rw-r--r--  system/doc/tutorial/c_portdriver.xmlsrc                |   4
 30 files changed, 992 insertions, 750 deletions
diff --git a/erts/doc/src/erlang.xml b/erts/doc/src/erlang.xml
index 9646953518..112682d713 100644
--- a/erts/doc/src/erlang.xml
+++ b/erts/doc/src/erlang.xml
@@ -4899,7 +4899,9 @@ RealSystem = system + MissedSystem</code>
<p>Returns the current call stack back-trace (<em>stacktrace</em>)
of the process. The stack has the same format as returned by
<seealso marker="#get_stacktrace/0">
- <c>erlang:get_stacktrace/0</c></seealso>.</p>
+ <c>erlang:get_stacktrace/0</c></seealso>. The depth of the
+ stacktrace is truncated according to the <c>backtrace_depth</c>
+ system flag setting.</p>
</item>
<tag><c>{dictionary, <anno>Dictionary</anno>}</c></tag>
<item>
@@ -6611,7 +6613,9 @@ ok
<fsummary>Set system flag <c>backtrace_depth</c>.</fsummary>
<desc>
<p>Sets the maximum depth of call stack back-traces in the
- exit reason element of <c>'EXIT'</c> tuples.</p>
+ exit reason element of <c>'EXIT'</c> tuples. The flag
+ also limits the stacktrace depth returned by <c>process_info</c>
+          item <c>current_stacktrace</c>.</p>
<p>Returns the old value of the flag.</p>
</desc>
</func>
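
For illustration, a minimal shell sketch (variable names assumed) of how the
backtrace_depth system flag now bounds the current_stacktrace process_info
item described above:

    %% Limit back-traces to a single frame and observe the effect.
    Old = erlang:system_flag(backtrace_depth, 1),
    {current_stacktrace, Frames} =
        erlang:process_info(self(), current_stacktrace),
    true = length(Frames) =< 1,
    erlang:system_flag(backtrace_depth, Old).  %% restore the previous depth
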
diff --git a/erts/doc/src/notes.xml b/erts/doc/src/notes.xml
index da6190b685..11777f0014 100644
--- a/erts/doc/src/notes.xml
+++ b/erts/doc/src/notes.xml
@@ -145,13 +145,6 @@
<p>
Own Id: OTP-14051</p>
</item>
- <item>
- <p>
- Fixed a number of bugs that caused faulty stack-traces to
- be created on exception when a process was traced.</p>
- <p>
- Own Id: OTP-14055</p>
- </item>
</list>
</section>
diff --git a/erts/emulator/beam/beam_emu.c b/erts/emulator/beam/beam_emu.c
index 59a9ea1417..66bccedd94 100644
--- a/erts/emulator/beam/beam_emu.c
+++ b/erts/emulator/beam/beam_emu.c
@@ -1047,9 +1047,11 @@ static BeamInstr* handle_error(Process* c_p, BeamInstr* pc,
Eterm* reg, BifFunction bf) NOINLINE;
static BeamInstr* call_error_handler(Process* p, BeamInstr* ip,
Eterm* reg, Eterm func) NOINLINE;
-static BeamInstr* fixed_apply(Process* p, Eterm* reg, Uint arity) NOINLINE;
+static BeamInstr* fixed_apply(Process* p, Eterm* reg, Uint arity,
+ BeamInstr *I, Uint offs) NOINLINE;
static BeamInstr* apply(Process* p, Eterm module, Eterm function,
- Eterm args, Eterm* reg) NOINLINE;
+ Eterm args, Eterm* reg,
+ BeamInstr *I, Uint offs) NOINLINE;
static BeamInstr* call_fun(Process* p, int arity,
Eterm* reg, Eterm args) NOINLINE;
static BeamInstr* apply_fun(Process* p, Eterm fun,
@@ -3194,7 +3196,7 @@ do { \
OpCase(i_apply): {
BeamInstr *next;
HEAVY_SWAPOUT;
- next = apply(c_p, r(0), x(1), x(2), reg);
+ next = apply(c_p, r(0), x(1), x(2), reg, NULL, 0);
HEAVY_SWAPIN;
if (next != NULL) {
SET_CP(c_p, I+1);
@@ -3208,7 +3210,7 @@ do { \
OpCase(i_apply_last_P): {
BeamInstr *next;
HEAVY_SWAPOUT;
- next = apply(c_p, r(0), x(1), x(2), reg);
+ next = apply(c_p, r(0), x(1), x(2), reg, I, Arg(0));
HEAVY_SWAPIN;
if (next != NULL) {
SET_CP(c_p, (BeamInstr *) E[0]);
@@ -3223,7 +3225,7 @@ do { \
OpCase(i_apply_only): {
BeamInstr *next;
HEAVY_SWAPOUT;
- next = apply(c_p, r(0), x(1), x(2), reg);
+ next = apply(c_p, r(0), x(1), x(2), reg, I, 0);
HEAVY_SWAPIN;
if (next != NULL) {
SET_I(next);
@@ -3237,7 +3239,7 @@ do { \
BeamInstr *next;
HEAVY_SWAPOUT;
- next = fixed_apply(c_p, reg, Arg(0));
+ next = fixed_apply(c_p, reg, Arg(0), NULL, 0);
HEAVY_SWAPIN;
if (next != NULL) {
SET_CP(c_p, I+2);
@@ -3252,7 +3254,7 @@ do { \
BeamInstr *next;
HEAVY_SWAPOUT;
- next = fixed_apply(c_p, reg, Arg(0));
+ next = fixed_apply(c_p, reg, Arg(0), I, Arg(1));
HEAVY_SWAPIN;
if (next != NULL) {
SET_CP(c_p, (BeamInstr *) E[0]);
@@ -5844,12 +5846,13 @@ save_stacktrace(Process* c_p, BeamInstr* pc, Eterm* reg, BifFunction bf,
s->depth = 0;
/*
- * If the failure was in a BIF other than 'error', 'exit' or
- * 'throw', find the bif-table index and save the argument
- * registers by consing up an arglist.
+ * If the failure was in a BIF other than 'error/1', 'error/2',
+ * 'exit/1' or 'throw/1', find the bif-table index and save the
+ * argument registers by consing up an arglist.
*/
- if (bf != NULL && bf != error_1 && bf != error_2 &&
- bf != exit_1 && bf != throw_1) {
+ if (bf != NULL && bf != error_1 && bf != error_2 && bf != exit_1
+ && bf != throw_1 && bf != wrap_error_1 && bf != wrap_error_2
+ && bf != wrap_exit_1 && bf != wrap_throw_1) {
int i;
int a = 0;
for (i = 0; i < BIF_SIZE; i++) {
@@ -5945,12 +5948,30 @@ erts_save_stacktrace(Process* p, struct StackTrace* s, int depth)
p->cp) {
/* Cannot follow cp here - code may be unloaded */
BeamInstr *cpp = p->cp;
+ int trace_cp;
if (cpp == beam_exception_trace || cpp == beam_return_trace) {
/* Skip return_trace parameters */
ptr += 2;
+ trace_cp = 1;
} else if (cpp == beam_return_to_trace) {
/* Skip return_to_trace parameters */
ptr += 1;
+ trace_cp = 1;
+ }
+ else {
+ trace_cp = 0;
+ }
+ if (trace_cp && s->pc == cpp) {
+ /*
+ * If process 'cp' points to a return/exception trace
+ * instruction and 'cp' has been saved as 'pc' in
+ * stacktrace, we need to update 'pc' in stacktrace
+ * with the actual 'cp' located on the top of the
+ * stack; otherwise, we will lose the top stackframe
+ * when building the stack trace.
+ */
+ ASSERT(is_CP(p->stop[0]));
+ s->pc = cp_val(p->stop[0]);
}
}
while (ptr < STACK_START(p) && depth > 0) {
@@ -6070,12 +6091,15 @@ build_stacktrace(Process* c_p, Eterm exc) {
erts_set_current_function(&fi, s->current);
}
+ depth = s->depth;
/*
- * If fi.current is still NULL, default to the initial function
+ * If fi.current is still NULL, and we have no
+ * stack at all, default to the initial function
* (e.g. spawn_link(erlang, abs, [1])).
*/
if (fi.current == NULL) {
- erts_set_current_function(&fi, c_p->u.initial);
+ if (depth <= 0)
+ erts_set_current_function(&fi, c_p->u.initial);
args = am_true; /* Just in case */
} else {
args = get_args_from_exc(exc);
@@ -6085,10 +6109,9 @@ build_stacktrace(Process* c_p, Eterm exc) {
* Look up all saved continuation pointers and calculate
* needed heap space.
*/
- depth = s->depth;
stk = stkp = (FunctionInfo *) erts_alloc(ERTS_ALC_T_TMP,
depth*sizeof(FunctionInfo));
- heap_size = fi.needed + 2;
+ heap_size = fi.current ? fi.needed + 2 : 0;
for (i = 0; i < depth; i++) {
erts_lookup_function_info(stkp, s->trace[i], 1);
if (stkp->current) {
@@ -6107,8 +6130,10 @@ build_stacktrace(Process* c_p, Eterm exc) {
res = CONS(hp, mfa, res);
hp += 2;
}
- hp = erts_build_mfa_item(&fi, hp, args, &mfa);
- res = CONS(hp, mfa, res);
+ if (fi.current) {
+ hp = erts_build_mfa_item(&fi, hp, args, &mfa);
+ res = CONS(hp, mfa, res);
+ }
erts_free(ERTS_ALC_T_TMP, (void *) stk);
return res;
@@ -6205,8 +6230,107 @@ apply_setup_error_handler(Process* p, Eterm module, Eterm function, Uint arity,
return ep;
}
+static ERTS_INLINE void
+apply_bif_error_adjustment(Process *p, Export *ep,
+ Eterm *reg, Uint arity,
+ BeamInstr *I, Uint stack_offset)
+{
+ /*
+ * I is only set when the apply is a tail call, i.e.,
+ * from the instructions i_apply_only, i_apply_last_P,
+ * and apply_last_IP.
+ */
+ if (I
+ && ep->code[3] == (BeamInstr) em_apply_bif
+ && (ep == bif_export[BIF_error_1]
+ || ep == bif_export[BIF_error_2]
+ || ep == bif_export[BIF_exit_1]
+ || ep == bif_export[BIF_throw_1])) {
+ /*
+ * We are about to tail apply one of the BIFs
+ * erlang:error/1, erlang:error/2, erlang:exit/1,
+ * or erlang:throw/1. Error handling of these BIFs is
+ * special!
+ *
+ * We need 'p->cp' to point into the calling
+ * function when handling the error after the BIF has
+ * been applied. This in order to get the topmost
+ * stackframe correct. Without the following adjustment,
+     * 'p->cp' will point into the function that called the
+     * current function when handling the error. We add a
+     * dummy stackframe in order to achieve this.
+ *
+ * Note that these BIFs unconditionally will cause
+ * an exception to be raised. That is, our modifications
+ * of 'p->cp' as well as the stack will be corrected by
+ * the error handling code.
+ *
+ * If we find an exception/return-to trace continuation
+ * pointer as the topmost continuation pointer, we do not
+ * need to do anything since the information already will
+ * be available for generation of the stacktrace.
+ */
+ int apply_only = stack_offset == 0;
+ BeamInstr *cpp;
+
+ if (apply_only) {
+ ASSERT(p->cp != NULL);
+ cpp = p->cp;
+ }
+ else {
+ ASSERT(is_CP(p->stop[0]));
+ cpp = cp_val(p->stop[0]);
+ }
+
+ if (cpp != beam_exception_trace
+ && cpp != beam_return_trace
+ && cpp != beam_return_to_trace) {
+ Uint need = stack_offset /* bytes */ / sizeof(Eterm);
+ if (need == 0)
+ need = 1; /* i_apply_only */
+ if (p->stop - p->htop < need)
+ erts_garbage_collect(p, (int) need, reg, arity+1);
+ p->stop -= need;
+
+ if (apply_only) {
+ /*
+ * Called from the i_apply_only instruction.
+ *
+                 * 'p->cp' contains a continuation pointer pointing
+                 * into the function that called the current function.
+                 * We push that continuation pointer onto the stack,
+                 * and set 'p->cp' to point into the current function.
+ */
+
+ p->stop[0] = make_cp(p->cp);
+ p->cp = I;
+ }
+ else {
+ /*
+                 * Called from an i_apply_last_P or apply_last_IP
+                 * instruction.
+                 *
+                 * After we return, the calling instruction will read
+                 * a continuation pointer from the stack, write it to
+                 * 'p->cp', and then remove the topmost stackframe of
+                 * size 'stack_offset'.
+ *
+                 * We have sized the dummy stackframe so that it
+                 * will be removed by the instruction we are
+                 * currently executing, leaving the stackframe that
+                 * normally would have been removed intact.
+ *
+ */
+ p->stop[0] = make_cp(I);
+ }
+ }
+ }
+}
+
static BeamInstr*
-apply(Process* p, Eterm module, Eterm function, Eterm args, Eterm* reg)
+apply(
+Process* p, Eterm module, Eterm function, Eterm args, Eterm* reg,
+BeamInstr *I, Uint stack_offset)
{
int arity;
Export* ep;
@@ -6231,21 +6355,54 @@ apply(Process* p, Eterm module, Eterm function, Eterm args, Eterm* reg)
return 0;
}
- /* The module argument may be either an atom or an abstract module
- * (currently implemented using tuples, but this might change).
- */
- this = THE_NON_VALUE;
- if (is_not_atom(module)) {
- Eterm* tp;
+ while (1) {
+ Eterm m, f, a;
+ /* The module argument may be either an atom or an abstract module
+ * (currently implemented using tuples, but this might change).
+ */
+ this = THE_NON_VALUE;
+ if (is_not_atom(module)) {
+ Eterm* tp;
+
+ if (is_not_tuple(module)) goto error;
+ tp = tuple_val(module);
+ if (arityval(tp[0]) < 1) goto error;
+ this = module;
+ module = tp[1];
+ if (is_not_atom(module)) goto error;
+ }
- if (is_not_tuple(module)) goto error;
- tp = tuple_val(module);
- if (arityval(tp[0]) < 1) goto error;
- this = module;
- module = tp[1];
- if (is_not_atom(module)) goto error;
+ if (module != am_erlang || function != am_apply)
+ break;
+
+ /* Adjust for multiple apply of apply/3... */
+
+ a = args;
+ if (is_list(a)) {
+ Eterm *consp = list_val(a);
+ m = CAR(consp);
+ a = CDR(consp);
+ if (is_list(a)) {
+ consp = list_val(a);
+ f = CAR(consp);
+ a = CDR(consp);
+ if (is_list(a)) {
+ consp = list_val(a);
+ a = CAR(consp);
+ if (is_nil(CDR(consp))) {
+ /* erlang:apply/3 */
+ module = m;
+ function = f;
+ args = a;
+ if (is_not_atom(f))
+ goto error;
+ continue;
+ }
+ }
+ }
+ }
+ break; /* != erlang:apply/3 */
}
-
/*
* Walk down the 3rd parameter of apply (the argument list) and copy
* the parameters to the x registers (reg[]). If the module argument
@@ -6283,12 +6440,14 @@ apply(Process* p, Eterm module, Eterm function, Eterm args, Eterm* reg)
} else if (ERTS_PROC_GET_SAVED_CALLS_BUF(p)) {
save_calls(p, ep);
}
+ apply_bif_error_adjustment(p, ep, reg, arity, I, stack_offset);
DTRACE_GLOBAL_CALL_FROM_EXPORT(p, ep);
return ep->addressv[erts_active_code_ix()];
}
static BeamInstr*
-fixed_apply(Process* p, Eterm* reg, Uint arity)
+fixed_apply(Process* p, Eterm* reg, Uint arity,
+ BeamInstr *I, Uint stack_offset)
{
Export* ep;
Eterm module;
@@ -6318,6 +6477,10 @@ fixed_apply(Process* p, Eterm* reg, Uint arity)
if (is_not_atom(module)) goto error;
++arity;
}
+
+ /* Handle apply of apply/3... */
+ if (module == am_erlang && function == am_apply && arity == 3)
+ return apply(p, reg[0], reg[1], reg[2], reg, I, stack_offset);
/*
* Get the index into the export table, or failing that the export
@@ -6332,6 +6495,7 @@ fixed_apply(Process* p, Eterm* reg, Uint arity)
} else if (ERTS_PROC_GET_SAVED_CALLS_BUF(p)) {
save_calls(p, ep);
}
+ apply_bif_error_adjustment(p, ep, reg, arity, I, stack_offset);
DTRACE_GLOBAL_CALL_FROM_EXPORT(p, ep);
return ep->addressv[erts_active_code_ix()];
}
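
A minimal Erlang-level sketch (module and function names are assumed for
illustration) of the behaviour that apply_bif_error_adjustment() preserves:
the function that tail-applies erlang:error/1 remains the topmost frame of
the resulting stacktrace, matching the expectations added to bif_SUITE
further down in this diff:

    -module(t).
    -export([go/0]).

    go() ->
        try boom()
        catch
            error:oops ->
                %% The caller of the applied error/1 is the top frame.
                [{t,boom,_,_} | _] = erlang:get_stacktrace()
        end.

    boom() ->
        apply(erlang, error, [oops]).    %% tail call to erlang:error/1
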
diff --git a/erts/emulator/beam/erl_bif_info.c b/erts/emulator/beam/erl_bif_info.c
index 735aabbee3..88a052cad7 100644
--- a/erts/emulator/beam/erl_bif_info.c
+++ b/erts/emulator/beam/erl_bif_info.c
@@ -1672,11 +1672,11 @@ current_stacktrace(Process* p, Process* rp, Eterm** hpp)
Eterm mfa;
Eterm res = NIL;
- depth = 8;
+ depth = erts_backtrace_depth;
sz = offsetof(struct StackTrace, trace) + sizeof(BeamInstr *)*depth;
s = (struct StackTrace *) erts_alloc(ERTS_ALC_T_TMP, sz);
s->depth = 0;
- if (rp->i) {
+ if (depth > 0 && rp->i) {
s->trace[s->depth++] = rp->i;
depth--;
}
diff --git a/erts/emulator/beam/erl_gc.c b/erts/emulator/beam/erl_gc.c
index af799d09da..cb48b31b7e 100644
--- a/erts/emulator/beam/erl_gc.c
+++ b/erts/emulator/beam/erl_gc.c
@@ -176,6 +176,13 @@ typedef struct {
erts_smp_atomic32_t refc;
} ErtsGCInfoReq;
+#ifdef ERTS_DIRTY_SCHEDULERS
+static struct {
+ erts_mtx_t mtx;
+ ErtsGCInfo info;
+} dirty_gc;
+#endif
+
static ERTS_INLINE int
gc_cost(Uint gc_moved_live_words, Uint resize_moved_words)
{
@@ -259,6 +266,11 @@ erts_init_gc(void)
init_gc_info(&esdp->gc_info);
}
+#ifdef ERTS_DIRTY_SCHEDULERS
+ erts_smp_mtx_init(&dirty_gc.mtx, "dirty_gc_info");
+ init_gc_info(&dirty_gc.info);
+#endif
+
init_gcireq_alloc();
}
@@ -735,8 +747,19 @@ do_major_collection:
monitor_large_heap(p);
}
- esdp->gc_info.garbage_cols++;
- esdp->gc_info.reclaimed += reclaimed_now;
+#ifdef ERTS_DIRTY_SCHEDULERS
+ if (ERTS_SCHEDULER_IS_DIRTY(esdp)) {
+ erts_mtx_lock(&dirty_gc.mtx);
+ dirty_gc.info.garbage_cols++;
+ dirty_gc.info.reclaimed += reclaimed_now;
+ erts_mtx_unlock(&dirty_gc.mtx);
+ }
+ else
+#endif
+ {
+ esdp->gc_info.garbage_cols++;
+ esdp->gc_info.reclaimed += reclaimed_now;
+ }
FLAGS(p) &= ~F_FORCE_GC;
p->live_hf_end = ERTS_INVALID_HFRAG_PTR;
@@ -3017,6 +3040,18 @@ reply_gc_info(void *vgcirp)
reclaimed = esdp->gc_info.reclaimed;
garbage_cols = esdp->gc_info.garbage_cols;
+#ifdef ERTS_DIRTY_SCHEDULERS
+ /*
+     * Add the dirty schedulers' GC info when replying on the
+     * scheduler that requested the info.
+ */
+ if (gcirp->req_sched == esdp->no) {
+ erts_mtx_lock(&dirty_gc.mtx);
+ reclaimed += dirty_gc.info.reclaimed;
+ garbage_cols += dirty_gc.info.garbage_cols;
+ erts_mtx_unlock(&dirty_gc.mtx);
+ }
+#endif
sz = 0;
hpp = NULL;
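
At the Erlang level, the aggregation added to reply_gc_info() above means
that garbage collections performed on dirty schedulers are included in the
totals reported back to the requesting scheduler; a minimal sketch (assuming
the counters are read via statistics/1):

    %% Total number of GCs and words reclaimed, dirty schedulers included.
    {NumberOfGCs, WordsReclaimed, 0} = erlang:statistics(garbage_collection).
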
diff --git a/erts/emulator/beam/erl_hl_timer.c b/erts/emulator/beam/erl_hl_timer.c
index d29d079fc5..38bebc7576 100644
--- a/erts/emulator/beam/erl_hl_timer.c
+++ b/erts/emulator/beam/erl_hl_timer.c
@@ -2666,7 +2666,7 @@ erts_start_timer_callback(ErtsMonotonicTime tmo,
tmo);
twt = tmo < ERTS_TIMER_WHEEL_MSEC;
- if (esdp)
+ if (esdp && !ERTS_SCHEDULER_IS_DIRTY(esdp))
start_callback_timer(esdp,
twt,
timeout_pos,
diff --git a/erts/emulator/beam/erl_lock_check.c b/erts/emulator/beam/erl_lock_check.c
index 06266363b5..13a4b2cd93 100644
--- a/erts/emulator/beam/erl_lock_check.c
+++ b/erts/emulator/beam/erl_lock_check.c
@@ -129,6 +129,7 @@ static erts_lc_lock_order_t erts_lock_order[] = {
{ "run_queue", "address" },
#ifdef ERTS_DIRTY_SCHEDULERS
{ "dirty_run_queue_sleep_list", "address" },
+ { "dirty_gc_info", NULL },
#endif
{ "process_table", NULL },
{ "cpu_info", NULL },
diff --git a/erts/emulator/beam/erl_process.c b/erts/emulator/beam/erl_process.c
index b345c35a7e..cd0a2b2a21 100644
--- a/erts/emulator/beam/erl_process.c
+++ b/erts/emulator/beam/erl_process.c
@@ -2060,6 +2060,7 @@ handle_thr_prgr_later_op(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, int wait
#endif
for (lops = 0; lops < ERTS_MAX_THR_PRGR_LATER_OPS; lops++) {
ErtsThrPrgrLaterOp *lop = awdp->later_op.first;
+
if (!erts_thr_progress_has_reached_this(current, lop->later))
return aux_work & ~ERTS_SSI_AUX_WORK_THR_PRGR_LATER_OP;
awdp->later_op.first = lop->next;
@@ -6234,6 +6235,12 @@ check_dirty_enqueue_in_prio_queue(Process *c_p,
int queue;
erts_aint32_t dact, max_qbit;
+ /* Do not enqueue free process... */
+ if (actual & ERTS_PSFLG_FREE) {
+ *newp &= ~ERTS_PSFLGS_DIRTY_WORK;
+ return ERTS_ENQUEUE_NOT;
+ }
+
/* Termination should be done on an ordinary scheduler */
if ((*newp) & ERTS_PSFLG_EXITING) {
*newp &= ~ERTS_PSFLGS_DIRTY_WORK;
@@ -6426,6 +6433,9 @@ select_enqueue_run_queue(int enqueue, int enq_prio, Process *p, erts_aint32_t st
/*
* schedule_out_process() return with c_rq locked.
+ *
+ * Returns a non-zero value if the caller should decrease the
+ * reference count on the process when done with it...
*/
static ERTS_INLINE int
schedule_out_process(ErtsRunQueue *c_rq, erts_aint32_t state, Process *p,
@@ -6480,12 +6490,18 @@ schedule_out_process(ErtsRunQueue *c_rq, erts_aint32_t state, Process *p,
erts_smp_runq_lock(c_rq);
- return 0;
-
+#if !defined(ERTS_SMP)
+ /* Decrement refc if process struct is free... */
+ return !!(n & ERTS_PSFLG_FREE);
+#else
+ /* Decrement refc if scheduled out from dirty scheduler... */
+ return !is_normal_sched;
+#endif
}
else {
Process* sched_p;
+ ASSERT(!(n & ERTS_PSFLG_FREE));
ASSERT(!(n & ERTS_PSFLG_SUSPENDED) || (n & (ERTS_PSFLG_ACTIVE_SYS
| ERTS_PSFLG_DIRTY_ACTIVE_SYS)));
@@ -6501,11 +6517,14 @@ schedule_out_process(ErtsRunQueue *c_rq, erts_aint32_t state, Process *p,
erts_smp_runq_lock(runq);
+ if (is_normal_sched && sched_p == p && ERTS_RUNQ_IX_IS_DIRTY(runq->ix))
+ erts_proc_inc_refc(p); /* Needs to be done before enqueue_process() */
+
/* Enqueue the process */
enqueue_process(runq, (int) enq_prio, sched_p);
if (runq == c_rq)
- return 1;
+ return 0;
erts_smp_runq_unlock(runq);
@@ -6513,9 +6532,15 @@ schedule_out_process(ErtsRunQueue *c_rq, erts_aint32_t state, Process *p,
erts_smp_runq_lock(c_rq);
- return 1;
+ /*
+ * Decrement refc if process is scheduled out by a
+ * dirty scheduler, and we have not just scheduled
+ * the process using the ordinary process struct
+ * on a dirty run-queue again...
+ */
+ return !is_normal_sched && (sched_p != p
+ || !ERTS_RUNQ_IX_IS_DIRTY(runq->ix));
}
-
}
static ERTS_INLINE void
@@ -6530,8 +6555,17 @@ add2runq(int enqueue, erts_aint32_t prio,
if (runq) {
Process *sched_p;
- if (enqueue > 0)
+ if (enqueue > 0) {
sched_p = proc;
+ /*
+ * Refc on process struct (i.e. true struct,
+ * not proxy-struct) increased while in a
+ * dirty run-queue or executing on a dirty
+ * scheduler.
+ */
+ if (ERTS_RUNQ_IX_IS_DIRTY(runq->ix))
+ erts_proc_inc_refc(proc);
+ }
else {
Process *pxy;
@@ -7055,12 +7089,18 @@ typedef struct {
} ErtsSchdlrSspndResume;
static void
-schdlr_sspnd_resume_proc(Eterm pid)
+schdlr_sspnd_resume_proc(ErtsSchedType sched_type, Eterm pid)
{
- Process *p = erts_pid2proc(NULL, 0, pid, ERTS_PROC_LOCK_STATUS);
+ Process *p;
+ p = erts_pid2proc_opt(NULL, 0, pid, ERTS_PROC_LOCK_STATUS,
+ (sched_type != ERTS_SCHED_NORMAL
+ ? ERTS_P2P_FLG_INC_REFC
+ : 0));
if (p) {
resume_process(p, ERTS_PROC_LOCK_STATUS);
erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
+ if (sched_type != ERTS_SCHED_NORMAL)
+ erts_proc_dec_refc(p);
}
}
@@ -7069,17 +7109,20 @@ schdlr_sspnd_resume_procs(ErtsSchedType sched_type,
ErtsSchdlrSspndResume *resume)
{
if (is_internal_pid(resume->onln.chngr)) {
- schdlr_sspnd_resume_proc(resume->onln.chngr);
+ schdlr_sspnd_resume_proc(sched_type,
+ resume->onln.chngr);
resume->onln.chngr = NIL;
}
if (is_internal_pid(resume->onln.nxt)) {
- schdlr_sspnd_resume_proc(resume->onln.nxt);
+ schdlr_sspnd_resume_proc(sched_type,
+ resume->onln.nxt);
resume->onln.nxt = NIL;
}
while (resume->msb.chngrs) {
ErtsProcList *plp = resume->msb.chngrs;
resume->msb.chngrs = plp->next;
- schdlr_sspnd_resume_proc(plp->pid);
+ schdlr_sspnd_resume_proc(sched_type,
+ plp->pid);
proclist_destroy(plp);
}
}
@@ -7131,16 +7174,8 @@ suspend_scheduler(ErtsSchedulerData *esdp)
ASSERT(sched_type != ERTS_SCHED_NORMAL || no != 1);
- if (sched_type != ERTS_SCHED_NORMAL) {
- if (erts_smp_mtx_trylock(&schdlr_sspnd.mtx) == EBUSY) {
- erts_smp_runq_unlock(esdp->run_queue);
- erts_smp_mtx_lock(&schdlr_sspnd.mtx);
- erts_smp_runq_lock(esdp->run_queue);
- }
- if (schdlr_sspnd.msb.ongoing)
- evacuate_run_queue(esdp->run_queue, &sbp);
+ if (sched_type != ERTS_SCHED_NORMAL)
erts_smp_runq_unlock(esdp->run_queue);
- }
else {
evacuate_run_queue(esdp->run_queue, &sbp);
@@ -7153,8 +7188,8 @@ suspend_scheduler(ErtsSchedulerData *esdp)
sched_wall_time_change(esdp, 0);
- erts_smp_mtx_lock(&schdlr_sspnd.mtx);
}
+ erts_smp_mtx_lock(&schdlr_sspnd.mtx);
flgs = sched_prep_spin_suspended(ssi, ERTS_SSI_FLG_SUSPENDED);
if (flgs & ERTS_SSI_FLG_SUSPENDED) {
@@ -7198,15 +7233,18 @@ suspend_scheduler(ErtsSchedulerData *esdp)
(void) erts_proclist_fetch(&msb[i]->chngq, &end_plp);
/* resume processes that initiated the multi scheduling block... */
plp = msb[i]->chngq;
- while (plp) {
- erts_proclist_store_last(&msb[i]->blckrs,
- proclist_copy(plp));
- plp = plp->next;
- }
- if (end_plp)
+ if (plp) {
+ ASSERT(end_plp);
+ ASSERT(msb[i]->ongoing);
+ do {
+ erts_proclist_store_last(&msb[i]->blckrs,
+ proclist_copy(plp));
+ plp = plp->next;
+ } while (plp);
end_plp->next = resume.msb.chngrs;
- resume.msb.chngrs = msb[i]->chngq;
- msb[i]->chngq = NULL;
+ resume.msb.chngrs = msb[i]->chngq;
+ msb[i]->chngq = NULL;
+ }
}
}
}
@@ -7270,24 +7308,14 @@ suspend_scheduler(ErtsSchedulerData *esdp)
while (1) {
ErtsMonotonicTime current_time;
- erts_aint32_t qmask;
erts_aint32_t flgs;
- qmask = (ERTS_RUNQ_FLGS_GET(esdp->run_queue)
- & ERTS_RUNQ_FLGS_QMASK);
-
- if (sched_type != ERTS_SCHED_NORMAL) {
- if (qmask) {
- erts_smp_mtx_lock(&schdlr_sspnd.mtx);
- erts_smp_runq_lock(esdp->run_queue);
- if (schdlr_sspnd.msb.ongoing)
- evacuate_run_queue(esdp->run_queue, &sbp);
- erts_smp_runq_unlock(esdp->run_queue);
- erts_smp_mtx_unlock(&schdlr_sspnd.mtx);
- }
+ if (sched_type != ERTS_SCHED_NORMAL)
aux_work = 0;
- }
else {
+ erts_aint32_t qmask;
+ qmask = (ERTS_RUNQ_FLGS_GET(esdp->run_queue)
+ & ERTS_RUNQ_FLGS_QMASK);
aux_work = erts_atomic32_read_acqb(&ssi->aux_work);
@@ -7415,12 +7443,23 @@ suspend_scheduler(ErtsSchedulerData *esdp)
schdlr_sspnd_inc_nscheds(&schdlr_sspnd.active, sched_type);
changing = erts_smp_atomic32_read_nob(&schdlr_sspnd.changing);
- if ((changing & ERTS_SCHDLR_SSPND_CHNG_MSB)
- && schdlr_sspnd.online == schdlr_sspnd.active) {
- erts_smp_atomic32_read_band_nob(&schdlr_sspnd.changing,
- ~ERTS_SCHDLR_SSPND_CHNG_MSB);
- }
-
+ if (changing) {
+ if ((changing & ERTS_SCHDLR_SSPND_CHNG_MSB)
+ && !schdlr_sspnd.msb.ongoing
+ && schdlr_sspnd.online == schdlr_sspnd.active) {
+ erts_smp_atomic32_read_band_nob(&schdlr_sspnd.changing,
+ ~ERTS_SCHDLR_SSPND_CHNG_MSB);
+ }
+ if ((changing & ERTS_SCHDLR_SSPND_CHNG_NMSB)
+ && !schdlr_sspnd.nmsb.ongoing
+ && (schdlr_sspnd_get_nscheds(&schdlr_sspnd.online,
+ ERTS_SCHED_NORMAL)
+ == schdlr_sspnd_get_nscheds(&schdlr_sspnd.active,
+ ERTS_SCHED_NORMAL))) {
+ erts_smp_atomic32_read_band_nob(&schdlr_sspnd.changing,
+ ~ERTS_SCHDLR_SSPND_CHNG_NMSB);
+ }
+ }
ASSERT(no <= schdlr_sspnd_get_nscheds(&schdlr_sspnd.online, sched_type));
ASSERT((sched_type == ERTS_SCHED_NORMAL
? !(schdlr_sspnd.msb.ongoing|schdlr_sspnd.nmsb.ongoing)
@@ -7553,7 +7592,7 @@ abort_sched_onln_chng_waitq(Process *p)
erts_smp_mtx_unlock(&schdlr_sspnd.mtx);
if (is_internal_pid(resume))
- schdlr_sspnd_resume_proc(resume);
+ schdlr_sspnd_resume_proc(ERTS_SCHED_NORMAL, resume);
}
ErtsSchedSuspendResult
@@ -7951,21 +7990,20 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int normal
}
else { /* ------ UNBLOCK ------ */
if (p->flags & have_blckd_flg) {
- ErtsProcList *plps[2];
+ ErtsProcList **plpps[3] = {0};
ErtsProcList *plp;
- int limit = 0;
- plps[limit++] = erts_proclist_peek_first(msbp->blckrs);
- if (all)
- plps[limit++] = erts_proclist_peek_first(msbp->chngq);
+ plpps[0] = &msbp->blckrs;
+ if (all)
+ plpps[1] = &msbp->chngq;
- for (ix = 0; ix < limit; ix++) {
- plp = plps[ix];
+ for (ix = 0; plpps[ix]; ix++) {
+ plp = erts_proclist_peek_first(*plpps[ix]);
while (plp) {
ErtsProcList *tmp_plp = plp;
- plp = erts_proclist_peek_next(msbp->blckrs, plp);
+ plp = erts_proclist_peek_next(*plpps[ix], plp);
if (erts_proclist_same(tmp_plp, p)) {
- erts_proclist_remove(&msbp->blckrs, tmp_plp);
+ erts_proclist_remove(plpps[ix], tmp_plp);
proclist_destroy(tmp_plp);
if (!all)
break;
@@ -9587,40 +9625,45 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls)
esdp->reductions += reds;
- /* schedule_out_process() returns with rq locked! */
- schedule_out_process(rq, state, p, proxy_p, is_normal_sched);
- proxy_p = NULL;
+ {
+ int dec_refc;
- ERTS_PROC_REDUCTIONS_EXECUTED(esdp, rq,
- (int) ERTS_PSFLGS_GET_USR_PRIO(state),
- reds,
- actual_reds);
+ /* schedule_out_process() returns with rq locked! */
+ dec_refc = schedule_out_process(rq, state, p,
+ proxy_p, is_normal_sched);
+ proxy_p = NULL;
- esdp->current_process = NULL;
+ ERTS_PROC_REDUCTIONS_EXECUTED(esdp, rq,
+ (int) ERTS_PSFLGS_GET_USR_PRIO(state),
+ reds,
+ actual_reds);
+
+ esdp->current_process = NULL;
#ifdef ERTS_SMP
- p->scheduler_data = NULL;
+ p->scheduler_data = NULL;
#endif
- erts_smp_proc_unlock(p, (ERTS_PROC_LOCK_MAIN
- | ERTS_PROC_LOCK_STATUS
- | ERTS_PROC_LOCK_TRACE));
+ erts_smp_proc_unlock(p, (ERTS_PROC_LOCK_MAIN
+ | ERTS_PROC_LOCK_STATUS
+ | ERTS_PROC_LOCK_TRACE));
- ERTS_MSACC_SET_STATE_CACHED_M(ERTS_MSACC_STATE_OTHER);
+ ERTS_MSACC_SET_STATE_CACHED_M(ERTS_MSACC_STATE_OTHER);
- if (state & ERTS_PSFLG_FREE) {
- if (!is_normal_sched) {
- ASSERT(p->flags & F_DELAYED_DEL_PROC);
- erts_proc_dec_refc(p);
- }
- else {
#ifdef ERTS_SMP
- ASSERT(esdp->free_process == p);
- esdp->free_process = NULL;
-#else
- erts_proc_dec_refc(p);
+ if (state & ERTS_PSFLG_FREE) {
+ if (!is_normal_sched) {
+ ASSERT(p->flags & F_DELAYED_DEL_PROC);
+ }
+ else {
+ ASSERT(esdp->free_process == p);
+ esdp->free_process = NULL;
+ }
+ }
#endif
- }
- }
+
+ if (dec_refc)
+ erts_proc_dec_refc(p);
+ }
#ifdef ERTS_SMP
ASSERT(!esdp->free_process);
@@ -9896,8 +9939,12 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls)
if (!(state & ERTS_PSFLG_PROXY))
psflg_band_mask &= ~ERTS_PSFLG_IN_RUNQ;
else {
+ Eterm pid = p->common.id;
proxy_p = p;
- p = erts_proc_lookup_raw(proxy_p->common.id);
+ p = (is_normal_sched
+ ? erts_proc_lookup_raw(pid)
+ : erts_pid2proc_opt(NULL, 0, pid, 0,
+ ERTS_P2P_FLG_INC_REFC));
if (!p) {
free_proxy_proc(proxy_p);
proxy_p = NULL;
@@ -9929,6 +9976,7 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls)
| ERTS_PSFLG_FREE)))
#ifdef ERTS_DIRTY_SCHEDULERS
| (((state & (ERTS_PSFLG_RUNNING
+
| ERTS_PSFLG_FREE
| ERTS_PSFLG_RUNNING_SYS
| ERTS_PSFLG_EXITING))
@@ -9961,6 +10009,8 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls)
/* free and not queued by proxy */
erts_proc_dec_refc(p);
}
+ if (!is_normal_sched)
+ erts_proc_dec_refc(p);
goto pick_next_process;
}
state = new;
@@ -13091,15 +13141,14 @@ erts_continue_exit_process(Process *p)
}
#ifdef ERTS_DIRTY_SCHEDULERS
- if (a & (ERTS_PSFLG_DIRTY_RUNNING
- | ERTS_PSFLG_DIRTY_RUNNING_SYS)) {
+ if (a & (ERTS_PSFLG_DIRTY_RUNNING
+ | ERTS_PSFLG_DIRTY_RUNNING_SYS)) {
p->flags |= F_DELAYED_DEL_PROC;
delay_del_proc = 1;
/*
- * The dirty scheduler will also decrease
- * refc when done...
+     * The dirty scheduler decreases refc
+ * when done with the process...
*/
- erts_proc_inc_refc(p);
}
#endif
diff --git a/erts/emulator/beam/erl_process.h b/erts/emulator/beam/erl_process.h
index 9f7084c127..68fbb10602 100644
--- a/erts/emulator/beam/erl_process.h
+++ b/erts/emulator/beam/erl_process.h
@@ -1684,7 +1684,7 @@ ERTS_GLB_INLINE ErtsProcList *erts_proclist_fetch_first(ErtsProcList **list)
return NULL;
else {
ErtsProcList *res = *list;
- if (res == *list)
+ if (res->next == *list)
*list = NULL;
else
*list = res->next;
diff --git a/erts/emulator/test/bif_SUITE.erl b/erts/emulator/test/bif_SUITE.erl
index b8d89126fe..f70fb0e501 100644
--- a/erts/emulator/test/bif_SUITE.erl
+++ b/erts/emulator/test/bif_SUITE.erl
@@ -32,7 +32,8 @@
binary_to_atom/1,binary_to_existing_atom/1,
atom_to_binary/1,min_max/1, erlang_halt/1,
erl_crash_dump_bytes/1,
- is_builtin/1]).
+ is_builtin/1, error_stacktrace/1,
+ error_stacktrace_during_call_trace/1]).
suite() ->
[{ct_hooks,[ts_install_cth]},
@@ -44,8 +45,8 @@ all() ->
t_list_to_existing_atom, os_env, otp_7526,
display,
atom_to_binary, binary_to_atom, binary_to_existing_atom,
- erl_crash_dump_bytes,
- min_max, erlang_halt, is_builtin].
+ erl_crash_dump_bytes, min_max, erlang_halt, is_builtin,
+ error_stacktrace, error_stacktrace_during_call_trace].
%% Uses erlang:display to test that erts_printf does not do deep recursion
display(Config) when is_list(Config) ->
@@ -727,6 +728,172 @@ is_builtin(_Config) ->
ok.
+error_stacktrace(Config) when is_list(Config) ->
+ error_stacktrace_test().
+
+error_stacktrace_during_call_trace(Config) when is_list(Config) ->
+ Tracer = spawn_link(fun () ->
+ receive after infinity -> ok end
+ end),
+ Mprog = [{'_',[],[{exception_trace}]}],
+ erlang:trace_pattern({?MODULE,'_','_'}, Mprog, [local]),
+ 1 = erlang:trace_pattern({erlang,error,2}, Mprog, [local]),
+ 1 = erlang:trace_pattern({erlang,error,1}, Mprog, [local]),
+ erlang:trace(all, true, [call,return_to,timestamp,{tracer, Tracer}]),
+ try
+ error_stacktrace_test()
+ after
+ erlang:trace(all, false, [call,return_to,timestamp,{tracer, Tracer}]),
+ erlang:trace_pattern({erlang,error,2}, false, [local]),
+ erlang:trace_pattern({erlang,error,1}, false, [local]),
+ erlang:trace_pattern({?MODULE,'_','_'}, false, [local]),
+ unlink(Tracer),
+ exit(Tracer, kill),
+ Mon = erlang:monitor(process, Tracer),
+ receive
+ {'DOWN', Mon, process, Tracer, _} -> ok
+ end
+ end,
+ ok.
+
+
+error_stacktrace_test() ->
+ Types = [apply_const_last, apply_const, apply_last,
+ apply, double_apply_const_last, double_apply_const,
+ double_apply_last, double_apply, multi_apply_const_last,
+ multi_apply_const, multi_apply_last, multi_apply,
+ call_const_last, call_last, call_const, call],
+ lists:foreach(fun (Type) ->
+ {Pid, Mon} = spawn_monitor(
+ fun () ->
+ stk([a,b,c,d], Type, error_2)
+ end),
+ receive
+ {'DOWN', Mon, process, Pid, Reason} ->
+ {oops, Stack} = Reason,
+%% io:format("Type: ~p Stack: ~p~n",
+%% [Type, Stack]),
+ [{?MODULE, do_error_2, [Type], _},
+ {?MODULE, stk, 3, _},
+ {?MODULE, stk, 3, _}] = Stack
+ end
+ end,
+ Types),
+ lists:foreach(fun (Type) ->
+ {Pid, Mon} = spawn_monitor(
+ fun () ->
+ stk([a,b,c,d], Type, error_1)
+ end),
+ receive
+ {'DOWN', Mon, process, Pid, Reason} ->
+ {oops, Stack} = Reason,
+%% io:format("Type: ~p Stack: ~p~n",
+%% [Type, Stack]),
+ [{?MODULE, do_error_1, 1, _},
+ {?MODULE, stk, 3, _},
+ {?MODULE, stk, 3, _}] = Stack
+ end
+ end,
+ Types),
+ ok.
+
+stk([], Type, Func) ->
+ tail(Type, Func, jump),
+ ok;
+stk([_|L], Type, Func) ->
+ stk(L, Type, Func),
+ ok.
+
+tail(Type, Func, jump) ->
+ tail(Type, Func, do);
+tail(Type, error_1, do) ->
+ do_error_1(Type);
+tail(Type, error_2, do) ->
+ do_error_2(Type).
+
+do_error_2(apply_const_last) ->
+ erlang:apply(erlang, error, [oops, [apply_const_last]]);
+do_error_2(apply_const) ->
+ erlang:apply(erlang, error, [oops, [apply_const]]),
+ ok;
+do_error_2(apply_last) ->
+ erlang:apply(id(erlang), id(error), id([oops, [apply_last]]));
+do_error_2(apply) ->
+ erlang:apply(id(erlang), id(error), id([oops, [apply]])),
+ ok;
+do_error_2(double_apply_const_last) ->
+ erlang:apply(erlang, apply, [erlang, error, [oops, [double_apply_const_last]]]);
+do_error_2(double_apply_const) ->
+ erlang:apply(erlang, apply, [erlang, error, [oops, [double_apply_const]]]),
+ ok;
+do_error_2(double_apply_last) ->
+ erlang:apply(id(erlang), id(apply), [id(erlang), id(error), id([oops, [double_apply_last]])]);
+do_error_2(double_apply) ->
+ erlang:apply(id(erlang), id(apply), [id(erlang), id(error), id([oops, [double_apply]])]),
+ ok;
+do_error_2(multi_apply_const_last) ->
+ erlang:apply(erlang, apply, [erlang, apply, [erlang, apply, [erlang, error, [oops, [multi_apply_const_last]]]]]);
+do_error_2(multi_apply_const) ->
+ erlang:apply(erlang, apply, [erlang, apply, [erlang, apply, [erlang, error, [oops, [multi_apply_const]]]]]),
+ ok;
+do_error_2(multi_apply_last) ->
+ erlang:apply(id(erlang), id(apply), [id(erlang), id(apply), [id(erlang), id(apply), [id(erlang), id(error), id([oops, [multi_apply_last]])]]]);
+do_error_2(multi_apply) ->
+ erlang:apply(id(erlang), id(apply), [id(erlang), id(apply), [id(erlang), id(apply), [id(erlang), id(error), id([oops, [multi_apply]])]]]),
+ ok;
+do_error_2(call_const_last) ->
+ erlang:error(oops, [call_const_last]);
+do_error_2(call_last) ->
+ erlang:error(id(oops), id([call_last]));
+do_error_2(call_const) ->
+ erlang:error(oops, [call_const]),
+ ok;
+do_error_2(call) ->
+ erlang:error(id(oops), id([call])).
+
+
+do_error_1(apply_const_last) ->
+ erlang:apply(erlang, error, [oops]);
+do_error_1(apply_const) ->
+ erlang:apply(erlang, error, [oops]),
+ ok;
+do_error_1(apply_last) ->
+ erlang:apply(id(erlang), id(error), id([oops]));
+do_error_1(apply) ->
+ erlang:apply(id(erlang), id(error), id([oops])),
+ ok;
+do_error_1(double_apply_const_last) ->
+ erlang:apply(erlang, apply, [erlang, error, [oops]]);
+do_error_1(double_apply_const) ->
+ erlang:apply(erlang, apply, [erlang, error, [oops]]),
+ ok;
+do_error_1(double_apply_last) ->
+ erlang:apply(id(erlang), id(apply), [id(erlang), id(error), id([oops])]);
+do_error_1(double_apply) ->
+ erlang:apply(id(erlang), id(apply), [id(erlang), id(error), id([oops])]),
+ ok;
+do_error_1(multi_apply_const_last) ->
+ erlang:apply(erlang, apply, [erlang, apply, [erlang, apply, [erlang, error, [oops]]]]);
+do_error_1(multi_apply_const) ->
+ erlang:apply(erlang, apply, [erlang, apply, [erlang, apply, [erlang, error, [oops]]]]),
+ ok;
+do_error_1(multi_apply_last) ->
+ erlang:apply(id(erlang), id(apply), [id(erlang), id(apply), [id(erlang), id(apply), [id(erlang), id(error), id([oops])]]]);
+do_error_1(multi_apply) ->
+ erlang:apply(id(erlang), id(apply), [id(erlang), id(apply), [id(erlang), id(apply), [id(erlang), id(error), id([oops])]]]),
+ ok;
+do_error_1(call_const_last) ->
+ erlang:error(oops);
+do_error_1(call_last) ->
+ erlang:error(id(oops));
+do_error_1(call_const) ->
+ erlang:error(oops),
+ ok;
+do_error_1(call) ->
+ erlang:error(id(oops)).
+
+
+
%% Helpers
diff --git a/erts/emulator/test/call_trace_SUITE.erl b/erts/emulator/test/call_trace_SUITE.erl
index 6ba6301c7c..f7ff04430a 100644
--- a/erts/emulator/test/call_trace_SUITE.erl
+++ b/erts/emulator/test/call_trace_SUITE.erl
@@ -1090,8 +1090,7 @@ exception_nocatch() ->
{trace,t2,exception_from,{erlang,throw,1},
{error,{nocatch,Q2}}}],
exception_from, {error,{nocatch,Q2}}),
- expect({trace,T2,exit,{{nocatch,Q2},[{erlang,throw,[Q2],[]},
- {?MODULE,deep_4,1,
+ expect({trace,T2,exit,{{nocatch,Q2},[{?MODULE,deep_4,1,
Deep4LocThrow}]}}),
Q3 = {dump,[dump,{dump}]},
T3 =
@@ -1100,8 +1099,7 @@ exception_nocatch() ->
{trace,t3,exception_from,{erlang,error,1},
{error,Q3}}],
exception_from, {error,Q3}),
- expect({trace,T3,exit,{Q3,[{erlang,error,[Q3],[]},
- {?MODULE,deep_4,1,Deep4LocError}]}}),
+ expect({trace,T3,exit,{Q3,[{?MODULE,deep_4,1,Deep4LocError}]}}),
T4 =
exception_nocatch(?LINE, '=', [17,4711], 5, [],
exception_from, {error,{badmatch,4711}}),
diff --git a/erts/emulator/test/process_SUITE.erl b/erts/emulator/test/process_SUITE.erl
index 0f999e0efe..2289cbabc7 100644
--- a/erts/emulator/test/process_SUITE.erl
+++ b/erts/emulator/test/process_SUITE.erl
@@ -437,11 +437,22 @@ t_process_info(Config) when is_list(Config) ->
verify_loc(Line2, Res2),
pi_stacktrace([{?MODULE,t_process_info,1,?LINE}]),
+ verify_stacktrace_depth(),
+
Gleader = group_leader(),
{group_leader, Gleader} = process_info(self(), group_leader),
{'EXIT',{badarg,_Info}} = (catch process_info('not_a_pid')),
ok.
+verify_stacktrace_depth() ->
+ CS = current_stacktrace,
+ OldDepth = erlang:system_flag(backtrace_depth, 0),
+ {CS,[]} = erlang:process_info(self(), CS),
+ _ = erlang:system_flag(backtrace_depth, 8),
+ {CS,[{?MODULE,verify_stacktrace_depth,0,_},_|_]} =
+ erlang:process_info(self(), CS),
+ _ = erlang:system_flag(backtrace_depth, OldDepth).
+
pi_stacktrace(Expected0) ->
{Line,Res} = {?LINE,erlang:process_info(self(), current_stacktrace)},
{current_stacktrace,Stack} = Res,
diff --git a/erts/etc/common/heart.c b/erts/etc/common/heart.c
index d67b997d6d..bc353e384e 100644
--- a/erts/etc/common/heart.c
+++ b/erts/etc/common/heart.c
@@ -48,13 +48,10 @@
*
* HEART_BEATING
*
- * This program expects a heart beat messages. If it does not receive a
- * heart beat message from Erlang within heart_beat_timeout seconds, it
- * reboots the system. The variable heart_beat_timeout is exported (so
- * that it can be set from the shell in VxWorks, as is the variable
- * heart_beat_report_delay). When using Solaris, the system is rebooted
- * by executing the command stored in the environment variable
- * HEART_COMMAND.
+ * This program expects a heart beat message. If it does not receive a
+ * heart beat message from Erlang within heart_beat_timeout seconds, it
+ * reboots the system. The system is rebooted by executing the command
+ * stored in the environment variable HEART_COMMAND.
*
* BLOCKING DESCRIPTORS
*
@@ -149,27 +146,17 @@ struct msg {
/* Maybe interesting to change */
/* Times in seconds */
-#define HEART_BEAT_BOOT_DELAY 60 /* 1 minute */
#define SELECT_TIMEOUT 5 /* Every 5 seconds we reset the
watchdog timer */
/* heart_beat_timeout is the maximum gap in seconds between two
- consecutive heart beat messages from Erlang, and HEART_BEAT_BOOT_DELAY
- is the the extra delay that wd_keeper allows for, to give heart a
- chance to reboot in the "normal" way before the hardware watchdog
- enters the scene. heart_beat_report_delay is the time allowed for reporting
- before rebooting under VxWorks. */
+ consecutive heart beat messages from Erlang. */
int heart_beat_timeout = 60;
-int heart_beat_report_delay = 30;
-int heart_beat_boot_delay = HEART_BEAT_BOOT_DELAY;
/* All current platforms have a process identifier that
fits in an unsigned long and where 0 is an impossible or invalid value */
unsigned long heart_beat_kill_pid = 0;
-#define VW_WD_TIMEOUT (heart_beat_timeout+heart_beat_report_delay+heart_beat_boot_delay)
-#define SOL_WD_TIMEOUT (heart_beat_timeout+heart_beat_boot_delay)
-
/* reasons for reboot */
#define R_TIMEOUT (1)
#define R_CLOSED (2)
@@ -297,7 +284,6 @@ free_env_val(char *value)
static void get_arguments(int argc, char** argv) {
int i = 1;
int h;
- int w;
unsigned long p;
while (i < argc) {
@@ -313,15 +299,6 @@ static void get_arguments(int argc, char** argv) {
i++;
}
break;
- case 'w':
- if (strcmp(argv[i], "-wt") == 0)
- if (sscanf(argv[i+1],"%i",&w) ==1)
- if ((w > 10) && (w <= 65535)) {
- heart_beat_boot_delay = w;
- fprintf(stderr,"heart_beat_boot_delay = %d\n",w);
- i++;
- }
- break;
case 'p':
if (strcmp(argv[i], "-pid") == 0)
if (sscanf(argv[i+1],"%lu",&p) ==1){
@@ -347,7 +324,7 @@ static void get_arguments(int argc, char** argv) {
}
i++;
}
- debugf("arguments -ht %d -wt %d -pid %lu\n",h,w,p);
+ debugf("arguments -ht %d -pid %lu\n",h,p);
}
int main(int argc, char **argv) {
@@ -674,11 +651,6 @@ void win_system(char *command)
*/
static void
do_terminate(int erlin_fd, int reason) {
- /*
- When we get here, we have HEART_BEAT_BOOT_DELAY secs to finish
- (plus heart_beat_report_delay if under VxWorks), so we don't need
- to call wd_reset().
- */
int ret = 0, tmo=0;
char *tmo_env;
diff --git a/erts/etc/unix/etp-commands.in b/erts/etc/unix/etp-commands.in
index fa68bd26ee..f2babc48d2 100644
--- a/erts/etc/unix/etp-commands.in
+++ b/erts/etc/unix/etp-commands.in
@@ -1322,7 +1322,11 @@ define etp-stacktrace
set $etp_stacktrace_p = ($arg0)->stop
set $etp_stacktrace_end = ($arg0)->hend
printf "%% Stacktrace (%u): ", $etp_stacktrace_end-$etp_stacktrace_p
- etp ($arg0)->cp
+ if ($arg0)->cp == 0x0
+ printf "NULL\n"
+ else
+ etp ($arg0)->cp
+ end
while $etp_stacktrace_p < $etp_stacktrace_end
if ($etp_stacktrace_p[0] & 0x3) == 0x0
# Continuation pointer
@@ -1350,7 +1354,11 @@ define etp-stackdump
set $etp_stackdump_p = ($arg0)->stop
set $etp_stackdump_end = ($arg0)->hend
printf "%% Stackdump (%u): ", $etp_stackdump_end-$etp_stackdump_p
- etp ($arg0)->cp
+ if ($arg0)->cp == 0x0
+ printf "NULL\n"
+ else
+ etp ($arg0)->cp
+ end
while $etp_stackdump_p < $etp_stackdump_end
etp $etp_stackdump_p[0]
set $etp_stackdump_p++
diff --git a/lib/inets/src/http_client/httpc.erl b/lib/inets/src/http_client/httpc.erl
index 91d87289a2..bd5f6df39e 100644
--- a/lib/inets/src/http_client/httpc.erl
+++ b/lib/inets/src/http_client/httpc.erl
@@ -147,6 +147,26 @@ request(Method, Request, HttpOptions, Options) ->
request(Method, Request, HttpOptions, Options, default_profile()).
request(Method,
+ {Url, Headers, ContentType, TupleBody},
+ HTTPOptions, Options, Profile)
+ when ((Method =:= post) orelse (Method =:= patch) orelse (Method =:= put) orelse (Method =:= delete))
+ andalso (is_atom(Profile) orelse is_pid(Profile)) andalso
+ is_list(ContentType) andalso is_tuple(TupleBody)->
+ case check_body_gen(TupleBody) of
+ ok ->
+ do_request(Method, {Url, Headers, ContentType, TupleBody}, HTTPOptions, Options, Profile);
+ Error ->
+ Error
+ end;
+request(Method,
+ {Url, Headers, ContentType, Body},
+ HTTPOptions, Options, Profile)
+ when ((Method =:= post) orelse (Method =:= patch) orelse (Method =:= put) orelse (Method =:= delete))
+ andalso (is_atom(Profile) orelse is_pid(Profile)) andalso
+ is_list(ContentType) andalso (is_list(Body) orelse is_binary(Body)) ->
+ do_request(Method, {Url, Headers, ContentType, Body}, HTTPOptions, Options, Profile);
+
+request(Method,
{Url, Headers},
HTTPOptions, Options, Profile)
when (Method =:= options) orelse
@@ -155,12 +175,6 @@ request(Method,
(Method =:= delete) orelse
(Method =:= trace) andalso
(is_atom(Profile) orelse is_pid(Profile)) ->
- ?hcrt("request", [{method, Method},
- {url, Url},
- {headers, Headers},
- {http_options, HTTPOptions},
- {options, Options},
- {profile, Profile}]),
case uri_parse(Url, Options) of
{error, Reason} ->
{error, Reason};
@@ -172,21 +186,9 @@ request(Method,
handle_request(Method, Url, ParsedUrl, Headers, [], [],
HTTPOptions, Options, Profile)
end
- end;
-
-request(Method,
- {Url, Headers, ContentType, Body},
- HTTPOptions, Options, Profile)
- when ((Method =:= post) orelse (Method =:= patch) orelse (Method =:= put) orelse
- (Method =:= delete)) andalso (is_atom(Profile) orelse is_pid(Profile)) ->
- ?hcrt("request", [{method, Method},
- {url, Url},
- {headers, Headers},
- {content_type, ContentType},
- {body, Body},
- {http_options, HTTPOptions},
- {options, Options},
- {profile, Profile}]),
+ end.
+
+do_request(Method, {Url, Headers, ContentType, Body}, HTTPOptions, Options, Profile) ->
case uri_parse(Url, Options) of
{error, Reason} ->
{error, Reason};
@@ -196,7 +198,6 @@ request(Method,
HTTPOptions, Options, Profile)
end.
-
%%--------------------------------------------------------------------------
%% cancel_request(RequestId) -> ok
%% cancel_request(RequestId, Profile) -> ok
@@ -209,7 +210,6 @@ cancel_request(RequestId) ->
cancel_request(RequestId, Profile)
when is_atom(Profile) orelse is_pid(Profile) ->
- ?hcrt("cancel request", [{request_id, RequestId}, {profile, Profile}]),
httpc_manager:cancel_request(RequestId, profile_name(Profile)).
@@ -232,7 +232,6 @@ cancel_request(RequestId, Profile)
set_options(Options) ->
set_options(Options, default_profile()).
set_options(Options, Profile) when is_atom(Profile) orelse is_pid(Profile) ->
- ?hcrt("set options", [{options, Options}, {profile, Profile}]),
case validate_options(Options) of
{ok, Opts} ->
httpc_manager:set_options(Opts, profile_name(Profile));
@@ -272,7 +271,6 @@ get_options(all = _Options, Profile) ->
get_options(Options, Profile)
when (is_list(Options) andalso
(is_atom(Profile) orelse is_pid(Profile))) ->
- ?hcrt("get options", [{options, Options}, {profile, Profile}]),
case Options -- get_options() of
[] ->
try
@@ -314,9 +312,6 @@ store_cookies(SetCookieHeaders, Url) ->
store_cookies(SetCookieHeaders, Url, Profile)
when is_atom(Profile) orelse is_pid(Profile) ->
- ?hcrt("store cookies", [{set_cookie_headers, SetCookieHeaders},
- {url, Url},
- {profile, Profile}]),
try
begin
%% Since the Address part is not actually used
@@ -353,9 +348,6 @@ cookie_header(Url, Opts) when is_list(Opts) ->
cookie_header(Url, Opts, Profile)
when (is_list(Opts) andalso (is_atom(Profile) orelse is_pid(Profile))) ->
- ?hcrt("cookie header", [{url, Url},
- {opts, Opts},
- {profile, Profile}]),
try
begin
httpc_manager:which_cookies(Url, Opts, profile_name(Profile))
@@ -398,7 +390,6 @@ which_sessions() ->
which_sessions(default_profile()).
which_sessions(Profile) ->
- ?hcrt("which sessions", [{profile, Profile}]),
try
begin
httpc_manager:which_sessions(profile_name(Profile))
@@ -419,7 +410,6 @@ info() ->
info(default_profile()).
info(Profile) ->
- ?hcrt("info", [{profile, Profile}]),
try
begin
httpc_manager:info(profile_name(Profile))
@@ -440,7 +430,6 @@ reset_cookies() ->
reset_cookies(default_profile()).
reset_cookies(Profile) ->
- ?hcrt("reset cookies", [{profile, Profile}]),
try
begin
httpc_manager:reset_cookies(profile_name(Profile))
@@ -458,7 +447,6 @@ reset_cookies(Profile) ->
%% same behavior as active once for sockets.
%%-------------------------------------------------------------------------
stream_next(Pid) ->
- ?hcrt("stream next", [{handler, Pid}]),
httpc_handler:stream_next(Pid).
@@ -466,7 +454,6 @@ stream_next(Pid) ->
%%% Behaviour callbacks
%%%========================================================================
start_standalone(PropList) ->
- ?hcrt("start standalone", [{proplist, PropList}]),
case proplists:get_value(profile, PropList) of
undefined ->
{error, no_profile};
@@ -477,14 +464,11 @@ start_standalone(PropList) ->
end.
start_service(Config) ->
- ?hcrt("start service", [{config, Config}]),
httpc_profile_sup:start_child(Config).
stop_service(Profile) when is_atom(Profile) ->
- ?hcrt("stop service", [{profile, Profile}]),
httpc_profile_sup:stop_child(Profile);
stop_service(Pid) when is_pid(Pid) ->
- ?hcrt("stop service", [{pid, Pid}]),
case service_info(Pid) of
{ok, [{profile, Profile}]} ->
stop_service(Profile);
@@ -510,7 +494,6 @@ service_info(Pid) ->
%%%========================================================================
%%% Internal functions
%%%========================================================================
-
handle_request(Method, Url,
{Scheme, UserInfo, Host, Port, Path, Query},
Headers0, ContentType, Body0,
@@ -521,9 +504,6 @@ handle_request(Method, Url,
try
begin
- ?hcrt("begin processing", [{started, Started},
- {new_headers, NewHeaders0}]),
-
{NewHeaders, Body} =
case Body0 of
{chunkify, ProcessBody, Acc}
@@ -575,16 +555,13 @@ handle_request(Method, Url,
{ok, RequestId} ->
handle_answer(RequestId, Sync, Options);
{error, Reason} ->
- ?hcrd("request failed", [{reason, Reason}]),
{error, Reason}
end
end
catch
error:{noproc, _} ->
- ?hcrv("noproc", [{profile, Profile}]),
{error, {not_started, Profile}};
throw:Error ->
- ?hcrv("throw", [{error, Error}]),
Error
end.
@@ -620,15 +597,10 @@ handle_answer(RequestId, false, _) ->
handle_answer(RequestId, true, Options) ->
receive
{http, {RequestId, saved_to_file}} ->
- ?hcrt("received saved-to-file", [{request_id, RequestId}]),
{ok, saved_to_file};
{http, {RequestId, {_,_,_} = Result}} ->
- ?hcrt("received answer", [{request_id, RequestId},
- {result, Result}]),
return_answer(Options, Result);
{http, {RequestId, {error, Reason}}} ->
- ?hcrt("received error", [{request_id, RequestId},
- {reason, Reason}]),
{error, Reason}
end.
@@ -1257,18 +1229,14 @@ child_name(Pid, [{Name, Pid} | _]) ->
child_name(Pid, [_ | Children]) ->
child_name(Pid, Children).
-%% d(F) ->
-%% d(F, []).
-
-%% d(F, A) ->
-%% d(get(dbg), F, A).
-
-%% d(true, F, A) ->
-%% io:format(user, "~w:~w:" ++ F ++ "~n", [self(), ?MODULE | A]);
-%% d(_, _, _) ->
-%% ok.
-
host_address(Host, false) ->
Host;
host_address(Host, true) ->
string:strip(string:strip(Host, right, $]), left, $[).
+
+check_body_gen({Fun, _}) when is_function(Fun) ->
+ ok;
+check_body_gen({chunkify, Fun, _}) when is_function(Fun) ->
+ ok;
+check_body_gen(Gen) ->
+ {error, {bad_body_generator, Gen}}.
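
A usage sketch (URL and payload are placeholders) of the body-generator
tuples that the new check_body_gen/1 above accepts; the generator fun takes
an accumulator and returns {ok, Data, NewAcc} or eof:

    BodyFun = fun (<<>>) -> eof;
                  (Bin)  -> {ok, Bin, <<>>}
              end,
    httpc:request(post,
                  {"http://localhost:8080/upload", [], "text/plain",
                   {BodyFun, <<"payload">>}},
                  [], []).
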
diff --git a/lib/inets/src/http_client/httpc_handler.erl b/lib/inets/src/http_client/httpc_handler.erl
index 2e7df8e424..c99200777b 100644
--- a/lib/inets/src/http_client/httpc_handler.erl
+++ b/lib/inets/src/http_client/httpc_handler.erl
@@ -32,7 +32,6 @@
%% Internal Application API
-export([
start_link/4,
- %% connect_and_send/2,
send/2,
cancel/2,
stream_next/1,
@@ -165,14 +164,12 @@ info(Pid) ->
%%--------------------------------------------------------------------
%% Request should not be streamed
stream(BodyPart, #request{stream = none} = Request, _) ->
- ?hcrt("stream - none", []),
{false, BodyPart, Request};
%% Stream to caller
stream(BodyPart, #request{stream = Self} = Request, Code)
when ?IS_STREAMED(Code) andalso
((Self =:= self) orelse (Self =:= {self, once})) ->
- ?hcrt("stream - self", [{stream, Self}, {code, Code}]),
httpc_response:send(Request#request.from,
{Request#request.id, stream, BodyPart}),
{true, <<>>, Request};
@@ -182,10 +179,8 @@ stream(BodyPart, #request{stream = Self} = Request, Code)
%% We keep this for backward compatibillity...
stream(BodyPart, #request{stream = Filename} = Request, Code)
when ?IS_STREAMED(Code) andalso is_list(Filename) ->
- ?hcrt("stream - filename", [{stream, Filename}, {code, Code}]),
case file:open(Filename, [write, raw, append, delayed_write]) of
{ok, Fd} ->
- ?hcrt("stream - file open ok", [{fd, Fd}]),
stream(BodyPart, Request#request{stream = Fd}, 200);
{error, Reason} ->
exit({stream_to_file_failed, Reason})
@@ -194,7 +189,6 @@ stream(BodyPart, #request{stream = Filename} = Request, Code)
%% Stream to file
stream(BodyPart, #request{stream = Fd} = Request, Code)
when ?IS_STREAMED(Code) ->
- ?hcrt("stream to file", [{stream, Fd}, {code, Code}]),
case file:write(Fd, BodyPart) of
ok ->
{true, <<>>, Request};
@@ -203,7 +197,6 @@ stream(BodyPart, #request{stream = Fd} = Request, Code)
end;
stream(BodyPart, Request,_) -> % only 200 and 206 responses can be streamed
- ?hcrt("stream - ignore", [{request, Request}]),
{false, BodyPart, Request}.
@@ -257,22 +250,148 @@ init([Parent, Request, Options, ProfileName]) ->
%% {stop, Reason, State} (terminate/2 is called)
%% Description: Handling call messages
%%--------------------------------------------------------------------
-handle_call(#request{address = Addr} = Request, _,
+handle_call(Request, From, State) ->
+ try do_handle_call(Request, From, State) of
+ Result ->
+ Result
+ catch
+ _:Reason ->
+ {stop, {shutdown, Reason} , State}
+ end.
+
+
+%%--------------------------------------------------------------------
+%% Function: handle_cast(Msg, State) -> {noreply, State} |
+%% {noreply, State, Timeout} |
+%% {stop, Reason, State} (terminate/2 is called)
+%% Description: Handling cast messages
+%%--------------------------------------------------------------------
+handle_cast(Msg, State) ->
+ try do_handle_cast(Msg, State) of
+ Result ->
+ Result
+ catch
+ _:Reason ->
+ {stop, {shutdown, Reason} , State}
+ end.
+
+%%--------------------------------------------------------------------
+%% Function: handle_info(Info, State) -> {noreply, State} |
+%% {noreply, State, Timeout} |
+%% {stop, Reason, State} (terminate/2 is called)
+%% Description: Handling all non call/cast messages
+%%--------------------------------------------------------------------
+handle_info(Info, State) ->
+ try do_handle_info(Info, State) of
+ Result ->
+ Result
+ catch
+ _:Reason ->
+ {stop, {shutdown, Reason} , State}
+ end.
+
+%%--------------------------------------------------------------------
+%% Function: terminate(Reason, State) -> _ (ignored by gen_server)
+%% Description: Shutdown the httpc_handler
+%%--------------------------------------------------------------------
+
+%% Init error there is no socket to be closed.
+terminate(normal,
+ #state{request = Request,
+ session = {send_failed, _} = Reason} = State) ->
+ maybe_send_answer(Request,
+ httpc_response:error(Request, Reason),
+ State),
+ ok;
+
+terminate(normal,
+ #state{request = Request,
+ session = {connect_failed, _} = Reason} = State) ->
+ maybe_send_answer(Request,
+ httpc_response:error(Request, Reason),
+ State),
+ ok;
+
+terminate(normal, #state{session = undefined}) ->
+ ok;
+
+%% Init error sending, no session information has been setup but
+%% there is a socket that needs closing.
+terminate(normal,
+ #state{session = #session{id = undefined} = Session}) ->
+ close_socket(Session);
+
+%% Socket closed remotely
+terminate(normal,
+ #state{session = #session{socket = {remote_close, Socket},
+ socket_type = SocketType,
+ id = Id},
+ profile_name = ProfileName,
+ request = Request,
+ timers = Timers,
+ pipeline = Pipeline,
+ keep_alive = KeepAlive} = State) ->
+ %% Clobber session
+ (catch httpc_manager:delete_session(Id, ProfileName)),
+
+ maybe_retry_queue(Pipeline, State),
+ maybe_retry_queue(KeepAlive, State),
+
+ %% Cancel timers
+ cancel_timers(Timers),
+
+ %% Maybe deliver answers to requests
+ deliver_answer(Request),
+
+ %% And, just in case, close our side (**really** overkill)
+ http_transport:close(SocketType, Socket);
+
+terminate(_Reason, #state{session = #session{id = Id,
+ socket = Socket,
+ socket_type = SocketType},
+ request = undefined,
+ profile_name = ProfileName,
+ timers = Timers,
+ pipeline = Pipeline,
+ keep_alive = KeepAlive} = State) ->
+
+ %% Clobber session
+ (catch httpc_manager:delete_session(Id, ProfileName)),
+
+ maybe_retry_queue(Pipeline, State),
+ maybe_retry_queue(KeepAlive, State),
+
+ cancel_timer(Timers#timers.queue_timer, timeout_queue),
+ http_transport:close(SocketType, Socket);
+
+terminate(_Reason, #state{request = undefined}) ->
+ ok;
+
+terminate(Reason, #state{request = Request} = State) ->
+ NewState = maybe_send_answer(Request,
+ httpc_response:error(Request, Reason),
+ State),
+ terminate(Reason, NewState#state{request = undefined}).
+
+%%--------------------------------------------------------------------
+%% Func: code_change(_OldVsn, State, Extra) -> {ok, NewState}
+%% Purpose: Convert process state when code is changed
+%%--------------------------------------------------------------------
+
+code_change(_, State, _) ->
+ {ok, State}.
+
+%%%--------------------------------------------------------------------
+%%% Internal functions
+%%%--------------------------------------------------------------------
+do_handle_call(#request{address = Addr} = Request, _,
#state{status = Status,
session = #session{type = pipeline} = Session,
timers = Timers,
options = #options{proxy = Proxy} = _Options,
profile_name = ProfileName} = State0)
when Status =/= undefined ->
-
- ?hcrv("new request on a pipeline session",
- [{request, Request},
- {profile, ProfileName},
- {status, Status},
- {timers, Timers}]),
-
Address = handle_proxy(Addr, Proxy),
-
case httpc_request:send(Address, Session, Request) of
ok ->
@@ -287,9 +406,8 @@ handle_call(#request{address = Addr} = Request, _,
case State0#state.request of
#request{} = OldRequest -> %% Old request not yet finished
- ?hcrd("old request still not finished", []),
%% Make sure to use the new value of timers in state
- NewTimers = State1#state.timers,
+ NewTimers = State1#state.timers,
NewPipeline = queue:in(Request, State1#state.pipeline),
NewSession =
Session#session{queue_length =
@@ -297,7 +415,6 @@ handle_call(#request{address = Addr} = Request, _,
queue:len(NewPipeline) + 1,
client_close = ClientClose},
insert_session(NewSession, ProfileName),
- ?hcrd("session updated", []),
{reply, ok, State1#state{
request = OldRequest,
pipeline = NewPipeline,
@@ -306,7 +423,6 @@ handle_call(#request{address = Addr} = Request, _,
undefined ->
%% Note: tcp-message receiving has already been
%% activated by handle_pipeline/2.
- ?hcrd("no current request", []),
cancel_timer(Timers#timers.queue_timer,
timeout_queue),
NewSession =
@@ -314,18 +430,16 @@ handle_call(#request{address = Addr} = Request, _,
client_close = ClientClose},
httpc_manager:insert_session(NewSession, ProfileName),
NewTimers = Timers#timers{queue_timer = undefined},
- ?hcrd("session created", []),
State = init_wait_for_response_state(Request, State1#state{session = NewSession,
timers = NewTimers}),
{reply, ok, State}
end;
{error, Reason} ->
- ?hcri("failed sending request", [{reason, Reason}]),
NewPipeline = queue:in(Request, State0#state.pipeline),
- {stop, shutdown, {pipeline_failed, Reason}, State0#state{pipeline = NewPipeline}}
+ {stop, {shutdown, {pipeline_failed, Reason}}, State0#state{pipeline = NewPipeline}}
end;
-handle_call(#request{address = Addr} = Request, _,
+do_handle_call(#request{address = Addr} = Request, _,
#state{status = Status,
session = #session{type = keep_alive} = Session,
timers = Timers,
@@ -333,17 +447,11 @@ handle_call(#request{address = Addr} = Request, _,
profile_name = ProfileName} = State0)
when Status =/= undefined ->
- ?hcrv("new request on a keep-alive session",
- [{request, Request},
- {profile, ProfileName},
- {status, Status}]),
-
ClientClose = httpc_request:is_client_closing(Request#request.headers),
case State0#state.request of
#request{} -> %% Old request not yet finished
%% Make sure to use the new value of timers in state
- ?hcrd("old request still not finished", []),
NewKeepAlive = queue:in(Request, State0#state.keep_alive),
NewSession =
Session#session{queue_length =
@@ -351,13 +459,11 @@ handle_call(#request{address = Addr} = Request, _,
queue:len(NewKeepAlive) + 1,
client_close = ClientClose},
insert_session(NewSession, ProfileName),
- ?hcrd("session updated", []),
{reply, ok, State0#state{keep_alive = NewKeepAlive,
session = NewSession}};
undefined ->
%% Note: tcp-message receiving has already been
%% activated by handle_pipeline/2.
- ?hcrd("no current request", []),
cancel_timer(Timers#timers.queue_timer,
timeout_queue),
NewTimers = Timers#timers{queue_timer = undefined},
@@ -365,8 +471,6 @@ handle_call(#request{address = Addr} = Request, _,
Address = handle_proxy(Addr, Proxy),
case httpc_request:send(Address, Session, Request) of
ok ->
- ?hcrd("request sent", []),
-
%% Activate the request time out for the new request
State2 =
activate_request_timeout(State1#state{request = Request}),
@@ -377,22 +481,13 @@ handle_call(#request{address = Addr} = Request, _,
State = init_wait_for_response_state(Request, State2#state{session = NewSession}),
{reply, ok, State};
{error, Reason} ->
- ?hcri("failed sending request", [{reason, Reason}]),
- {stop, shutdown, {keepalive_failed, Reason}, State1}
+ {stop, {shutdown, {keepalive_failed, Reason}}, State1}
end
end;
-
-handle_call(info, _, State) ->
+do_handle_call(info, _, State) ->
Info = handler_info(State),
{reply, Info, State}.
-%%--------------------------------------------------------------------
-%% Function: handle_cast(Msg, State) -> {noreply, State} |
-%% {noreply, State, Timeout} |
-%% {stop, Reason, State} (terminate/2 is called)
-%% Description: Handling cast messages
-%%--------------------------------------------------------------------
-
%% When the request in process has been canceled the handler process is
%% stopped and the pipelined requests will be reissued or remaining
%% requests will be sent on a new connection. This is
@@ -405,145 +500,102 @@ handle_call(info, _, State) ->
%% handle_keep_alive_queue/2 on the other hand will just skip the
%% request as if it was never issued as in this case the request will
%% not have been sent.
-handle_cast({cancel, RequestId},
+do_handle_cast({cancel, RequestId},
#state{request = #request{id = RequestId} = Request,
- profile_name = ProfileName,
canceled = Canceled} = State) ->
- ?hcrv("cancel current request", [{request_id, RequestId},
- {profile, ProfileName},
- {canceled, Canceled}]),
{stop, normal,
State#state{canceled = [RequestId | Canceled],
request = Request#request{from = answer_sent}}};
-handle_cast({cancel, RequestId},
- #state{profile_name = ProfileName,
- request = #request{id = CurrId},
- canceled = Canceled} = State) ->
- ?hcrv("cancel", [{request_id, RequestId},
- {curr_req_id, CurrId},
- {profile, ProfileName},
- {canceled, Canceled}]),
+do_handle_cast({cancel, RequestId},
+ #state{request = #request{},
+ canceled = Canceled} = State) ->
{noreply, State#state{canceled = [RequestId | Canceled]}};
-handle_cast({cancel, RequestId},
- #state{profile_name = ProfileName,
- request = undefined,
- canceled = Canceled} = State) ->
- ?hcrv("cancel", [{request_id, RequestId},
- {curr_req_id, undefined},
- {profile, ProfileName},
- {canceled, Canceled}]),
+do_handle_cast({cancel, _},
+ #state{request = undefined} = State) ->
{noreply, State};
-
-handle_cast(stream_next, #state{session = Session} = State) ->
+do_handle_cast(stream_next, #state{session = Session} = State) ->
activate_once(Session),
%% Inactivate the #state.once here because we don't want
%% next_body_chunk/1 to activate the socket twice.
{noreply, State#state{once = inactive}}.
-
-%%--------------------------------------------------------------------
-%% Function: handle_info(Info, State) -> {noreply, State} |
-%% {noreply, State, Timeout} |
-%% {stop, Reason, State} (terminate/2 is called)
-%% Description: Handling all non call/cast messages
-%%--------------------------------------------------------------------
-handle_info({Proto, _Socket, Data},
+do_handle_info({Proto, _Socket, Data},
#state{mfa = {Module, Function, Args},
- request = #request{method = Method,
- stream = Stream} = Request,
+ request = #request{method = Method} = Request,
session = Session,
status_line = StatusLine} = State)
when (Proto =:= tcp) orelse
(Proto =:= ssl) orelse
(Proto =:= httpc_handler) ->
- ?hcri("received data", [{proto, Proto},
- {module, Module},
- {function, Function},
- {method, Method},
- {stream, Stream},
- {session, Session},
- {status_line, StatusLine}]),
-
- FinalResult =
- try Module:Function([Data | Args]) of
- {ok, Result} ->
- ?hcrd("data processed - ok", []),
- handle_http_msg(Result, State);
- {_, whole_body, _} when Method =:= head ->
- ?hcrd("data processed - whole body", []),
- handle_response(State#state{body = <<>>});
- {Module, whole_body, [Body, Length]} ->
- ?hcrd("data processed - whole body", [{length, Length}]),
- {_, Code, _} = StatusLine,
- {Streamed, NewBody, NewRequest} = stream(Body, Request, Code),
- %% When we stream we will not keep the already
- %% streamed data, that would be a waste of memory.
- NewLength =
- case Streamed of
- false ->
- Length;
- true ->
- Length - size(Body)
- end,
-
- NewState = next_body_chunk(State, Code),
- NewMFA = {Module, whole_body, [NewBody, NewLength]},
- {noreply, NewState#state{mfa = NewMFA,
- request = NewRequest}};
- {Module, decode_size,
- [TotalChunk, HexList,
+ try Module:Function([Data | Args]) of
+ {ok, Result} ->
+ handle_http_msg(Result, State);
+ {_, whole_body, _} when Method =:= head ->
+ handle_response(State#state{body = <<>>});
+ {Module, whole_body, [Body, Length]} ->
+ {_, Code, _} = StatusLine,
+ {Streamed, NewBody, NewRequest} = stream(Body, Request, Code),
+ %% When we stream we will not keep the already
+ %% streamed data, that would be a waste of memory.
+ NewLength =
+ case Streamed of
+ false ->
+ Length;
+ true ->
+ Length - size(Body)
+ end,
+
+ NewState = next_body_chunk(State, Code),
+ NewMFA = {Module, whole_body, [NewBody, NewLength]},
+ {noreply, NewState#state{mfa = NewMFA,
+ request = NewRequest}};
+ {Module, decode_size,
+ [TotalChunk, HexList, AccHeaderSize,
{MaxBodySize, BodySoFar, AccLength, MaxHeaderSize}]}
- when BodySoFar =/= <<>> ->
- ?hcrd("data processed - decode_size", []),
- %% The response body is chunk-encoded. Steal decoded
- %% chunks as much as possible to stream.
- {_, Code, _} = StatusLine,
- {_, NewBody, NewRequest} = stream(BodySoFar, Request, Code),
- NewState = next_body_chunk(State, Code),
- NewMFA = {Module, decode_size,
- [TotalChunk, HexList,
+ when BodySoFar =/= <<>> ->
+ %% The response body is chunk-encoded. Steal decoded
+ %% chunks as much as possible to stream.
+ {_, Code, _} = StatusLine,
+ {_, NewBody, NewRequest} = stream(BodySoFar, Request, Code),
+ NewState = next_body_chunk(State, Code),
+ NewMFA = {Module, decode_size,
+ [TotalChunk, HexList, AccHeaderSize,
{MaxBodySize, NewBody, AccLength, MaxHeaderSize}]},
+ {noreply, NewState#state{mfa = NewMFA,
+ request = NewRequest}};
+ {Module, decode_data,
+ [ChunkSize, TotalChunk,
+ {MaxBodySize, BodySoFar, AccLength, MaxHeaderSize}]}
+ when TotalChunk =/= <<>> orelse BodySoFar =/= <<>> ->
+ %% The response body is chunk-encoded. Steal decoded
+ %% chunks as much as possible to stream.
+ ChunkSizeToSteal = min(ChunkSize, byte_size(TotalChunk)),
+ <<StolenChunk:ChunkSizeToSteal/binary, NewTotalChunk/binary>> = TotalChunk,
+ StolenBody = <<BodySoFar/binary, StolenChunk/binary>>,
+ NewChunkSize = ChunkSize - ChunkSizeToSteal,
+ {_, Code, _} = StatusLine,
+
+ {_, NewBody, NewRequest} = stream(StolenBody, Request, Code),
+ NewState = next_body_chunk(State, Code),
+ NewMFA = {Module, decode_data,
+ [NewChunkSize, NewTotalChunk,
+ {MaxBodySize, NewBody, AccLength, MaxHeaderSize}]},
{noreply, NewState#state{mfa = NewMFA,
request = NewRequest}};
- {Module, decode_data,
- [ChunkSize, TotalChunk,
- {MaxBodySize, BodySoFar, AccLength, MaxHeaderSize}]}
- when TotalChunk =/= <<>> orelse BodySoFar =/= <<>> ->
- ?hcrd("data processed - decode_data", []),
- %% The response body is chunk-encoded. Steal decoded
- %% chunks as much as possible to stream.
- ChunkSizeToSteal = min(ChunkSize, byte_size(TotalChunk)),
- <<StolenChunk:ChunkSizeToSteal/binary, NewTotalChunk/binary>> = TotalChunk,
- StolenBody = <<BodySoFar/binary, StolenChunk/binary>>,
- NewChunkSize = ChunkSize - ChunkSizeToSteal,
- {_, Code, _} = StatusLine,
-
- {_, NewBody, NewRequest} = stream(StolenBody, Request, Code),
- NewState = next_body_chunk(State, Code),
- NewMFA = {Module, decode_data,
- [NewChunkSize, NewTotalChunk,
- {MaxBodySize, NewBody, AccLength, MaxHeaderSize}]},
- {noreply, NewState#state{mfa = NewMFA,
- request = NewRequest}};
- NewMFA ->
- ?hcrd("data processed - new mfa", []),
- activate_once(Session),
- {noreply, State#state{mfa = NewMFA}}
- catch
- _:_Reason ->
- ?hcrd("data processing exit", [{exit, _Reason}]),
- ClientReason = {could_not_parse_as_http, Data},
- ClientErrMsg = httpc_response:error(Request, ClientReason),
- NewState = answer_request(Request, ClientErrMsg, State),
- {stop, normal, NewState}
- end,
- ?hcri("data processed", [{final_result, FinalResult}]),
- FinalResult;
-
+ NewMFA ->
+ activate_once(Session),
+ {noreply, State#state{mfa = NewMFA}}
+ catch
+ _:Reason ->
+ ClientReason = {could_not_parse_as_http, Data},
+ ClientErrMsg = httpc_response:error(Request, ClientReason),
+ NewState = answer_request(Request, ClientErrMsg, State),
+ {stop, {shutdown, Reason}, NewState}
+ end;
-handle_info({Proto, Socket, Data},
+do_handle_info({Proto, Socket, Data},
#state{mfa = MFA,
request = Request,
session = Session,
@@ -568,200 +620,107 @@ handle_info({Proto, Socket, Data},
{noreply, State};
-
%% The Server may close the connection to indicate that the
%% whole body is now sent instead of sending a length
%% indicator.
-handle_info({tcp_closed, _}, State = #state{mfa = {_, whole_body, Args}}) ->
+do_handle_info({tcp_closed, _}, State = #state{mfa = {_, whole_body, Args}}) ->
handle_response(State#state{body = hd(Args)});
-handle_info({ssl_closed, _}, State = #state{mfa = {_, whole_body, Args}}) ->
+do_handle_info({ssl_closed, _}, State = #state{mfa = {_, whole_body, Args}}) ->
handle_response(State#state{body = hd(Args)});
%%% Server closes idle pipeline
-handle_info({tcp_closed, _}, State = #state{request = undefined}) ->
+do_handle_info({tcp_closed, _}, State = #state{request = undefined}) ->
{stop, normal, State};
-handle_info({ssl_closed, _}, State = #state{request = undefined}) ->
+do_handle_info({ssl_closed, _}, State = #state{request = undefined}) ->
{stop, normal, State};
%%% Error cases
-handle_info({tcp_closed, _}, #state{session = Session0} = State) ->
+do_handle_info({tcp_closed, _}, #state{session = Session0} = State) ->
Socket = Session0#session.socket,
Session = Session0#session{socket = {remote_close, Socket}},
%% {stop, session_remotly_closed, State};
{stop, normal, State#state{session = Session}};
-handle_info({ssl_closed, _}, #state{session = Session0} = State) ->
+do_handle_info({ssl_closed, _}, #state{session = Session0} = State) ->
Socket = Session0#session.socket,
Session = Session0#session{socket = {remote_close, Socket}},
%% {stop, session_remotly_closed, State};
{stop, normal, State#state{session = Session}};
-handle_info({tcp_error, _, _} = Reason, State) ->
+do_handle_info({tcp_error, _, _} = Reason, State) ->
{stop, Reason, State};
-handle_info({ssl_error, _, _} = Reason, State) ->
+do_handle_info({ssl_error, _, _} = Reason, State) ->
{stop, Reason, State};
%% Timeouts
%% Internally, to a request handling process, a request timeout is
%% seen as a canceled request.
-handle_info({timeout, RequestId},
+do_handle_info({timeout, RequestId},
#state{request = #request{id = RequestId} = Request,
canceled = Canceled,
profile_name = ProfileName} = State) ->
- ?hcri("timeout of current request", [{id, RequestId}]),
httpc_response:send(Request#request.from,
httpc_response:error(Request, timeout)),
httpc_manager:request_done(RequestId, ProfileName),
- ?hcrv("response (timeout) sent - now terminate", []),
{stop, normal,
State#state{request = Request#request{from = answer_sent},
canceled = [RequestId | Canceled]}};
-handle_info({timeout, RequestId},
+do_handle_info({timeout, RequestId},
#state{canceled = Canceled,
profile_name = ProfileName} = State) ->
- ?hcri("timeout", [{id, RequestId}]),
Filter =
fun(#request{id = Id, from = From} = Request) when Id =:= RequestId ->
- ?hcrv("found request", [{id, Id}, {from, From}]),
%% Notify the owner
httpc_response:send(From,
httpc_response:error(Request, timeout)),
httpc_manager:request_done(RequestId, ProfileName),
- ?hcrv("response (timeout) sent", []),
[Request#request{from = answer_sent}];
(_) ->
true
end,
case State#state.status of
pipeline ->
- ?hcrd("pipeline", []),
Pipeline = queue:filter(Filter, State#state.pipeline),
{noreply, State#state{canceled = [RequestId | Canceled],
pipeline = Pipeline}};
keep_alive ->
- ?hcrd("keep_alive", []),
KeepAlive = queue:filter(Filter, State#state.keep_alive),
{noreply, State#state{canceled = [RequestId | Canceled],
keep_alive = KeepAlive}}
end;
-handle_info(timeout_queue, State = #state{request = undefined}) ->
+do_handle_info(timeout_queue, State = #state{request = undefined}) ->
{stop, normal, State};
%% Timing was such that the queue_timeout was not canceled!
-handle_info(timeout_queue, #state{timers = Timers} = State) ->
+do_handle_info(timeout_queue, #state{timers = Timers} = State) ->
{noreply, State#state{timers =
Timers#timers{queue_timer = undefined}}};
%% Setting up the connection to the server somehow failed.
-handle_info({init_error, Tag, ClientErrMsg},
+do_handle_info({init_error, Reason, ClientErrMsg},
State = #state{request = Request}) ->
- ?hcrv("init error", [{tag, Tag}, {client_error, ClientErrMsg}]),
NewState = answer_request(Request, ClientErrMsg, State),
- {stop, normal, NewState};
-
+ {stop, {shutdown, Reason}, NewState};
%%% httpc_manager process dies.
-handle_info({'EXIT', _, _}, State = #state{request = undefined}) ->
+do_handle_info({'EXIT', _, _}, State = #state{request = undefined}) ->
{stop, normal, State};
%% Try to finish the current request anyway,
%% there is a fairly high probability that it can be done successfully.
%% Then close the connection, hopefully a new manager is started that
%% can retry requests in the pipeline.
-handle_info({'EXIT', _, _}, State) ->
+do_handle_info({'EXIT', _, _}, State) ->
{noreply, State#state{status = close}}.
-%%--------------------------------------------------------------------
-%% Function: terminate(Reason, State) -> _ (ignored by gen_server)
-%% Description: Shutdown the httpc_handler
-%%--------------------------------------------------------------------
-
-%% Init error there is no socket to be closed.
-terminate(normal,
- #state{request = Request,
- session = {send_failed, AReason} = Reason} = State) ->
- ?hcrd("terminate", [{send_reason, AReason}, {request, Request}]),
- maybe_send_answer(Request,
- httpc_response:error(Request, Reason),
- State),
- ok;
-
-terminate(normal,
- #state{request = Request,
- session = {connect_failed, AReason} = Reason} = State) ->
- ?hcrd("terminate", [{connect_reason, AReason}, {request, Request}]),
- maybe_send_answer(Request,
- httpc_response:error(Request, Reason),
- State),
- ok;
-
-terminate(normal, #state{session = undefined}) ->
- ok;
-
-%% Init error sending, no session information has been setup but
-%% there is a socket that needs closing.
-terminate(normal,
- #state{session = #session{id = undefined} = Session}) ->
- close_socket(Session);
-
-%% Socket closed remotely
-terminate(normal,
- #state{session = #session{socket = {remote_close, Socket},
- socket_type = SocketType,
- id = Id},
- profile_name = ProfileName,
- request = Request,
- timers = Timers,
- pipeline = Pipeline,
- keep_alive = KeepAlive} = State) ->
- ?hcrt("terminate(normal) - remote close",
- [{id, Id}, {profile, ProfileName}]),
-
- %% Clobber session
- (catch httpc_manager:delete_session(Id, ProfileName)),
-
- maybe_retry_queue(Pipeline, State),
- maybe_retry_queue(KeepAlive, State),
-
- %% Cancel timers
- cancel_timers(Timers),
-
- %% Maybe deliver answers to requests
- deliver_answer(Request),
-
- %% And, just in case, close our side (**really** overkill)
- http_transport:close(SocketType, Socket);
-
-terminate(Reason, #state{session = #session{id = Id,
- socket = Socket,
- socket_type = SocketType},
- request = undefined,
- profile_name = ProfileName,
- timers = Timers,
- pipeline = Pipeline,
- keep_alive = KeepAlive} = State) ->
- ?hcrt("terminate",
- [{id, Id}, {profile, ProfileName}, {reason, Reason}]),
-
- %% Clobber session
- (catch httpc_manager:delete_session(Id, ProfileName)),
-
- maybe_retry_queue(Pipeline, State),
- maybe_retry_queue(KeepAlive, State),
-
- cancel_timer(Timers#timers.queue_timer, timeout_queue),
- http_transport:close(SocketType, Socket);
+call(Msg, Pid) ->
+ call(Msg, Pid, infinity).
-terminate(Reason, #state{request = undefined}) ->
- ?hcrt("terminate", [{reason, Reason}]),
- ok;
+call(Msg, Pid, Timeout) ->
+ gen_server:call(Pid, Msg, Timeout).
-terminate(Reason, #state{request = Request} = State) ->
- ?hcrd("terminate", [{reason, Reason}, {request, Request}]),
- NewState = maybe_send_answer(Request,
- httpc_response:error(Request, Reason),
- State),
- terminate(Reason, NewState#state{request = undefined}).
+cast(Msg, Pid) ->
+ gen_server:cast(Pid, Msg).
maybe_retry_queue(Q, State) ->
case queue:is_empty(Q) of
@@ -776,45 +735,13 @@ maybe_send_answer(#request{from = answer_sent}, _Reason, State) ->
maybe_send_answer(Request, Answer, State) ->
answer_request(Request, Answer, State).
-deliver_answer(#request{id = Id, from = From} = Request)
+deliver_answer(#request{from = From} = Request)
when is_pid(From) ->
Response = httpc_response:error(Request, socket_closed_remotely),
- ?hcrd("deliver answer", [{id, Id}, {from, From}, {response, Response}]),
httpc_response:send(From, Response);
-deliver_answer(Request) ->
- ?hcrd("skip deliver answer", [{request, Request}]),
+deliver_answer(_Request) ->
ok.
-
-%%--------------------------------------------------------------------
-%% Func: code_change(_OldVsn, State, Extra) -> {ok, NewState}
-%% Purpose: Convert process state when code is changed
-%%--------------------------------------------------------------------
-
-code_change(_, State, _) ->
- {ok, State}.
-
-
-%% new_http_options({http_options, TimeOut, AutoRedirect, SslOpts,
-%% Auth, Relaxed}) ->
-%% {http_options, "HTTP/1.1", TimeOut, AutoRedirect, SslOpts,
-%% Auth, Relaxed}.
-
-%% old_http_options({http_options, _, TimeOut, AutoRedirect,
-%% SslOpts, Auth, Relaxed}) ->
-%% {http_options, TimeOut, AutoRedirect, SslOpts, Auth, Relaxed}.
-
-%% new_queue(Queue, Fun) ->
-%% List = queue:to_list(Queue),
-%% NewList =
-%% lists:map(fun(Request) ->
-%% Settings =
-%% Fun(Request#request.settings),
-%% Request#request{settings = Settings}
-%% end, List),
-%% queue:from_list(NewList).
-
-
%%%--------------------------------------------------------------------
%%% Internal functions
%%%--------------------------------------------------------------------
@@ -872,26 +799,21 @@ connect(SocketType, ToAddress,
connect_and_send_first_request(Address, Request, #state{options = Options} = State) ->
SocketType = socket_type(Request),
ConnTimeout = (Request#request.settings)#http_options.connect_timeout,
- ?hcri("connect",
- [{address, Address}, {request, Request}, {options, Options}]),
case connect(SocketType, Address, Options, ConnTimeout) of
{ok, Socket} ->
ClientClose =
- httpc_request:is_client_closing(
- Request#request.headers),
+ httpc_request:is_client_closing(
+ Request#request.headers),
SessionType = httpc_manager:session_type(Options),
SocketType = socket_type(Request),
Session = #session{id = {Request#request.address, self()},
scheme = Request#request.scheme,
socket = Socket,
- socket_type = SocketType,
- client_close = ClientClose,
- type = SessionType},
- ?hcri("connected - now send first request", [{socket, Socket}]),
-
+ socket_type = SocketType,
+ client_close = ClientClose,
+ type = SessionType},
case httpc_request:send(Address, Session, Request) of
ok ->
- ?hcri("first request sent", []),
TmpState = State#state{request = Request,
session = Session,
mfa = init_mfa(Request, State),
@@ -949,12 +871,6 @@ handler_info(#state{request = Request,
options = _Options,
timers = _Timers} = _State) ->
- ?hcrt("handler info", [{request, Request},
- {session, Session},
- {pipeline, Pipeline},
- {keep_alive, KeepAlive},
- {status, Status}]),
-
%% Info about the current request
RequestInfo =
case Request of
@@ -965,8 +881,6 @@ handler_info(#state{request = Request,
[{id, Id}, {started, ReqStarted}]
end,
- ?hcrt("handler info", [{request_info, RequestInfo}]),
-
%% Info about the current session/socket
SessionType = Session#session.type,
QueueLen = case SessionType of
@@ -979,22 +893,12 @@ handler_info(#state{request = Request,
Socket = Session#session.socket,
SocketType = Session#session.socket_type,
- ?hcrt("handler info", [{session_type, SessionType},
- {queue_length, QueueLen},
- {scheme, Scheme},
- {socket, Socket}]),
-
SocketOpts = http_transport:getopts(SocketType, Socket),
SocketStats = http_transport:getstat(SocketType, Socket),
Remote = http_transport:peername(SocketType, Socket),
Local = http_transport:sockname(SocketType, Socket),
- ?hcrt("handler info", [{remote, Remote},
- {local, Local},
- {socket_opts, SocketOpts},
- {socket_stats, SocketStats}]),
-
SocketInfo = [{remote, Remote},
{local, Local},
{socket_opts, SocketOpts},
@@ -1014,7 +918,6 @@ handler_info(#state{request = Request,
handle_http_msg({Version, StatusCode, ReasonPharse, Headers, Body},
State = #state{request = Request}) ->
- ?hcrt("handle_http_msg", [{headers, Headers}]),
case Headers#http_response_h.'content-type' of
"multipart/byteranges" ++ _Param ->
exit({not_yet_implemented, multypart_byteranges});
@@ -1028,15 +931,12 @@ handle_http_msg({Version, StatusCode, ReasonPharse, Headers, Body},
end;
handle_http_msg({ChunkedHeaders, Body},
#state{status_line = {_, Code, _}, headers = Headers} = State) ->
- ?hcrt("handle_http_msg",
- [{chunked_headers, ChunkedHeaders}, {headers, Headers}]),
NewHeaders = http_chunk:handle_headers(Headers, ChunkedHeaders),
{_, NewBody, NewRequest} = stream(Body, State#state.request, Code),
handle_response(State#state{headers = NewHeaders,
body = NewBody,
request = NewRequest});
handle_http_msg(Body, #state{status_line = {_,Code, _}} = State) ->
- ?hcrt("handle_http_msg", [{code, Code}]),
{_, NewBody, NewRequest} = stream(Body, State#state.request, Code),
handle_response(State#state{body = NewBody, request = NewRequest}).
@@ -1051,41 +951,28 @@ handle_http_body(_, #state{status = {ssl_tunnel, Request},
{stop, normal, NewState};
handle_http_body(<<>>, #state{status_line = {_,304, _}} = State) ->
- ?hcrt("handle_http_body - 304", []),
handle_response(State#state{body = <<>>});
handle_http_body(<<>>, #state{status_line = {_,204, _}} = State) ->
- ?hcrt("handle_http_body - 204", []),
handle_response(State#state{body = <<>>});
handle_http_body(<<>>, #state{request = #request{method = head}} = State) ->
- ?hcrt("handle_http_body - head", []),
handle_response(State#state{body = <<>>});
handle_http_body(Body, #state{headers = Headers,
max_body_size = MaxBodySize,
status_line = {_,Code, _},
request = Request} = State) ->
- ?hcrt("handle_http_body",
- [{max_body_size, MaxBodySize}, {headers, Headers}, {code, Code}]),
TransferEnc = Headers#http_response_h.'transfer-encoding',
case case_insensitive_header(TransferEnc) of
"chunked" ->
- ?hcrt("handle_http_body - chunked", []),
try http_chunk:decode(Body, State#state.max_body_size,
State#state.max_header_size) of
{Module, Function, Args} ->
- ?hcrt("handle_http_body - new mfa",
- [{module, Module},
- {function, Function},
- {args, Args}]),
NewState = next_body_chunk(State, Code),
{noreply, NewState#state{mfa =
{Module, Function, Args}}};
{ok, {ChunkedHeaders, NewBody}} ->
- ?hcrt("handle_http_body - new body",
- [{chunked_headers, ChunkedHeaders},
- {new_body, NewBody}]),
NewHeaders = http_chunk:handle_headers(Headers,
ChunkedHeaders),
case Body of
@@ -1107,7 +994,6 @@ handle_http_body(Body, #state{headers = Headers,
{stop, normal, NewState}
end;
Enc when Enc =:= "identity"; Enc =:= undefined ->
- ?hcrt("handle_http_body - identity", []),
Length =
list_to_integer(Headers#http_response_h.'content-length'),
case ((Length =< MaxBodySize) orelse (MaxBodySize =:= nolimit)) of
@@ -1131,7 +1017,6 @@ handle_http_body(Body, #state{headers = Headers,
{stop, normal, NewState}
end;
Encoding when is_list(Encoding) ->
- ?hcrt("handle_http_body - other", [{encoding, Encoding}]),
NewState = answer_request(Request,
httpc_response:error(Request,
unknown_encoding),
@@ -1152,18 +1037,10 @@ handle_response(#state{request = Request,
options = Options,
profile_name = ProfileName} = State)
when Status =/= new ->
-
- ?hcrd("handle response", [{profile, ProfileName},
- {status, Status},
- {request, Request},
- {session, Session},
- {status_line, StatusLine}]),
-
handle_cookies(Headers, Request, Options, ProfileName),
case httpc_response:result({StatusLine, Headers, Body}, Request) of
%% 100-continue
continue ->
- ?hcrd("handle response - continue", []),
%% Send request body
{_, RequestBody} = Request#request.content,
send_raw(Session, RequestBody),
@@ -1180,7 +1057,6 @@ handle_response(#state{request = Request,
%% Ignore unexpected 100-continue response and receive the
%% actual response that the server will send right away.
{ignore, Data} ->
- ?hcrd("handle response - ignore", [{data, Data}]),
Relaxed = (Request#request.settings)#http_options.relaxed,
MFA = {httpc_response, parse,
[State#state.max_header_size, Relaxed]},
@@ -1194,23 +1070,17 @@ handle_response(#state{request = Request,
%% obsolete and the manager will create a new request
%% with the same id as the current.
{redirect, NewRequest, Data} ->
- ?hcrt("handle response - redirect",
- [{new_request, NewRequest}, {data, Data}]),
ok = httpc_manager:redirect_request(NewRequest, ProfileName),
handle_queue(State#state{request = undefined}, Data);
{retry, TimeNewRequest, Data} ->
- ?hcrt("handle response - retry",
- [{time_new_request, TimeNewRequest}, {data, Data}]),
ok = httpc_manager:retry_request(TimeNewRequest, ProfileName),
handle_queue(State#state{request = undefined}, Data);
{ok, Msg, Data} ->
- ?hcrd("handle response - ok", []),
stream_remaining_body(Body, Request, StatusLine),
end_stream(StatusLine, Request),
NewState = maybe_send_answer(Request, Msg, State),
handle_queue(NewState, Data);
{stop, Msg} ->
- ?hcrd("handle response - stop", [{msg, Msg}]),
end_stream(StatusLine, Request),
NewState = maybe_send_answer(Request, Msg, State),
{stop, normal, NewState}
@@ -1245,28 +1115,19 @@ handle_pipeline(#state{status = pipeline,
profile_name = ProfileName,
options = #options{pipeline_timeout = TimeOut}} = State,
Data) ->
-
- ?hcrd("handle pipeline", [{profile, ProfileName},
- {session, Session},
- {timeout, TimeOut}]),
-
case queue:out(State#state.pipeline) of
{empty, _} ->
- ?hcrd("pipeline queue empty", []),
handle_empty_queue(Session, ProfileName, TimeOut, State);
{{value, NextRequest}, Pipeline} ->
- ?hcrd("pipeline queue non-empty", []),
case lists:member(NextRequest#request.id,
State#state.canceled) of
true ->
- ?hcrv("next request had been cancelled", []),
%% See comment for handle_cast({cancel, RequestId})
{stop, normal,
State#state{request =
NextRequest#request{from = answer_sent},
pipeline = Pipeline}};
false ->
- ?hcrv("next request", [{request, NextRequest}]),
NewSession =
Session#session{queue_length =
%% Queue + current
@@ -1283,25 +1144,16 @@ handle_keep_alive_queue(#state{status = keep_alive,
options = #options{keep_alive_timeout = TimeOut,
proxy = Proxy}} = State,
Data) ->
-
- ?hcrd("handle keep_alive", [{profile, ProfileName},
- {session, Session},
- {timeout, TimeOut}]),
-
case queue:out(State#state.keep_alive) of
{empty, _} ->
- ?hcrd("keep_alive queue empty", []),
handle_empty_queue(Session, ProfileName, TimeOut, State);
{{value, NextRequest}, KeepAlive} ->
- ?hcrd("keep_alive queue non-empty", []),
case lists:member(NextRequest#request.id,
State#state.canceled) of
true ->
- ?hcrv("next request has already been canceled", []),
handle_keep_alive_queue(
State#state{keep_alive = KeepAlive}, Data);
false ->
- ?hcrv("next request", [{request, NextRequest}]),
#request{address = Addr} = NextRequest,
Address = handle_proxy(Addr, Proxy),
case httpc_request:send(Address, Session, NextRequest) of
@@ -1314,7 +1166,6 @@ handle_keep_alive_queue(#state{status = keep_alive,
end
end
end.
-
handle_empty_queue(Session, ProfileName, TimeOut, State) ->
    %% The server may choose to terminate an idle pipeline | keep_alive session;
    %% in this case we want to receive the close message
@@ -1350,7 +1201,6 @@ init_wait_for_response_state(Request, State) ->
status_line = undefined,
headers = undefined,
body = undefined}.
-
gather_data(<<>>, Session, State) ->
activate_once(Session),
{noreply, State};
@@ -1381,10 +1231,6 @@ activate_request_timeout(
State;
_ ->
ReqId = Request#request.id,
- ?hcrt("activate request timer",
- [{request_id, ReqId},
- {time_consumed, t() - Request#request.started},
- {timeout, Timeout}]),
Msg = {timeout, ReqId},
Ref = erlang:send_after(Timeout, self(), Msg),
Request2 = Request#request{timer = Ref},
@@ -1427,10 +1273,6 @@ try_to_enable_pipeline_or_keep_alive(
status_line = {Version, _, _},
headers = Headers,
profile_name = ProfileName} = State) ->
- ?hcrd("try to enable pipeline or keep-alive",
- [{version, Version},
- {headers, Headers},
- {session, Session}]),
case is_keep_alive_enabled_server(Version, Headers) andalso
is_keep_alive_connection(Headers, Session) of
true ->
@@ -1455,7 +1297,6 @@ answer_request(#request{id = RequestId, from = From} = Request, Msg,
#state{session = Session,
timers = Timers,
profile_name = ProfileName} = State) ->
- ?hcrt("answer request", [{request, Request}, {msg, Msg}]),
httpc_response:send(From, Msg),
RequestTimers = Timers#timers.request_timers,
TimerRef =
@@ -1602,42 +1443,32 @@ socket_type(#request{scheme = http}) ->
ip_comm;
socket_type(#request{scheme = https, settings = Settings}) ->
Settings#http_options.ssl.
-%% socket_type(http) ->
-%% ip_comm;
-%% socket_type(https) ->
-%% {ssl1, []}. %% Dummy value ok for ex setopts that does not use this value
start_stream({_Version, _Code, _ReasonPhrase}, _Headers,
#request{stream = none} = Request) ->
- ?hcrt("start stream - none", []),
{ok, Request};
start_stream({_Version, Code, _ReasonPhrase}, Headers,
#request{stream = self} = Request)
when ?IS_STREAMED(Code) ->
- ?hcrt("start stream - self", [{code, Code}]),
Msg = httpc_response:stream_start(Headers, Request, ignore),
httpc_response:send(Request#request.from, Msg),
{ok, Request};
start_stream({_Version, Code, _ReasonPhrase}, Headers,
#request{stream = {self, once}} = Request)
when ?IS_STREAMED(Code) ->
- ?hcrt("start stream - self:once", [{code, Code}]),
Msg = httpc_response:stream_start(Headers, Request, self()),
httpc_response:send(Request#request.from, Msg),
{ok, Request};
start_stream({_Version, Code, _ReasonPhrase}, _Headers,
#request{stream = Filename} = Request)
when ?IS_STREAMED(Code) andalso is_list(Filename) ->
- ?hcrt("start stream", [{code, Code}, {filename, Filename}]),
case file:open(Filename, [write, raw, append, delayed_write]) of
{ok, Fd} ->
- ?hcri("start stream - file open ok", [{fd, Fd}]),
{ok, Request#request{stream = Fd}};
{error, Reason} ->
exit({stream_to_file_failed, Reason})
end;
start_stream(_StatusLine, _Headers, Request) ->
- ?hcrt("start stream - no op", []),
{ok, Request}.
stream_remaining_body(<<>>, _, _) ->
@@ -1648,16 +1479,12 @@ stream_remaining_body(Body, Request, {_, Code, _}) ->
%% Note the end stream message is handled by httpc_response and will
%% be sent by answer_request
end_stream(_, #request{stream = none}) ->
- ?hcrt("end stream - none", []),
ok;
end_stream(_, #request{stream = self}) ->
- ?hcrt("end stream - self", []),
ok;
end_stream(_, #request{stream = {self, once}}) ->
- ?hcrt("end stream - self:once", []),
ok;
end_stream({_,200,_}, #request{stream = Fd}) ->
- ?hcrt("end stream - 200", [{stream, Fd}]),
case file:close(Fd) of
ok ->
ok;
@@ -1665,15 +1492,13 @@ end_stream({_,200,_}, #request{stream = Fd}) ->
file:close(Fd)
end;
end_stream({_,206,_}, #request{stream = Fd}) ->
- ?hcrt("end stream - 206", [{stream, Fd}]),
case file:close(Fd) of
ok ->
ok;
{error, enospc} -> % Could be due to delayed_write
file:close(Fd)
end;
-end_stream(SL, R) ->
- ?hcrt("end stream", [{status_line, SL}, {request, R}]),
+end_stream(_, _) ->
ok.
@@ -1702,11 +1527,8 @@ handle_verbose(trace) ->
handle_verbose(_) ->
ok.
-
-
send_raw(#session{socket = Socket, socket_type = SocketType},
{ProcessBody, Acc}) when is_function(ProcessBody, 1) ->
- ?hcrt("send raw", [{acc, Acc}]),
send_raw(SocketType, Socket, ProcessBody, Acc);
send_raw(#session{socket = Socket, socket_type = SocketType}, Body) ->
http_transport:send(SocketType, Socket, Body).
@@ -1717,7 +1539,6 @@ send_raw(SocketType, Socket, ProcessBody, Acc) ->
ok;
{ok, Data, NewAcc} ->
DataBin = iolist_to_binary(Data),
- ?hcrd("send", [{data, DataBin}]),
case http_transport:send(SocketType, Socket, DataBin) of
ok ->
send_raw(SocketType, Socket, ProcessBody, NewAcc);
@@ -1883,16 +1704,3 @@ update_session(ProfileName, #session{id = SessionId} = Session, Pos, Value) ->
end.
-%% ---------------------------------------------------------------------
-
-call(Msg, Pid) ->
- call(Msg, Pid, infinity).
-
-call(Msg, Pid, Timeout) ->
- gen_server:call(Pid, Msg, Timeout).
-
-cast(Msg, Pid) ->
- gen_server:cast(Pid, Msg).
-
-t() ->
- http_util:timestamp().
diff --git a/lib/inets/test/httpc_SUITE.erl b/lib/inets/test/httpc_SUITE.erl
index b2d0ce7631..8aea38037d 100644
--- a/lib/inets/test/httpc_SUITE.erl
+++ b/lib/inets/test/httpc_SUITE.erl
@@ -88,7 +88,8 @@ real_requests()->
stream_through_mfa,
streaming_error,
inet_opts,
- invalid_headers
+ invalid_headers,
+ invalid_body
].
only_simulated() ->
@@ -126,7 +127,8 @@ only_simulated() ->
redirect_temporary_redirect,
port_in_host_header,
redirect_port_in_host_header,
- relaxed
+ relaxed,
+ multipart_chunks
].
misc() ->
@@ -1001,10 +1003,25 @@ invalid_headers(Config) ->
Request = {url(group_name(Config), "/dummy.html", Config), [{"cookie", undefined}]},
{error, _} = httpc:request(get, Request, [], []).
+%%-------------------------------------------------------------------------
+
+invalid_body(Config) ->
+ URL = url(group_name(Config), "/dummy.html", Config),
+ try
+ httpc:request(post, {URL, [], <<"text/plain">>, "foobar"},
+ [], []),
+ ct:fail(accepted_invalid_input)
+ catch
+ error:function_clause ->
+ ok
+ end.
+
+%%-------------------------------------------------------------------------
remote_socket_close(Config) when is_list(Config) ->
URL = url(group_name(Config), "/just_close.html", Config),
{error, socket_closed_remotely} = httpc:request(URL).
+
%%-------------------------------------------------------------------------
remote_socket_close_async(Config) when is_list(Config) ->
@@ -1111,6 +1128,13 @@ redirect_port_in_host_header(Config) when is_list(Config) ->
inets_test_lib:check_body(Body).
%%-------------------------------------------------------------------------
+multipart_chunks(Config) when is_list(Config) ->
+ Request = {url(group_name(Config), "/multipart_chunks.html", Config), []},
+ {ok, Ref} = httpc:request(get, Request, [], [{sync, false}, {stream, self}]),
+ ok = receive_stream_n(Ref, 10),
+ httpc:cancel_request(Ref).
+
+%%-------------------------------------------------------------------------
timeout_memory_leak() ->
[{doc, "Check OTP-8739"}].
timeout_memory_leak(Config) when is_list(Config) ->
@@ -1405,7 +1429,7 @@ dummy_server(Caller, SocketType, Inet, Extra) ->
end.
dummy_server_init(Caller, ip_comm, Inet, _) ->
- BaseOpts = [binary, {packet, 0}, {reuseaddr,true}, {active, false}],
+ BaseOpts = [binary, {packet, 0}, {reuseaddr,true}, {keepalive, true}, {active, false}],
{ok, ListenSocket} = gen_tcp:listen(0, [Inet | BaseOpts]),
{ok, Port} = inet:port(ListenSocket),
Caller ! {port, Port},
@@ -1981,6 +2005,16 @@ handle_uri(_,"/missing_CR.html",_,_,_,_) ->
"Content-Length:32\r\n\n" ++
"<HTML><BODY>foobar</BODY></HTML>";
+handle_uri(_,"/multipart_chunks.html",_,_,Socket,_) ->
+ Head = "HTTP/1.1 200 ok\r\n" ++
+ "Transfer-Encoding:chunked\r\n" ++
+ "Date: " ++ httpd_util:rfc1123_date() ++ "\r\n"
+ "Connection: Keep-Alive\r\n" ++
+ "Content-Type: multipart/x-mixed-replace; boundary=chunk_boundary\r\n" ++
+ "\r\n",
+ send(Socket, Head),
+ send_multipart_chunks(Socket),
+ http_chunk:encode_last();
handle_uri("HEAD",_,_,_,_,_) ->
"HTTP/1.1 200 ok\r\n" ++
"Content-Length:0\r\n\r\n";
@@ -2277,3 +2311,21 @@ otp_8739_dummy_server_main(_Parent, ListenSocket) ->
Error ->
exit(Error)
end.
+
+send_multipart_chunks(Socket) ->
+ send(Socket, http_chunk:encode("--chunk_boundary\r\n")),
+ send(Socket, http_chunk:encode("Content-Type: text/plain\r\nContent-Length: 4\r\n\r\n")),
+ send(Socket, http_chunk:encode("test\r\n")),
+ ct:sleep(500),
+ send_multipart_chunks(Socket).
+
+receive_stream_n(_, 0) ->
+ ok;
+receive_stream_n(Ref, N) ->
+ receive
+ {http, {Ref, stream_start, _}} ->
+ receive_stream_n(Ref, N);
+ {http, {Ref,stream, Data}} ->
+ ct:pal("Data: ~p", [Data]),
+ receive_stream_n(Ref, N-1)
+ end.
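For reference, with the [{sync, false}, {stream, self}] options used by multipart_chunks/1 above, the owning process receives the response as a series of messages rather than as a single return value. A minimal receive sketch under those assumptions (the URL below is a placeholder, not part of the suite):

    {ok, Ref} = httpc:request(get, {"http://localhost:8080/multipart_chunks.html", []},
                              [], [{sync, false}, {stream, self}]),
    receive {http, {Ref, stream_start, _Headers}} -> ok end,
    receive {http, {Ref, stream, _BodyPart}} -> ok end,
    %% A stream that runs to completion ends with
    %% {http, {Ref, stream_end, _TrailingHeaders}}; here the request is
    %% cancelled instead, as in the test case above.
    httpc:cancel_request(Ref).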
diff --git a/lib/kernel/doc/src/heart.xml b/lib/kernel/doc/src/heart.xml
index 59a046bf4d..5b5b71e521 100644
--- a/lib/kernel/doc/src/heart.xml
+++ b/lib/kernel/doc/src/heart.xml
@@ -37,10 +37,7 @@
the <c>heart</c> port program is to check that the Erlang runtime system
it is supervising is still running. If the port program has not
received any heartbeats within <c>HEART_BEAT_TIMEOUT</c> seconds
- (defaults to 60 seconds), the system can be rebooted. Also, if
- the system is equipped with a hardware watchdog timer and is
- running Solaris, the watchdog can be used to supervise the entire
- system.</p>
+ (defaults to 60 seconds), the system can be rebooted.</p>
<p>An Erlang runtime system to be monitored by a heart program
is to be started with command-line flag <c>-heart</c> (see
also <seealso marker="erts:erl"><c>erl(1)</c></seealso>).
@@ -51,17 +48,13 @@
or a terminated Erlang runtime system, environment variable
<c>HEART_COMMAND</c> must be set before the system is started.
If this variable is not set, a warning text is printed but
- the system does not reboot. However, if the hardware watchdog is
- used, it still triggers a reboot <c>HEART_BEAT_BOOT_DELAY</c>
- seconds later (defaults to 60 seconds).</p>
+ the system does not reboot.</p>
<p>To reboot on Windows, <c>HEART_COMMAND</c> can be
set to <c>heart -shutdown</c> (included in the Erlang delivery)
or to any other suitable program that can activate a reboot.</p>
- <p>The hardware watchdog is not started under Solaris if
- environment variable <c>HW_WD_DISABLE</c> is set.</p>
- <p>The environment variables <c>HEART_BEAT_TIMEOUT</c> and
- <c>HEART_BEAT_BOOT_DELAY</c> can be used to configure the heart
- time-outs; they can be set in the operating system shell before Erlang
+ <p>The environment variable <c>HEART_BEAT_TIMEOUT</c>
+ can be used to configure the heart
+      time-out; it can be set in the operating system shell before Erlang
is started or be specified at the command line:</p>
<pre>
% <input>erl -heart -env HEART_BEAT_TIMEOUT 30 ...</input></pre>
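As a complement to setting the environment variable before start-up, the command to run on a missed heartbeat can also be set from the running node with heart:set_cmd/1. A minimal sketch; the path is a placeholder for the node's real start script:

    %% Placeholder path - substitute the node's actual restart command.
    ok = heart:set_cmd("/usr/local/mynode/bin/start"),
    {ok, _CurrentCmd} = heart:get_cmd().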
diff --git a/lib/kernel/src/heart.erl b/lib/kernel/src/heart.erl
index eea78aabdf..8fa48d56fb 100644
--- a/lib/kernel/src/heart.erl
+++ b/lib/kernel/src/heart.erl
@@ -198,16 +198,11 @@ start_portprogram() ->
end.
get_heart_timeouts() ->
- HeartOpts = case os:getenv("HEART_BEAT_TIMEOUT") of
- false -> "";
- H when is_list(H) ->
- "-ht " ++ H
- end,
- HeartOpts ++ case os:getenv("HEART_BEAT_BOOT_DELAY") of
- false -> "";
- W when is_list(W) ->
- " -wt " ++ W
- end.
+ case os:getenv("HEART_BEAT_TIMEOUT") of
+ false -> "";
+ H when is_list(H) ->
+ "-ht " ++ H
+ end.
check_start_heart() ->
case init:get_argument(heart) of
diff --git a/lib/observer/src/crashdump_viewer.erl b/lib/observer/src/crashdump_viewer.erl
index 2f9f81104a..13e73f027d 100644
--- a/lib/observer/src/crashdump_viewer.erl
+++ b/lib/observer/src/crashdump_viewer.erl
@@ -928,7 +928,10 @@ general_info(File) ->
WholeLine -> WholeLine
end,
- GI = get_general_info(Fd,#general_info{created=Created}),
+ {Slogan,SysVsn} = get_slogan_and_sysvsn(Fd,[]),
+ GI = get_general_info(Fd,#general_info{created=Created,
+ slogan=Slogan,
+ system_vsn=SysVsn}),
{MemTot,MemMax} =
case lookup_index(?memory) of
@@ -982,12 +985,20 @@ general_info(File) ->
mem_max=MemMax,
instr_info=InstrInfo}.
+get_slogan_and_sysvsn(Fd,Acc) ->
+ case val(Fd,eof) of
+ "Slogan: " ++ SloganPart when Acc==[] ->
+ get_slogan_and_sysvsn(Fd,[SloganPart]);
+ "System version: " ++ SystemVsn ->
+ {lists:append(lists:reverse(Acc)),SystemVsn};
+ eof ->
+ {lists:append(lists:reverse(Acc)),"-1"};
+ SloganPart ->
+ get_slogan_and_sysvsn(Fd,[[$\n|SloganPart]|Acc])
+ end.
+
get_general_info(Fd,GenInfo) ->
case line_head(Fd) of
- "Slogan" ->
- get_general_info(Fd,GenInfo#general_info{slogan=val(Fd)});
- "System version" ->
- get_general_info(Fd,GenInfo#general_info{system_vsn=val(Fd)});
"Compiled" ->
get_general_info(Fd,GenInfo#general_info{compile_time=val(Fd)});
"Taints" ->
diff --git a/lib/observer/src/etop.erl b/lib/observer/src/etop.erl
index fcb900960b..925f4456bb 100644
--- a/lib/observer/src/etop.erl
+++ b/lib/observer/src/etop.erl
@@ -23,7 +23,7 @@
-export([start/0, start/1, config/2, stop/0, dump/1, help/0]).
%% Internal
-export([update/1]).
--export([loadinfo/1, meminfo/2, getopt/2]).
+-export([loadinfo/2, meminfo/2, getopt/2]).
-include("etop.hrl").
-include("etop_defs.hrl").
@@ -319,18 +319,18 @@ output(graphical) -> exit({deprecated, "Use observer instead"});
output(text) -> etop_txt.
-loadinfo(SysI) ->
+loadinfo(SysI,Prev) ->
#etop_info{n_procs = Procs,
run_queue = RQ,
now = Now,
wall_clock = WC,
runtime = RT} = SysI,
- Cpu = calculate_cpu_utilization(WC,RT),
+ Cpu = calculate_cpu_utilization(WC,RT,Prev#etop_info.runtime),
Clock = io_lib:format("~2.2.0w:~2.2.0w:~2.2.0w",
tuple_to_list(element(2,calendar:now_to_datetime(Now)))),
{Cpu,Procs,RQ,Clock}.
-calculate_cpu_utilization({_,WC},{_,RT}) ->
+calculate_cpu_utilization({_,WC},{_,RT},_) ->
%% Old version of observer_backend, using statistics(wall_clock)
%% and statistics(runtime)
case {WC,RT} of
@@ -341,15 +341,23 @@ calculate_cpu_utilization({_,WC},{_,RT}) ->
_ ->
round(100*RT/WC)
end;
-calculate_cpu_utilization(_,undefined) ->
+calculate_cpu_utilization(_,undefined,_) ->
%% First time collecting - no cpu utilization has been measured
%% since scheduler_wall_time flag is not yet on
0;
-calculate_cpu_utilization(_,RTInfo) ->
+calculate_cpu_utilization(WC,RTInfo,undefined) ->
+ %% Second time collecting - RTInfo shows scheduler_wall_time since
+ %% flag was set to true. Faking previous values by setting
+ %% everything to zero.
+ ZeroRT = [{Id,0,0} || {Id,_,_} <- RTInfo],
+ calculate_cpu_utilization(WC,RTInfo,ZeroRT);
+calculate_cpu_utilization(_,RTInfo,PrevRTInfo) ->
%% New version of observer_backend, using statistics(scheduler_wall_time)
- Sum = lists:foldl(fun({_,A,T},{AAcc,TAcc}) -> {A+AAcc,T+TAcc} end,
+ Sum = lists:foldl(fun({{_, A0, T0}, {_, A1, T1}},{AAcc,TAcc}) ->
+ {(A1 - A0)+AAcc,(T1 - T0)+TAcc}
+ end,
{0,0},
- RTInfo),
+ lists:zip(PrevRTInfo,RTInfo)),
case Sum of
{0,0} ->
0;
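The delta-based calculate_cpu_utilization/3 above follows the usual pattern for statistics(scheduler_wall_time): take two samples and divide accumulated active time by accumulated total time. A standalone sketch of the same computation (not part of etop; assumes some time elapses between the samples so the divisor is non-zero):

    erlang:system_flag(scheduler_wall_time, true),
    Ts0 = lists:sort(erlang:statistics(scheduler_wall_time)),
    timer:sleep(1000),
    Ts1 = lists:sort(erlang:statistics(scheduler_wall_time)),
    {Active, Total} =
        lists:foldl(fun({{_, A0, T0}, {_, A1, T1}}, {A, T}) ->
                            {A + (A1 - A0), T + (T1 - T0)}
                    end, {0, 0}, lists:zip(Ts0, Ts1)),
    %% Utilization over the sampled interval, in percent.
    Utilization = round(100 * Active / Total).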
diff --git a/lib/observer/src/etop_txt.erl b/lib/observer/src/etop_txt.erl
index 3b4c176478..6b8f9df24f 100644
--- a/lib/observer/src/etop_txt.erl
+++ b/lib/observer/src/etop_txt.erl
@@ -22,35 +22,35 @@
%%-compile(export_all).
-export([init/1,stop/1]).
--export([do_update/3]).
+-export([do_update/4]).
-include("etop.hrl").
-include("etop_defs.hrl").
--import(etop,[loadinfo/1,meminfo/2]).
+-import(etop,[loadinfo/2,meminfo/2]).
-define(PROCFORM,"~-15w~-20s~8w~8w~8w~8w ~-20s~n").
stop(Pid) -> Pid ! stop.
init(Config) ->
- loop(Config).
+ loop(#etop_info{},Config).
-loop(Config) ->
- Info = do_update(Config),
+loop(Prev,Config) ->
+ Info = do_update(Prev,Config),
receive
stop -> stopped;
- {dump,Fd} -> do_update(Fd,Info,Config), loop(Config);
- {config,_,Config1} -> loop(Config1)
- after Config#opts.intv -> loop(Config)
+ {dump,Fd} -> do_update(Fd,Info,Prev,Config), loop(Info,Config);
+ {config,_,Config1} -> loop(Info,Config1)
+ after Config#opts.intv -> loop(Info,Config)
end.
-do_update(Config) ->
+do_update(Prev,Config) ->
Info = etop:update(Config),
- do_update(standard_io,Info,Config).
+ do_update(standard_io,Info,Prev,Config).
-do_update(Fd,Info,Config) ->
- {Cpu,NProcs,RQ,Clock} = loadinfo(Info),
+do_update(Fd,Info,Prev,Config) ->
+ {Cpu,NProcs,RQ,Clock} = loadinfo(Info,Prev),
io:nl(Fd),
writedoubleline(Fd),
case Info#etop_info.memi of
diff --git a/lib/observer/src/observer_lib.erl b/lib/observer/src/observer_lib.erl
index 59a2f9f205..1eaba31a3a 100644
--- a/lib/observer/src/observer_lib.erl
+++ b/lib/observer/src/observer_lib.erl
@@ -461,14 +461,16 @@ create_box(Parent, Data) ->
link_entry(Panel,Value);
_ ->
Value = to_str(Value0),
- case length(Value) > 100 of
- true ->
- Shown = lists:sublist(Value, 80),
+ case string:sub_word(lists:sublist(Value, 80),1,$\n) of
+ Value ->
+ %% Short string, no newlines - show all
+ wxStaticText:new(Panel, ?wxID_ANY, Value);
+ Shown ->
+ %% Long or with newlines,
+ %% use tooltip to show all
TCtrl = wxStaticText:new(Panel, ?wxID_ANY, [Shown,"..."]),
wxWindow:setToolTip(TCtrl,wxToolTip:new(Value)),
- TCtrl;
- false ->
- wxStaticText:new(Panel, ?wxID_ANY, Value)
+ TCtrl
end
end,
     wxSizer:add(Line, 10, 0), % space of size 10 horizontally
diff --git a/lib/observer/src/observer_pro_wx.erl b/lib/observer/src/observer_pro_wx.erl
index ee6829b847..f07b9e295a 100644
--- a/lib/observer/src/observer_pro_wx.erl
+++ b/lib/observer/src/observer_pro_wx.erl
@@ -511,7 +511,13 @@ table_holder(#holder{info=Info, attrs=Attrs,
table_holder(S0);
{dump, Fd} ->
EtopInfo = (S0#holder.etop)#etop_info{procinfo=array:to_list(Info)},
- etop_txt:do_update(Fd, EtopInfo, #opts{node=Node}),
+ %% The empty #etop_info{} below is a dummy previous info
+ %% value. It is used by etop to calculate the scheduler
+ %% utilization since last update. When dumping to file,
+ %% there is no previous measurement to use, so we just add
+ %% a dummy here, and the value shown will be since the
+ %% tool was started.
+ etop_txt:do_update(Fd, EtopInfo, #etop_info{}, #opts{node=Node}),
file:close(Fd),
table_holder(S0);
stop ->
diff --git a/lib/runtime_tools/src/observer_backend.erl b/lib/runtime_tools/src/observer_backend.erl
index e943fb4a3e..b27bc63d15 100644
--- a/lib/runtime_tools/src/observer_backend.erl
+++ b/lib/runtime_tools/src/observer_backend.erl
@@ -314,13 +314,12 @@ etop_collect(Collector) ->
case SchedulerWallTime of
undefined ->
- spawn(fun() -> flag_holder_proc(Collector) end),
+ erlang:system_flag(scheduler_wall_time,true),
+ spawn(fun() -> flag_holder_proc(Collector) end),
ok;
_ ->
ok
- end,
-
- erlang:system_flag(scheduler_wall_time,true).
+ end.
flag_holder_proc(Collector) ->
Ref = erlang:monitor(process,Collector),
diff --git a/lib/sasl/test/release_handler_SUITE_data/start b/lib/sasl/test/release_handler_SUITE_data/start
index 87275045b1..eab2b77aed 100755
--- a/lib/sasl/test/release_handler_SUITE_data/start
+++ b/lib/sasl/test/release_handler_SUITE_data/start
@@ -21,8 +21,7 @@ then
fi
HEART_COMMAND=$ROOTDIR/bin/start
-HW_WD_DISABLE=true
-export HW_WD_DISABLE HEART_COMMAND
+export HEART_COMMAND
START_ERL_DATA=${1:-$RELDIR/start_erl.data}
diff --git a/lib/sasl/test/release_handler_SUITE_data/start_client b/lib/sasl/test/release_handler_SUITE_data/start_client
index 5ea94d6f7c..05d744f06e 100755
--- a/lib/sasl/test/release_handler_SUITE_data/start_client
+++ b/lib/sasl/test/release_handler_SUITE_data/start_client
@@ -24,8 +24,7 @@ RELDIR=$CLIENTDIR/releases
 # Note that this script is modified and copied to $CLIENTDIR/bin/start
# in release_handler_SUITE:copy_client - therefore HEART_COMMAND is as follows:
HEART_COMMAND=$CLIENTDIR/bin/start
-HW_WD_DISABLE=true
-export HW_WD_DISABLE HEART_COMMAND
+export HEART_COMMAND
START_ERL_DATA=${1:-$RELDIR/start_erl.data}
diff --git a/lib/stdlib/test/shell_SUITE.erl b/lib/stdlib/test/shell_SUITE.erl
index c409a6949b..80585ca359 100644
--- a/lib/stdlib/test/shell_SUITE.erl
+++ b/lib/stdlib/test/shell_SUITE.erl
@@ -2325,7 +2325,7 @@ otp_6554(Config) when is_list(Config) ->
"[unproper | list]).">>),
%% Cheating:
"exception error: no function clause matching "
- "erl_eval:do_apply(4)" ++ _ =
+ "shell:apply_fun(4)" ++ _ =
comm_err(<<"erlang:error(function_clause, [4]).">>),
"exception error: no function clause matching "
"lists:reverse(" ++ _ =
diff --git a/system/doc/efficiency_guide/advanced.xml b/system/doc/efficiency_guide/advanced.xml
index eee2648f34..e1760d0ded 100644
--- a/system/doc/efficiency_guide/advanced.xml
+++ b/system/doc/efficiency_guide/advanced.xml
@@ -87,15 +87,15 @@
</row>
<row>
<cell>Small Map</cell>
- <cell>4 words + 2 words per entry (key and value) + the size of each key and value pair.</cell>
+ <cell>5 words + the size of all keys and values.</cell>
</row>
<row>
- <cell>Large Map</cell>
+ <cell>Large Map (> 32 keys)</cell>
<cell>
- At least, 2 words + 2 x <c>N</c> words + 2 x log16(<c>N</c>) words +
- the size of each key and value pair, where <c>N</c> is the number of pairs in the Map.
- A large Map is represented as a tree internally where each node in the tree is a
- "sparse tuple" of arity 16.
+ <c>N</c> x <c>F</c> words + the size of all keys and values.<br></br>
+ <c>N</c> is the number of keys in the Map.<br></br>
+ <c>F</c> is a sparsity factor that can vary between 1.6 and 1.8
+ due to the probabilistic nature of the internal HAMT data structure.
</cell>
</row>
<row>
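As a rough worked example of the two rows above (illustrative only, applying the formulas as stated): a small map such as #{a => 1, b => 2} needs about 5 + 4 = 9 words, since each immediate key and value contributes one word, while a large map with 1000 keys needs roughly 1000 x 1.7, about 1700 words, for the structure itself plus the size of the keys and values. Sizes of live terms can be checked with erts_debug:size/1:

    %% Returns the size of the term in words; for the small map above the
    %% result is expected to fall in the region given by the
    %% 5-words-plus-payload rule.
    SmallMapWords = erts_debug:size(#{a => 1, b => 2}).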
diff --git a/system/doc/tutorial/c_portdriver.xmlsrc b/system/doc/tutorial/c_portdriver.xmlsrc
index 933e2395a3..da680642b6 100644
--- a/system/doc/tutorial/c_portdriver.xmlsrc
+++ b/system/doc/tutorial/c_portdriver.xmlsrc
@@ -161,8 +161,8 @@ decode([Int]) -> Int.</pre>
<title>Running the Example</title>
<p><em>Step 1.</em> Compile the C code:</p>
<pre>
-unix> <input>gcc -o exampledrv -fpic -shared complex.c port_driver.c</input>
-windows> <input>cl -LD -MD -Fe exampledrv.dll complex.c port_driver.c</input></pre>
+unix> <input>gcc -o example_drv.so -fpic -shared complex.c port_driver.c</input>
+windows> <input>cl -LD -MD -Fe example_drv.dll complex.c port_driver.c</input></pre>
<p><em>Step 2.</em> Start Erlang and compile the Erlang code:</p>
<pre>
> <input>erl</input>