Diffstat (limited to 'erts/emulator/beam/beam_bif_load.c')
-rw-r--r--  erts/emulator/beam/beam_bif_load.c  1534
1 file changed, 1265 insertions, 269 deletions
diff --git a/erts/emulator/beam/beam_bif_load.c b/erts/emulator/beam/beam_bif_load.c
index 0e192b1ebd..b664532c1c 100644
--- a/erts/emulator/beam/beam_bif_load.c
+++ b/erts/emulator/beam/beam_bif_load.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1999-2013. All Rights Reserved.
+ * Copyright Ericsson AB 1999-2017. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -37,13 +37,93 @@
#include "erl_bits.h"
#include "erl_thr_progress.h"
+#ifdef HIPE
+# include "hipe_stack.h"
+#endif
+
+static struct {
+ Eterm module;
+ erts_smp_mtx_t mtx;
+ Export *pending_purge_lambda;
+ Eterm *sprocs;
+ Eterm def_sprocs[10];
+ Uint sp_size;
+ Uint sp_ix;
+ ErlFunEntry **funs;
+ ErlFunEntry *def_funs[10];
+ Uint fe_size;
+ Uint fe_ix;
+ struct erl_module_instance saved_old;
+} purge_state;
+
+Process *erts_code_purger = NULL;
+
+#ifdef ERTS_DIRTY_SCHEDULERS
+Process *erts_dirty_process_code_checker;
+#endif
+erts_smp_atomic_t erts_copy_literal_area__;
+#define ERTS_SET_COPY_LITERAL_AREA(LA) \
+ erts_smp_atomic_set_nob(&erts_copy_literal_area__, \
+ (erts_aint_t) (LA))
+#ifdef ERTS_NEW_PURGE_STRATEGY
+Process *erts_literal_area_collector = NULL;
+
+typedef struct ErtsLiteralAreaRef_ ErtsLiteralAreaRef;
+struct ErtsLiteralAreaRef_ {
+ ErtsLiteralAreaRef *next;
+ ErtsLiteralArea *literal_area;
+};
+
+struct {
+ erts_smp_mtx_t mtx;
+ ErtsLiteralAreaRef *first;
+ ErtsLiteralAreaRef *last;
+} release_literal_areas;
+#endif
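
release_literal_areas above is a plain mutex-protected singly linked FIFO: the purge completion path further down appends at last, and the literal-area collector pops from first in erts_internal_release_literal_area_switch_0. A minimal standalone sketch of that queue discipline, using pthreads and hypothetical names in place of the ERTS primitives:

#include <pthread.h>
#include <stdlib.h>

/* Hypothetical stand-ins for ErtsLiteralAreaRef / release_literal_areas. */
typedef struct node_ { struct node_ *next; void *payload; } node;

static struct {
    pthread_mutex_t mtx;
    node *first;
    node *last;
} queue = { PTHREAD_MUTEX_INITIALIZER, NULL, NULL };

/* Producer side: append one element at the tail (cf. am_complete below). */
static void enqueue(void *payload)
{
    node *n = malloc(sizeof(node));
    n->payload = payload;
    n->next = NULL;
    pthread_mutex_lock(&queue.mtx);
    if (queue.last)
        queue.last->next = n;
    else
        queue.first = n;
    queue.last = n;
    pthread_mutex_unlock(&queue.mtx);
}

/* Consumer side: pop from the head, or NULL if the queue is empty
 * (cf. erts_internal_release_literal_area_switch_0 below). */
static void *dequeue(void)
{
    void *payload = NULL;
    node *n;
    pthread_mutex_lock(&queue.mtx);
    n = queue.first;
    if (n) {
        queue.first = n->next;
        if (!queue.first)
            queue.last = NULL;
        payload = n->payload;
    }
    pthread_mutex_unlock(&queue.mtx);
    if (n)
        free(n);
    return payload;
}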
+
static void set_default_trace_pattern(Eterm module);
-static Eterm check_process_code(Process* rp, Module* modp, int allow_gc, int *redsp);
+static Eterm check_process_code(Process* rp, Module* modp, Uint flags, int *redsp, int fcalls);
static void delete_code(Module* modp);
-static void decrement_refc(BeamInstr* code);
static int any_heap_ref_ptrs(Eterm* start, Eterm* end, char* mod_start, Uint mod_size);
static int any_heap_refs(Eterm* start, Eterm* end, char* mod_start, Uint mod_size);
+static void
+init_purge_state(void)
+{
+ purge_state.module = THE_NON_VALUE;
+
+ erts_smp_mtx_init(&purge_state.mtx, "purge_state");
+
+ purge_state.pending_purge_lambda =
+ erts_export_put(am_erts_code_purger, am_pending_purge_lambda, 3);
+
+ purge_state.sprocs = &purge_state.def_sprocs[0];
+ purge_state.sp_size = sizeof(purge_state.def_sprocs);
+ purge_state.sp_size /= sizeof(purge_state.def_sprocs[0]);
+ purge_state.sp_ix = 0;
+
+ purge_state.funs = &purge_state.def_funs[0];
+ purge_state.fe_size = sizeof(purge_state.def_funs);
+ purge_state.fe_size /= sizeof(purge_state.def_funs[0]);
+ purge_state.fe_ix = 0;
+
+ purge_state.saved_old.code_hdr = 0;
+}
+
+void
+erts_beam_bif_load_init(void)
+{
+#ifdef ERTS_NEW_PURGE_STRATEGY
+ erts_smp_mtx_init(&release_literal_areas.mtx, "release_literal_areas");
+ release_literal_areas.first = NULL;
+ release_literal_areas.last = NULL;
+#endif
+ erts_smp_atomic_init_nob(&erts_copy_literal_area__,
+ (erts_aint_t) NULL);
+
+ init_purge_state();
+}
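
purge_state keeps small statically sized arrays (def_sprocs, def_funs) and only switches to heap allocations when they overflow; erts_purge_state_add_fun() further down grows the buffer in chunks of 100, and finalize_purge_operation() resets it back to the defaults. A minimal sketch of that small-buffer-with-heap-fallback pattern, with hypothetical names:

#include <string.h>
#include <stdlib.h>

#define DEF_CAP 10
#define GROW_BY 100

/* Hypothetical container mirroring the sprocs/funs bookkeeping above. */
typedef struct {
    void **items;
    void  *def_items[DEF_CAP];
    size_t cap;
    size_t ix;
} small_vec;

static void small_vec_init(small_vec *v)
{
    v->items = &v->def_items[0];
    v->cap = DEF_CAP;
    v->ix = 0;
}

static void small_vec_push(small_vec *v, void *item)
{
    if (v->ix >= v->cap) {
        void **items;
        v->cap += GROW_BY;
        items = malloc(sizeof(void *) * v->cap);
        memcpy(items, v->items, v->ix * sizeof(void *));
        if (v->items != &v->def_items[0])
            free(v->items);
        v->items = items;
    }
    v->items[v->ix++] = item;
}

/* Reset to the inline buffer once the operation completes. */
static void small_vec_reset(small_vec *v)
{
    if (v->items != &v->def_items[0])
        free(v->items);
    small_vec_init(v);
}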
+
BIF_RETTYPE code_is_module_native_1(BIF_ALIST_1)
{
Module* modp;
@@ -58,8 +138,8 @@ BIF_RETTYPE code_is_module_native_1(BIF_ALIST_1)
return am_undefined;
}
erts_rlock_old_code(code_ix);
- res = (erts_is_module_native(modp->curr.code) ||
- erts_is_module_native(modp->old.code)) ?
+ res = (erts_is_module_native(modp->curr.code_hdr) ||
+ erts_is_module_native(modp->old.code_hdr)) ?
am_true : am_false;
erts_runlock_old_code(code_ix);
return res;
@@ -81,12 +161,12 @@ BIF_RETTYPE code_make_stub_module_3(BIF_ALIST_3)
modp = erts_get_module(BIF_ARG_1, erts_active_code_ix());
if (modp && modp->curr.num_breakpoints > 0) {
- ASSERT(modp->curr.code != NULL);
+ ASSERT(modp->curr.code_hdr != NULL);
erts_clear_module_break(modp);
ASSERT(modp->curr.num_breakpoints == 0);
}
- erts_start_staging_code_ix();
+ erts_start_staging_code_ix(1);
res = erts_make_stub_module(BIF_P, BIF_ARG_1, BIF_ARG_2, BIF_ARG_3);
@@ -139,6 +219,25 @@ prepare_loading_2(BIF_ALIST_2)
BIF_RET(res);
}
+BIF_RETTYPE
+has_prepared_code_on_load_1(BIF_ALIST_1)
+{
+ Eterm res;
+ ProcBin* pb;
+
+ if (!ERTS_TERM_IS_MAGIC_BINARY(BIF_ARG_1)) {
+ error:
+ BIF_ERROR(BIF_P, BADARG);
+ }
+
+ pb = (ProcBin*) binary_val(BIF_ARG_1);
+ res = erts_has_code_on_load(pb->val);
+ if (res == NIL) {
+ goto error;
+ }
+ BIF_RET(res);
+}
+
struct m {
Binary* code;
Eterm module;
@@ -154,7 +253,7 @@ static struct /* Protected by code_write_permission */
{
Process* stager;
ErtsThrPrgrLaterOp lop;
-}commiter_state;
+} committer_state;
#endif
static Eterm
@@ -163,14 +262,13 @@ exception_list(Process* p, Eterm tag, struct m* mp, Sint exceptions)
Eterm* hp = HAlloc(p, 3 + 2*exceptions);
Eterm res = NIL;
- mp += exceptions - 1;
while (exceptions > 0) {
if (mp->exception) {
res = CONS(hp, mp->module, res);
hp += 2;
exceptions--;
}
- mp--;
+ mp++;
}
return TUPLE2(hp, tag, res);
}
@@ -179,8 +277,8 @@ exception_list(Process* p, Eterm tag, struct m* mp, Sint exceptions)
BIF_RETTYPE
finish_loading_1(BIF_ALIST_1)
{
- int i;
- int n;
+ Sint i;
+ Sint n;
struct m* p = NULL;
Uint exceptions;
Eterm res;
@@ -201,9 +299,13 @@ finish_loading_1(BIF_ALIST_1)
*/
n = erts_list_length(BIF_ARG_1);
- if (n == -1) {
- ERTS_BIF_PREP_ERROR(res, BIF_P, BADARG);
- goto done;
+ if (n < 0) {
+ badarg:
+ if (p) {
+ erts_free(ERTS_ALC_T_LOADER_TMP, p);
+ }
+ erts_release_code_write_permission();
+ BIF_ERROR(BIF_P, BADARG);
}
p = erts_alloc(ERTS_ALC_T_LOADER_TMP, n*sizeof(struct m));
@@ -218,29 +320,32 @@ finish_loading_1(BIF_ALIST_1)
ProcBin* pb;
if (!ERTS_TERM_IS_MAGIC_BINARY(term)) {
- ERTS_BIF_PREP_ERROR(res, BIF_P, BADARG);
- goto done;
+ goto badarg;
}
pb = (ProcBin*) binary_val(term);
p[i].code = pb->val;
p[i].module = erts_module_for_prepared_code(p[i].code);
if (p[i].module == NIL) {
- ERTS_BIF_PREP_ERROR(res, BIF_P, BADARG);
- goto done;
+ goto badarg;
}
BIF_ARG_1 = CDR(cons);
}
/*
* Since we cannot handle atomic loading of a group of modules
- * if one or more of them uses on_load, we will only allow one
- * element in the list. This limitation is intended to be
- * lifted in the future.
+ * if one or more of them uses on_load, we will only allow
+ * more than one element in the list if none of the modules
+ * have an on_load function.
*/
if (n > 1) {
- ERTS_BIF_PREP_ERROR(res, BIF_P, SYSTEM_LIMIT);
- goto done;
+ for (i = 0; i < n; i++) {
+ if (erts_has_code_on_load(p[i].code) == am_true) {
+ erts_free(ERTS_ALC_T_LOADER_TMP, p);
+ erts_release_code_write_permission();
+ BIF_ERROR(BIF_P, SYSTEM_LIMIT);
+ }
+ }
}
/*
@@ -252,11 +357,27 @@ finish_loading_1(BIF_ALIST_1)
*/
res = am_ok;
- erts_start_staging_code_ix();
+ erts_start_staging_code_ix(n);
for (i = 0; i < n; i++) {
p[i].modp = erts_put_module(p[i].module);
+ p[i].modp->seen = 0;
}
+
+ exceptions = 0;
+ for (i = 0; i < n; i++) {
+ p[i].exception = 0;
+ if (p[i].modp->seen) {
+ p[i].exception = 1;
+ exceptions++;
+ }
+ p[i].modp->seen = 1;
+ }
+ if (exceptions) {
+ res = exception_list(BIF_P, am_duplicated, p, exceptions);
+ goto done;
+ }
+
for (i = 0; i < n; i++) {
if (p[i].modp->curr.num_breakpoints > 0 ||
p[i].modp->curr.num_traced_exports > 0 ||
@@ -281,7 +402,7 @@ finish_loading_1(BIF_ALIST_1)
exceptions = 0;
for (i = 0; i < n; i++) {
p[i].exception = 0;
- if (p[i].modp->curr.code && p[i].modp->old.code) {
+ if (p[i].modp->curr.code_hdr && p[i].modp->old.code_hdr) {
p[i].exception = 1;
exceptions++;
}
@@ -367,9 +488,9 @@ staging_epilogue(Process* c_p, int commit, Eterm res, int is_blocking,
* schedulers to read active code_ix in a safe way while executing
* without any memory barriers at all.
*/
- ASSERT(commiter_state.stager == NULL);
- commiter_state.stager = c_p;
- erts_schedule_thr_prgr_later_op(smp_code_ix_commiter, NULL, &commiter_state.lop);
+ ASSERT(committer_state.stager == NULL);
+ committer_state.stager = c_p;
+ erts_schedule_thr_prgr_later_op(smp_code_ix_commiter, NULL, &committer_state.lop);
erts_proc_inc_refc(c_p);
erts_suspend(c_p, ERTS_PROC_LOCK_MAIN, NULL);
/*
@@ -385,11 +506,11 @@ staging_epilogue(Process* c_p, int commit, Eterm res, int is_blocking,
#ifdef ERTS_SMP
static void smp_code_ix_commiter(void* null)
{
- Process* p = commiter_state.stager;
+ Process* p = committer_state.stager;
erts_commit_staging_code_ix();
#ifdef DEBUG
- commiter_state.stager = NULL;
+ committer_state.stager = NULL;
#endif
erts_release_code_write_permission();
erts_smp_proc_lock(p, ERTS_PROC_LOCK_STATUS);
@@ -417,7 +538,7 @@ check_old_code_1(BIF_ALIST_1)
modp = erts_get_module(BIF_ARG_1, code_ix);
if (modp != NULL) {
erts_rlock_old_code(code_ix);
- if (modp->old.code != NULL) {
+ if (modp->old.code_hdr) {
res = am_true;
}
erts_runlock_old_code(code_ix);
@@ -426,7 +547,7 @@ check_old_code_1(BIF_ALIST_1)
}
Eterm
-erts_check_process_code(Process *c_p, Eterm module, int allow_gc, int *redsp)
+erts_check_process_code(Process *c_p, Eterm module, Uint flags, int *redsp, int fcalls)
{
Module* modp;
Eterm res;
@@ -441,7 +562,8 @@ erts_check_process_code(Process *c_p, Eterm module, int allow_gc, int *redsp)
if (!modp)
return am_false;
erts_rlock_old_code(code_ix);
- res = modp->old.code ? check_process_code(c_p, modp, allow_gc, redsp) : am_false;
+ res = (!modp->old.code_hdr ? am_false :
+ check_process_code(c_p, modp, flags, redsp, fcalls));
erts_runlock_old_code(code_ix);
return res;
@@ -450,49 +572,21 @@ erts_check_process_code(Process *c_p, Eterm module, int allow_gc, int *redsp)
BIF_RETTYPE erts_internal_check_process_code_2(BIF_ALIST_2)
{
int reds = 0;
+ Uint flags;
Eterm res;
- Eterm olist = BIF_ARG_2;
- int allow_gc = 1;
if (is_not_atom(BIF_ARG_1))
goto badarg;
- while (is_list(olist)) {
- Eterm *lp = list_val(olist);
- Eterm opt = CAR(lp);
- if (is_tuple(opt)) {
- Eterm* tp = tuple_val(opt);
- switch (arityval(tp[0])) {
- case 2:
- switch (tp[1]) {
- case am_allow_gc:
- switch (tp[2]) {
- case am_false:
- allow_gc = 0;
- break;
- case am_true:
- allow_gc = 1;
- break;
- default:
- goto badarg;
- }
- break;
- default:
- goto badarg;
- }
- break;
- default:
- goto badarg;
- }
- }
- else
- goto badarg;
- olist = CDR(lp);
+ if (is_not_small(BIF_ARG_2))
+ goto badarg;
+
+ flags = unsigned_val(BIF_ARG_2);
+ if (flags & ~ERTS_CPC_ALL) {
+ goto badarg;
}
- if (is_not_nil(olist))
- goto badarg;
- res = erts_check_process_code(BIF_P, BIF_ARG_1, allow_gc, &reds);
+ res = erts_check_process_code(BIF_P, BIF_ARG_1, flags, &reds, BIF_P->fcalls);
ASSERT(is_value(res));
@@ -502,6 +596,43 @@ badarg:
BIF_ERROR(BIF_P, BADARG);
}
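
The option list previously parsed in erts_internal_check_process_code_2 above is replaced by a small-integer bit set: the caller encodes its options as flags and the BIF rejects anything outside ERTS_CPC_ALL (ERTS_CPC_ALLOW_GC is the only flag visible in this diff; the real definitions live elsewhere in ERTS). A minimal sketch of the same mask validation, with hypothetical flag names:

#include <stdbool.h>

/* Hypothetical flag set standing in for the ERTS_CPC_* flags. */
#define CPC_ALLOW_GC (1 << 0)
#define CPC_ALL      (CPC_ALLOW_GC)

/* Accept a flags word only if every set bit is a known option. */
static bool valid_cpc_flags(unsigned flags)
{
    return (flags & ~CPC_ALL) == 0;
}

/* valid_cpc_flags(CPC_ALLOW_GC) -> true; valid_cpc_flags(1u << 7) -> false */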
+BIF_RETTYPE erts_internal_check_dirty_process_code_2(BIF_ALIST_2)
+{
+#if !defined(ERTS_DIRTY_SCHEDULERS)
+ BIF_ERROR(BIF_P, EXC_NOTSUP);
+#else
+ Process *rp;
+ int reds = 0;
+ Eterm res;
+
+ if (BIF_P != erts_dirty_process_code_checker)
+ BIF_ERROR(BIF_P, EXC_NOTSUP);
+
+ if (is_not_internal_pid(BIF_ARG_1))
+ BIF_ERROR(BIF_P, BADARG);
+
+ if (is_not_atom(BIF_ARG_2))
+ BIF_ERROR(BIF_P, BADARG);
+
+ rp = erts_pid2proc_not_running(BIF_P, ERTS_PROC_LOCK_MAIN,
+ BIF_ARG_1, ERTS_PROC_LOCK_MAIN);
+ if (rp == ERTS_PROC_LOCK_BUSY)
+ ERTS_BIF_YIELD2(bif_export[BIF_erts_internal_check_dirty_process_code_2],
+ BIF_P, BIF_ARG_1, BIF_ARG_2);
+ if (!rp)
+ BIF_RET(am_false);
+
+ res = erts_check_process_code(rp, BIF_ARG_2, 0, &reds, BIF_P->fcalls);
+
+ if (BIF_P != rp)
+ erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_MAIN);
+
+ ASSERT(is_value(res));
+
+ BIF_RET2(res, reds);
+#endif
+}
+
BIF_RETTYPE delete_module_1(BIF_ALIST_1)
{
ErtsCodeIndex code_ix;
@@ -519,15 +650,15 @@ BIF_RETTYPE delete_module_1(BIF_ALIST_1)
}
{
- erts_start_staging_code_ix();
+ erts_start_staging_code_ix(0);
code_ix = erts_staging_code_ix();
modp = erts_get_module(BIF_ARG_1, code_ix);
if (!modp) {
res = am_undefined;
}
- else if (modp->old.code != 0) {
+ else if (modp->old.code_hdr) {
erts_dsprintf_buf_t *dsbufp = erts_create_logger_dsbuf();
- erts_dsprintf(dsbufp, "Module %T must be purged before loading\n",
+ erts_dsprintf(dsbufp, "Module %T must be purged before deleting\n",
BIF_ARG_1);
erts_send_error_to_logger(BIF_P->group_leader, dsbufp);
ERTS_BIF_PREP_ERROR(res, BIF_P, BADARG);
@@ -563,8 +694,8 @@ BIF_RETTYPE module_loaded_1(BIF_ALIST_1)
}
code_ix = erts_active_code_ix();
if ((modp = erts_get_module(BIF_ARG_1, code_ix)) != NULL) {
- if (modp->curr.code != NULL
- && modp->curr.code[MI_ON_LOAD_FUNCTION_PTR] == 0) {
+ if (modp->curr.code_hdr
+ && modp->curr.code_hdr->on_load_function_ptr == NULL) {
res = am_true;
}
}
@@ -611,10 +742,13 @@ BIF_RETTYPE call_on_load_function_1(BIF_ALIST_1)
{
Module* modp = erts_get_module(BIF_ARG_1, erts_active_code_ix());
- if (modp && modp->curr.code) {
- BIF_TRAP_CODE_PTR_0(BIF_P, modp->curr.code[MI_ON_LOAD_FUNCTION_PTR]);
+ if (!modp || !modp->on_load) {
+ BIF_ERROR(BIF_P, BADARG);
}
- else {
+ if (modp->on_load->code_hdr) {
+ BIF_TRAP_CODE_PTR_0(BIF_P,
+ modp->on_load->code_hdr->on_load_function_ptr);
+ } else {
BIF_ERROR(BIF_P, BADARG);
}
}
@@ -623,7 +757,6 @@ BIF_RETTYPE finish_after_on_load_2(BIF_ALIST_2)
{
ErtsCodeIndex code_ix;
Module* modp;
- Eterm on_load;
if (!erts_try_seize_code_write_permission(BIF_P)) {
ERTS_BIF_YIELD2(bif_export[BIF_finish_after_on_load_2],
@@ -638,14 +771,14 @@ BIF_RETTYPE finish_after_on_load_2(BIF_ALIST_2)
code_ix = erts_active_code_ix();
modp = erts_get_module(BIF_ARG_1, code_ix);
- if (!modp || modp->curr.code == 0) {
+ if (!modp || !modp->on_load || !modp->on_load->code_hdr) {
error:
erts_smp_thr_progress_unblock();
erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
erts_release_code_write_permission();
BIF_ERROR(BIF_P, BADARG);
}
- if ((on_load = modp->curr.code[MI_ON_LOAD_FUNCTION_PTR]) == 0) {
+ if (modp->on_load->code_hdr->on_load_function_ptr == NULL) {
goto error;
}
if (BIF_ARG_2 != am_false && BIF_ARG_2 != am_true) {
@@ -653,42 +786,61 @@ BIF_RETTYPE finish_after_on_load_2(BIF_ALIST_2)
}
if (BIF_ARG_2 == am_true) {
- int i;
+ int i, num_exps;
+
+ /*
+ * Make the code with the on_load function current.
+ */
+
+ if (modp->curr.code_hdr) {
+ modp->old = modp->curr;
+ }
+ modp->curr = *modp->on_load;
+ erts_free(ERTS_ALC_T_PREPARED_CODE, modp->on_load);
+ modp->on_load = 0;
/*
* The on_load function succeded. Fix up export entries.
*/
- for (i = 0; i < export_list_size(code_ix); i++) {
+ num_exps = export_list_size(code_ix);
+ for (i = 0; i < num_exps; i++) {
Export *ep = export_list(i,code_ix);
- if (ep != NULL &&
- ep->code[0] == BIF_ARG_1 &&
- ep->code[4] != 0) {
+ if (ep == NULL || ep->code[0] != BIF_ARG_1) {
+ continue;
+ }
+ if (ep->code[4] != 0) {
ep->addressv[code_ix] = (void *) ep->code[4];
ep->code[4] = 0;
+ } else {
+ if (ep->addressv[code_ix] == ep->code+3 &&
+ ep->code[3] == (BeamInstr) em_apply_bif) {
+ continue;
+ }
+ ep->addressv[code_ix] = ep->code+3;
+ ep->code[3] = (BeamInstr) em_call_error_handler;
}
}
- modp->curr.code[MI_ON_LOAD_FUNCTION_PTR] = 0;
+ modp->curr.code_hdr->on_load_function_ptr = NULL;
set_default_trace_pattern(BIF_ARG_1);
} else if (BIF_ARG_2 == am_false) {
- BeamInstr* code;
- BeamInstr* end;
+ int i, num_exps;
/*
- * The on_load function failed. Remove the loaded code.
- * This is an combination of delete and purge. We purge
- * the current code; the old code is not touched.
+ * The on_load function failed. Remove references to the
+ * code that is about to be purged from the export entries.
*/
- erts_total_code_size -= modp->curr.code_length;
- code = modp->curr.code;
- end = (BeamInstr *)((char *)code + modp->curr.code_length);
- erts_cleanup_funs_on_purge(code, end);
- beam_catches_delmod(modp->curr.catches, code, modp->curr.code_length,
- erts_active_code_ix());
- erts_free(ERTS_ALC_T_CODE, (void *) code);
- modp->curr.code = NULL;
- modp->curr.code_length = 0;
- modp->curr.catches = BEAM_CATCHES_NIL;
- erts_remove_from_ranges(code);
+
+ num_exps = export_list_size(code_ix);
+ for (i = 0; i < num_exps; i++) {
+ Export *ep = export_list(i,code_ix);
+ if (ep == NULL || ep->code[0] != BIF_ARG_1) {
+ continue;
+ }
+ if (ep->code[3] == (BeamInstr) em_apply_bif) {
+ continue;
+ }
+ ep->code[4] = 0;
+ }
}
erts_smp_thr_progress_unblock();
erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
@@ -703,13 +855,13 @@ set_default_trace_pattern(Eterm module)
Binary *match_spec;
Binary *meta_match_spec;
struct trace_pattern_flags trace_pattern_flags;
- Eterm meta_tracer_pid;
+ ErtsTracer meta_tracer;
erts_get_default_trace_pattern(&trace_pattern_is_on,
&match_spec,
&meta_match_spec,
&trace_pattern_flags,
- &meta_tracer_pid);
+ &meta_tracer);
if (trace_pattern_is_on) {
Eterm mfa[1];
mfa[0] = module;
@@ -717,43 +869,334 @@ set_default_trace_pattern(Eterm module)
match_spec,
meta_match_spec,
1, trace_pattern_flags,
- meta_tracer_pid, 1);
+ meta_tracer, 1);
+ }
+}
+
+#ifndef ERTS_NEW_PURGE_STRATEGY
+
+static ERTS_INLINE int
+check_mod_funs(Process *p, ErlOffHeap *off_heap, char *area, size_t area_size)
+{
+ struct erl_off_heap_header* oh;
+ for (oh = off_heap->first; oh; oh = oh->next) {
+ if (thing_subtag(oh->thing_word) == FUN_SUBTAG) {
+ ErlFunThing* funp = (ErlFunThing*) oh;
+ if (ErtsInArea(funp->fe->address, area, area_size))
+ return !0;
+ }
+ }
+ return 0;
+}
+
+#endif
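
check_mod_funs() relies on ErtsInArea() for its range test; it is assumed here to be the usual single-comparison area check, equivalent to the in_area macro that this commit removes further down:

#include <stdint.h>

/* Equivalent of the removed in_area()/INSIDE() tests: one unsigned
 * comparison covers both ptr < start (the subtraction wraps to a huge
 * value) and ptr >= start + nbytes. */
#define IN_AREA(ptr, start, nbytes) \
    ((uintptr_t)((char *)(ptr) - (char *)(start)) < (uintptr_t)(nbytes))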
+
+static Uint hfrag_literal_size(Eterm* start, Eterm* end,
+ char* lit_start, Uint lit_size);
+static void hfrag_literal_copy(Eterm **hpp, ErlOffHeap *ohp,
+ Eterm *start, Eterm *end,
+ char *lit_start, Uint lit_size);
+
+#ifdef ERTS_NEW_PURGE_STRATEGY
+
+Eterm
+erts_proc_copy_literal_area(Process *c_p, int *redsp, int fcalls, int gc_allowed)
+{
+ ErtsLiteralArea *la;
+ ErtsMessage *msgp;
+ struct erl_off_heap_header* oh;
+ char *literals;
+ Uint lit_bsize;
+ ErlHeapFragment *hfrag;
+
+ la = ERTS_COPY_LITERAL_AREA();
+ if (!la)
+ return am_ok;
+
+ oh = la->off_heap;
+ literals = (char *) &la->start[0];
+ lit_bsize = (char *) la->end - literals;
+
+ /*
+ * If a literal is in the message queue we make an explicit copy of
+ * it and attach it to the heap fragment. Each message needs to be
+ * self-contained; we cannot save the literal in the old_heap or
+ * any other heap than the message itself.
+ */
+
+ erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MSGQ);
+ ERTS_SMP_MSGQ_MV_INQ2PRIVQ(c_p);
+ erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_MSGQ);
+
+ for (msgp = c_p->msg.first; msgp; msgp = msgp->next) {
+ ErlHeapFragment *hf;
+ Uint lit_sz = 0;
+
+ *redsp += 1;
+
+ if (msgp->data.attached == ERTS_MSG_COMBINED_HFRAG)
+ hfrag = &msgp->hfrag;
+ else if (is_value(ERL_MESSAGE_TERM(msgp)) && msgp->data.heap_frag)
+ hfrag = msgp->data.heap_frag;
+ else
+ continue; /* Content on heap or in external term format... */
+
+ for (hf = hfrag; hf; hf = hf->next) {
+ lit_sz += hfrag_literal_size(&hf->mem[0], &hf->mem[hf->used_size],
+ literals, lit_bsize);
+ *redsp += 1;
+ }
+
+ *redsp += lit_sz / 16; /* Better value needed... */
+ if (lit_sz > 0) {
+ ErlHeapFragment *bp = new_message_buffer(lit_sz);
+ Eterm *hp = bp->mem;
+
+ for (hf = hfrag; hf; hf = hf->next) {
+ hfrag_literal_copy(&hp, &bp->off_heap,
+ &hf->mem[0], &hf->mem[hf->used_size],
+ literals, lit_bsize);
+ hfrag = hf;
+ }
+
+ /* link new hfrag last */
+ ASSERT(hfrag->next == NULL);
+ hfrag->next = bp;
+ bp->next = NULL;
+ }
+ }
+
+ if (gc_allowed) {
+ /*
+ * Current implementation first tests without
+ * allowing GC, and then restarts the operation
+ * allowing GC if it is needed. It is therfore
+ * very likely that we will need the GC (although
+ * this is not completely certain). We go for
+ * the GC directly instead of scanning everything
+ * one more time...
+ */
+ goto literal_gc;
+ }
+
+ *redsp += 2;
+ if (any_heap_ref_ptrs(&c_p->fvalue, &c_p->fvalue+1, literals, lit_bsize)) {
+ c_p->freason = EXC_NULL;
+ c_p->fvalue = NIL;
+ c_p->ftrace = NIL;
+ }
+
+ if (any_heap_ref_ptrs(c_p->stop, c_p->hend, literals, lit_bsize))
+ goto literal_gc;
+ *redsp += 1;
+#ifdef HIPE
+ if (nstack_any_heap_ref_ptrs(c_p, literals, lit_bsize))
+ goto literal_gc;
+ *redsp += 1;
+#endif
+ if (any_heap_refs(c_p->heap, c_p->htop, literals, lit_bsize))
+ goto literal_gc;
+ *redsp += 1;
+ if (c_p->abandoned_heap) {
+ if (any_heap_refs(c_p->abandoned_heap, c_p->abandoned_heap + c_p->heap_sz,
+ literals, lit_bsize))
+ goto literal_gc;
+ *redsp += 1;
+ }
+ if (any_heap_refs(c_p->old_heap, c_p->old_htop, literals, lit_bsize))
+ goto literal_gc;
+
+ /* Check dictionary */
+ *redsp += 1;
+ if (c_p->dictionary) {
+ Eterm* start = ERTS_PD_START(c_p->dictionary);
+ Eterm* end = start + ERTS_PD_SIZE(c_p->dictionary);
+
+ if (any_heap_ref_ptrs(start, end, literals, lit_bsize))
+ goto literal_gc;
+ }
+
+ /* Check heap fragments */
+ for (hfrag = c_p->mbuf; hfrag; hfrag = hfrag->next) {
+ Eterm *hp, *hp_end;
+
+ *redsp += 1;
+
+ hp = &hfrag->mem[0];
+ hp_end = &hfrag->mem[hfrag->used_size];
+ if (any_heap_refs(hp, hp_end, literals, lit_bsize))
+ goto literal_gc;
+ }
+
+ /*
+ * Message buffer fragments (matched messages)
+ * - off heap lists should already have been moved into
+ * process off heap structure.
+ * - Check for literals
+ */
+ for (msgp = c_p->msg_frag; msgp; msgp = msgp->next) {
+ hfrag = erts_message_to_heap_frag(msgp);
+ for (; hfrag; hfrag = hfrag->next) {
+ Eterm *hp, *hp_end;
+
+ *redsp += 1;
+
+ hp = &hfrag->mem[0];
+ hp_end = &hfrag->mem[hfrag->used_size];
+
+ if (any_heap_refs(hp, hp_end, literals, lit_bsize))
+ goto literal_gc;
+ }
+ }
+
+ return am_ok;
+
+literal_gc: {
+ int hibernated = !!(c_p->flags & F_HIBERNATED);
+ int gc_cost;
+
+ if (!gc_allowed)
+ return am_need_gc;
+
+ if (c_p->flags & F_DISABLE_GC)
+ return THE_NON_VALUE;
+
+ FLAGS(c_p) |= F_NEED_FULLSWEEP;
+
+ gc_cost = erts_garbage_collect_nobump(c_p, 0, c_p->arg_reg, c_p->arity, fcalls);
+ *redsp += gc_cost;
+
+ erts_garbage_collect_literals(c_p, (Eterm *) literals, lit_bsize, oh);
+
+ *redsp += lit_bsize / 64; /* Need a better value... */
+
+ if (hibernated) {
+ erts_garbage_collect_hibernate(c_p);
+ *redsp += gc_cost;
+ }
+
+ return am_ok;
}
}
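
Both erts_proc_copy_literal_area() above and check_process_code() below report their cost through the redsp out-parameter: every scanned area bumps it a little, a garbage collection adds the value returned by erts_garbage_collect_nobump(), and the calling BIFs then charge the total as reductions (cf. BIF_RET2 above). A minimal sketch of that accumulate-then-charge pattern, with hypothetical names and costs:

/* Hypothetical work function: does some scanning, reports its cost. */
static int scan_some_area(int nwords, int *redsp)
{
    *redsp += nwords / 32;      /* charge roughly per 32 words scanned */
    return 0;                   /* nothing found */
}

/* Hypothetical caller: accumulate cost from several steps, then charge. */
static int check_and_charge(void)
{
    int reds = 0;
    scan_some_area(1024, &reds);
    scan_some_area(4096, &reds);
    reds += 2;                  /* fixed cost for the cheap pointer checks */
    return reds;                /* caller bumps reductions by this amount */
}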
static Eterm
-check_process_code(Process* rp, Module* modp, int allow_gc, int *redsp)
+check_process_code(Process* rp, Module* modp, Uint flags, int *redsp, int fcalls)
{
BeamInstr* start;
char* mod_start;
Uint mod_size;
- BeamInstr* end;
Eterm* sp;
- struct erl_off_heap_header* oh;
+
+ *redsp += 1;
+
+ /*
+ * Pick up limits for the module.
+ */
+ start = (BeamInstr*) modp->old.code_hdr;
+ mod_start = (char *) start;
+ mod_size = modp->old.code_length;
+
+ /*
+ * Check if current instruction or continuation pointer points into module.
+ */
+ if (ErtsInArea(rp->i, mod_start, mod_size)
+ || ErtsInArea(rp->cp, mod_start, mod_size)) {
+ return am_true;
+ }
+
+ *redsp += 1;
+
+ if (erts_check_nif_export_in_area(rp, mod_start, mod_size))
+ return am_true;
+
+ *redsp += (STACK_START(rp) - rp->stop) / 32;
+
+ /*
+ * Check all continuation pointers stored on the stack.
+ */
+ for (sp = rp->stop; sp < STACK_START(rp); sp++) {
+ if (is_CP(*sp) && ErtsInArea(cp_val(*sp), mod_start, mod_size)) {
+ return am_true;
+ }
+ }
+
+ /*
+ * Check all continuation pointers stored in stackdump
+ * and clear exception stackdump if there is a pointer
+ * to the module.
+ */
+ if (rp->ftrace != NIL) {
+ struct StackTrace *s;
+ ASSERT(is_list(rp->ftrace));
+ s = (struct StackTrace *) big_val(CDR(list_val(rp->ftrace)));
+ if ((s->pc && ErtsInArea(s->pc, mod_start, mod_size)) ||
+ (s->current && ErtsInArea(s->current, mod_start, mod_size))) {
+ rp->freason = EXC_NULL;
+ rp->fvalue = NIL;
+ rp->ftrace = NIL;
+ } else {
+ int i;
+ for (i = 0; i < s->depth; i++) {
+ if (ErtsInArea(s->trace[i], mod_start, mod_size)) {
+ rp->freason = EXC_NULL;
+ rp->fvalue = NIL;
+ rp->ftrace = NIL;
+ break;
+ }
+ }
+ }
+ }
+
+ return am_false;
+}
+
+#else /* !ERTS_NEW_PURGE_STRATEGY, i.e, old style purge... */
+
+static Eterm
+check_process_code(Process* rp, Module* modp, Uint flags, int *redsp, int fcalls)
+{
+ BeamInstr* start;
+ char* literals;
+ Uint lit_bsize;
+ char* mod_start;
+ Uint mod_size;
+ Eterm* sp;
int done_gc = 0;
+ int need_gc = 0;
+ int hibernated = !!(rp->flags & F_HIBERNATED);
+ int gc_cost = 0;
+ ErtsMessage *msgp;
+ ErlHeapFragment *hfrag;
-#define INSIDE(a) (start <= (a) && (a) < end)
+#define ERTS_ORDINARY_GC__ (1 << 0)
+#define ERTS_LITERAL_GC__ (1 << 1)
/*
* Pick up limits for the module.
*/
- start = modp->old.code;
- end = (BeamInstr *)((char *)start + modp->old.code_length);
+ start = (BeamInstr*) modp->old.code_hdr;
mod_start = (char *) start;
mod_size = modp->old.code_length;
/*
* Check if current instruction or continuation pointer points into module.
*/
- if (INSIDE(rp->i) || INSIDE(rp->cp)) {
+ if (ErtsInArea(rp->i, mod_start, mod_size)
+ || ErtsInArea(rp->cp, mod_start, mod_size)) {
return am_true;
}
+
+ *redsp += 1;
+
+ if (erts_check_nif_export_in_area(rp, mod_start, mod_size))
+ return am_true;
+
/*
* Check all continuation pointers stored on the stack.
*/
for (sp = rp->stop; sp < STACK_START(rp); sp++) {
- if (is_CP(*sp) && INSIDE(cp_val(*sp))) {
+ if (is_CP(*sp) && ErtsInArea(cp_val(*sp), mod_start, mod_size)) {
return am_true;
}
}
@@ -767,15 +1210,15 @@ check_process_code(Process* rp, Module* modp, int allow_gc, int *redsp)
struct StackTrace *s;
ASSERT(is_list(rp->ftrace));
s = (struct StackTrace *) big_val(CDR(list_val(rp->ftrace)));
- if ((s->pc && INSIDE(s->pc)) ||
- (s->current && INSIDE(s->current))) {
+ if ((s->pc && ErtsInArea(s->pc, mod_start, mod_size)) ||
+ (s->current && ErtsInArea(s->current, mod_start, mod_size))) {
rp->freason = EXC_NULL;
rp->fvalue = NIL;
rp->ftrace = NIL;
} else {
int i;
for (i = 0; i < s->depth; i++) {
- if (INSIDE(s->trace[i])) {
+ if (ErtsInArea(s->trace[i], mod_start, mod_size)) {
rp->freason = EXC_NULL;
rp->fvalue = NIL;
rp->ftrace = NIL;
@@ -796,111 +1239,187 @@ check_process_code(Process* rp, Module* modp, int allow_gc, int *redsp)
}
/*
- * See if there are funs that refer to the old version of the module.
+ * The message queue can contain funs, and may contain literals,
+ * i.e. we may have references to this module from the message
+ * queue.
+ *
+ * If a literal is in the message queue we make an explicit copy of
+ * it and attach it to the heap fragment. Each message needs to be
+ * self-contained; we cannot save the literal in the old_heap or
+ * any other heap than the message itself.
*/
- rescan:
- for (oh = MSO(rp).first; oh; oh = oh->next) {
- if (thing_subtag(oh->thing_word) == FUN_SUBTAG) {
- ErlFunThing* funp = (ErlFunThing*) oh;
+ erts_smp_proc_lock(rp, ERTS_PROC_LOCK_MSGQ);
+ ERTS_SMP_MSGQ_MV_INQ2PRIVQ(rp);
+ erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_MSGQ);
- if (INSIDE((BeamInstr *) funp->fe->address)) {
- if (done_gc) {
- return am_true;
- } else {
- if (!allow_gc)
- return am_aborted;
- /*
- * Try to get rid of this fun by garbage collecting.
- * Clear both fvalue and ftrace to make sure they
- * don't hold any funs.
- */
- rp->freason = EXC_NULL;
- rp->fvalue = NIL;
- rp->ftrace = NIL;
- done_gc = 1;
- FLAGS(rp) |= F_NEED_FULLSWEEP;
- *redsp += erts_garbage_collect(rp, 0, rp->arg_reg, rp->arity);
- goto rescan;
- }
- }
- }
+ if (modp->old.code_hdr->literal_area) {
+ literals = (char*) modp->old.code_hdr->literal_area->start;
+ lit_bsize = (char*) modp->old.code_hdr->literal_area->end - literals;
+ }
+ else {
+ literals = NULL;
+ lit_bsize = 0;
}
- /*
- * See if there are constants inside the module referenced by the process.
- */
- done_gc = 0;
- for (;;) {
- ErlMessage* mp;
+ for (msgp = rp->msg.first; msgp; msgp = msgp->next) {
+ if (msgp->data.attached == ERTS_MSG_COMBINED_HFRAG)
+ hfrag = &msgp->hfrag;
+ else if (is_value(ERL_MESSAGE_TERM(msgp)) && msgp->data.heap_frag)
+ hfrag = msgp->data.heap_frag;
+ else
+ continue;
+ {
+ ErlHeapFragment *hf;
+ Uint lit_sz = 0;
+ for (hf=hfrag; hf; hf = hf->next) {
+ if (check_mod_funs(rp, &hfrag->off_heap, mod_start, mod_size))
+ return am_true;
+ lit_sz += hfrag_literal_size(&hf->mem[0], &hf->mem[hf->used_size],
+ literals, lit_bsize);
+ }
+ if (lit_sz > 0) {
+ ErlHeapFragment *bp = new_message_buffer(lit_sz);
+ Eterm *hp = bp->mem;
+
+ for (hf=hfrag; hf; hf = hf->next) {
+ hfrag_literal_copy(&hp, &bp->off_heap,
+ &hf->mem[0], &hf->mem[hf->used_size],
+ literals, lit_bsize);
+ hfrag=hf;
+ }
+ /* link new hfrag last */
+ ASSERT(hfrag->next == NULL);
+ hfrag->next = bp;
+ bp->next = NULL;
+ }
+ }
+ }
+
+ while (1) {
- if (any_heap_ref_ptrs(&rp->fvalue, &rp->fvalue+1, mod_start, mod_size)) {
+ /* Check heap, stack etc... */
+ if (check_mod_funs(rp, &rp->off_heap, mod_start, mod_size)) {
+ if (hibernated) {
+ /* GC won't help; everything on heap is live... */
+ return am_true;
+ }
+ goto try_gc;
+ }
+ if (any_heap_ref_ptrs(&rp->fvalue, &rp->fvalue+1, literals, lit_bsize)) {
rp->freason = EXC_NULL;
rp->fvalue = NIL;
rp->ftrace = NIL;
}
- if (any_heap_ref_ptrs(rp->stop, rp->hend, mod_start, mod_size)) {
- goto need_gc;
- }
- if (any_heap_refs(rp->heap, rp->htop, mod_start, mod_size)) {
- goto need_gc;
+ if (any_heap_ref_ptrs(rp->stop, rp->hend, literals, lit_bsize))
+ goto try_literal_gc;
+#ifdef HIPE
+ if (nstack_any_heap_ref_ptrs(rp, literals, lit_bsize))
+ goto try_literal_gc;
+#endif
+ if (any_heap_refs(rp->heap, rp->htop, literals, lit_bsize))
+ goto try_literal_gc;
+ if (rp->abandoned_heap) {
+ if (any_heap_refs(rp->abandoned_heap, rp->abandoned_heap + rp->heap_sz,
+ literals, lit_bsize))
+ goto try_literal_gc;
}
+ if (any_heap_refs(rp->old_heap, rp->old_htop, literals, lit_bsize))
+ goto try_literal_gc;
- if (any_heap_refs(rp->old_heap, rp->old_htop, mod_start, mod_size)) {
- goto need_gc;
+ /* Check dictionary */
+ if (rp->dictionary) {
+ Eterm* start = ERTS_PD_START(rp->dictionary);
+ Eterm* end = start + ERTS_PD_SIZE(rp->dictionary);
+
+ if (any_heap_ref_ptrs(start, end, literals, lit_bsize))
+ goto try_literal_gc;
}
- if (rp->dictionary != NULL) {
- Eterm* start = rp->dictionary->data;
- Eterm* end = start + rp->dictionary->used;
+ /* Check heap fragments */
+ for (hfrag = rp->mbuf; hfrag; hfrag = hfrag->next) {
+ Eterm *hp, *hp_end;
+ /* Off heap lists should already have been moved into process */
+ ASSERT(!check_mod_funs(rp, &hfrag->off_heap, mod_start, mod_size));
- if (any_heap_ref_ptrs(start, end, mod_start, mod_size)) {
- goto need_gc;
- }
+ hp = &hfrag->mem[0];
+ hp_end = &hfrag->mem[hfrag->used_size];
+ if (any_heap_refs(hp, hp_end, literals, lit_bsize))
+ goto try_literal_gc;
}
- for (mp = rp->msg.first; mp != NULL; mp = mp->next) {
- if (any_heap_ref_ptrs(mp->m, mp->m+2, mod_start, mod_size)) {
- goto need_gc;
+ /*
+ * Message buffer fragments (matched messages)
+ * - off heap lists should already have been moved into
+ * process off heap structure.
+ * - Check for literals
+ */
+ for (msgp = rp->msg_frag; msgp; msgp = msgp->next) {
+ hfrag = erts_message_to_heap_frag(msgp);
+ for (; hfrag; hfrag = hfrag->next) {
+ Eterm *hp, *hp_end;
+ ASSERT(!check_mod_funs(rp, &hfrag->off_heap, mod_start, mod_size));
+
+ hp = &hfrag->mem[0];
+ hp_end = &hfrag->mem[hfrag->used_size];
+
+ if (any_heap_refs(hp, hp_end, literals, lit_bsize))
+ goto try_literal_gc;
}
}
- break;
- need_gc:
- if (done_gc) {
+ return am_false;
+
+ try_literal_gc:
+ need_gc |= ERTS_LITERAL_GC__;
+
+ try_gc:
+ need_gc |= ERTS_ORDINARY_GC__;
+
+ if ((done_gc & need_gc) == need_gc)
return am_true;
- } else {
- Eterm* literals;
- Uint lit_size;
- struct erl_off_heap_header* oh;
- if (!allow_gc)
- return am_aborted;
+ if (!(flags & ERTS_CPC_ALLOW_GC))
+ return am_aborted;
- /*
- * Try to get rid of constants by by garbage collecting.
- * Clear both fvalue and ftrace.
- */
- rp->freason = EXC_NULL;
- rp->fvalue = NIL;
- rp->ftrace = NIL;
- done_gc = 1;
+ need_gc &= ~done_gc;
+
+ /*
+ * Try to get rid of literals by garbage collecting.
+ * Clear both fvalue and ftrace.
+ */
+
+ rp->freason = EXC_NULL;
+ rp->fvalue = NIL;
+ rp->ftrace = NIL;
+
+ if (need_gc & ERTS_ORDINARY_GC__) {
FLAGS(rp) |= F_NEED_FULLSWEEP;
- *redsp += erts_garbage_collect(rp, 0, rp->arg_reg, rp->arity);
- literals = (Eterm *) modp->old.code[MI_LITERALS_START];
- lit_size = (Eterm *) modp->old.code[MI_LITERALS_END] - literals;
- oh = (struct erl_off_heap_header *)
- modp->old.code[MI_LITERALS_OFF_HEAP];
- *redsp += lit_size / 10; /* Need, better value... */
- erts_garbage_collect_literals(rp, literals, lit_size, oh);
+ gc_cost = erts_garbage_collect_nobump(rp, 0, rp->arg_reg, rp->arity, fcalls);
+ ASSERT(!hibernated || (need_gc & ERTS_LITERAL_GC__));
+ *redsp += gc_cost;
+ done_gc |= ERTS_ORDINARY_GC__;
+ }
+ if (need_gc & ERTS_LITERAL_GC__) {
+ struct erl_off_heap_header* oh;
+ oh = modp->old.code_hdr->literal_area->off_heap;
+ *redsp += lit_bsize / 64; /* Need a better value... */
+ erts_garbage_collect_literals(rp, (Eterm*)literals, lit_bsize, oh);
+ done_gc |= ERTS_LITERAL_GC__;
+ if (hibernated) {
+ erts_garbage_collect_hibernate(rp);
+ *redsp += gc_cost;
+ }
}
+ need_gc = 0;
}
- return am_false;
-#undef INSIDE
+
+#undef ERTS_ORDINARY_GC__
+#undef ERTS_LITERAL_GC__
+
}
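
The ERTS_ORDINARY_GC__/ERTS_LITERAL_GC__ bits above let the loop attempt each kind of collection at most once: a hit sets the needed bit, bits already done are masked off, and if everything needed has already been tried the check gives up and returns am_true. A standalone sketch of that bookkeeping, with hypothetical names:

#include <stdio.h>

#define ORDINARY_GC (1 << 0)
#define LITERAL_GC  (1 << 1)

/* Hypothetical driver loop mirroring the need_gc/done_gc logic above. */
static const char *check(int found_fun_ref, int found_literal_ref)
{
    int need_gc = 0, done_gc = 0;

    for (;;) {
        if (found_fun_ref)
            need_gc |= ORDINARY_GC;
        if (found_literal_ref)
            need_gc |= ORDINARY_GC | LITERAL_GC;   /* literal hit falls through */
        if (!need_gc)
            return "false";                 /* no references found */
        if ((done_gc & need_gc) == need_gc)
            return "true";                  /* GC already tried; give up */
        need_gc &= ~done_gc;
        if (need_gc & ORDINARY_GC) {
            /* run ordinary full-sweep GC here ... */
            done_gc |= ORDINARY_GC;
        }
        if (need_gc & LITERAL_GC) {
            /* run literal-area GC here ... */
            done_gc |= LITERAL_GC;
        }
        need_gc = 0;
        /* pretend the GC removed the references so the rescan is clean */
        found_fun_ref = found_literal_ref = 0;
    }
}

int main(void)
{
    printf("%s\n", check(1, 1));    /* prints "false" after one GC round */
    return 0;
}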
-#define in_area(ptr,start,nbytes) \
- ((UWord)((char*)(ptr) - (char*)(start)) < (nbytes))
+#endif /* !ERTS_NEW_PURGE_STRATEGY */
static int
any_heap_ref_ptrs(Eterm* start, Eterm* end, char* mod_start, Uint mod_size)
@@ -913,7 +1432,7 @@ any_heap_ref_ptrs(Eterm* start, Eterm* end, char* mod_start, Uint mod_size)
switch (primary_tag(val)) {
case TAG_PRIMARY_BOXED:
case TAG_PRIMARY_LIST:
- if (in_area(EXPAND_POINTER(val), mod_start, mod_size)) {
+ if (ErtsInArea(val, mod_start, mod_size)) {
return 1;
}
break;
@@ -933,7 +1452,7 @@ any_heap_refs(Eterm* start, Eterm* end, char* mod_start, Uint mod_size)
switch (primary_tag(val)) {
case TAG_PRIMARY_BOXED:
case TAG_PRIMARY_LIST:
- if (in_area(EXPAND_POINTER(val), mod_start, mod_size)) {
+ if (ErtsInArea(val, mod_start, mod_size)) {
return 1;
}
break;
@@ -943,7 +1462,7 @@ any_heap_refs(Eterm* start, Eterm* end, char* mod_start, Uint mod_size)
if (header_is_bin_matchstate(val)) {
ErlBinMatchState *ms = (ErlBinMatchState*) p;
ErlBinMatchBuffer *mb = &(ms->mb);
- if (in_area(EXPAND_POINTER(mb->orig), mod_start, mod_size)) {
+ if (ErtsInArea(mb->orig, mod_start, mod_size)) {
return 1;
}
}
@@ -956,100 +1475,576 @@ any_heap_refs(Eterm* start, Eterm* end, char* mod_start, Uint mod_size)
return 0;
}
-#undef in_area
+static Uint
+hfrag_literal_size(Eterm* start, Eterm* end, char* lit_start, Uint lit_size)
+{
+ Eterm* p;
+ Eterm val;
+ Uint sz = 0;
-BIF_RETTYPE purge_module_1(BIF_ALIST_1)
+ for (p = start; p < end; p++) {
+ val = *p;
+ switch (primary_tag(val)) {
+ case TAG_PRIMARY_BOXED:
+ case TAG_PRIMARY_LIST:
+ if (ErtsInArea(val, lit_start, lit_size)) {
+ sz += size_object(val);
+ }
+ break;
+ case TAG_PRIMARY_HEADER:
+ if (!header_is_transparent(val)) {
+ Eterm* new_p;
+ if (header_is_bin_matchstate(val)) {
+ ErlBinMatchState *ms = (ErlBinMatchState*) p;
+ ErlBinMatchBuffer *mb = &(ms->mb);
+ if (ErtsInArea(mb->orig, lit_start, lit_size)) {
+ sz += size_object(mb->orig);
+ }
+ }
+ new_p = p + thing_arityval(val);
+ ASSERT(start <= new_p && new_p < end);
+ p = new_p;
+ }
+ }
+ }
+ return sz;
+}
+
+static void
+hfrag_literal_copy(Eterm **hpp, ErlOffHeap *ohp,
+ Eterm *start, Eterm *end,
+ char *lit_start, Uint lit_size) {
+ Eterm* p;
+ Eterm val;
+ Uint sz;
+
+ for (p = start; p < end; p++) {
+ val = *p;
+ switch (primary_tag(val)) {
+ case TAG_PRIMARY_BOXED:
+ case TAG_PRIMARY_LIST:
+ if (ErtsInArea(val, lit_start, lit_size)) {
+ sz = size_object(val);
+ val = copy_struct(val, sz, hpp, ohp);
+ *p = val;
+ }
+ break;
+ case TAG_PRIMARY_HEADER:
+ if (!header_is_transparent(val)) {
+ Eterm* new_p;
+ /* matchstate in message, not possible. */
+ if (header_is_bin_matchstate(val)) {
+ ErlBinMatchState *ms = (ErlBinMatchState*) p;
+ ErlBinMatchBuffer *mb = &(ms->mb);
+ if (ErtsInArea(mb->orig, lit_start, lit_size)) {
+ sz = size_object(mb->orig);
+ mb->orig = copy_struct(mb->orig, sz, hpp, ohp);
+ }
+ }
+ new_p = p + thing_arityval(val);
+ ASSERT(start <= new_p && new_p < end);
+ p = new_p;
+ }
+ }
+ }
+}
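
hfrag_literal_size() and hfrag_literal_copy() above are used as the usual two-pass pair: one pass over the heap fragments sums the sizes of any terms living in the literal area, a buffer of exactly that size is allocated, and a second pass deep-copies those terms into it and redirects the references (see the message-queue loops earlier in this file). A minimal sketch of the same size-then-copy scheme on a plain array, with hypothetical types and helpers:

#include <stdlib.h>
#include <string.h>

/* Hypothetical element: "literal" elements must be copied out. */
typedef struct { int is_literal; size_t size; const char *data; } elem;

/* Pass 1: how much space will the copies need? */
static size_t copy_size(const elem *v, size_t n)
{
    size_t sz = 0, i;
    for (i = 0; i < n; i++)
        if (v[i].is_literal)
            sz += v[i].size;
    return sz;
}

/* Pass 2: copy into a buffer sized by pass 1, advancing the write pointer. */
static void copy_literals(elem *v, size_t n, char *buf)
{
    size_t i;
    for (i = 0; i < n; i++) {
        if (v[i].is_literal) {
            memcpy(buf, v[i].data, v[i].size);
            v[i].data = buf;           /* redirect the reference */
            v[i].is_literal = 0;
            buf += v[i].size;
        }
    }
}

/* Usage: size_t sz = copy_size(v, n); if (sz) copy_literals(v, n, malloc(sz)); */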
+
+#ifdef ERTS_NEW_PURGE_STRATEGY
+
+#ifdef ERTS_SMP
+
+ErtsThrPrgrLaterOp later_literal_area_switch;
+
+typedef struct {
+ ErtsThrPrgrLaterOp lop;
+ ErtsLiteralArea *la;
+} ErtsLaterReleasLiteralArea;
+
+static void
+later_release_literal_area(void *vlrlap)
{
- ErtsCodeIndex code_ix;
- BeamInstr* code;
- BeamInstr* end;
- Module* modp;
- int is_blocking = 0;
- Eterm ret;
+ ErtsLaterReleasLiteralArea *lrlap;
+ lrlap = (ErtsLaterReleasLiteralArea *) vlrlap;
+ erts_release_literal_area(lrlap->la);
+ erts_free(ERTS_ALC_T_RELEASE_LAREA, vlrlap);
+}
- if (is_not_atom(BIF_ARG_1)) {
- BIF_ERROR(BIF_P, BADARG);
+static void
+complete_literal_area_switch(void *literal_area)
+{
+ Process *p = erts_literal_area_collector;
+ erts_smp_proc_lock(p, ERTS_PROC_LOCK_STATUS);
+ erts_resume(p, ERTS_PROC_LOCK_STATUS);
+ erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
+ if (literal_area)
+ erts_release_literal_area((ErtsLiteralArea *) literal_area);
+}
+#endif
+
+#endif /* ERTS_NEW_PURGE_STRATEGY */
+
+BIF_RETTYPE erts_internal_release_literal_area_switch_0(BIF_ALIST_0)
+{
+#ifndef ERTS_NEW_PURGE_STRATEGY
+ BIF_ERROR(BIF_P, EXC_NOTSUP);
+#else
+ ErtsLiteralArea *unused_la;
+ ErtsLiteralAreaRef *la_ref;
+
+ if (BIF_P != erts_literal_area_collector)
+ BIF_ERROR(BIF_P, EXC_NOTSUP);
+
+ erts_smp_mtx_lock(&release_literal_areas.mtx);
+
+ la_ref = release_literal_areas.first;
+ if (la_ref) {
+ release_literal_areas.first = la_ref->next;
+ if (!release_literal_areas.first)
+ release_literal_areas.last = NULL;
}
- if (!erts_try_seize_code_write_permission(BIF_P)) {
- ERTS_BIF_YIELD1(bif_export[BIF_purge_module_1], BIF_P, BIF_ARG_1);
+ erts_smp_mtx_unlock(&release_literal_areas.mtx);
+
+ unused_la = ERTS_COPY_LITERAL_AREA();
+
+ if (!la_ref) {
+ ERTS_SET_COPY_LITERAL_AREA(NULL);
+ if (unused_la) {
+#ifdef ERTS_SMP
+ ErtsLaterReleasLiteralArea *lrlap;
+ lrlap = erts_alloc(ERTS_ALC_T_RELEASE_LAREA,
+ sizeof(ErtsLaterReleasLiteralArea));
+ lrlap->la = unused_la;
+ erts_schedule_thr_prgr_later_cleanup_op(
+ later_release_literal_area,
+ (void *) lrlap,
+ &lrlap->lop,
+ (sizeof(ErtsLaterReleasLiteralArea)
+ + sizeof(ErtsLiteralArea)
+ + ((unused_la->end
+ - &unused_la->start[0])
+ - 1)*(sizeof(Eterm))));
+#else
+ erts_release_literal_area(unused_la);
+#endif
+ }
+ BIF_RET(am_false);
}
- code_ix = erts_active_code_ix();
+ ERTS_SET_COPY_LITERAL_AREA(la_ref->literal_area);
+
+ erts_free(ERTS_ALC_T_LITERAL_REF, la_ref);
+
+#ifdef ERTS_SMP
+ erts_schedule_thr_prgr_later_op(complete_literal_area_switch,
+ unused_la,
+ &later_literal_area_switch);
+ erts_suspend(BIF_P, ERTS_PROC_LOCK_MAIN, NULL);
+ ERTS_BIF_YIELD_RETURN(BIF_P, am_true);
+#else
+ erts_release_literal_area(unused_la);
+ BIF_RET(am_true);
+#endif
+
+#endif /* ERTS_NEW_PURGE_STRATEGY */
+}
+
+void
+erts_purge_state_add_fun(ErlFunEntry *fe)
+{
+ ASSERT(is_value(purge_state.module));
+ if (purge_state.fe_ix >= purge_state.fe_size) {
+ ErlFunEntry **funs;
+ purge_state.fe_size += 100;
+ funs = erts_alloc(ERTS_ALC_T_PURGE_DATA,
+ sizeof(ErlFunEntry *)*purge_state.fe_size);
+ sys_memcpy((void *) funs,
+ (void *) purge_state.funs,
+ purge_state.fe_ix*sizeof(ErlFunEntry *));
+ if (purge_state.funs != &purge_state.def_funs[0])
+ erts_free(ERTS_ALC_T_PURGE_DATA, purge_state.funs);
+ purge_state.funs = funs;
+ }
+ purge_state.funs[purge_state.fe_ix++] = fe;
+}
+
+Export *
+erts_suspend_process_on_pending_purge_lambda(Process *c_p, ErlFunEntry* fe)
+{
+ erts_smp_mtx_lock(&purge_state.mtx);
+ if (purge_state.module == fe->module) {
+ /*
+ * The process c_p is about to call a fun in the code
+ * that we are trying to purge. Suspend it and call
+ * erts_code_purger:pending_purge_lambda/3. The process
+ * will be resumed when the purge completes or aborts,
+ * and will then try to do the call again.
+ */
+ if (purge_state.sp_ix >= purge_state.sp_size) {
+ Eterm *sprocs;
+ purge_state.sp_size += 100;
+ sprocs = erts_alloc(ERTS_ALC_T_PURGE_DATA,
+ (sizeof(ErlFunEntry *)
+ * purge_state.sp_size));
+ sys_memcpy((void *) sprocs,
+ (void *) purge_state.sprocs,
+ purge_state.sp_ix*sizeof(ErlFunEntry *));
+ if (purge_state.sprocs != &purge_state.def_sprocs[0])
+ erts_free(ERTS_ALC_T_PURGE_DATA, purge_state.sprocs);
+ purge_state.sprocs = sprocs;
+ }
+ purge_state.sprocs[purge_state.sp_ix++] = c_p->common.id;
+ erts_suspend(c_p, ERTS_PROC_LOCK_MAIN, NULL);
+ ERTS_VBUMP_ALL_REDS(c_p);
+ }
+ erts_smp_mtx_unlock(&purge_state.mtx);
+ return purge_state.pending_purge_lambda;
+}
+
+static void
+finalize_purge_operation(Process *c_p, int succeded)
+{
+ Uint ix;
+
+ if (c_p)
+ erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN);
+
+ erts_smp_mtx_lock(&purge_state.mtx);
+
+ ASSERT(purge_state.module != THE_NON_VALUE);
+
+ purge_state.module = THE_NON_VALUE;
/*
- * Correct module?
+ * Resume all processes that have tried to call
+ * funs in this code.
*/
+ for (ix = 0; ix < purge_state.sp_ix; ix++) {
+ Process *rp = erts_pid2proc(NULL, 0,
+ purge_state.sprocs[ix],
+ ERTS_PROC_LOCK_STATUS);
+ if (rp) {
+ erts_resume(rp, ERTS_PROC_LOCK_STATUS);
+ erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_STATUS);
+ }
+ }
- if ((modp = erts_get_module(BIF_ARG_1, code_ix)) == NULL) {
- ERTS_BIF_PREP_ERROR(ret, BIF_P, BADARG);
+ erts_smp_mtx_unlock(&purge_state.mtx);
+
+ if (c_p)
+ erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MAIN);
+
+ if (purge_state.sprocs != &purge_state.def_sprocs[0]) {
+ erts_free(ERTS_ALC_T_PURGE_DATA, purge_state.sprocs);
+ purge_state.sprocs = &purge_state.def_sprocs[0];
+ purge_state.sp_size = sizeof(purge_state.def_sprocs);
+ purge_state.sp_size /= sizeof(purge_state.def_sprocs[0]);
}
- else {
- erts_rwlock_old_code(code_ix);
+ purge_state.sp_ix = 0;
+ if (purge_state.funs != &purge_state.def_funs[0]) {
+ erts_free(ERTS_ALC_T_PURGE_DATA, purge_state.funs);
+ purge_state.funs = &purge_state.def_funs[0];
+ purge_state.fe_size = sizeof(purge_state.def_funs);
+ purge_state.fe_size /= sizeof(purge_state.def_funs[0]);
+ }
+ purge_state.fe_ix = 0;
+}
+
+#ifdef ERTS_SMP
+
+static ErtsThrPrgrLaterOp purger_lop_data;
+
+static void
+resume_purger(void *unused)
+{
+ Process *p = erts_code_purger;
+ erts_smp_proc_lock(p, ERTS_PROC_LOCK_STATUS);
+ erts_resume(p, ERTS_PROC_LOCK_STATUS);
+ erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
+}
+
+static void
+finalize_purge_abort(void *unused)
+{
+ erts_fun_purge_abort_finalize(purge_state.funs, purge_state.fe_ix);
+
+ finalize_purge_operation(NULL, 0);
+
+ resume_purger(NULL);
+}
+
+#endif /* ERTS_SMP */
+
+BIF_RETTYPE erts_internal_purge_module_2(BIF_ALIST_2)
+{
+ if (BIF_P != erts_code_purger)
+ BIF_ERROR(BIF_P, EXC_NOTSUP);
+
+ if (is_not_atom(BIF_ARG_1))
+ BIF_ERROR(BIF_P, BADARG);
+
+ switch (BIF_ARG_2) {
+
+ case am_prepare:
+ case am_prepare_on_load: {
/*
- * Any code to purge?
+ * Prepare for purge by marking all fun
+ * entries referring to the code to purge
+ * with "pending purge" markers.
*/
- if (modp->old.code == 0) {
- ERTS_BIF_PREP_ERROR(ret, BIF_P, BADARG);
- }
+ ErtsCodeIndex code_ix;
+ Module* modp;
+ Eterm res;
+
+ if (is_value(purge_state.module))
+ BIF_ERROR(BIF_P, BADARG);
+
+ code_ix = erts_active_code_ix();
+
+ /*
+ * Correct module?
+ */
+ modp = erts_get_module(BIF_ARG_1, code_ix);
+ if (!modp)
+ res = am_false;
else {
/*
- * Unload any NIF library
+ * Any code to purge?
*/
- if (modp->old.nif != NULL) {
- /* ToDo: Do unload nif without blocking */
- erts_rwunlock_old_code(code_ix);
- erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
- erts_smp_thr_progress_block();
- is_blocking = 1;
- erts_rwlock_old_code(code_ix);
- erts_unload_nif(modp->old.nif);
- modp->old.nif = NULL;
+
+ if (BIF_ARG_2 == am_prepare_on_load) {
+ erts_rwlock_old_code(code_ix);
+ } else {
+ erts_rlock_old_code(code_ix);
+ }
+
+ if (BIF_ARG_2 == am_prepare_on_load) {
+ ASSERT(modp->on_load);
+ ASSERT(modp->on_load->code_hdr);
+ purge_state.saved_old = modp->old;
+ modp->old = *modp->on_load;
+ erts_free(ERTS_ALC_T_PREPARED_CODE, (void *) modp->on_load);
+ modp->on_load = 0;
+ }
+
+ if (!modp->old.code_hdr)
+ res = am_false;
+ else {
+ BeamInstr* code;
+ BeamInstr* end;
+ erts_smp_mtx_lock(&purge_state.mtx);
+ purge_state.module = BIF_ARG_1;
+ erts_smp_mtx_unlock(&purge_state.mtx);
+ res = am_true;
+ code = (BeamInstr*) modp->old.code_hdr;
+ end = (BeamInstr *)((char *)code + modp->old.code_length);
+ erts_fun_purge_prepare(code, end);
+#if !defined(ERTS_NEW_PURGE_STRATEGY)
+ ASSERT(!ERTS_COPY_LITERAL_AREA());
+ ERTS_SET_COPY_LITERAL_AREA(modp->old.code_hdr->literal_area);
+#endif
}
+ if (BIF_ARG_2 == am_prepare_on_load) {
+ erts_rwunlock_old_code(code_ix);
+ } else {
+ erts_runlock_old_code(code_ix);
+ }
+ }
+
+#ifndef ERTS_SMP
+ BIF_RET(res);
+#else
+ if (res != am_true)
+ BIF_RET(res);
+ else {
/*
- * Remove the old code.
+ * We'll be resumed when all schedulers are guaranteed
+ * to see the "pending purge" markers that we've made on
+ * all fun entries of the code that we are about to purge.
+ * Processes trying to call these funs will be suspended
+ * before calling the funs. That is, we are guaranteed not
+ * to get any more direct references into the code while
+ * checking for such references...
*/
- ASSERT(erts_total_code_size >= modp->old.code_length);
- erts_total_code_size -= modp->old.code_length;
- code = modp->old.code;
- end = (BeamInstr *)((char *)code + modp->old.code_length);
- erts_cleanup_funs_on_purge(code, end);
- beam_catches_delmod(modp->old.catches, code, modp->old.code_length,
- code_ix);
- decrement_refc(code);
- erts_free(ERTS_ALC_T_CODE, (void *) code);
- modp->old.code = NULL;
- modp->old.code_length = 0;
- modp->old.catches = BEAM_CATCHES_NIL;
- erts_remove_from_ranges(code);
- ERTS_BIF_PREP_RET(ret, am_true);
+ erts_schedule_thr_prgr_later_op(resume_purger,
+ NULL,
+ &purger_lop_data);
+ erts_suspend(BIF_P, ERTS_PROC_LOCK_MAIN, NULL);
+ ERTS_BIF_YIELD_RETURN(BIF_P, am_true);
}
- erts_rwunlock_old_code(code_ix);
+#endif
}
- if (is_blocking) {
- erts_smp_thr_progress_unblock();
- erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
+
+ case am_abort: {
+ /*
+ * Soft purge that detected direct references into the code
+ * we set out to purge. Abort the purge.
+ */
+
+ if (purge_state.module != BIF_ARG_1)
+ BIF_ERROR(BIF_P, BADARG);
+
+ erts_fun_purge_abort_prepare(purge_state.funs, purge_state.fe_ix);
+
+#if !defined(ERTS_NEW_PURGE_STRATEGY)
+ ASSERT(ERTS_COPY_LITERAL_AREA());
+ ERTS_SET_COPY_LITERAL_AREA(NULL);
+#endif
+#ifndef ERTS_SMP
+ erts_fun_purge_abort_finalize(purge_state.funs, purge_state.fe_ix);
+ finalize_purge_operation(BIF_P, 0);
+ BIF_RET(am_false);
+#else
+ /*
+ * We need to restore the code addresses of the funs in
+ * two stages in order to ensure that we do not get any
+ * stale suspended processes due to the purge abort.
+ * Restore address pointer (erts_fun_purge_abort_prepare);
+ * wait for thread progress; clear pending purge address
+ * pointer (erts_fun_purge_abort_finalize), and then
+ * resume processes that got suspended
+ * (finalize_purge_operation).
+ */
+ erts_schedule_thr_prgr_later_op(finalize_purge_abort,
+ NULL,
+ &purger_lop_data);
+ erts_suspend(BIF_P, ERTS_PROC_LOCK_MAIN, NULL);
+ ERTS_BIF_YIELD_RETURN(BIF_P, am_false);
+#endif
}
- erts_release_code_write_permission();
- return ret;
-}
-static void
-decrement_refc(BeamInstr* code)
-{
- struct erl_off_heap_header* oh =
- (struct erl_off_heap_header *) code[MI_LITERALS_OFF_HEAP];
-
- while (oh) {
- Binary* bptr;
- ASSERT(thing_subtag(oh->thing_word) == REFC_BINARY_SUBTAG);
- bptr = ((ProcBin*)oh)->val;
- if (erts_refc_dectest(&bptr->refc, 0) == 0) {
- erts_bin_free(bptr);
+ case am_complete: {
+ ErtsCodeIndex code_ix;
+ BeamInstr* code;
+ Module* modp;
+ int is_blocking = 0;
+ Eterm ret;
+ ErtsLiteralArea *literals = NULL;
+
+
+ /*
+ * We have no direct references into the code.
+ * Complete the purge.
+ */
+
+ if (purge_state.module != BIF_ARG_1)
+ BIF_ERROR(BIF_P, BADARG);
+
+ if (!erts_try_seize_code_write_permission(BIF_P)) {
+ ERTS_BIF_YIELD2(bif_export[BIF_erts_internal_purge_module_2],
+ BIF_P, BIF_ARG_1, BIF_ARG_2);
+ }
+
+ code_ix = erts_active_code_ix();
+
+ /*
+ * Correct module?
+ */
+
+ if ((modp = erts_get_module(BIF_ARG_1, code_ix)) == NULL) {
+ ERTS_BIF_PREP_RET(ret, am_false);
+ }
+ else {
+
+ erts_rwlock_old_code(code_ix);
+
+ /*
+ * Any code to purge?
+ */
+ if (!modp->old.code_hdr) {
+ ERTS_BIF_PREP_RET(ret, am_false);
+ }
+ else {
+ /*
+ * Unload any NIF library
+ */
+ if (modp->old.nif != NULL) {
+ /* ToDo: Do unload nif without blocking */
+ erts_rwunlock_old_code(code_ix);
+ erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ erts_smp_thr_progress_block();
+ is_blocking = 1;
+ erts_rwlock_old_code(code_ix);
+ erts_unload_nif(modp->old.nif);
+ modp->old.nif = NULL;
+ }
+
+ /*
+ * Remove the old code.
+ */
+ ASSERT(erts_total_code_size >= modp->old.code_length);
+ erts_total_code_size -= modp->old.code_length;
+ code = (BeamInstr*) modp->old.code_hdr;
+ erts_fun_purge_complete(purge_state.funs, purge_state.fe_ix);
+ beam_catches_delmod(modp->old.catches, code, modp->old.code_length,
+ code_ix);
+ literals = modp->old.code_hdr->literal_area;
+ modp->old.code_hdr->literal_area = NULL;
+ erts_free(ERTS_ALC_T_CODE, (void *) code);
+ modp->old.code_hdr = NULL;
+ modp->old.code_length = 0;
+ modp->old.catches = BEAM_CATCHES_NIL;
+ erts_remove_from_ranges(code);
+
+ ERTS_BIF_PREP_RET(ret, am_true);
+ }
+
+ if (purge_state.saved_old.code_hdr) {
+ modp->old = purge_state.saved_old;
+ purge_state.saved_old.code_hdr = 0;
+ }
+ erts_rwunlock_old_code(code_ix);
+ }
+ if (is_blocking) {
+ erts_smp_thr_progress_unblock();
+ erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ }
+
+ erts_release_code_write_permission();
+
+ finalize_purge_operation(BIF_P, ret == am_true);
+
+#if !defined(ERTS_NEW_PURGE_STRATEGY)
+
+ ASSERT(ERTS_COPY_LITERAL_AREA() == literals);
+ ERTS_SET_COPY_LITERAL_AREA(NULL);
+ erts_release_literal_area(literals);
+
+#else /* ERTS_NEW_PURGE_STRATEGY */
+
+ if (literals) {
+ ErtsLiteralAreaRef *ref;
+ ref = erts_alloc(ERTS_ALC_T_LITERAL_REF,
+ sizeof(ErtsLiteralAreaRef));
+ ref->literal_area = literals;
+ ref->next = NULL;
+ erts_smp_mtx_lock(&release_literal_areas.mtx);
+ if (release_literal_areas.last) {
+ release_literal_areas.last->next = ref;
+ release_literal_areas.last = ref;
+ }
+ else {
+ release_literal_areas.first = ref;
+ release_literal_areas.last = ref;
+ }
+ erts_smp_mtx_unlock(&release_literal_areas.mtx);
+ erts_queue_message(erts_literal_area_collector,
+ 0,
+ erts_alloc_message(0, NULL),
+ am_copy_literals,
+ BIF_P->common.id);
}
- oh = oh->next;
+
+#endif /* ERTS_NEW_PURGE_STRATEGY */
+
+ return ret;
+ }
+
+ default:
+ BIF_ERROR(BIF_P, BADARG);
+
}
}
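
erts_internal_purge_module_2() above is driven in phases by the erts_code_purger process: prepare (or prepare_on_load) marks the fun entries with pending-purge markers, the purger then checks all processes for references into the old code, and depending on the outcome it calls back with abort or complete. A rough outline of that call sequence, as an illustrative sketch only (the real driver is the erts_code_purger process implemented in Erlang, not this C):

#include <stdio.h>

/* Hypothetical stand-ins for the BIF above and for the process scan. */
typedef enum { PREPARE, ABORT, COMPLETE } purge_phase;

static int purge_module(purge_phase phase)
{
    printf("purge_module(%d)\n", (int) phase);
    return 1;                            /* pretend there was old code */
}

static int any_process_references_old_code(void)
{
    return 0;                            /* pretend the soft purge succeeded */
}

/* Outline of one purge round: prepare, scan, then abort or complete. */
static int purge_round(void)
{
    if (!purge_module(PREPARE))
        return 0;                        /* no old code to purge */
    if (any_process_references_old_code()) {
        purge_module(ABORT);             /* keep the old code; purge failed */
        return 0;
    }
    return purge_module(COMPLETE);       /* old code and literals released */
}

int main(void) { return !purge_round(); }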
@@ -1062,9 +2057,9 @@ delete_code(Module* modp)
{
ErtsCodeIndex code_ix = erts_staging_code_ix();
Eterm module = make_atom(modp->module);
- int i;
+ int i, num_exps = export_list_size(code_ix);
- for (i = 0; i < export_list_size(code_ix); i++) {
+ for (i = 0; i < num_exps; i++) {
Export *ep = export_list(i, code_ix);
if (ep != NULL && (ep->code[0] == module)) {
if (ep->addressv[code_ix] == ep->code+3) {
@@ -1089,10 +2084,11 @@ delete_code(Module* modp)
ASSERT(modp->curr.num_breakpoints == 0);
ASSERT(modp->curr.num_traced_exports == 0);
modp->old = modp->curr;
- modp->curr.code = NULL;
+ modp->curr.code_hdr = NULL;
modp->curr.code_length = 0;
modp->curr.catches = BEAM_CATCHES_NIL;
modp->curr.nif = NULL;
+
}
@@ -1106,9 +2102,9 @@ beam_make_current_old(Process *c_p, ErtsProcLocks c_p_locks, Eterm module)
* if not, delete old code; error if old code already exists.
*/
- if (modp->curr.code != NULL && modp->old.code != NULL) {
+ if (modp->curr.code_hdr && modp->old.code_hdr) {
return am_not_purged;
- } else if (modp->old.code == NULL) { /* Make the current version old. */
+ } else if (!modp->old.code_hdr) { /* Make the current version old. */
delete_code(modp);
}
return NIL;