Diffstat (limited to 'erts/emulator/beam/beam_bif_load.c')
-rw-r--r--  erts/emulator/beam/beam_bif_load.c  295
1 file changed, 208 insertions(+), 87 deletions(-)
diff --git a/erts/emulator/beam/beam_bif_load.c b/erts/emulator/beam/beam_bif_load.c
index 42642e103d..98e5ad1e3f 100644
--- a/erts/emulator/beam/beam_bif_load.c
+++ b/erts/emulator/beam/beam_bif_load.c
@@ -44,93 +44,6 @@ static int is_native(BeamInstr* code);
static int any_heap_ref_ptrs(Eterm* start, Eterm* end, char* mod_start, Uint mod_size);
static int any_heap_refs(Eterm* start, Eterm* end, char* mod_start, Uint mod_size);
-Eterm
-load_module_2(BIF_ALIST_2)
-{
-    Eterm reason;
-    Eterm* hp;
-    int sz;
-    byte* code;
-    Eterm res;
-    byte* temp_alloc = NULL;
-    Binary* magic;
-    int is_blocking = 0;
-
-    if (is_not_atom(BIF_ARG_1)) {
-    error:
-        erts_free_aligned_binary_bytes(temp_alloc);
-        BIF_ERROR(BIF_P, BADARG);
-    }
-    if ((code = erts_get_aligned_binary_bytes(BIF_ARG_2, &temp_alloc)) == NULL) {
-        goto error;
-    }
-    hp = HAlloc(BIF_P, 3);
-
-    /*
-     * Read the BEAM file and prepare the module for loading.
-     */
-    magic = erts_alloc_loader_state();
-    sz = binary_size(BIF_ARG_2);
-    reason = erts_prepare_loading(magic, BIF_P, BIF_P->group_leader,
-                                  &BIF_ARG_1, code, sz);
-    erts_free_aligned_binary_bytes(temp_alloc);
-    if (reason != NIL) {
-        res = TUPLE2(hp, am_error, reason);
-        BIF_RET(res);
-    }
-
-    erts_lock_code_ix();
-    for(;;) {
-        Module* modp;
-        erts_start_staging_code_ix();
-        modp = erts_put_module(BIF_ARG_1);
-        if (!is_blocking) {
-            if (modp->curr.num_breakpoints > 0
-                || modp->curr.num_traced_exports > 0
-                || erts_is_default_trace_enabled()) {
-                /* we have tracing, retry single threaded */
-                erts_abort_staging_code_ix();
-                erts_unlock_code_ix();
-                erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
-                erts_smp_thr_progress_block();
-                is_blocking = 1;
-                continue;
-            }
-        }
-        else if (modp->curr.num_breakpoints) {
-            erts_clear_module_break(modp);
-            ASSERT(modp->curr.num_breakpoints == 0);
-        }
-        reason = erts_finish_loading(magic, BIF_P, 0, &BIF_ARG_1);
-        break;
-    }
-
-    if (reason != NIL) {
-        if (reason == am_on_load) {
-            erts_commit_staging_code_ix();
-        }
-        else {
-            erts_abort_staging_code_ix();
-        }
-        res = TUPLE2(hp, am_error, reason);
-    } else {
-        erts_commit_staging_code_ix();
-        if (is_blocking) {
-            set_default_trace_pattern(BIF_ARG_1);
-        }
-        res = TUPLE2(hp, am_module, BIF_ARG_1);
-    }
-
-    if (is_blocking) {
-        erts_smp_thr_progress_unblock();
-        erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
-    }
-    else {
-        erts_unlock_code_ix();
-    }
-    BIF_RET(res);
-}
-
BIF_RETTYPE purge_module_1(BIF_ALIST_1)
{
int purge_res;
@@ -200,6 +113,214 @@ BIF_RETTYPE code_make_stub_module_3(BIF_ALIST_3)
}
BIF_RETTYPE
+prepare_loading_2(BIF_ALIST_2)
+{
+    byte* temp_alloc = NULL;
+    byte* code;
+    Uint sz;
+    Binary* magic;
+    Eterm reason;
+    Eterm* hp;
+    Eterm res;
+
+    if (is_not_atom(BIF_ARG_1)) {
+    error:
+        erts_free_aligned_binary_bytes(temp_alloc);
+        BIF_ERROR(BIF_P, BADARG);
+    }
+    if ((code = erts_get_aligned_binary_bytes(BIF_ARG_2, &temp_alloc)) == NULL) {
+        goto error;
+    }
+
+    magic = erts_alloc_loader_state();
+    sz = binary_size(BIF_ARG_2);
+    reason = erts_prepare_loading(magic, BIF_P, BIF_P->group_leader,
+                                  &BIF_ARG_1, code, sz);
+    erts_free_aligned_binary_bytes(temp_alloc);
+    if (reason != NIL) {
+        hp = HAlloc(BIF_P, 3);
+        res = TUPLE2(hp, am_error, reason);
+        BIF_RET(res);
+    }
+    hp = HAlloc(BIF_P, PROC_BIN_SIZE);
+    res = erts_mk_magic_binary_term(&hp, &MSO(BIF_P), magic);
+    BIF_RET(res);
+}
+
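
As a reading aid, here is a minimal standalone sketch (not ERTS code; every name in it is hypothetical) of the handoff pattern prepare_loading_2() establishes: the prepare step validates its input and returns a refcounted opaque handle, which a later finish step consumes.

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Stand-in for the loader state that ERTS wraps in a magic binary. */
    struct prepared {
        int refc;               /* references held by caller and finisher */
        char module[32];        /* module name, for illustration only */
    };

    /* "prepare": validate the input, hand back an opaque handle. */
    static struct prepared *prepare(const char *module)
    {
        struct prepared *p = malloc(sizeof *p);
        if (p == NULL)
            return NULL;
        p->refc = 1;            /* the caller's reference */
        strncpy(p->module, module, sizeof p->module - 1);
        p->module[sizeof p->module - 1] = '\0';
        return p;
    }

    /* "finish": consume the handle, freeing it on the last reference. */
    static void finish(struct prepared *p)
    {
        printf("installing %s\n", p->module);
        if (--p->refc == 0)
            free(p);
    }

    int main(void)
    {
        struct prepared *p = prepare("my_module");
        if (p != NULL)
            finish(p);
        return 0;
    }
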
+struct m {
+    Binary* code;       /* Prepared code (the magic binary's payload). */
+    Eterm module;       /* Module name (atom). */
+    Module* modp;       /* Entry in the module table. */
+    Uint exception;     /* Set if this module goes into the exception list. */
+};
+
+static Eterm
+exception_list(Process* p, Eterm tag, struct m* mp, Sint exceptions)
+{
+    Eterm* hp = HAlloc(p, 3 + 2*exceptions);
+    Eterm res = NIL;
+
+    /*
+     * Scan forward until all flagged entries have been collected.
+     * (Walking backwards from mp[exceptions-1] would miss flagged
+     * entries at higher indexes and run off the front of the array.)
+     * Building with CONS as we go reverses the order, which is
+     * harmless for an error list.
+     */
+    while (exceptions > 0) {
+        if (mp->exception) {
+            res = CONS(hp, mp->module, res);
+            hp += 2;
+            exceptions--;
+        }
+        mp++;
+    }
+    return TUPLE2(hp, tag, res);
+}
+
+
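The heap arithmetic in exception_list() follows from ERTS term sizes: a cons cell costs 2 words and a 2-tuple costs 3 (header word plus two elements), so {Tag, [M1,...,Mk]} needs exactly 3 + 2*k words, allocated in one HAlloc call. A standalone sketch (not ERTS code) of the same carve-cons-cells-from-one-block pattern:

    #include <stdio.h>

    struct cons { int head; struct cons *tail; };

    int main(void)
    {
        /* One up-front block for all k cells, mirroring the single
         * HAlloc(p, 3 + 2*exceptions) above. */
        enum { k = 3 };
        struct cons arena[k];
        struct cons *hp = arena;        /* bump "heap" pointer */
        struct cons *list = NULL;       /* NIL */
        struct cons *c;
        int i;

        for (i = 0; i < k; i++) {       /* prepend, as CONS does */
            hp->head = i;
            hp->tail = list;
            list = hp++;
        }
        for (c = list; c != NULL; c = c->tail)
            printf("%d ", c->head);     /* prints: 2 1 0 */
        printf("\n");
        return 0;
    }
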
+BIF_RETTYPE
+finish_loading_1(BIF_ALIST_1)
+{
+    int i;
+    int n;
+    struct m* p;
+    Uint exceptions;
+    Eterm res;
+    int is_blocking = 0;
+
+    /*
+     * Validate the argument before we start loading; it must be a
+     * proper list where each element is a magic binary containing
+     * prepared (not previously loaded) code.
+     *
+     * First count the number of elements and allocate an array
+     * to hold them.
+     */
+
+    n = list_length(BIF_ARG_1);
+    if (n == -1) {
+        BIF_ERROR(BIF_P, BADARG);
+    }
+    p = erts_alloc(ERTS_ALC_T_LOADER_TMP, n*sizeof(struct m));
+
+    /*
+     * We now know that the argument is a proper list. Validate
+     * and collect the binaries into the array.
+     */
+
+    for (i = 0; i < n; i++) {
+        Eterm* cons = list_val(BIF_ARG_1);
+        Eterm term = CAR(cons);
+        ProcBin* pb;
+
+        if (!ERTS_TERM_IS_MAGIC_BINARY(term)) {
+        error:
+            erts_free(ERTS_ALC_T_LOADER_TMP, p);
+            BIF_ERROR(BIF_P, BADARG);
+        }
+        pb = (ProcBin*) binary_val(term);
+        p[i].code = pb->val;
+        p[i].module = erts_module_for_prepared_code(p[i].code);
+        if (p[i].module == NIL) {
+            goto error;
+        }
+        BIF_ARG_1 = CDR(cons);
+    }
+
+    /*
+     * Since we cannot handle atomic loading of a group of modules
+     * if one or more of them uses on_load, we will only allow one
+     * element in the list. This limitation is intended to be
+     * lifted in the future.
+     */
+
+    if (n > 1) {
+        erts_free(ERTS_ALC_T_LOADER_TMP, p);
+        BIF_ERROR(BIF_P, SYSTEM_LIMIT);
+    }
+
+    /*
+     * All types are correct. There cannot be a BADARG from now on.
+     * Before we can start loading, we must check whether any of
+     * the modules already has old code. To avoid a race, we must
+     * not allow any other process to initiate a code loading
+     * operation from now on.
+     */
+
+    res = am_ok;
+    erts_lock_code_ix();
+    erts_start_staging_code_ix();
+
+    for (i = 0; i < n; i++) {
+        p[i].modp = erts_put_module(p[i].module);
+        if (p[i].modp->curr.num_breakpoints > 0 ||
+            p[i].modp->curr.num_traced_exports > 0 ||
+            erts_is_default_trace_enabled()) {
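+            /*
+             * Breakpoints, traced exports, or a default trace
+             * pattern are in effect: fall back to loading with
+             * all other threads blocked, as load_module_2() used
+             * to do.
+             */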
+            erts_abort_staging_code_ix();
+            erts_unlock_code_ix();
+            erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
+            erts_smp_thr_progress_block();
+            is_blocking = 1;
+            break;
+        }
+    }
+
+    if (is_blocking) {
+        for (i = 0; i < n; i++) {
+            if (p[i].modp->curr.num_breakpoints) {
+                erts_clear_module_break(p[i].modp);
+                ASSERT(p[i].modp->curr.num_breakpoints == 0);
+            }
+        }
+    }
+
+    exceptions = 0;
+    for (i = 0; i < n; i++) {
+        p[i].exception = 0;
+        if (p[i].modp->curr.code && p[i].modp->old.code) {
+            p[i].exception = 1;
+            exceptions++;
+        }
+    }
+
+    if (exceptions) {
+        res = exception_list(BIF_P, am_not_purged, p, exceptions);
+        erts_abort_staging_code_ix();
+    } else {
+        /*
+         * Now we can load all code. This can't fail.
+         */
+
+        exceptions = 0;
+        for (i = 0; i < n; i++) {
+            Eterm mod;
+            Eterm retval;
+
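+            /*
+             * The caller's heap still references the magic binary,
+             * so take an extra reference here; erts_finish_loading()
+             * is assumed to release one reference when it disposes
+             * of the loader state.
+             */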
+            erts_refc_inc(&p[i].code->refc, 1);
+            retval = erts_finish_loading(p[i].code, BIF_P, 0, &mod);
+            ASSERT(retval == NIL || retval == am_on_load);
+            if (retval == am_on_load) {
+                p[i].exception = 1;
+                exceptions++;
+            }
+        }
+
+        /*
+         * Check whether any module has an on_load_handler.
+         */
+
+        if (exceptions) {
+            res = exception_list(BIF_P, am_on_load, p, exceptions);
+        }
+        erts_commit_staging_code_ix();
+    }
+
+    erts_free(ERTS_ALC_T_LOADER_TMP, p);
+    if (!is_blocking) {
+        erts_unlock_code_ix();
+    } else {
+        erts_smp_thr_progress_unblock();
+        erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
+    }
+    BIF_RET(res);
+}
+
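The locking dance in finish_loading_1() is the code-index staging protocol: take the loader lock, mutate only the staging copy, then either flip the active index (commit) or leave it alone (abort). Below is a minimal standalone model of that protocol (not ERTS code), reduced to two slots and a plain mutex; the real VM uses three code indexes, and blocks thread progress instead when tracing forces single-threaded loading.

    #include <pthread.h>
    #include <stdio.h>

    static int code_tab[2];             /* two "code index" worlds */
    static int active_ix = 0;           /* the world readers see */
    static pthread_mutex_t code_mtx = PTHREAD_MUTEX_INITIALIZER;

    /* Stage new code, then commit or abort.  Readers never observe a
     * half-updated world: committing is a single index flip. */
    static int load(int new_code, int commit)
    {
        int staging_ix;

        pthread_mutex_lock(&code_mtx);    /* erts_lock_code_ix() */
        staging_ix = active_ix ^ 1;       /* erts_start_staging_code_ix() */
        code_tab[staging_ix] = new_code;  /* touch the staging copy only */
        if (commit)
            active_ix = staging_ix;       /* erts_commit_staging_code_ix() */
        /* abort: leave active_ix alone (erts_abort_staging_code_ix()) */
        pthread_mutex_unlock(&code_mtx);  /* erts_unlock_code_ix() */
        return code_tab[active_ix];
    }

    int main(void)
    {
        printf("%d\n", load(17, 1));      /* committed: prints 17 */
        printf("%d\n", load(99, 0));      /* aborted:   prints 17 */
        return 0;
    }
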
+BIF_RETTYPE
check_old_code_1(BIF_ALIST_1)
{
ErtsCodeIndex code_ix;