Diffstat (limited to 'erts/emulator/beam')
-rw-r--r--  erts/emulator/beam/beam_bp.c            12
-rw-r--r--  erts/emulator/beam/beam_bp.h            17
-rw-r--r--  erts/emulator/beam/beam_emu.c           40
-rw-r--r--  erts/emulator/beam/beam_load.c         262
-rw-r--r--  erts/emulator/beam/bif.c                16
-rw-r--r--  erts/emulator/beam/erl_bif_info.c        5
-rw-r--r--  erts/emulator/beam/erl_cpu_topology.c   23
-rw-r--r--  erts/emulator/beam/erl_init.c            9
-rw-r--r--  erts/emulator/beam/erl_lock_check.c      3
-rw-r--r--  erts/emulator/beam/erl_nif.c             2
-rw-r--r--  erts/emulator/beam/erl_port_task.c       2
-rw-r--r--  erts/emulator/beam/erl_process.c       283
-rw-r--r--  erts/emulator/beam/erl_process.h        29
-rw-r--r--  erts/emulator/beam/erl_process_lock.c    6
-rw-r--r--  erts/emulator/beam/erl_process_lock.h   10
-rw-r--r--  erts/emulator/beam/global.h              2
-rw-r--r--  erts/emulator/beam/utils.c               3
17 files changed, 306 insertions(+), 418 deletions(-)
diff --git a/erts/emulator/beam/beam_bp.c b/erts/emulator/beam/beam_bp.c
index dd31376a2d..692fa61fe8 100644
--- a/erts/emulator/beam/beam_bp.c
+++ b/erts/emulator/beam/beam_bp.c
@@ -495,16 +495,6 @@ erts_find_local_func(Eterm mfa[3]) {
return NULL;
}
-/* bp_hash */
-ERTS_INLINE Uint bp_sched2ix() {
-#ifdef ERTS_SMP
- ErtsSchedulerData *esdp;
- esdp = erts_get_scheduler_data();
- return esdp->no - 1;
-#else
- return 0;
-#endif
-}
static void bp_hash_init(bp_time_hash_t *hash, Uint n) {
Uint size = sizeof(bp_data_time_item_t)*n;
Uint i;
@@ -1347,7 +1337,7 @@ static BpData *is_break(BeamInstr *pc, BeamInstr break_op) {
return NULL;
}
- bd = ebd = rs[bp_sched2ix()];
+ bd = ebd = rs[erts_bp_sched2ix()];
ASSERT(bd);
if ( (break_op == 0) || (bd->this_instr == break_op)) {
return bd;
diff --git a/erts/emulator/beam/beam_bp.h b/erts/emulator/beam/beam_bp.h
index 2ec5818688..167069552f 100644
--- a/erts/emulator/beam/beam_bp.h
+++ b/erts/emulator/beam/beam_bp.h
@@ -144,8 +144,6 @@ extern erts_smp_spinlock_t erts_bp_lock;
#define ErtsSmpBPUnlock(BDC)
#endif
-ERTS_INLINE Uint bp_sched2ix(void);
-
#ifdef ERTS_SMP
#define bp_sched2ix_proc(p) ((p)->scheduler_data->no - 1)
#else
@@ -247,4 +245,19 @@ BpData *erts_get_time_break(Process *p, BeamInstr *pc);
BeamInstr *erts_find_local_func(Eterm mfa[3]);
+ERTS_GLB_INLINE Uint erts_bp_sched2ix(void);
+
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+ERTS_GLB_INLINE Uint erts_bp_sched2ix(void)
+{
+#ifdef ERTS_SMP
+ ErtsSchedulerData *esdp;
+ esdp = erts_get_scheduler_data();
+ return esdp->no - 1;
+#else
+ return 0;
+#endif
+}
+#endif
+
#endif /* _BEAM_BP_H */
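The ERTS_INLINE function deleted from beam_bp.c above reappears here as an ERTS_GLB_INLINE function, the ERTS convention for inline functions shared across translation units: the header declares the function and, where ERTS_GLB_INLINE_INCL_FUNC_DEF is in effect, also supplies the body, so every caller can inline it while one unit still emits an external definition. A minimal sketch of that convention under assumed names (GLB_INLINE, sched2ix), not the actual sys.h macros:

/* header (sketch): declaration plus guarded definition */
#if defined(__GNUC__)                 /* compiler can inline */
#  define GLB_INLINE static __inline__
#  define GLB_INLINE_INCL_FUNC_DEF 1  /* every unit gets the body */
#else
#  define GLB_INLINE                  /* plain external function */
#  define GLB_INLINE_INCL_FUNC_DEF 0  /* defined as 1 in exactly one .c file */
#endif

GLB_INLINE unsigned sched2ix(void);

#if GLB_INLINE_INCL_FUNC_DEF
GLB_INLINE unsigned sched2ix(void)
{
    return 0;                         /* scheduler number - 1 in SMP builds */
}
#endif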
diff --git a/erts/emulator/beam/beam_emu.c b/erts/emulator/beam/beam_emu.c
index 68e6383f7f..c65b2be106 100644
--- a/erts/emulator/beam/beam_emu.c
+++ b/erts/emulator/beam/beam_emu.c
@@ -45,7 +45,7 @@
/* #define HARDDEBUG 1 */
#if defined(NO_JUMP_TABLE)
-# define OpCase(OpCode) case op_##OpCode: lb_##OpCode
+# define OpCase(OpCode) case op_##OpCode
# define CountCase(OpCode) case op_count_##OpCode
# define OpCode(OpCode) ((Uint*)op_##OpCode)
# define Goto(Rel) {Go = (int)(Rel); goto emulator_loop;}
@@ -53,7 +53,7 @@
#else
# define OpCase(OpCode) lb_##OpCode
# define CountCase(OpCode) lb_count_##OpCode
-# define Goto(Rel) goto *(Rel)
+# define Goto(Rel) goto *((void *)Rel)
# define LabelAddr(Label) &&Label
# define OpCode(OpCode) (&&lb_##OpCode)
#endif
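The macros above select the interpreter's dispatch mode: with GCC's labels-as-values extension every opcode is a label and Goto() jumps straight to the next instruction (the added (void *) cast keeps the computed-goto operand a plain pointer), while NO_JUMP_TABLE builds fall back to a switch inside a loop. A self-contained sketch of both modes with made-up opcodes, not the BEAM loop itself:

#include <stdio.h>

enum { OP_INC, OP_PRINT, OP_HALT };

static void run(const int *prog)
{
#if defined(NO_JUMP_TABLE)
    /* switch dispatch: each OpCase() becomes a case label */
    int acc = 0, pc = 0;
    for (;;) {
        switch (prog[pc++]) {
        case OP_INC:   acc++;               break;
        case OP_PRINT: printf("%d\n", acc); break;
        case OP_HALT:  return;
        }
    }
#else
    /* threaded dispatch: labels as values (GCC/Clang extension) */
    static const void *labels[] = { &&lb_inc, &&lb_print, &&lb_halt };
    int acc = 0, pc = 0;
#define GOTO_NEXT() goto *labels[prog[pc++]]
    GOTO_NEXT();
lb_inc:   acc++;                GOTO_NEXT();
lb_print: printf("%d\n", acc);  GOTO_NEXT();
lb_halt:  return;
#endif
}

int main(void)
{
    const int prog[] = { OP_INC, OP_INC, OP_PRINT, OP_HALT };
    run(prog);                            /* prints 2 */
    return 0;
}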
@@ -199,7 +199,7 @@ do { \
} \
} while (0)
-#define ClauseFail() goto lb_jump_f
+#define ClauseFail() goto jump_f
#define SAVE_CP(X) \
do { \
@@ -234,6 +234,12 @@ BeamInstr beam_return_trace[1]; /* OpCode(i_return_trace) */
BeamInstr beam_exception_trace[1]; /* UGLY also OpCode(i_return_trace) */
BeamInstr beam_return_time_trace[1]; /* OpCode(i_return_time_trace) */
+
+/*
+ * We should warn only once for tuple funs.
+ */
+static erts_smp_atomic_t warned_for_tuple_funs;
+
/*
* All Beam instructions in numerical order.
*/
@@ -1015,6 +1021,7 @@ init_emulator(void)
#if defined(VXWORKS)
init_done = 0;
#endif
+ erts_smp_atomic_init_nob(&warned_for_tuple_funs, (erts_aint_t) 0);
process_main();
}
@@ -2540,6 +2547,7 @@ void process_main(void)
lb_Cl_error: {
if (Arg(0) != 0) {
OpCase(jump_f): {
+ jump_f:
SET_I((BeamInstr *) Arg(0));
Goto(*I);
}
@@ -3113,7 +3121,7 @@ void process_main(void)
/* Fall through */
OpCase(error_action_code): {
- no_error_handler:
+ handle_error:
reg[0] = r(0);
SWAPOUT;
I = handle_error(c_p, NULL, reg, NULL);
@@ -3274,7 +3282,7 @@ void process_main(void)
OpCase(i_func_info_IaaI): {
c_p->freason = EXC_FUNCTION_CLAUSE;
c_p->current = I + 2;
- goto lb_error_action_code;
+ goto handle_error;
}
OpCase(try_case_end_s):
@@ -4960,7 +4968,7 @@ void process_main(void)
if (I) {
Goto(*I);
}
- goto no_error_handler;
+ goto handle_error;
}
@@ -6187,6 +6195,26 @@ call_fun(Process* p, /* Current process. */
if (!is_atom(module) || !is_atom(function)) {
goto badfun;
}
+
+ /*
+ * If this is the first time a tuple fun is used,
+ * send a warning to the logger.
+ */
+ if (erts_smp_atomic_xchg_nob(&warned_for_tuple_funs,
+ (erts_aint_t) 1) == 0) {
+ erts_dsprintf_buf_t* dsbufp;
+
+ dsbufp = erts_create_logger_dsbuf();
+ erts_dsprintf(dsbufp, "Call to tuple fun {%T,%T}.\n\n"
+ "Tuple funs are deprecated and will be removed "
+ "in R16. Use \"fun M:F/A\" instead, for example "
+ "\"fun %T:%T/%d\".\n\n"
+ "(This warning will only be shown the first time "
+ "a tuple fun is called.)\n",
+ module, function, module, function, arity);
+ erts_send_warning_to_logger(p->group_leader, dsbufp);
+ }
+
if ((ep = erts_find_export_entry(module, function, arity)) == NULL) {
ep = erts_find_export_entry(erts_proc_get_error_handler(p),
am_undefined_function, 3);
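The warned_for_tuple_funs logic works because an atomic exchange returns the previous value: exactly one caller observes 0 and sends the deprecation warning, and every later caller sees 1 and skips it. A sketch of the same idiom using C11 atomics in place of the erts_smp_atomic_* wrappers:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int warned;     /* zero-initialized, like warned_for_tuple_funs */

static void warn_once(const char *msg)
{
    /* atomic_exchange returns the old value: only the first caller sees 0 */
    if (atomic_exchange(&warned, 1) == 0)
        fprintf(stderr, "%s\n", msg);
}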
diff --git a/erts/emulator/beam/beam_load.c b/erts/emulator/beam/beam_load.c
index 4427defe0c..d8434c098e 100644
--- a/erts/emulator/beam/beam_load.c
+++ b/erts/emulator/beam/beam_load.c
@@ -254,6 +254,7 @@ typedef struct LoaderState {
char* file_name; /* Name of file we are reading (usually chunk name). */
byte* file_p; /* Current pointer within file. */
unsigned file_left; /* Number of bytes left in file. */
+ ErlDrvBinary* bin; /* Binary holding BEAM file (or NULL) */
/*
* The following are used mainly for diagnostics.
@@ -499,8 +500,10 @@ static void free_state(LoaderState* stp);
static Eterm insert_new_code(Process *c_p, ErtsProcLocks c_p_locks,
Eterm group_leader, Eterm module,
BeamInstr* code, Uint size);
+static int init_iff_file(LoaderState* stp, byte* code, Uint size);
static int scan_iff_file(LoaderState* stp, Uint* chunk_types,
Uint num_types, Uint num_mandatory);
+static int verify_chunks(LoaderState* stp);
static int load_atom_table(LoaderState* stp);
static int load_import_table(LoaderState* stp);
static int read_export_table(LoaderState* stp);
@@ -630,40 +633,23 @@ erts_prepare_loading(LoaderState* stp, Process *c_p, Eterm group_leader,
Eterm* modp, byte* code, Uint unloaded_size)
{
Eterm retval = am_badfile;
- ErlDrvBinary* bin = NULL;
stp->module = *modp;
stp->group_leader = group_leader;
- /*
- * Check if the module is compressed (or possibly invalid/corrupted).
- */
- if ( !(unloaded_size >= 4 &&
- code[0] == 'F' && code[1] == 'O' &&
- code[2] == 'R' && code[3] == '1') ) {
- bin = (ErlDrvBinary *)
- erts_gzinflate_buffer((char*)code, unloaded_size);
- if (bin == NULL) {
- goto load_error;
- }
- code = (byte*)bin->orig_bytes;
- unloaded_size = bin->orig_size;
- }
+#if defined(LOAD_MEMORY_HARD_DEBUG) && defined(DEBUG)
+ erts_fprintf(stderr,"Loading a module\n");
+#endif
/*
* Scan the IFF file.
*/
-#if defined(LOAD_MEMORY_HARD_DEBUG) && defined(DEBUG)
- erts_fprintf(stderr,"Loading a module\n");
-#endif
-
CHKALLOC();
CHKBLK(ERTS_ALC_T_CODE,stp->code);
- stp->file_name = "IFF header for Beam file";
- stp->file_p = code;
- stp->file_left = unloaded_size;
- if (!scan_iff_file(stp, chunk_types, NUM_CHUNK_TYPES, NUM_MANDATORY)) {
+ if (!init_iff_file(stp, code, unloaded_size) ||
+ !scan_iff_file(stp, chunk_types, NUM_CHUNK_TYPES, NUM_MANDATORY) ||
+ !verify_chunks(stp)) {
goto load_error;
}
@@ -787,9 +773,6 @@ erts_prepare_loading(LoaderState* stp, Process *c_p, Eterm group_leader,
retval = NIL;
load_error:
- if (bin) {
- driver_free_binary(bin);
- }
if (retval != NIL) {
free_state(stp);
}
@@ -864,6 +847,7 @@ erts_alloc_loader_state(void)
LoaderState* stp;
stp = erts_alloc(ERTS_ALC_T_LOADER_TMP, sizeof(LoaderState));
+ stp->bin = NULL;
stp->function = THE_NON_VALUE; /* Function not known yet */
stp->arity = 0;
stp->specific_op = -1;
@@ -897,6 +881,9 @@ erts_alloc_loader_state(void)
static void
free_state(LoaderState* stp)
{
+ if (stp->bin != 0) {
+ driver_free_binary(stp->bin);
+ }
if (stp->code != 0) {
erts_free(ERTS_ALC_T_CODE, stp->code);
}
@@ -956,6 +943,7 @@ free_state(LoaderState* stp)
if (stp->fname != 0) {
erts_free(ERTS_ALC_T_LOADER_TMP, stp->fname);
}
+
erts_free(ERTS_ALC_T_LOADER_TMP, stp);
}
@@ -1011,23 +999,47 @@ insert_new_code(Process *c_p, ErtsProcLocks c_p_locks,
}
static int
-scan_iff_file(LoaderState* stp, Uint* chunk_types, Uint num_types, Uint num_mandatory)
+init_iff_file(LoaderState* stp, byte* code, Uint size)
{
- MD5_CTX context;
+ Uint form_id = MakeIffId('F', 'O', 'R', '1');
Uint id;
Uint count;
- int i;
+
+ if (size < 4) {
+ goto load_error;
+ }
/*
- * The binary must start with an IFF 'FOR1' chunk.
+ * Check if the module is compressed (or possibly invalid/corrupted).
*/
+ if (MakeIffId(code[0], code[1], code[2], code[3]) != form_id) {
+ stp->bin = (ErlDrvBinary *) erts_gzinflate_buffer((char*)code, size);
+ if (stp->bin == NULL) {
+ goto load_error;
+ }
+ code = (byte*)stp->bin->orig_bytes;
+ size = stp->bin->orig_size;
+ if (size < 4) {
+ goto load_error;
+ }
+ }
- GetInt(stp, 4, id);
- if (id != MakeIffId('F', 'O', 'R', '1')) {
+ /*
+ * The binary must start with an IFF 'FOR1' chunk.
+ */
+ if (MakeIffId(code[0], code[1], code[2], code[3]) != form_id) {
LoadError0(stp, "not a BEAM file: no IFF 'FOR1' chunk");
}
/*
+ * Initialize our "virtual file system".
+ */
+
+ stp->file_name = "IFF header for Beam file";
+ stp->file_p = code + 4;
+ stp->file_left = size - 4;
+
+ /*
* Retrieve the chunk size and verify it. If the size is equal to
* or less than the size of the binary, it is ok and we will use it
* as the limit for the logical file size.
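init_iff_file() above probes the first four bytes of the module: a plain BEAM file starts with the IFF form id 'FOR1', and anything else is assumed to be gzip-compressed and is inflated (via erts_gzinflate_buffer()) before the check is repeated. A sketch of the magic-number probe, with MAKE_IFF_ID standing in for the emulator's MakeIffId():

#include <stddef.h>

#define MAKE_IFF_ID(a, b, c, d) \
    (((unsigned)(a) << 24) | ((unsigned)(b) << 16) | \
     ((unsigned)(c) << 8)  |  (unsigned)(d))

/* 1 if the buffer already starts with the IFF 'FOR1' form id */
static int is_for1(const unsigned char *code, size_t size)
{
    return size >= 4 &&
           MAKE_IFF_ID(code[0], code[1], code[2], code[3]) ==
           MAKE_IFF_ID('F', 'O', 'R', '1');
}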
@@ -1048,6 +1060,21 @@ scan_iff_file(LoaderState* stp, Uint* chunk_types, Uint num_types, Uint num_mand
if (id != MakeIffId('B', 'E', 'A', 'M')) {
LoadError0(stp, "not a BEAM file: IFF form type is not 'BEAM'");
}
+ return 1;
+
+ load_error:
+ return 0;
+}
+
+/*
+ * Scan the IFF file. The header should have been verified by init_iff_file().
+ */
+static int
+scan_iff_file(LoaderState* stp, Uint* chunk_types, Uint num_types, Uint num_mandatory)
+{
+ Uint count;
+ Uint id;
+ int i;
/*
* Initialize the chunks[] array in the state.
@@ -1104,17 +1131,25 @@ scan_iff_file(LoaderState* stp, Uint* chunk_types, Uint num_types, Uint num_mand
stp->file_p += count;
stp->file_left -= count;
}
+ return 1;
- /*
- * At this point, we have read the entire IFF file, and we
- * know that it is syntactically correct.
- *
- * Now check that it contains all mandatory chunks. At the
- * same time calculate the MD5 for the module.
- */
+ load_error:
+ return 0;
+}
+
+/*
+ * Verify that all mandatory chunks are present and calculate
+ * MD5 for the module.
+ */
+
+static int
+verify_chunks(LoaderState* stp)
+{
+ int i;
+ MD5_CTX context;
MD5Init(&context);
- for (i = 0; i < num_mandatory; i++) {
+ for (i = 0; i < NUM_MANDATORY; i++) {
if (stp->chunks[i].start != NULL) {
MD5Update(&context, stp->chunks[i].start, stp->chunks[i].size);
} else {
@@ -1124,41 +1159,49 @@ scan_iff_file(LoaderState* stp, Uint* chunk_types, Uint num_types, Uint num_mand
LoadError1(stp, "mandatory chunk of type '%s' not found\n", sbuf);
}
}
- if (LITERAL_CHUNK < num_types) {
- if (stp->chunks[LAMBDA_CHUNK].start != 0) {
- byte* start = stp->chunks[LAMBDA_CHUNK].start;
- Uint left = stp->chunks[LAMBDA_CHUNK].size;
- /*
- * The idea here is to ignore the OldUniq field for the fun; it is
- * based on the old broken hash function, which can be different
- * on little endian and big endian machines.
- */
- if (left >= 4) {
- static byte zero[4];
- MD5Update(&context, start, 4);
- start += 4;
- left -= 4;
+ /*
+ * If there is a lambda chunk, include parts of it in the MD5.
+ */
+ if (stp->chunks[LAMBDA_CHUNK].start != 0) {
+ byte* start = stp->chunks[LAMBDA_CHUNK].start;
+ Uint left = stp->chunks[LAMBDA_CHUNK].size;
+
+ /*
+ * The idea here is to ignore the OldUniq field for the fun; it is
+ * based on the old broken hash function, which can be different
+ * on little endian and big endian machines.
+ */
+ if (left >= 4) {
+ static byte zero[4];
+ MD5Update(&context, start, 4);
+ start += 4;
+ left -= 4;
- while (left >= 24) {
- /* Include: Function Arity Index NumFree */
- MD5Update(&context, start, 20);
- /* Set to zero: OldUniq */
- MD5Update(&context, zero, 4);
- start += 24;
- left -= 24;
- }
- }
- /* Can't happen for a correct 'FunT' chunk */
- if (left > 0) {
- MD5Update(&context, start, left);
+ while (left >= 24) {
+ /* Include: Function Arity Index NumFree */
+ MD5Update(&context, start, 20);
+ /* Set to zero: OldUniq */
+ MD5Update(&context, zero, 4);
+ start += 24;
+ left -= 24;
}
}
- if (stp->chunks[LITERAL_CHUNK].start != 0) {
- MD5Update(&context, stp->chunks[LITERAL_CHUNK].start,
- stp->chunks[LITERAL_CHUNK].size);
+ /* Can't happen for a correct 'FunT' chunk */
+ if (left > 0) {
+ MD5Update(&context, start, left);
}
}
+
+
+ /*
+ * If there is a literal chunk, include it in the MD5.
+ */
+ if (stp->chunks[LITERAL_CHUNK].start != 0) {
+ MD5Update(&context, stp->chunks[LITERAL_CHUNK].start,
+ stp->chunks[LITERAL_CHUNK].size);
+ }
+
MD5Final(stp->mod_md5, &context);
return 1;
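verify_chunks() feeds the lambda ('FunT') chunk to MD5 in 24-byte entries, keeping the Function/Arity/Index/NumFree words but substituting zeros for the 4-byte OldUniq field, which came from the old broken hash function and can differ between little- and big-endian machines. A sketch of that loop with a stub md5_update() in place of MD5Update():

#include <stddef.h>
#include <stdio.h>

/* stand-in for MD5Update(&context, data, len) */
static void md5_update(const unsigned char *data, size_t len)
{
    (void)data;
    printf("hashing %zu bytes\n", len);
}

/* hash 24-byte FunT entries, zeroing the trailing OldUniq word */
static void hash_lambda_entries(const unsigned char *p, size_t left)
{
    static const unsigned char zero[4];
    while (left >= 24) {
        md5_update(p, 20);            /* Function Arity Index NumFree */
        md5_update(zero, 4);          /* OldUniq replaced by zeros */
        p += 24;
        left -= 24;
    }
}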
@@ -5407,7 +5450,7 @@ code_get_chunk_2(BIF_ALIST_2)
Process* p = BIF_P;
Eterm Bin = BIF_ARG_1;
Eterm Chunk = BIF_ARG_2;
- LoaderState state;
+ LoaderState* stp;
Uint chunk = 0;
ErlSubBin* sb;
Uint offset;
@@ -5419,15 +5462,16 @@ code_get_chunk_2(BIF_ALIST_2)
Eterm real_bin;
byte* temp_alloc = NULL;
+ stp = erts_alloc_loader_state();
if ((start = erts_get_aligned_binary_bytes(Bin, &temp_alloc)) == NULL) {
error:
erts_free_aligned_binary_bytes(temp_alloc);
+ if (stp) {
+ free_state(stp);
+ }
BIF_ERROR(p, BADARG);
}
- state.module = THE_NON_VALUE; /* Suppress diagnostiscs */
- state.file_name = "IFF header for Beam file";
- state.file_p = start;
- state.file_left = binary_size(Bin);
+ stp->module = THE_NON_VALUE; /* Suppress diagnostics */
for (i = 0; i < 4; i++) {
Eterm* chunkp;
Eterm num;
@@ -5445,25 +5489,30 @@ code_get_chunk_2(BIF_ALIST_2)
if (is_not_nil(Chunk)) {
goto error;
}
- if (!scan_iff_file(&state, &chunk, 1, 1)) {
- erts_free_aligned_binary_bytes(temp_alloc);
- return am_undefined;
+ if (!init_iff_file(stp, start, binary_size(Bin)) ||
+ !scan_iff_file(stp, &chunk, 1, 1) ||
+ stp->chunks[0].start == NULL) {
+ res = am_undefined;
+ goto done;
}
ERTS_GET_REAL_BIN(Bin, real_bin, offset, bitoffs, bitsize);
if (bitoffs) {
- res = new_binary(p, state.chunks[0].start, state.chunks[0].size);
+ res = new_binary(p, stp->chunks[0].start, stp->chunks[0].size);
} else {
sb = (ErlSubBin *) HAlloc(p, ERL_SUB_BIN_SIZE);
sb->thing_word = HEADER_SUB_BIN;
sb->orig = real_bin;
- sb->size = state.chunks[0].size;
+ sb->size = stp->chunks[0].size;
sb->bitsize = 0;
sb->bitoffs = 0;
- sb->offs = offset + (state.chunks[0].start - start);
+ sb->offs = offset + (stp->chunks[0].start - start);
sb->is_writable = 0;
res = make_binary(sb);
}
+
+ done:
erts_free_aligned_binary_bytes(temp_alloc);
+ free_state(stp);
return res;
}
@@ -5476,21 +5525,29 @@ code_module_md5_1(BIF_ALIST_1)
{
Process* p = BIF_P;
Eterm Bin = BIF_ARG_1;
- LoaderState state;
+ LoaderState* stp;
+ byte* bytes;
byte* temp_alloc = NULL;
+ Eterm res;
- if ((state.file_p = erts_get_aligned_binary_bytes(Bin, &temp_alloc)) == NULL) {
+ stp = erts_alloc_loader_state();
+ if ((bytes = erts_get_aligned_binary_bytes(Bin, &temp_alloc)) == NULL) {
+ free_state(stp);
BIF_ERROR(p, BADARG);
}
- state.module = THE_NON_VALUE; /* Suppress diagnostiscs */
- state.file_name = "IFF header for Beam file";
- state.file_left = binary_size(Bin);
-
- if (!scan_iff_file(&state, chunk_types, NUM_CHUNK_TYPES, NUM_MANDATORY)) {
- return am_undefined;
+ stp->module = THE_NON_VALUE; /* Suppress diagnostics */
+ if (!init_iff_file(stp, bytes, binary_size(Bin)) ||
+ !scan_iff_file(stp, chunk_types, NUM_CHUNK_TYPES, NUM_MANDATORY) ||
+ !verify_chunks(stp)) {
+ res = am_undefined;
+ goto done;
}
+ res = new_binary(p, stp->mod_md5, sizeof(stp->mod_md5));
+
+ done:
erts_free_aligned_binary_bytes(temp_alloc);
- return new_binary(p, state.mod_md5, sizeof(state.mod_md5));
+ free_state(stp);
+ return res;
}
#define WORDS_PER_FUNCTION 6
@@ -5776,7 +5833,6 @@ erts_make_stub_module(Process* p, Eterm Mod, Eterm Beam, Eterm Info)
int code_size;
int rval;
int i;
- ErlDrvBinary* bin = NULL;
byte* temp_alloc = NULL;
byte* bytes;
Uint size;
@@ -5809,29 +5865,17 @@ erts_make_stub_module(Process* p, Eterm Mod, Eterm Beam, Eterm Info)
size = binary_size(Beam);
/*
- * Uncompressed if needed.
- */
- if (!(size >= 4 && bytes[0] == 'F' && bytes[1] == 'O' &&
- bytes[2] == 'R' && bytes[3] == '1')) {
- bin = (ErlDrvBinary *) erts_gzinflate_buffer((char*)bytes, size);
- if (bin == NULL) {
- goto error;
- }
- bytes = (byte*)bin->orig_bytes;
- size = bin->orig_size;
- }
-
- /*
* Scan the Beam binary and read the interesting sections.
*/
- stp->file_name = "IFF header for Beam file";
- stp->file_p = bytes;
- stp->file_left = size;
stp->module = Mod;
stp->group_leader = p->group_leader;
stp->num_functions = n;
- if (!scan_iff_file(stp, chunk_types, NUM_CHUNK_TYPES, NUM_MANDATORY)) {
+ if (!init_iff_file(stp, bytes, size)) {
+ goto error;
+ }
+ if (!scan_iff_file(stp, chunk_types, NUM_CHUNK_TYPES, NUM_MANDATORY) ||
+ !verify_chunks(stp)) {
goto error;
}
define_file(stp, "code chunk header", CODE_CHUNK);
@@ -5986,13 +6030,11 @@ erts_make_stub_module(Process* p, Eterm Mod, Eterm Beam, Eterm Info)
if (patch_funentries(Patchlist)) {
erts_free_aligned_binary_bytes(temp_alloc);
free_state(stp);
- if (bin != NULL) {
- driver_free_binary(bin);
- }
return Mod;
}
error:
+ erts_free_aligned_binary_bytes(temp_alloc);
free_state(stp);
BIF_ERROR(p, BADARG);
}
diff --git a/erts/emulator/beam/bif.c b/erts/emulator/beam/bif.c
index 8ab363a1ec..26f1b4facb 100644
--- a/erts/emulator/beam/bif.c
+++ b/erts/emulator/beam/bif.c
@@ -870,8 +870,6 @@ BIF_RETTYPE spawn_opt_1(BIF_ALIST_1)
}
} else if (arg == am_scheduler && is_small(val)) {
Sint scheduler = signed_val(val);
- if (erts_common_run_queue && erts_no_schedulers > 1)
- goto error;
if (scheduler < 0 || erts_no_schedulers < scheduler)
goto error;
so.scheduler = (int) scheduler;
@@ -1535,8 +1533,6 @@ BIF_RETTYPE process_flag_2(BIF_ALIST_2)
ErtsRunQueue *old;
ErtsRunQueue *new;
Sint sched;
- if (erts_common_run_queue && erts_no_schedulers > 1)
- goto error;
if (!is_small(BIF_ARG_2))
goto error;
sched = signed_val(BIF_ARG_2);
@@ -4128,8 +4124,20 @@ BIF_RETTYPE system_flag_2(BIF_ALIST_2)
if (is_value(res))
BIF_RET(res);
} else if (ERTS_IS_ATOM_STR("cpu_topology", BIF_ARG_1)) {
+ erts_send_warning_to_logger_str(
+ BIF_P->group_leader,
+ "A call to erlang:system_flag(cpu_topology, _) was made.\n"
+ "The cpu_topology argument is deprecated and scheduled\n"
+ "for removal in erts-5.10/OTP-R16. For more information\n"
+ "see the erlang:system_flag/2 documentation.\n");
BIF_TRAP1(set_cpu_topology_trap, BIF_P, BIF_ARG_2);
} else if (ERTS_IS_ATOM_STR("scheduler_bind_type", BIF_ARG_1)) {
+ erts_send_warning_to_logger_str(
+ BIF_P->group_leader,
+ "A call to erlang:system_flag(scheduler_bind_type, _) was\n"
+ "made. The scheduler_bind_type argument is deprecated and\n"
+ "scheduled for removal in erts-5.10/OTP-R16. For more\n"
+ "information see the erlang:system_flag/2 documentation.\n");
return erts_bind_schedulers(BIF_P, BIF_ARG_2);
}
error:
diff --git a/erts/emulator/beam/erl_bif_info.c b/erts/emulator/beam/erl_bif_info.c
index a79feaebdb..cb918fd34c 100644
--- a/erts/emulator/beam/erl_bif_info.c
+++ b/erts/emulator/beam/erl_bif_info.c
@@ -78,7 +78,6 @@ static char erts_system_version[] = ("Erlang " ERLANG_OTP_RELEASE
#ifdef ERTS_SMP
" [smp:%beu:%beu]"
#endif
- " [rq:%beu]"
#ifdef USE_THREADS
" [async-threads:%d]"
#endif
@@ -301,9 +300,7 @@ erts_print_system_version(int to, void *arg, Process *c_p)
#endif
return erts_print(to, arg, erts_system_version
#ifdef ERTS_SMP
- , total, online, erts_no_run_queues
-#else
- , 1
+ , total, online
#endif
#ifdef USE_THREADS
, erts_async_max_threads
diff --git a/erts/emulator/beam/erl_cpu_topology.c b/erts/emulator/beam/erl_cpu_topology.c
index 03c0ef904a..fe3693d0ca 100644
--- a/erts/emulator/beam/erl_cpu_topology.c
+++ b/erts/emulator/beam/erl_cpu_topology.c
@@ -486,10 +486,7 @@ erts_sched_check_cpu_bind_post_suspend(ErtsSchedulerData *esdp)
erts_thr_set_main_status(1, (int) esdp->no);
/* Make sure we check if we should bind to a cpu or not... */
- if (esdp->run_queue->flags & ERTS_RUNQ_FLG_SHARED_RUNQ)
- erts_smp_atomic32_set_nob(&esdp->chk_cpu_bind, 1);
- else
- esdp->run_queue->flags |= ERTS_RUNQ_FLG_CHK_CPU_BIND;
+ esdp->run_queue->flags |= ERTS_RUNQ_FLG_CHK_CPU_BIND;
}
#endif
@@ -502,11 +499,7 @@ erts_sched_check_cpu_bind(ErtsSchedulerData *esdp)
erts_cpu_groups_callback_list_t *cgcl;
erts_cpu_groups_callback_call_t *cgcc;
#ifdef ERTS_SMP
- if (erts_common_run_queue)
- erts_smp_atomic32_set_nob(&esdp->chk_cpu_bind, 0);
- else {
- esdp->run_queue->flags &= ~ERTS_RUNQ_FLG_CHK_CPU_BIND;
- }
+ esdp->run_queue->flags &= ~ERTS_RUNQ_FLG_CHK_CPU_BIND;
#endif
erts_smp_runq_unlock(esdp->run_queue);
erts_smp_rwmtx_rwlock(&cpuinfo_rwmtx);
@@ -1729,16 +1722,8 @@ erts_init_cpu_topology(void)
scheduler2cpu_map[ix].bound_id = -1;
}
- if (cpu_bind_order == ERTS_CPU_BIND_UNDEFINED) {
- int ncpus = erts_get_cpu_configured(cpuinfo);
- if (ncpus < 1 || erts_no_schedulers < ncpus)
- cpu_bind_order = ERTS_CPU_BIND_NONE;
- else
- cpu_bind_order = ((system_cpudata || user_cpudata)
- && (erts_bind_to_cpu(cpuinfo, -1) != -ENOTSUP)
- ? ERTS_CPU_BIND_DEFAULT_BIND
- : ERTS_CPU_BIND_NONE);
- }
+ if (cpu_bind_order == ERTS_CPU_BIND_UNDEFINED)
+ cpu_bind_order = ERTS_CPU_BIND_NONE;
reader_groups_map = add_cpu_groups(reader_groups,
reader_groups_callback,
diff --git a/erts/emulator/beam/erl_init.c b/erts/emulator/beam/erl_init.c
index 6c4ba2af68..717315d8bd 100644
--- a/erts/emulator/beam/erl_init.c
+++ b/erts/emulator/beam/erl_init.c
@@ -111,7 +111,6 @@ Eterm erts_error_logger_warnings; /* What to map warning logs to, am_error,
int erts_compat_rel;
-static int use_multi_run_queue;
static int no_schedulers;
static int no_schedulers_online;
@@ -254,8 +253,7 @@ erl_init(int ncpu)
erts_init_time();
erts_init_sys_common_misc();
erts_init_process(ncpu);
- erts_init_scheduling(use_multi_run_queue,
- no_schedulers,
+ erts_init_scheduling(no_schedulers,
no_schedulers_online);
erts_init_cpu_topology(); /* Must be after init_scheduling */
erts_alloc_late_init();
@@ -613,7 +611,6 @@ early_init(int *argc, char **argv) /*
size_t envbufsz;
erts_sched_compact_load = 1;
- use_multi_run_queue = 1;
erts_printf_eterm_func = erts_printf_term;
erts_disable_tolerant_timeofday = 0;
display_items = 200;
@@ -1257,12 +1254,8 @@ erl_start(int argc, char **argv)
erts_usage();
}
}
- else if (sys_strcmp("mrq", sub_param) == 0)
- use_multi_run_queue = 1;
else if (sys_strcmp("nsp", sub_param) == 0)
erts_use_sender_punish = 0;
- else if (sys_strcmp("srq", sub_param) == 0)
- use_multi_run_queue = 0;
else if (sys_strcmp("wt", sub_param) == 0) {
arg = get_arg(sub_param+2, argv[i+1], &i);
if (erts_sched_set_wakeup_limit(arg) != 0) {
diff --git a/erts/emulator/beam/erl_lock_check.c b/erts/emulator/beam/erl_lock_check.c
index 44da6b6c51..09e85893c3 100644
--- a/erts/emulator/beam/erl_lock_check.c
+++ b/erts/emulator/beam/erl_lock_check.c
@@ -173,7 +173,6 @@ static erts_lc_lock_order_t erts_lock_order[] = {
{ "pix_lock", "address" },
{ "run_queues_lists", NULL },
{ "sched_stat", NULL },
- { "run_queue_sleep_list", "address" },
#endif
{ "async_init_mtx", NULL },
#ifdef ERTS_SMP
@@ -1253,7 +1252,7 @@ erts_lc_init_lock(erts_lc_lock_t *lck, char *name, Uint16 flags)
{
lck->id = erts_lc_get_lock_order_id(name);
- lck->extra = &lck->extra;
+ lck->extra = (UWord) &lck->extra;
ASSERT(is_not_immed(lck->extra));
lck->flags = flags;
lck->inited = ERTS_LC_INITITALIZED;
diff --git a/erts/emulator/beam/erl_nif.c b/erts/emulator/beam/erl_nif.c
index 62798bb2c1..740a1b853e 100644
--- a/erts/emulator/beam/erl_nif.c
+++ b/erts/emulator/beam/erl_nif.c
@@ -1669,7 +1669,7 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2)
}
else { /* Function traced, patch the original instruction word */
BpData** bps = (BpData**) code_ptr[1];
- BpData* bp = (BpData*) bps[bp_sched2ix()];
+ BpData* bp = (BpData*) bps[erts_bp_sched2ix()];
bp->orig_instr = (BeamInstr) BeamOp(op_call_nif);
}
code_ptr[5+1] = (BeamInstr) entry->funcs[i].fptr;
diff --git a/erts/emulator/beam/erl_port_task.c b/erts/emulator/beam/erl_port_task.c
index 87b8e5131b..2b5e65b11a 100644
--- a/erts/emulator/beam/erl_port_task.c
+++ b/erts/emulator/beam/erl_port_task.c
@@ -1048,8 +1048,6 @@ erts_port_migrate(Port *prt, int *prt_locked,
ERTS_SMP_LC_CHK_RUNQ_LOCK(from_rq, *from_locked);
ERTS_SMP_LC_CHK_RUNQ_LOCK(to_rq, *to_locked);
- ASSERT(!erts_common_run_queue);
-
if (!*from_locked || !*to_locked) {
if (from_rq < to_rq) {
if (!*to_locked) {
diff --git a/erts/emulator/beam/erl_process.c b/erts/emulator/beam/erl_process.c
index 84c0ded016..ec4b1dcd98 100644
--- a/erts/emulator/beam/erl_process.c
+++ b/erts/emulator/beam/erl_process.c
@@ -196,8 +196,6 @@ do { \
erts_sched_stat_t erts_sched_stat;
-ErtsRunQueue *erts_common_run_queue;
-
#ifdef USE_THREADS
static erts_tsd_key_t sched_data_key;
#endif
@@ -225,10 +223,6 @@ typedef union {
static ErtsAlignedSchedulerSleepInfo *aligned_sched_sleep_info;
-#ifndef BM_COUNTERS
-static int processes_busy;
-#endif
-
Process** process_tab;
static Uint last_reductions;
static Uint last_exact_reductions;
@@ -497,9 +491,6 @@ erts_init_process(int ncpu)
p_serial_shift = erts_fit_in_bits(erts_max_processes - 1);
p_serial_mask = ((~(~((Uint) 0) << proc_bits)) >> p_serial_shift);
erts_process_tab_index_mask = ~(~((Uint) 0) << p_serial_shift);
-#ifndef BM_COUNTERS
- processes_busy = 0;
-#endif
last_reductions = 0;
last_exact_reductions = 0;
erts_default_process_flags = 0;
@@ -1724,21 +1715,12 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq));
- erts_smp_spin_lock(&rq->sleepers.lock);
flgs = sched_prep_spin_wait(ssi);
if (flgs & ERTS_SSI_FLG_SUSPENDED) {
/* Go suspend instead... */
- erts_smp_spin_unlock(&rq->sleepers.lock);
return;
}
- ssi->prev = NULL;
- ssi->next = rq->sleepers.list;
- if (rq->sleepers.list)
- rq->sleepers.list->prev = ssi;
- rq->sleepers.list = ssi;
- erts_smp_spin_unlock(&rq->sleepers.lock);
-
/*
* If all schedulers are waiting, one of them *should*
* be waiting in erl_sys_schedule()
@@ -1997,10 +1979,10 @@ ssi_flags_set_wake(ErtsSchedulerSleepInfo *ssi)
}
static void
-wake_scheduler(ErtsRunQueue *rq, int incq, int one)
+wake_scheduler(ErtsRunQueue *rq, int incq)
{
ErtsSchedulerSleepInfo *ssi;
- ErtsSchedulerSleepList *sl;
+ erts_aint32_t flgs;
/*
* The unlocked run queue is not strictly necessary
@@ -2012,56 +1994,13 @@ wake_scheduler(ErtsRunQueue *rq, int incq, int one)
*/
ERTS_SMP_LC_ASSERT(!erts_smp_lc_runq_is_locked(rq));
- sl = &rq->sleepers;
+ ssi = rq->scheduler->ssi;
- erts_smp_spin_lock(&sl->lock);
- ssi = sl->list;
- if (!ssi)
- erts_smp_spin_unlock(&sl->lock);
- else if (one) {
- erts_aint32_t flgs;
- if (ssi->prev)
- ssi->prev->next = ssi->next;
- else {
- ASSERT(sl->list == ssi);
- sl->list = ssi->next;
- }
- if (ssi->next)
- ssi->next->prev = ssi->prev;
-
- erts_smp_spin_unlock(&sl->lock);
+ flgs = ssi_flags_set_wake(ssi);
+ erts_sched_finish_poke(ssi, flgs);
- flgs = ssi_flags_set_wake(ssi);
- erts_sched_finish_poke(ssi, flgs);
-
- if (incq && !erts_common_run_queue && (flgs & ERTS_SSI_FLG_WAITING))
- non_empty_runq(rq);
- }
- else {
- sl->list = NULL;
- erts_smp_spin_unlock(&sl->lock);
-
- ERTS_THR_MEMORY_BARRIER;
- do {
- ErtsSchedulerSleepInfo *wake_ssi = ssi;
- ssi = ssi->next;
- erts_sched_finish_poke(wake_ssi, ssi_flags_set_wake(wake_ssi));
- } while (ssi);
- }
-}
-
-static void
-wake_all_schedulers(void)
-{
- if (erts_common_run_queue)
- wake_scheduler(erts_common_run_queue, 0, 0);
- else {
- int ix;
- for (ix = 0; ix < erts_no_run_queues; ix++) {
- ErtsRunQueue *rq = ERTS_RUNQ_IX(ix);
- wake_scheduler(rq, 0, 1);
- }
- }
+ if (incq && (flgs & ERTS_SSI_FLG_WAITING))
+ non_empty_runq(rq);
}
#define ERTS_NO_USED_RUNQS_SHIFT 16
@@ -2154,7 +2093,7 @@ chk_wake_sched(ErtsRunQueue *crq, int ix, int activate)
erts_smp_xrunq_unlock(crq, wrq);
}
}
- wake_scheduler(wrq, 0, 1);
+ wake_scheduler(wrq, 0);
return 1;
}
return 0;
@@ -2202,7 +2141,7 @@ smp_notify_inc_runq(ErtsRunQueue *runq)
{
#ifdef ERTS_SMP
if (runq)
- wake_scheduler(runq, 1, 1);
+ wake_scheduler(runq, 1);
#endif
}
@@ -2217,19 +2156,12 @@ erts_sched_notify_check_cpu_bind(void)
{
#ifdef ERTS_SMP
int ix;
- if (erts_common_run_queue) {
- for (ix = 0; ix < erts_no_schedulers; ix++)
- erts_smp_atomic32_set_relb(&ERTS_SCHEDULER_IX(ix)->chk_cpu_bind, 1);
- wake_all_schedulers();
- }
- else {
- for (ix = 0; ix < erts_no_run_queues; ix++) {
- ErtsRunQueue *rq = ERTS_RUNQ_IX(ix);
- erts_smp_runq_lock(rq);
- rq->flags |= ERTS_RUNQ_FLG_CHK_CPU_BIND;
- erts_smp_runq_unlock(rq);
- wake_scheduler(rq, 0, 1);
- };
+ for (ix = 0; ix < erts_no_run_queues; ix++) {
+ ErtsRunQueue *rq = ERTS_RUNQ_IX(ix);
+ erts_smp_runq_lock(rq);
+ rq->flags |= ERTS_RUNQ_FLG_CHK_CPU_BIND;
+ erts_smp_runq_unlock(rq);
+ wake_scheduler(rq, 0);
}
#else
erts_sched_check_cpu_bind(erts_get_scheduler_data());
@@ -2498,7 +2430,7 @@ evacuate_run_queue(ErtsRunQueue *evac_rq, ErtsRunQueue *rq)
if (notify_to_rq)
smp_notify_inc_runq(rq);
- wake_scheduler(evac_rq, 0, 1);
+ wake_scheduler(evac_rq, 0);
}
static int
@@ -2656,9 +2588,6 @@ static int
try_steal_task(ErtsRunQueue *rq)
{
int res, rq_locked, vix, active_rqs, blnc_rqs;
-
- if (erts_common_run_queue)
- return 0;
/*
* We are not allowed to steal jobs to this run queue
@@ -3335,14 +3264,10 @@ init_aux_work_data(ErtsAuxWorkData *awdp, ErtsSchedulerData *esdp)
}
void
-erts_init_scheduling(int mrq, int no_schedulers, int no_schedulers_online)
+erts_init_scheduling(int no_schedulers, int no_schedulers_online)
{
int ix, n, no_ssi;
-#ifndef ERTS_SMP
- mrq = 0;
-#endif
-
init_misc_op_list_alloc();
ASSERT(no_schedulers_online <= no_schedulers);
@@ -3351,7 +3276,7 @@ erts_init_scheduling(int mrq, int no_schedulers, int no_schedulers_online)
/* Create and initialize run queues */
- n = (int) (mrq ? no_schedulers : 1);
+ n = no_schedulers;
erts_aligned_run_queues =
erts_alloc_permanent_cache_aligned(ERTS_ALC_T_RUNQS,
@@ -3376,14 +3301,9 @@ erts_init_scheduling(int mrq, int no_schedulers, int no_schedulers_online)
erts_smp_mtx_init_x(&rq->mtx, "run_queue", make_small(ix + 1));
erts_smp_cnd_init(&rq->cnd);
-#ifdef ERTS_SMP
- erts_smp_spinlock_init(&rq->sleepers.lock, "run_queue_sleep_list");
- rq->sleepers.list = NULL;
-#endif
-
rq->waiting = 0;
rq->woken = 0;
- rq->flags = !mrq ? ERTS_RUNQ_FLG_SHARED_RUNQ : 0;
+ rq->flags = 0;
rq->check_balance_reds = ERTS_RUNQ_CALL_CHECK_BALANCE_REDS;
rq->full_reds_history_sum = 0;
for (rix = 0; rix < ERTS_FULL_REDS_HISTORY_SIZE; rix++) {
@@ -3429,8 +3349,6 @@ erts_init_scheduling(int mrq, int no_schedulers, int no_schedulers_online)
rq->ports.end = NULL;
}
- erts_common_run_queue = !mrq ? ERTS_RUNQ_IX(0) : NULL;
-
#ifdef ERTS_SMP
if (erts_no_run_queues != 1) {
@@ -3507,18 +3425,9 @@ erts_init_scheduling(int mrq, int no_schedulers, int no_schedulers_online)
erts_init_atom_cache_map(&esdp->atom_cache_map);
- if (erts_common_run_queue) {
- esdp->run_queue = erts_common_run_queue;
- esdp->run_queue->scheduler = NULL;
- }
- else {
- esdp->run_queue = ERTS_RUNQ_IX(ix);
- esdp->run_queue->scheduler = esdp;
- }
+ esdp->run_queue = ERTS_RUNQ_IX(ix);
+ esdp->run_queue->scheduler = esdp;
-#ifdef ERTS_SMP
- erts_smp_atomic32_init_nob(&esdp->chk_cpu_bind, 0);
-#endif
init_aux_work_data(&esdp->aux_work_data, esdp);
}
@@ -3541,8 +3450,7 @@ erts_init_scheduling(int mrq, int no_schedulers, int no_schedulers_online)
schdlr_sspnd.msb.ongoing = 0;
erts_smp_atomic32_init_nob(&schdlr_sspnd.active, no_schedulers);
schdlr_sspnd.msb.procs = NULL;
- init_no_runqs(no_schedulers,
- erts_common_run_queue ? 1 : no_schedulers_online);
+ init_no_runqs(no_schedulers, no_schedulers_online);
balance_info.last_active_runqs = no_schedulers;
erts_smp_mtx_init(&balance_info.update_mtx, "migration_info_update");
balance_info.forced_check_balance = 0;
@@ -3555,16 +3463,9 @@ erts_init_scheduling(int mrq, int no_schedulers, int no_schedulers_online)
balance_info.n = 0;
if (no_schedulers_online < no_schedulers) {
- if (erts_common_run_queue) {
- for (ix = no_schedulers_online; ix < no_schedulers; ix++)
- erts_smp_atomic32_read_bor_nob(&ERTS_SCHED_SLEEP_INFO_IX(ix)->flags,
- ERTS_SSI_FLG_SUSPENDED);
- }
- else {
- for (ix = no_schedulers_online; ix < erts_no_run_queues; ix++)
- evacuate_run_queue(ERTS_RUNQ_IX(ix),
- ERTS_RUNQ_IX(ix % no_schedulers_online));
- }
+ for (ix = no_schedulers_online; ix < erts_no_run_queues; ix++)
+ evacuate_run_queue(ERTS_RUNQ_IX(ix),
+ ERTS_RUNQ_IX(ix % no_schedulers_online));
}
schdlr_sspnd.wait_curr_online = no_schedulers_online;
@@ -3609,8 +3510,6 @@ ErtsRunQueue *
erts_schedid2runq(Uint id)
{
int ix;
- if (erts_common_run_queue)
- return erts_common_run_queue;
ix = (int) id - 1;
ASSERT(0 <= ix && ix < erts_no_run_queues);
return ERTS_RUNQ_IX(ix);
@@ -3901,9 +3800,11 @@ suspend_scheduler(ErtsSchedulerData *esdp)
wake = 0;
}
- flgs = erts_smp_atomic32_read_acqb(&ssi->flags);
- if (!(flgs & ERTS_SSI_FLG_SUSPENDED))
- break;
+ if (curr_online && !ongoing_multi_scheduling_block()) {
+ flgs = erts_smp_atomic32_read_acqb(&ssi->flags);
+ if (!(flgs & ERTS_SSI_FLG_SUSPENDED))
+ break;
+ }
erts_smp_mtx_unlock(&schdlr_sspnd.mtx);
while (1) {
@@ -4095,10 +3996,6 @@ erts_set_schedulers_online(Process *p,
for (ix = online; ix < no; ix++)
erts_sched_poke(ERTS_SCHED_SLEEP_INFO_IX(ix));
}
- else if (erts_common_run_queue) {
- for (ix = online; ix < no; ix++)
- scheduler_ix_resume_wake(ix);
- }
else {
if (plocks) {
have_unlocked_plocks = 1;
@@ -4146,15 +4043,6 @@ erts_set_schedulers_online(Process *p,
for (ix = no; ix < online; ix++)
erts_sched_poke(ERTS_SCHED_SLEEP_INFO_IX(ix));
}
- else if (erts_common_run_queue) {
- for (ix = no; ix < online; ix++) {
- ErtsSchedulerSleepInfo *ssi;
- ssi = ERTS_SCHED_SLEEP_INFO_IX(ix);
- erts_smp_atomic32_read_bor_nob(&ssi->flags,
- ERTS_SSI_FLG_SUSPENDED);
- }
- wake_all_schedulers();
- }
else {
if (plocks) {
have_unlocked_plocks = 1;
@@ -4181,7 +4069,7 @@ erts_set_schedulers_online(Process *p,
erts_smp_mtx_lock(&schdlr_sspnd.mtx);
for (ix = no; ix < online; ix++) {
ErtsRunQueue *rq = ERTS_RUNQ_IX(ix);
- wake_scheduler(rq, 0, 1);
+ wake_scheduler(rq, 0);
}
}
}
@@ -4274,33 +4162,26 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int all)
res = ERTS_SCHDLR_SSPND_YIELD_DONE_MSCHED_BLOCKED;
schdlr_sspnd.msb.wait_active = 2;
}
- if (erts_common_run_queue) {
- for (ix = 1; ix < online; ix++)
- erts_smp_atomic32_read_bor_nob(&ERTS_SCHED_SLEEP_INFO_IX(ix)->flags,
- ERTS_SSI_FLG_SUSPENDED);
- wake_all_schedulers();
- }
- else {
- erts_smp_mtx_unlock(&schdlr_sspnd.mtx);
- erts_smp_mtx_lock(&balance_info.update_mtx);
- set_no_used_runqs(1);
- for (ix = 0; ix < online; ix++) {
- ErtsRunQueue *rq = ERTS_RUNQ_IX(ix);
- erts_smp_runq_lock(rq);
- ASSERT(!(rq->flags & ERTS_RUNQ_FLG_SUSPENDED));
- ERTS_RUNQ_RESET_MIGRATION_PATHS(rq, 0x7);
- erts_smp_runq_unlock(rq);
- }
- /*
- * Evacuate all activities in all other run queues
- * into the first run queue. Note order is important,
- * online run queues has to be evacuated last.
- */
- for (ix = erts_no_run_queues-1; ix >= 1; ix--)
- evacuate_run_queue(ERTS_RUNQ_IX(ix), ERTS_RUNQ_IX(0));
- erts_smp_mtx_unlock(&balance_info.update_mtx);
- erts_smp_mtx_lock(&schdlr_sspnd.mtx);
+
+ erts_smp_mtx_unlock(&schdlr_sspnd.mtx);
+ erts_smp_mtx_lock(&balance_info.update_mtx);
+ set_no_used_runqs(1);
+ for (ix = 0; ix < online; ix++) {
+ ErtsRunQueue *rq = ERTS_RUNQ_IX(ix);
+ erts_smp_runq_lock(rq);
+ ASSERT(!(rq->flags & ERTS_RUNQ_FLG_SUSPENDED));
+ ERTS_RUNQ_RESET_MIGRATION_PATHS(rq, 0x7);
+ erts_smp_runq_unlock(rq);
}
+ /*
+ * Evacuate all activities in all other run queues
+ * into the first run queue. Note order is important,
+ * online run queues have to be evacuated last.
+ */
+ for (ix = erts_no_run_queues-1; ix >= 1; ix--)
+ evacuate_run_queue(ERTS_RUNQ_IX(ix), ERTS_RUNQ_IX(0));
+ erts_smp_mtx_unlock(&balance_info.update_mtx);
+ erts_smp_mtx_lock(&schdlr_sspnd.mtx);
if (erts_smp_atomic32_read_nob(&schdlr_sspnd.active)
!= schdlr_sspnd.msb.wait_active) {
@@ -4412,12 +4293,6 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int all)
ASSERT(erts_smp_atomic32_read_nob(&schdlr_sspnd.active) == 1);
ERTS_SCHDLR_SSPND_CHNG_SET(0, ERTS_SCHDLR_SSPND_CHNG_MSB);
}
- else if (erts_common_run_queue) {
- for (ix = 1; ix < schdlr_sspnd.online; ix++)
- erts_smp_atomic32_read_band_nob(&ERTS_SCHED_SLEEP_INFO_IX(ix)->flags,
- ~ERTS_SSI_FLG_SUSPENDED);
- wake_all_schedulers();
- }
else {
int online = schdlr_sspnd.online;
erts_smp_mtx_unlock(&schdlr_sspnd.mtx);
@@ -5096,7 +4971,7 @@ suspend_process_2(BIF_ALIST_2)
/* This is really a piece of cake without SMP support... */
if (!smon->active) {
- suspend_process(erts_common_run_queue, suspendee);
+ suspend_process(ERTS_RUNQ_IX(0), suspendee);
smon->active++;
res = am_true;
}
@@ -5666,8 +5541,6 @@ erts_proc_migrate(Process *p, ErtsProcLocks *plcks,
|| from_locked);
ERTS_SMP_LC_CHK_RUNQ_LOCK(from_rq, *from_locked);
ERTS_SMP_LC_CHK_RUNQ_LOCK(to_rq, *to_locked);
-
- ASSERT(!erts_common_run_queue);
/*
* If we have the lock on the run queue to migrate to,
@@ -5818,25 +5691,17 @@ erts_process_status(Process *c_p, ErtsProcLocks c_p_locks,
int i;
ErtsSchedulerData *esdp;
- if (erts_common_run_queue)
- erts_smp_runq_lock(erts_common_run_queue);
-
for (i = 0; i < erts_no_schedulers; i++) {
esdp = ERTS_SCHEDULER_IX(i);
- if (!erts_common_run_queue)
- erts_smp_runq_lock(esdp->run_queue);
+ erts_smp_runq_lock(esdp->run_queue);
if (esdp->free_process && esdp->free_process->id == rpid) {
res = am_free;
- if (!erts_common_run_queue)
- erts_smp_runq_unlock(esdp->run_queue);
+ erts_smp_runq_unlock(esdp->run_queue);
break;
}
- if (!erts_common_run_queue)
- erts_smp_runq_unlock(esdp->run_queue);
+ erts_smp_runq_unlock(esdp->run_queue);
}
- if (erts_common_run_queue)
- erts_smp_runq_unlock(erts_common_run_queue);
#endif
}
@@ -6147,10 +6012,8 @@ Process *schedule(Process *p, int calls)
#ifdef ERTS_SMP
- if (!(rq->flags & ERTS_RUNQ_FLG_SHARED_RUNQ)
- && rq->check_balance_reds <= 0) {
+ if (rq->check_balance_reds <= 0)
check_balance(rq);
- }
ERTS_SMP_LC_ASSERT(!erts_thr_progress_is_blocking());
ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq));
@@ -6160,20 +6023,15 @@ Process *schedule(Process *p, int calls)
continue_check_activities_to_run:
- if (rq->flags & (ERTS_RUNQ_FLG_SHARED_RUNQ
- | ERTS_RUNQ_FLG_CHK_CPU_BIND
+ if (rq->flags & (ERTS_RUNQ_FLG_CHK_CPU_BIND
| ERTS_RUNQ_FLG_SUSPENDED)) {
- if ((rq->flags & ERTS_RUNQ_FLG_SUSPENDED)
- || (erts_smp_atomic32_read_acqb(&esdp->ssi->flags)
- & ERTS_SSI_FLG_SUSPENDED)) {
+ if (rq->flags & ERTS_RUNQ_FLG_SUSPENDED) {
ASSERT(erts_smp_atomic32_read_nob(&esdp->ssi->flags)
& ERTS_SSI_FLG_SUSPENDED);
suspend_scheduler(esdp);
}
- if ((rq->flags & ERTS_RUNQ_FLG_CHK_CPU_BIND)
- || erts_smp_atomic32_read_acqb(&esdp->chk_cpu_bind)) {
+ if (rq->flags & ERTS_RUNQ_FLG_CHK_CPU_BIND)
erts_sched_check_cpu_bind(esdp);
- }
}
{
@@ -6215,16 +6073,11 @@ Process *schedule(Process *p, int calls)
empty_runq(rq);
- if (rq->flags & (ERTS_RUNQ_FLG_SHARED_RUNQ
- | ERTS_RUNQ_FLG_SUSPENDED)) {
- if ((rq->flags & ERTS_RUNQ_FLG_SUSPENDED)
- || (erts_smp_atomic32_read_acqb(&esdp->ssi->flags)
- & ERTS_SSI_FLG_SUSPENDED)) {
- ASSERT(erts_smp_atomic32_read_nob(&esdp->ssi->flags)
- & ERTS_SSI_FLG_SUSPENDED);
- non_empty_runq(rq);
- goto continue_check_activities_to_run;
- }
+ if (rq->flags & ERTS_RUNQ_FLG_SUSPENDED) {
+ ASSERT(erts_smp_atomic32_read_nob(&esdp->ssi->flags)
+ & ERTS_SSI_FLG_SUSPENDED);
+ non_empty_runq(rq);
+ goto continue_check_activities_to_run;
}
else if (!(rq->flags & ERTS_RUNQ_FLG_INACTIVE)) {
/*
@@ -6289,11 +6142,7 @@ Process *schedule(Process *p, int calls)
else if (rq->wakeup_other < wakeup_other_limit)
rq->wakeup_other += rq->len*wo_reds + ERTS_WAKEUP_OTHER_FIXED_INC;
else {
- if (erts_common_run_queue) {
- if (erts_common_run_queue->waiting)
- wake_scheduler(erts_common_run_queue, 0, 1);
- }
- else if (erts_smp_atomic32_read_acqb(&no_empty_run_queues) != 0) {
+ if (erts_smp_atomic32_read_acqb(&no_empty_run_queues) != 0) {
wake_scheduler_on_empty_runq(rq);
rq->wakeup_other = 0;
}
@@ -6892,7 +6741,9 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
goto error;
}
+#ifdef BM_COUNTERS
processes_busy++;
+#endif
BM_COUNT(processes_spawned);
#ifndef HYBRID
@@ -8415,7 +8266,9 @@ continue_exit_process(Process *p
pbt = ERTS_PROC_SET_CALL_TIME(p, ERTS_PROC_LOCKS_ALL, NULL);
erts_smp_proc_unlock(p, ERTS_PROC_LOCKS_ALL);
+#ifdef BM_COUNTERS
processes_busy--;
+#endif
if (dep) {
erts_do_net_exits(dep, reason);
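With the common run queue gone, each run queue has exactly one owning scheduler, so the new wake_scheduler() no longer walks a spinlock-protected sleeper list: it sets the wake flags on the owner's sleep info and pokes it only if the scheduler was actually waiting. A sketch of that shape under assumed names (sleep_info, poke), with a C11 atomic exchange standing in for ssi_flags_set_wake():

#include <stdatomic.h>

#define SSI_WAITING 1u                /* set while in scheduler_wait() */

struct sleep_info { atomic_uint flags; };

/* stand-in for erts_sched_finish_poke(): futex wake / cond signal */
static void poke(struct sleep_info *ssi) { (void)ssi; }

static void wake_one(struct sleep_info *ssi)
{
    /* clear the sleep flags; the old value shows whether a wake is needed */
    unsigned old = atomic_exchange(&ssi->flags, 0u);
    if (old & SSI_WAITING)
        poke(ssi);
}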
diff --git a/erts/emulator/beam/erl_process.h b/erts/emulator/beam/erl_process.h
index 69ff423133..a51b380bb0 100644
--- a/erts/emulator/beam/erl_process.h
+++ b/erts/emulator/beam/erl_process.h
@@ -144,12 +144,10 @@ extern int erts_sched_thread_suggested_stack_size;
(((Uint32) 1) << (ERTS_RUNQ_FLG_BASE2 + 1))
#define ERTS_RUNQ_FLG_SUSPENDED \
(((Uint32) 1) << (ERTS_RUNQ_FLG_BASE2 + 2))
-#define ERTS_RUNQ_FLG_SHARED_RUNQ \
- (((Uint32) 1) << (ERTS_RUNQ_FLG_BASE2 + 3))
#define ERTS_RUNQ_FLG_CHK_CPU_BIND \
- (((Uint32) 1) << (ERTS_RUNQ_FLG_BASE2 + 4))
+ (((Uint32) 1) << (ERTS_RUNQ_FLG_BASE2 + 3))
#define ERTS_RUNQ_FLG_INACTIVE \
- (((Uint32) 1) << (ERTS_RUNQ_FLG_BASE2 + 5))
+ (((Uint32) 1) << (ERTS_RUNQ_FLG_BASE2 + 4))
#define ERTS_RUNQ_FLGS_MIGRATION_QMASKS \
(ERTS_RUNQ_FLGS_EMIGRATE_QMASK \
@@ -272,11 +270,6 @@ typedef enum {
typedef struct ErtsSchedulerSleepInfo_ ErtsSchedulerSleepInfo;
-typedef struct {
- erts_smp_spinlock_t lock;
- ErtsSchedulerSleepInfo *list;
-} ErtsSchedulerSleepList;
-
struct ErtsSchedulerSleepInfo_ {
#ifdef ERTS_SMP
ErtsSchedulerSleepInfo *next;
@@ -339,10 +332,6 @@ struct ErtsRunQueue_ {
erts_smp_mtx_t mtx;
erts_smp_cnd_t cnd;
-#ifdef ERTS_SMP
- ErtsSchedulerSleepList sleepers;
-#endif
-
ErtsSchedulerData *scheduler;
int waiting; /* < 0 in sys schedule; > 0 on cnd variable */
int woken;
@@ -388,7 +377,6 @@ typedef union {
} ErtsAlignedRunQueue;
extern ErtsAlignedRunQueue *erts_aligned_run_queues;
-extern ErtsRunQueue *erts_common_run_queue;
#define ERTS_PROC_REDUCTIONS_EXECUTED(RQ, PRIO, REDS, AREDS) \
do { \
@@ -469,11 +457,6 @@ struct ErtsSchedulerData_ {
ErtsSchedAllocData alloc_data;
-#ifdef ERTS_SMP
- /* NOTE: These fields are modified under held mutexes by other threads */
- erts_smp_atomic32_t chk_cpu_bind; /* Only used when common run queue */
-#endif
-
#ifdef ERTS_DO_VERIFY_UNUSED_TEMP_ALLOC
erts_alloc_verify_func_t verify_unused_temp_alloc;
Allctr_t *verify_unused_temp_alloc_data;
@@ -1079,7 +1062,7 @@ extern struct erts_system_profile_flags_t erts_system_profile_flags;
void erts_pre_init_process(void);
void erts_late_init_process(void);
void erts_early_init_scheduling(int);
-void erts_init_scheduling(int, int, int);
+void erts_init_scheduling(int, int);
ErtsProcList *erts_proclist_create(Process *);
void erts_proclist_destroy(ErtsProcList *);
@@ -1464,8 +1447,7 @@ erts_get_runq_proc(Process *p)
ASSERT(p->run_queue);
return p->run_queue;
#else
- ASSERT(erts_common_run_queue);
- return erts_common_run_queue;
+ return ERTS_RUNQ_IX(0);
#endif
}
@@ -1478,8 +1460,7 @@ erts_get_runq_current(ErtsSchedulerData *esdp)
esdp = erts_get_scheduler_data();
return esdp->run_queue;
#else
- ASSERT(erts_common_run_queue);
- return erts_common_run_queue;
+ return ERTS_RUNQ_IX(0);
#endif
}
diff --git a/erts/emulator/beam/erl_process_lock.c b/erts/emulator/beam/erl_process_lock.c
index b4d20480c5..a5a753b798 100644
--- a/erts/emulator/beam/erl_process_lock.c
+++ b/erts/emulator/beam/erl_process_lock.c
@@ -123,10 +123,10 @@ erts_init_proc_lock(int cpus)
erts_smp_spinlock_init(&qs_lock, "proc_lck_qs_alloc");
for (i = 0; i < ERTS_NO_OF_PIX_LOCKS; i++) {
#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_smp_spinlock_init_x(&erts_pix_locks[i].u.spnlck,
- "pix_lock", make_small(i));
+ erts_mtx_init_x(&erts_pix_locks[i].u.mtx,
+ "pix_lock", make_small(i));
#else
- erts_smp_spinlock_init(&erts_pix_locks[i].u.spnlck, "pix_lock");
+ erts_mtx_init(&erts_pix_locks[i].u.mtx, "pix_lock");
#endif
}
queue_free_list = NULL;
diff --git a/erts/emulator/beam/erl_process_lock.h b/erts/emulator/beam/erl_process_lock.h
index 97f250138e..97e554914e 100644
--- a/erts/emulator/beam/erl_process_lock.h
+++ b/erts/emulator/beam/erl_process_lock.h
@@ -255,8 +255,8 @@ void erts_proc_lc_unrequire_lock(Process *p, ErtsProcLocks locks);
typedef struct {
union {
- erts_smp_spinlock_t spnlck;
- char buf[64]; /* Try to get locks in different cache lines */
+ erts_mtx_t mtx;
+ char buf[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(erts_mtx_t))];
} u;
} erts_pix_lock_t;
@@ -380,18 +380,18 @@ ERTS_GLB_INLINE void erts_proc_lock_op_debug(Process *, ErtsProcLocks, int);
ERTS_GLB_INLINE void erts_pix_lock(erts_pix_lock_t *pixlck)
{
ERTS_LC_ASSERT(pixlck);
- erts_smp_spin_lock(&pixlck->u.spnlck);
+ erts_mtx_lock(&pixlck->u.mtx);
}
ERTS_GLB_INLINE void erts_pix_unlock(erts_pix_lock_t *pixlck)
{
ERTS_LC_ASSERT(pixlck);
- erts_smp_spin_unlock(&pixlck->u.spnlck);
+ erts_mtx_unlock(&pixlck->u.mtx);
}
ERTS_GLB_INLINE int erts_lc_pix_lock_is_locked(erts_pix_lock_t *pixlck)
{
- return erts_smp_lc_spinlock_is_locked(&pixlck->u.spnlck);
+ return erts_lc_mtx_is_locked(&pixlck->u.mtx);
}
/*
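The union in erts_pix_lock_t pads every lock out to at least a cache line so that neighboring entries of the pix-lock array never share one (avoiding false sharing); this commit only swaps the spinlock inside the padding for a mutex. A sketch of the padding pattern with pthreads, using an assumed 64-byte line where the header uses ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(erts_mtx_t)):

#include <pthread.h>

#define CACHE_LINE 64                 /* stand-in for the real line size */

typedef struct {
    union {
        pthread_mutex_t mtx;
        /* pad each entry to at least one cache line */
        char buf[sizeof(pthread_mutex_t) > CACHE_LINE
                     ? sizeof(pthread_mutex_t) : CACHE_LINE];
    } u;
} padded_lock_t;

static padded_lock_t locks[256];      /* like erts_pix_locks[] */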
diff --git a/erts/emulator/beam/global.h b/erts/emulator/beam/global.h
index b247576f1c..d5b74efd98 100644
--- a/erts/emulator/beam/global.h
+++ b/erts/emulator/beam/global.h
@@ -205,7 +205,7 @@ erts_port_runq(Port *prt)
rq1 = rq2;
}
#else
- return erts_common_run_queue;
+ return ERTS_RUNQ_IX(0);
#endif
}
diff --git a/erts/emulator/beam/utils.c b/erts/emulator/beam/utils.c
index e4ad7dcb24..df03f5e42c 100644
--- a/erts/emulator/beam/utils.c
+++ b/erts/emulator/beam/utils.c
@@ -45,6 +45,7 @@
#include "erl_thr_progress.h"
#include "erl_thr_queue.h"
#include "erl_sched_spec_pre_alloc.h"
+#include "beam_bp.h"
#undef M_TRIM_THRESHOLD
#undef M_TOP_PAD
@@ -2647,7 +2648,7 @@ tailrecur_ne:
FloatDef f1, f2;
Eterm big;
#if HEAP_ON_C_STACK
- Eterm big_buf[32]; /* If HEAP_ON_C_STACK */
+ Eterm big_buf[CMP_TMP_HEAP_SIZE]; /* If HEAP_ON_C_STACK */
#else
Eterm *big_buf = erts_get_scheduler_data()->cmp_tmp_heap;
#endif