Diffstat (limited to 'erts/emulator/beam/erl_init.c')
-rw-r--r--    erts/emulator/beam/erl_init.c    263
1 file changed, 137 insertions(+), 126 deletions(-)
diff --git a/erts/emulator/beam/erl_init.c b/erts/emulator/beam/erl_init.c
index 066ceff194..faf980b237 100644
--- a/erts/emulator/beam/erl_init.c
+++ b/erts/emulator/beam/erl_init.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1997-2010. All Rights Reserved.
+ * Copyright Ericsson AB 1997-2012. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -42,6 +42,9 @@
#include "erl_misc_utils.h"
#include "packet_parser.h"
#include "erl_cpu_topology.h"
+#include "erl_thr_progress.h"
+#include "erl_thr_queue.h"
+#include "erl_async.h"
#ifdef HIPE
#include "hipe_mode_switch.h" /* for hipe_mode_switch_init() */
@@ -68,8 +71,11 @@ static void erl_init(int ncpu);
#define ERTS_MIN_COMPAT_REL 7
+static erts_atomic_t exiting;
+
#ifdef ERTS_SMP
-erts_smp_atomic_t erts_writing_erl_crash_dump;
+erts_smp_atomic32_t erts_writing_erl_crash_dump;
+erts_tsd_key_t erts_is_crash_dumping_key;
#else
volatile int erts_writing_erl_crash_dump = 0;
#endif
@@ -86,7 +92,6 @@ int erts_use_sender_punish;
*/
Uint display_items; /* no of items to display in traces etc */
-Uint display_loads; /* print info about loaded modules */
int H_MIN_SIZE; /* The minimum heap grain */
int BIN_VH_MIN_SIZE; /* The minimum binary virtual*/
@@ -98,8 +103,6 @@ int erts_backtrace_depth; /* How many functions to show in a backtrace
* in error codes.
*/
-int erts_async_max_threads; /* number of threads for async support */
-int erts_async_thread_suggested_stack_size;
erts_smp_atomic32_t erts_max_gen_gcs;
Eterm erts_error_logger_warnings; /* What to map warning logs to, am_error,
@@ -108,7 +111,6 @@ Eterm erts_error_logger_warnings; /* What to map warning logs to, am_error,
int erts_compat_rel;
-static int use_multi_run_queue;
static int no_schedulers;
static int no_schedulers_online;
@@ -126,6 +128,8 @@ int erts_modified_timing_level;
int erts_no_crash_dump = 0; /* Use -d to suppress crash dump. */
+int erts_no_line_info = 0; /* -L: Don't load line information */
+
/*
* Other global variables.
*/
@@ -244,19 +248,16 @@ erl_init(int ncpu)
{
init_benchmarking();
-#ifdef ERTS_SMP
- erts_system_block_init();
-#endif
-
erts_init_monitors();
erts_init_gc();
erts_init_time();
erts_init_sys_common_misc();
erts_init_process(ncpu);
- erts_init_scheduling(use_multi_run_queue,
- no_schedulers,
+ erts_init_scheduling(no_schedulers,
no_schedulers_online);
erts_init_cpu_topology(); /* Must be after init_scheduling */
+ erts_alloc_late_init();
+
H_MIN_SIZE = erts_next_heap_size(H_MIN_SIZE, 0);
BIN_VH_MIN_SIZE = erts_next_heap_size(BIN_VH_MIN_SIZE, 0);
@@ -278,6 +279,7 @@ erl_init(int ncpu)
erts_init_node_tables();
init_dist();
erl_drv_thr_init();
+ erts_init_async();
init_io();
init_copy();
init_load();
@@ -323,7 +325,7 @@ init_shared_memory(int argc, char **argv)
#endif
global_gen_gcs = 0;
- global_max_gen_gcs = (Uint16) erts_smp_atomic32_read(&erts_max_gen_gcs);
+ global_max_gen_gcs = (Uint16) erts_smp_atomic32_read_nob(&erts_max_gen_gcs);
global_gc_flags = erts_default_process_flags;
erts_global_offheap.mso = NULL;
@@ -432,7 +434,7 @@ static void
load_preloaded(void)
{
int i;
- int res;
+ Eterm res;
Preload* preload_p;
Eterm module_name;
byte* code;
@@ -451,8 +453,9 @@ load_preloaded(void)
name);
res = erts_load_module(NULL, 0, NIL, &module_name, code, length);
sys_preload_end(&preload_p[i]);
- if (res < 0)
- erl_exit(1,"Failed loading preloaded module %s\n", name);
+ if (res != NIL)
+ erl_exit(1,"Failed loading preloaded module %s (%T)\n",
+ name, res);
i++;
}
}
@@ -494,8 +497,6 @@ void erts_usage(void)
erts_fprintf(stderr, "-K boolean enable or disable kernel poll\n");
- erts_fprintf(stderr, "-l turn on auto load tracing\n");
-
erts_fprintf(stderr, "-M<X> <Y> memory allocator switches,\n");
erts_fprintf(stderr, " see the erts_alloc(3) documentation for more info.\n");
@@ -608,13 +609,13 @@ early_init(int *argc, char **argv) /*
int max_main_threads;
int max_reader_groups;
int reader_groups;
+ char envbuf[21]; /* enough for any 64-bit integer */
+ size_t envbufsz;
erts_sched_compact_load = 1;
- use_multi_run_queue = 1;
erts_printf_eterm_func = erts_printf_term;
erts_disable_tolerant_timeofday = 0;
display_items = 200;
- display_loads = 0;
erts_backtrace_depth = DEFAULT_BACKTRACE_SIZE;
erts_async_max_threads = 0;
erts_async_thread_suggested_stack_size = ERTS_ASYNC_THREAD_MIN_STACK_SIZE;
@@ -646,17 +647,23 @@ early_init(int *argc, char **argv) /*
erts_use_r9_pids_ports = 0;
erts_sys_pre_init();
+ erts_atomic_init_nob(&exiting, 0);
+#ifdef ERTS_SMP
+ erts_thr_progress_pre_init();
+#endif
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_init();
#endif
#ifdef ERTS_SMP
- erts_smp_atomic_init(&erts_writing_erl_crash_dump, 0L);
+ erts_smp_atomic32_init_nob(&erts_writing_erl_crash_dump, 0L);
+ erts_tsd_key_create(&erts_is_crash_dumping_key);
#else
erts_writing_erl_crash_dump = 0;
#endif
- erts_smp_atomic32_init(&erts_max_gen_gcs, (erts_aint32_t) ((Uint16) -1));
+ erts_smp_atomic32_init_nob(&erts_max_gen_gcs,
+ (erts_aint32_t) ((Uint16) -1));
erts_pre_init_process();
#if defined(USE_THREADS) && !defined(ERTS_SMP)
@@ -675,6 +682,16 @@ early_init(int *argc, char **argv) /*
schdlrs = no_schedulers;
schdlrs_onln = no_schedulers_online;
+ envbufsz = sizeof(envbuf);
+
+ /* erts_sys_getenv() not initialized yet; need erts_sys_getenv__() */
+ if (erts_sys_getenv__("ERL_THREAD_POOL_SIZE", envbuf, &envbufsz) == 0)
+ erts_async_max_threads = atoi(envbuf);
+ else
+ erts_async_max_threads = 0;
+ if (erts_async_max_threads > ERTS_MAX_NO_OF_ASYNC_THREADS)
+ erts_async_max_threads = ERTS_MAX_NO_OF_ASYNC_THREADS;
+
if (argc && argv) {
int i = 1;
while (i < *argc) {
@@ -702,6 +719,20 @@ early_init(int *argc, char **argv) /*
}
break;
}
+ case 'A': {
+ /* set number of threads in thread pool */
+ char *arg = get_arg(argv[i]+2, argv[i+1], &i);
+ if (((erts_async_max_threads = atoi(arg)) < 0) ||
+ (erts_async_max_threads > ERTS_MAX_NO_OF_ASYNC_THREADS)) {
+ erts_fprintf(stderr,
+ "bad number of async threads %s\n",
+ arg);
+ erts_usage();
+ VERBOSE(DEBUG_SYSTEM, ("using %d async-threads\n",
+ erts_async_max_threads));
+ }
+ break;
+ }
case 'S' : {
int tot, onln;
char *arg = get_arg(argv[i]+2, argv[i+1], &i);
@@ -767,11 +798,29 @@ early_init(int *argc, char **argv) /*
erts_no_schedulers = (Uint) no_schedulers;
#endif
+ erts_early_init_scheduling(no_schedulers);
+ alloc_opts.ncpu = ncpu;
erts_alloc_init(argc, argv, &alloc_opts); /* Handles (and removes)
-M flags. */
/* Require allocators */
- erts_early_init_scheduling();
+#ifdef ERTS_SMP
+ /*
+ * Thread progress management:
+ *
+ * * Managed threads:
+ * ** Scheduler threads (see erl_process.c)
+ * ** Aux thread (see erl_process.c)
+ * ** Sys message dispatcher thread (see erl_trace.c)
+ *
+ * * Unmanaged threads that need to register:
+ * ** Async threads (see erl_async.c)
+ */
+ erts_thr_progress_init(no_schedulers,
+ no_schedulers+2,
+ erts_async_max_threads);
+#endif
+ erts_thr_q_init();
erts_init_utils();
erts_early_init_cpu_topology(no_schedulers,
&max_main_threads,
@@ -808,10 +857,12 @@ early_init(int *argc, char **argv) /*
#if defined(HIPE)
hipe_signal_init(); /* must be done very early */
#endif
- erl_sys_init();
erl_sys_args(argc, argv);
+ /* Creates threads on Windows that depend on the arguments, so has to be after erl_sys_args */
+ erl_sys_init();
+
erts_ets_realloc_always_moves = 0;
erts_ets_always_compress = 0;
erts_dist_buf_busy_limit = ERTS_DE_BUSY_LIMIT;
@@ -849,7 +900,6 @@ erl_start(int argc, char **argv)
int have_break_handler = 1;
char envbuf[21]; /* enough for any 64-bit integer */
size_t envbufsz;
- int async_max_threads = erts_async_max_threads;
int ncpu = early_init(&argc, argv);
envbufsz = sizeof(envbuf);
@@ -861,12 +911,8 @@ erl_start(int argc, char **argv)
envbufsz = sizeof(envbuf);
if (erts_sys_getenv("ERL_FULLSWEEP_AFTER", envbuf, &envbufsz) == 0) {
Uint16 max_gen_gcs = atoi(envbuf);
- erts_smp_atomic32_set(&erts_max_gen_gcs, (erts_aint32_t) max_gen_gcs);
- }
-
- envbufsz = sizeof(envbuf);
- if (erts_sys_getenv("ERL_THREAD_POOL_SIZE", envbuf, &envbufsz) == 0) {
- async_max_threads = atoi(envbuf);
+ erts_smp_atomic32_set_nob(&erts_max_gen_gcs,
+ (erts_aint32_t) max_gen_gcs);
}
#if (defined(__APPLE__) && defined(__MACH__)) || defined(__DARWIN__)
@@ -934,10 +980,9 @@ erl_start(int argc, char **argv)
erts_fprintf(stderr, "%s unknown flag %s\n", argv[0], argv[i]);
erts_usage();
}
- case 'l':
- display_loads++;
+ case 'L':
+ erts_no_line_info = 1;
break;
-
case 'v':
#ifdef DEBUG
if (argv[i][2] == '\0') {
@@ -1211,12 +1256,8 @@ erl_start(int argc, char **argv)
erts_usage();
}
}
- else if (sys_strcmp("mrq", sub_param) == 0)
- use_multi_run_queue = 1;
else if (sys_strcmp("nsp", sub_param) == 0)
erts_use_sender_punish = 0;
- else if (sys_strcmp("srq", sub_param) == 0)
- use_multi_run_queue = 0;
else if (sys_strcmp("wt", sub_param) == 0) {
arg = get_arg(sub_param+2, argv[i+1], &i);
if (erts_sched_set_wakeup_other_thresold(arg) != 0) {
@@ -1319,17 +1360,8 @@ erl_start(int argc, char **argv)
break;
}
- case 'A':
- /* set number of threads in thread pool */
- arg = get_arg(argv[i]+2, argv[i+1], &i);
- if (((async_max_threads = atoi(arg)) < 0) ||
- (async_max_threads > ERTS_MAX_NO_OF_ASYNC_THREADS)) {
- erts_fprintf(stderr, "bad number of async threads %s\n", arg);
- erts_usage();
- }
-
- VERBOSE(DEBUG_SYSTEM, ("using %d async-threads\n",
- async_max_threads));
+ case 'A': /* Was handled in early init; just read past it */
+ (void) get_arg(argv[i]+2, argv[i+1], &i);
break;
case 'a':
@@ -1418,10 +1450,6 @@ erl_start(int argc, char **argv)
i++;
}
-#ifdef USE_THREADS
- erts_async_max_threads = async_max_threads;
-#endif
-
/* Delayed check of +P flag */
if (erts_max_processes < ERTS_MIN_PROCESSES
|| erts_max_processes > ERTS_MAX_PROCESSES
@@ -1467,6 +1495,10 @@ erl_start(int argc, char **argv)
erts_sys_main_thread(); /* May or may not return! */
#else
erts_thr_set_main_status(1, 1);
+#if ERTS_USE_ASYNC_READY_Q
+ erts_get_scheduler_data()->aux_work_data.async_ready.queue
+ = erts_get_async_ready_queue(1);
+#endif
set_main_stack_size();
process_main();
#endif
@@ -1490,8 +1522,31 @@ __decl_noreturn void erts_thr_fatal_error(int err, char *what)
#endif
static void
-system_cleanup(int exit_code)
+system_cleanup(int flush_async)
{
+ /*
+ * Make sure only one thread exits the runtime system.
+ */
+ if (erts_atomic_inc_read_nob(&exiting) != 1) {
+ /*
+ * Another thread is currently exiting the system;
+ * wait for it to do its job.
+ */
+#ifdef ERTS_SMP
+ if (erts_thr_progress_is_managed_thread()) {
+ /*
+ * The exiting thread might be waiting for
+ * us to block; need to update status...
+ */
+ erts_thr_progress_active(NULL, 0);
+ erts_thr_progress_prepare_wait(NULL);
+ }
+#endif
+ /* Wait forever... */
+ while (1)
+ erts_milli_sleep(10000000);
+ }
+
/* No cleanup wanted if ...
* 1. we are about to do an abnormal exit
* 2. we haven't finished initializing, or
@@ -1499,7 +1554,7 @@ system_cleanup(int exit_code)
* (in threaded non smp case).
*/
- if (exit_code != 0
+ if (!flush_async
|| !erts_initialized
#if defined(USE_THREADS) && !defined(ERTS_SMP)
|| !erts_equal_tids(main_thread, erts_thr_self())
@@ -1511,7 +1566,6 @@ system_cleanup(int exit_code)
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_check_exact(NULL, 0);
#endif
- erts_smp_block_system(ERTS_BS_FLG_ALLOW_GC); /* We never release it... */
#endif
#ifdef HYBRID
@@ -1540,103 +1594,60 @@ system_cleanup(int exit_code)
erts_cleanup_incgc();
#endif
-#if defined(USE_THREADS)
- exit_async();
-#endif
-#if HAVE_ERTS_MSEG
- erts_mseg_exit();
-#endif
-
- /*
- * A lot more cleaning could/should have been done...
- */
-
+ erts_exit_flush_async();
}
-/*
- * Common exit function, all exits from the system go through here.
- * n <= 0 -> normal exit with status n;
- * n = 127 -> Erlang crash dump produced, exit with status 1;
- * other positive n -> Erlang crash dump and core dump produced.
- */
-
-__decl_noreturn void erl_exit0(char *file, int line, int n, char *fmt,...)
+static __decl_noreturn void __noreturn
+erl_exit_vv(int n, int flush_async, char *fmt, va_list args1, va_list args2)
{
unsigned int an;
- va_list args;
- va_start(args, fmt);
+ system_cleanup(flush_async);
save_statistics();
- system_cleanup(n);
-
an = abs(n);
if (erts_mtrace_enabled)
erts_mtrace_exit((Uint32) an);
/* Produce an Erlang core dump if error */
- if (n > 0 && erts_initialized &&
- (erts_no_crash_dump == 0 || n == ERTS_DUMP_EXIT)) {
- erl_crash_dump_v(file, line, fmt, args);
+ if (((n > 0 && erts_no_crash_dump == 0) || n == ERTS_DUMP_EXIT)
+ && erts_initialized) {
+ erl_crash_dump_v((char*) NULL, 0, fmt, args1);
}
- /* need to reinitialize va_args thing */
- va_end(args);
- va_start(args, fmt);
-
if (fmt != NULL && *fmt != '\0')
- erl_error(fmt, args); /* Print error message. */
- va_end(args);
+ erl_error(fmt, args2); /* Print error message. */
sys_tty_reset(n);
if (n == ERTS_INTR_EXIT)
exit(0);
- else if (n == 127)
+ else if (n == ERTS_DUMP_EXIT)
ERTS_EXIT_AFTER_DUMP(1);
else if (n > 0 || n == ERTS_ABORT_EXIT)
abort();
exit(an);
}
-__decl_noreturn void erl_exit(int n, char *fmt,...)
+/* Exit without flushing async threads */
+__decl_noreturn void __noreturn erl_exit(int n, char *fmt, ...)
{
- unsigned int an;
- va_list args;
-
- va_start(args, fmt);
-
- save_statistics();
-
- system_cleanup(n);
-
- an = abs(n);
-
- if (erts_mtrace_enabled)
- erts_mtrace_exit((Uint32) an);
-
- /* Produce an Erlang core dump if error */
- if (n > 0 && erts_initialized &&
- (erts_no_crash_dump == 0 || n == ERTS_DUMP_EXIT)) {
- erl_crash_dump_v((char*) NULL, 0, fmt, args);
- }
-
- /* need to reinitialize va_args thing */
- va_end(args);
- va_start(args, fmt);
-
- if (fmt != NULL && *fmt != '\0')
- erl_error(fmt, args); /* Print error message. */
- va_end(args);
- sys_tty_reset(n);
-
- if (n == ERTS_INTR_EXIT)
- exit(0);
- else if (n == ERTS_DUMP_EXIT)
- ERTS_EXIT_AFTER_DUMP(1);
- else if (n > 0 || n == ERTS_ABORT_EXIT)
- abort();
- exit(an);
+ va_list args1, args2;
+ va_start(args1, fmt);
+ va_start(args2, fmt);
+ erl_exit_vv(n, 0, fmt, args1, args2);
+ va_end(args2);
+ va_end(args1);
}
+/* Exit after flushing async threads */
+__decl_noreturn void __noreturn erl_exit_flush_async(int n, char *fmt, ...)
+{
+ va_list args1, args2;
+ va_start(args1, fmt);
+ va_start(args2, fmt);
+ erl_exit_vv(n, 1, fmt, args1, args2);
+ va_end(args2);
+ va_end(args1);
+}
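
Note (editorial, not part of the commit): the new system_cleanup() above gates teardown behind an atomic counter so that only the first exiting thread performs cleanup, while any later caller parks forever and lets the first one finish. A minimal standalone sketch of that pattern, using C11 atomics in place of the internal erts_atomic_* API (all names here are illustrative, not ERTS code):

    /* Sketch of the "only one thread may exit" gate from system_cleanup(). */
    #include <stdatomic.h>
    #include <stdio.h>
    #include <unistd.h>

    static atomic_int exiting;   /* zero-initialized, like the ERTS counter */

    static void cleanup_once(void)
    {
        /* atomic_fetch_add() returns the previous value, so only the first
         * caller sees 0 and proceeds; everyone else waits forever for the
         * winner to call exit()/abort(). */
        if (atomic_fetch_add(&exiting, 1) != 0) {
            for (;;)
                sleep(3600);
        }
        puts("performing cleanup in exactly one thread");
    }

    int main(void)
    {
        cleanup_once();   /* in ERTS this is reached via erl_exit() */
        return 0;
    }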
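
Note (editorial, not part of the commit): erl_exit_vv() takes two va_lists because the format arguments may be consumed twice, once by erl_crash_dump_v() and once by erl_error(), and a va_list cannot be reused after it has been handed to a v*-style function. A minimal sketch of that wrapper pattern, with hypothetical report()/report_vv() names standing in for erl_exit()/erl_exit_vv():

    /* Sketch of the two-va_list wrapper used by erl_exit()/erl_exit_flush_async(). */
    #include <stdarg.h>
    #include <stdio.h>

    static void report_vv(const char *fmt, va_list args1, va_list args2)
    {
        vfprintf(stdout, fmt, args1);   /* first consumer (cf. erl_crash_dump_v) */
        vfprintf(stderr, fmt, args2);   /* second consumer (cf. erl_error) */
    }

    static void report(const char *fmt, ...)
    {
        va_list args1, args2;
        va_start(args1, fmt);
        va_start(args2, fmt);           /* two independent lists, as in erl_exit() */
        report_vv(fmt, args1, args2);
        va_end(args2);
        va_end(args1);
    }

    int main(void)
    {
        report("exiting with status %d\n", 1);
        return 0;
    }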