Diffstat (limited to 'erts/emulator')
32 files changed, 1994 insertions, 341 deletions
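The changes below thread dirty-scheduler support through erl_init.c, erl_process.c and erl_nif.c, and add the new map instructions to beam_emu.c. For orientation, here is a minimal sketch of the NIF-facing side of dirty scheduling, assuming the experimental 17.0-era API that appears in the erl_nif.c/erl_nif.h hunks below; the function my_heavy_work and its workload are invented for the example:

/* Sketch only: runs a CPU-bound job on a dirty scheduler when the
 * emulator was built with dirty-scheduler support. */
#include "erl_nif.h"

static ERL_NIF_TERM heavy_done(ErlNifEnv* env, ERL_NIF_TERM result)
{
    return result;  /* finalizer, back on a normal scheduler */
}

static ERL_NIF_TERM heavy_dirty(ErlNifEnv* env, int argc,
                                const ERL_NIF_TERM argv[])
{
    unsigned long i, acc = 0;
    for (i = 0; i < 100000000UL; i++)  /* stand-in for real CPU-bound work */
        acc += i;
    return enif_schedule_dirty_nif_finalizer(env, enif_make_ulong(env, acc),
                                             heavy_done);
}

static ERL_NIF_TERM my_heavy_work(ErlNifEnv* env, int argc,
                                  const ERL_NIF_TERM argv[])
{
#ifdef ERL_NIF_DIRTY_SCHEDULER_SUPPORT
    /* hop to a dirty CPU scheduler instead of blocking this one */
    return enif_schedule_dirty_nif(env, ERL_NIF_DIRTY_JOB_CPU_BOUND,
                                   heavy_dirty, argc, argv);
#else
    return heavy_dirty(env, argc, argv);
#endif
}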
diff --git a/erts/emulator/Makefile.in b/erts/emulator/Makefile.in
index 4de4e03c01..523130d01a 100644
--- a/erts/emulator/Makefile.in
+++ b/erts/emulator/Makefile.in
@@ -518,10 +518,9 @@ ifdef DTRACE_ENABLED
 # global.h causes problems by including dtrace-wrapper.h which includes
 # the autogenerated erlang_dtrace.h ... so make erlang_dtrace.h very early.
 DTRACE_HEADERS = $(TARGET)/erlang_dtrace.h
-generate: $(DTRACE_HEADERS) $(GENERATE)
+GENERATE+= $(DTRACE_HEADERS)
 else
 DTRACE_HEADERS =
-generate: $(GENERATE)
 endif
 
 ifdef HIPE_ENABLED
@@ -579,8 +578,8 @@ $(TTF_DIR)/erl_alloc_types.h: beam/erl_alloc.types utils/make_alloc_types
 GENERATE += $(TTF_DIR)/erl_alloc_types.h
 
 # version include file
-$(TARGET)/erl_version.h: ../vsn.mk
-	$(gen_verbose)LANG=C $(PERL) utils/make_version -o $@ $(SYSTEM_VSN) $(SYSTEM_CP_VSN) $(VSN)$(SERIALNO) $(TARGET)
+$(TARGET)/erl_version.h: ../vsn.mk $(ERL_TOP)/make/$(TARGET)/otp.mk
+	$(gen_verbose)LANG=C $(PERL) utils/make_version -o $@ $(SYSTEM_VSN) $(OTP_VERSION) $(VSN)$(SERIALNO) $(TARGET)
 GENERATE += $(TARGET)/erl_version.h
 
 # driver table
diff --git a/erts/emulator/beam/atom.names b/erts/emulator/beam/atom.names
index 96547ba743..d28e519ae1 100644
--- a/erts/emulator/beam/atom.names
+++ b/erts/emulator/beam/atom.names
@@ -179,6 +179,7 @@ atom dexit
 atom depth
 atom dgroup_leader
 atom dictionary
+atom dirty_cpu_schedulers_online
 atom disable_trace
 atom disabled
 atom display_items
diff --git a/erts/emulator/beam/beam_debug.c b/erts/emulator/beam/beam_debug.c
index e36ec2a93e..a3cd08834f 100644
--- a/erts/emulator/beam/beam_debug.c
+++ b/erts/emulator/beam/beam_debug.c
@@ -635,6 +635,11 @@ print_op(int to, void *to_arg, int op, int size, BeamInstr* addr)
     case op_i_put_tuple_rI:
     case op_i_put_tuple_xI:
     case op_i_put_tuple_yI:
+    case op_new_map_jdII:
+    case op_update_map_assoc_jsdII:
+    case op_update_map_exact_jsdII:
+    case op_i_has_map_fields_fsI:
+    case op_i_get_map_elements_fsI:
 	{
 	    int n = unpacked[-1];
diff --git a/erts/emulator/beam/beam_emu.c b/erts/emulator/beam/beam_emu.c
index c82d2fb2be..0cec9ea3ec 100644
--- a/erts/emulator/beam/beam_emu.c
+++ b/erts/emulator/beam/beam_emu.c
@@ -1209,7 +1209,11 @@ void process_main(void)
     if (start_time != 0) {
 	Sint64 diff = erts_timestamp_millis() - start_time;
-	if (diff > 0 && (Uint) diff > erts_system_monitor_long_schedule) {
+	if (diff > 0 && (Uint) diff > erts_system_monitor_long_schedule
+#ifdef ERTS_DIRTY_SCHEDULERS
+	    && !ERTS_SCHEDULER_IS_DIRTY(c_p->scheduler_data)
+#endif
+	    ) {
 	    BeamInstr *inptr = find_function_from_pc(start_time_i);
 	    BeamInstr *outptr = find_function_from_pc(c_p->i);
 	    monitor_long_schedule_proc(c_p,inptr,outptr,(Uint) diff);
@@ -2361,7 +2365,127 @@ void process_main(void)
 	Next(4+Arg(3));
     }
 
-    OpCase(update_map_assoc_jddII): {
+    OpCase(i_has_map_fields_fsI): {
+	map_t* mp;
+	Eterm map;
+	Eterm field;
+	Eterm *ks;
+	BeamInstr* fs;
+	Uint sz,n;
+
+	GetArg1(1, map);
+
+	/* this instruction assumes Arg1 is a map,
+	 * i.e. that it follows a test is_map if needed.
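Both new map instructions depend on the flatmap's keys and the compiled pattern's fields sharing one sorted order, so the membership test reduces to the lockstep walk that follows. The same two-pointer subset check as a standalone sketch, with plain ints standing in for Eterm keys and EQ():

#include <stddef.h>

/* Returns 1 iff every element of fields[] (sorted) occurs in keys[]
 * (sorted), mirroring the n/sz countdown in i_has_map_fields. */
static int has_all_fields(const int *keys, size_t nkeys,
                          const int *fields, size_t nfields)
{
    size_t ki = 0, fi = 0;
    while (ki < nkeys && fi < nfields) {
        if (keys[ki] == fields[fi])  /* EQ(field, *ks) in the real code */
            fi++;                    /* pattern field matched, advance it */
        ki++;                        /* always advance over the map keys */
    }
    return fi == nfields;            /* n reaching 0 in the instruction */
}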
+ */ + + mp = (map_t *)map_val(map); + sz = map_get_size(mp); + + if (sz == 0) { + SET_I((BeamInstr *) Arg(0)); + goto has_map_fields_fail; + } + + ks = map_get_keys(mp); + n = (Uint)Arg(2); + fs = &Arg(3); /* pattern fields */ + + ASSERT(n>0); + + while(sz) { + field = (Eterm)*fs; + if (EQ(field,*ks)) { + n--; + fs++; + if (n == 0) break; + } + ks++; sz--; + } + + if (n) { + SET_I((BeamInstr *) Arg(0)); + goto has_map_fields_fail; + } + + I += 4 + Arg(2); +has_map_fields_fail: + ASSERT(VALID_INSTR(*I)); + Goto(*I); + } + +#define PUT_TERM_REG(term, desc) \ +do { \ + switch ((desc) & _TAG_IMMED1_MASK) { \ + case (R_REG_DEF << _TAG_PRIMARY_SIZE) | TAG_PRIMARY_HEADER: \ + r(0) = (term); \ + break; \ + case (X_REG_DEF << _TAG_PRIMARY_SIZE) | TAG_PRIMARY_HEADER: \ + x((desc) >> _TAG_IMMED1_SIZE) = (term); \ + break; \ + case (Y_REG_DEF << _TAG_PRIMARY_SIZE) | TAG_PRIMARY_HEADER: \ + y((desc) >> _TAG_IMMED1_SIZE) = (term); \ + break; \ + default: \ + ASSERT(0); \ + break; \ + } \ +} while(0) + + OpCase(i_get_map_elements_fsI): { + Eterm map; + map_t *mp; + Eterm field; + Eterm *ks; + Eterm *vs; + BeamInstr *fs; + Uint sz,n; + + GetArg1(1, map); + + /* this instruction assumes Arg1 is a map, + * i.e. that it follows a test is_map if needed. + */ + + mp = (map_t *)map_val(map); + sz = map_get_size(mp); + + if (sz == 0) { + SET_I((BeamInstr *) Arg(0)); + goto get_map_elements_fail; + } + + n = (Uint)Arg(2) / 2; + fs = &Arg(3); /* pattern fields and target registers */ + ks = map_get_keys(mp); + vs = map_get_values(mp); + + while(sz) { + field = (Eterm)*fs; + if (EQ(field,*ks)) { + PUT_TERM_REG(*vs, fs[1]); + n--; + fs += 2; + /* no more values to fetch, we are done */ + if (n == 0) break; + } + ks++; sz--; + vs++; + } + + if (n) { + SET_I((BeamInstr *) Arg(0)); + goto get_map_elements_fail; + } + + I += 4 + Arg(2); +get_map_elements_fail: + ASSERT(VALID_INSTR(*I)); + Goto(*I); + } +#undef PUT_TERM_REG + + OpCase(update_map_assoc_jsdII): { Eterm res; Eterm map; @@ -2379,7 +2503,7 @@ void process_main(void) } } - OpCase(update_map_exact_jddII): { + OpCase(update_map_exact_jsdII): { Eterm res; Eterm map; @@ -6620,7 +6744,7 @@ update_map_exact(Process* p, Eterm* reg, Eterm map, BeamInstr* I) */ if (num_old == 0) { - return new_map(p, reg, I+1); + return THE_NON_VALUE; } /* diff --git a/erts/emulator/beam/beam_load.c b/erts/emulator/beam/beam_load.c index c983615827..e96177cfd9 100644 --- a/erts/emulator/beam/beam_load.c +++ b/erts/emulator/beam/beam_load.c @@ -1,7 +1,7 @@ /* * %CopyrightBegin% * - * Copyright Ericsson AB 1996-2013. All Rights Reserved. + * Copyright Ericsson AB 1996-2014. All Rights Reserved. 
* * The contents of this file are subject to the Erlang Public License, * Version 1.1, (the "License"); you may not use this file except in @@ -409,7 +409,7 @@ typedef struct LoaderState { __result = __result << 8 | *Stp->file_p++; \ } \ Dest = __result; \ - } while (0) + } #define GetByte(Stp, Dest) \ if ((Stp)->file_left < 1) { \ @@ -506,6 +506,9 @@ static GenOp* gen_select_literals(LoaderState* stp, GenOpArg S, static GenOp* const_select_val(LoaderState* stp, GenOpArg S, GenOpArg Fail, GenOpArg Size, GenOpArg* Rest); +static GenOp* gen_get_map_element(LoaderState* stp, GenOpArg Fail, GenOpArg Src, + GenOpArg Size, GenOpArg* Rest); + static int freeze_code(LoaderState* stp); static void final_touch(LoaderState* stp); @@ -3951,6 +3954,49 @@ tuple_append_put(LoaderState* stp, GenOpArg Arity, GenOpArg Dst, return op; } +/* + * Replace a get_map_elements with one key to an instruction with one + * element + */ + +static GenOp* +gen_get_map_element(LoaderState* stp, GenOpArg Fail, GenOpArg Src, + GenOpArg Size, GenOpArg* Rest) +{ + GenOp* op; + + ASSERT(Size.type == TAG_u); + + NEW_GENOP(stp, op); + op->next = NULL; + op->op = genop_get_map_element_4; + op->arity = 4; + + op->a[0] = Fail; + op->a[1] = Src; + op->a[2] = Rest[0]; + op->a[3] = Rest[1]; + return op; +} + +static GenOp* +gen_has_map_field(LoaderState* stp, GenOpArg Fail, GenOpArg Src, + GenOpArg Size, GenOpArg* Rest) +{ + GenOp* op; + + ASSERT(Size.type == TAG_u); + + NEW_GENOP(stp, op); + op->next = NULL; + op->op = genop_has_map_field_3; + op->arity = 4; + + op->a[0] = Fail; + op->a[1] = Src; + op->a[2] = Rest[0]; + return op; +} /* * Freeze the code in memory, move the string table into place, diff --git a/erts/emulator/beam/bif.c b/erts/emulator/beam/bif.c index 1f568726a6..06a1230ca0 100644 --- a/erts/emulator/beam/bif.c +++ b/erts/emulator/beam/bif.c @@ -4333,7 +4333,11 @@ BIF_RETTYPE system_flag_2(BIF_ALIST_2) switch (erts_set_schedulers_online(BIF_P, ERTS_PROC_LOCK_MAIN, signed_val(BIF_ARG_2), - &old_no)) { + &old_no +#ifdef ERTS_DIRTY_SCHEDULERS + , 0 +#endif + )) { case ERTS_SCHDLR_SSPND_DONE: BIF_RET(make_small(old_no)); case ERTS_SCHDLR_SSPND_YIELD_RESTART: @@ -4465,6 +4469,33 @@ BIF_RETTYPE system_flag_2(BIF_ALIST_2) ref, old ? 
am_true : am_false); } +#if defined(ERTS_SMP) && defined(ERTS_DIRTY_SCHEDULERS) + } else if (BIF_ARG_1 == am_dirty_cpu_schedulers_online) { + Sint old_no; + if (!is_small(BIF_ARG_2)) + goto error; + switch (erts_set_schedulers_online(BIF_P, + ERTS_PROC_LOCK_MAIN, + signed_val(BIF_ARG_2), + &old_no, + 1)) { + case ERTS_SCHDLR_SSPND_DONE: + BIF_RET(make_small(old_no)); + case ERTS_SCHDLR_SSPND_YIELD_RESTART: + ERTS_VBUMP_ALL_REDS(BIF_P); + BIF_TRAP2(bif_export[BIF_system_flag_2], + BIF_P, BIF_ARG_1, BIF_ARG_2); + case ERTS_SCHDLR_SSPND_YIELD_DONE: + ERTS_BIF_YIELD_RETURN_X(BIF_P, make_small(old_no), + am_dirty_cpu_schedulers_online); + case ERTS_SCHDLR_SSPND_EINVAL: + goto error; + default: + ASSERT(0); + BIF_ERROR(BIF_P, EXC_INTERNAL_ERROR); + break; + } +#endif } else if (ERTS_IS_ATOM_STR("scheduling_statistics", BIF_ARG_1)) { int what; if (ERTS_IS_ATOM_STR("disable", BIF_ARG_2)) diff --git a/erts/emulator/beam/erl_alloc.c b/erts/emulator/beam/erl_alloc.c index c2bca9e54f..05ac24e04d 100644 --- a/erts/emulator/beam/erl_alloc.c +++ b/erts/emulator/beam/erl_alloc.c @@ -630,8 +630,21 @@ erts_alloc_init(int *argc, char **argv, ErtsAllocInitOpts *eaiop) init.fix_alloc.thr_spec = 0; #endif + /* Make adjustments for carrier migration support */ + init.temp_alloc.init.util.acul = 0; + adjust_carrier_migration_support(&init.sl_alloc); + adjust_carrier_migration_support(&init.std_alloc); + adjust_carrier_migration_support(&init.ll_alloc); + adjust_carrier_migration_support(&init.eheap_alloc); + adjust_carrier_migration_support(&init.binary_alloc); + adjust_carrier_migration_support(&init.ets_alloc); + adjust_carrier_migration_support(&init.driver_alloc); + adjust_carrier_migration_support(&init.fix_alloc); + if (init.erts_alloc_config) { /* Adjust flags that erts_alloc_config won't like */ + + /* No thread specific instances */ init.temp_alloc.thr_spec = 0; init.sl_alloc.thr_spec = 0; init.std_alloc.thr_spec = 0; @@ -640,20 +653,20 @@ erts_alloc_init(int *argc, char **argv, ErtsAllocInitOpts *eaiop) init.binary_alloc.thr_spec = 0; init.ets_alloc.thr_spec = 0; init.driver_alloc.thr_spec = 0; - init.fix_alloc.thr_spec = 0; + init.fix_alloc.thr_spec = 0; + + /* No carrier migration */ + init.temp_alloc.init.util.acul = 0; + init.sl_alloc.init.util.acul = 0; + init.std_alloc.init.util.acul = 0; + init.ll_alloc.init.util.acul = 0; + init.eheap_alloc.init.util.acul = 0; + init.binary_alloc.init.util.acul = 0; + init.ets_alloc.init.util.acul = 0; + init.driver_alloc.init.util.acul = 0; + init.fix_alloc.init.util.acul = 0; } - /* Make adjustments for carrier migration support */ - init.temp_alloc.init.util.acul = 0; - adjust_carrier_migration_support(&init.sl_alloc); - adjust_carrier_migration_support(&init.std_alloc); - adjust_carrier_migration_support(&init.ll_alloc); - adjust_carrier_migration_support(&init.eheap_alloc); - adjust_carrier_migration_support(&init.binary_alloc); - adjust_carrier_migration_support(&init.ets_alloc); - adjust_carrier_migration_support(&init.driver_alloc); - adjust_carrier_migration_support(&init.fix_alloc); - #ifdef ERTS_SMP /* Only temp_alloc can use thread specific interface */ if (init.temp_alloc.thr_spec) diff --git a/erts/emulator/beam/erl_alloc.h b/erts/emulator/beam/erl_alloc.h index f83f6b39cf..942eaa47d0 100644 --- a/erts/emulator/beam/erl_alloc.h +++ b/erts/emulator/beam/erl_alloc.h @@ -209,8 +209,8 @@ int erts_is_allctr_wrapper_prelocked(void); void *erts_alloc_permanent_cache_aligned(ErtsAlcType_t type, Uint size); #ifndef ERTS_CACHE_LINE_SIZE -/* Assume a cache 
line size of 64 bytes */ -# define ERTS_CACHE_LINE_SIZE ((UWord) 64) +/* Assumed cache line size */ +# define ERTS_CACHE_LINE_SIZE ((UWord) ASSUMED_CACHE_LINE_SIZE) # define ERTS_CACHE_LINE_MASK (ERTS_CACHE_LINE_SIZE - 1) #endif diff --git a/erts/emulator/beam/erl_bif_info.c b/erts/emulator/beam/erl_bif_info.c index f25b4dbae5..2adba9b240 100755 --- a/erts/emulator/beam/erl_bif_info.c +++ b/erts/emulator/beam/erl_bif_info.c @@ -64,7 +64,7 @@ static Export *gather_gc_info_res_trap; #define DECL_AM(S) Eterm AM_ ## S = am_atom_put(#S, sizeof(#S) - 1) -static char otp_correction_package[] = ERLANG_OTP_CORRECTION_PACKAGE; +static char otp_version[] = ERLANG_OTP_VERSION; /* Keep erts_system_version as a global variable for easy access from a core */ static char erts_system_version[] = ("Erlang/OTP " ERLANG_OTP_RELEASE "%s" @@ -312,7 +312,7 @@ erts_print_system_version(int to, void *arg, Process *c_p) int i, rc = -1; char *rc_str = ""; char rc_buf[100]; - char *ocp = otp_correction_package; + char *ov = otp_version; #ifdef ERTS_SMP Uint total, online, active; #ifdef ERTS_DIRTY_SCHEDULERS @@ -323,9 +323,9 @@ erts_print_system_version(int to, void *arg, Process *c_p) (void) erts_schedulers_state(&total, &online, &active, NULL, NULL, NULL, 0); #endif #endif - for (i = 0; i < sizeof(otp_correction_package)-4; i++) { - if (ocp[i] == '-' && ocp[i+1] == 'r' && ocp[i+2] == 'c') - rc = atoi(&ocp[i+3]); + for (i = 0; i < sizeof(otp_version)-4; i++) { + if (ov[i] == '-' && ov[i+1] == 'r' && ov[i+2] == 'c') + rc = atoi(&ov[i+3]); } if (rc >= 0) { if (rc == 0) @@ -2448,10 +2448,6 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1) DECL_AM(unknown); BIF_RET(AM_unknown); } - } else if (ERTS_IS_ATOM_STR("otp_correction_package", BIF_ARG_1)) { - int n = sizeof(ERLANG_OTP_CORRECTION_PACKAGE)-1; - hp = HAlloc(BIF_P, 2*n); - BIF_RET(buf_to_intlist(&hp, ERLANG_OTP_CORRECTION_PACKAGE, n, NIL)); } else if (ERTS_IS_ATOM_STR("otp_release", BIF_ARG_1)) { int n = sizeof(ERLANG_OTP_RELEASE)-1; hp = HAlloc(BIF_P, 2*n); diff --git a/erts/emulator/beam/erl_db.c b/erts/emulator/beam/erl_db.c index 41e64fcd4f..a5d67571e2 100644 --- a/erts/emulator/beam/erl_db.c +++ b/erts/emulator/beam/erl_db.c @@ -1,7 +1,7 @@ /* * %CopyrightBegin% * - * Copyright Ericsson AB 1996-2013. All Rights Reserved. + * Copyright Ericsson AB 1996-2014. All Rights Reserved. 
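The otp_version scan in erl_bif_info.c above derives a release-candidate number by locating a "-rc" suffix and reading the digits after it. The same logic as a standalone function, with an ordinary string in place of the generated ERLANG_OTP_VERSION constant:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int rc_number(const char *v)
{
    size_t i, n = strlen(v);
    int rc = -1;
    for (i = 0; i + 3 < n; i++)      /* same bound as sizeof(otp_version)-4 */
        if (v[i] == '-' && v[i+1] == 'r' && v[i+2] == 'c')
            rc = atoi(&v[i+3]);
    return rc;  /* -1: no rc suffix; 0: development build; N: rc N */
}

int main(void)
{
    printf("%d\n", rc_number("17.0-rc1"));  /* prints 1 */
    return 0;
}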
* * The contents of this file are subject to the Erlang Public License, * Version 1.1, (the "License"); you may not use this file except in @@ -125,6 +125,7 @@ get_meta_main_tab_lock(unsigned slot) static erts_smp_spinlock_t meta_main_tab_main_lock; static Uint meta_main_tab_first_free; /* Index of first free slot */ static int meta_main_tab_cnt; /* Number of active tables */ +static int meta_main_tab_top; /* Highest ever used slot + 1 */ static Uint meta_main_tab_slot_mask; /* The slot index part of an unnamed table id */ static Uint meta_main_tab_seq_incr; static Uint meta_main_tab_seq_cnt = 0; /* To give unique(-ish) table identifiers */ @@ -1469,6 +1470,10 @@ BIF_RETTYPE ets_new_2(BIF_ALIST_2) ASSERT(slot>=0 && slot<db_max_tabs); meta_main_tab_first_free = GET_NEXT_FREE_SLOT(slot); meta_main_tab_cnt++; + if (slot >= meta_main_tab_top) { + ASSERT(slot == meta_main_tab_top); + meta_main_tab_top = slot + 1; + } if (is_named) { ret = BIF_ARG_1; @@ -2058,27 +2063,31 @@ BIF_RETTYPE ets_all_0(BIF_ALIST_0) { DbTable* tb; Eterm previous; - int i, j; + int i; Eterm* hp; Eterm* hendp; int t_tabs_cnt; - int t_max_tabs; + int t_top; erts_smp_spin_lock(&meta_main_tab_main_lock); t_tabs_cnt = meta_main_tab_cnt; - t_max_tabs = db_max_tabs; + t_top = meta_main_tab_top; erts_smp_spin_unlock(&meta_main_tab_main_lock); hp = HAlloc(BIF_P, 2*t_tabs_cnt); hendp = hp + 2*t_tabs_cnt; previous = NIL; - j = 0; - for(i = 0; (i < t_max_tabs && j < t_tabs_cnt); i++) { + for(i = 0; i < t_top; i++) { erts_smp_rwmtx_t *mmtl = get_meta_main_tab_lock(i); erts_smp_rwmtx_rlock(mmtl); if (IS_SLOT_ALIVE(i)) { - j++; + if (hp == hendp) { + /* Racing table creator, grab some more heap space */ + t_tabs_cnt = 10; + hp = HAlloc(BIF_P, 2*t_tabs_cnt); + hendp = hp + 2*t_tabs_cnt; + } tb = meta_main_tab[i].u.tb; previous = CONS(hp, tb->common.id, previous); hp += 2; @@ -2849,6 +2858,7 @@ void init_db(void) ERTS_ETS_MISC_MEM_ADD(size); meta_main_tab_cnt = 0; + meta_main_tab_top = 0; for (i=1; i<db_max_tabs; i++) { SET_NEXT_FREE_SLOT(i-1,i); } diff --git a/erts/emulator/beam/erl_drv_nif.h b/erts/emulator/beam/erl_drv_nif.h index ea013a49a3..3f829ea7ea 100644 --- a/erts/emulator/beam/erl_drv_nif.h +++ b/erts/emulator/beam/erl_drv_nif.h @@ -41,6 +41,13 @@ typedef struct { int suggested_stack_size; } ErlDrvThreadOpts; +#if defined(ERL_DRV_DIRTY_SCHEDULER_SUPPORT) || defined(ERL_NIF_DIRTY_SCHEDULER_SUPPORT) +typedef enum { + ERL_DRV_DIRTY_JOB_CPU_BOUND = 1, + ERL_DRV_DIRTY_JOB_IO_BOUND = 2 +} ErlDrvDirtyJobFlags; +#endif + #endif /* __ERL_DRV_NIF_H__ */ diff --git a/erts/emulator/beam/erl_init.c b/erts/emulator/beam/erl_init.c index 6b190326aa..d54658f1ea 100644 --- a/erts/emulator/beam/erl_init.c +++ b/erts/emulator/beam/erl_init.c @@ -178,6 +178,11 @@ int erts_compat_rel; static int no_schedulers; static int no_schedulers_online; +#ifdef ERTS_DIRTY_SCHEDULERS +static int no_dirty_cpu_schedulers; +static int no_dirty_cpu_schedulers_online; +static int no_dirty_io_schedulers; +#endif #ifdef DEBUG Uint32 verbose; /* See erl_debug.h for information about verbose */ @@ -304,7 +309,13 @@ erl_init(int ncpu, erts_init_sys_common_misc(); erts_init_process(ncpu, proc_tab_sz, legacy_proc_tab); erts_init_scheduling(no_schedulers, - no_schedulers_online); + no_schedulers_online +#ifdef ERTS_DIRTY_SCHEDULERS + , no_dirty_cpu_schedulers, + no_dirty_cpu_schedulers_online, + no_dirty_io_schedulers +#endif + ); erts_init_cpu_topology(); /* Must be after init_scheduling */ erts_init_gc(); /* Must be after init_scheduling */ erts_alloc_late_init(); @@ 
-835,7 +846,7 @@ early_init(int *argc, char **argv) /* else if (argv[i][2] == 'D') { char *arg; char *type = argv[i]+3; - if (strcmp(type, "Pcpu") == 0) { + if (strncmp(type, "Pcpu", 4) == 0) { int ptot, ponln; arg = get_arg(argv[i]+7, argv[i+1], &i); switch (sscanf(arg, "%d:%d", &ptot, &ponln)) { @@ -872,7 +883,7 @@ early_init(int *argc, char **argv) /* VERBOSE(DEBUG_SYSTEM, ("using %d:%d dirty CPU scheduler percentages\n", dirty_cpu_scheds_pctg, dirty_cpu_scheds_onln_pctg)); - } else if (strcmp(type, "cpu") == 0) { + } else if (strncmp(type, "cpu", 3) == 0) { int tot, onln; arg = get_arg(argv[i]+6, argv[i+1], &i); switch (sscanf(arg, "%d:%d", &tot, &onln)) { @@ -923,7 +934,7 @@ early_init(int *argc, char **argv) /* } VERBOSE(DEBUG_SYSTEM, ("using %d:%d dirty CPU scheduler(s)\n", tot, onln)); - } else if (strcmp(type, "io") == 0) { + } else if (strncmp(type, "io", 2) == 0) { arg = get_arg(argv[i]+5, argv[i+1], &i); dirty_io_scheds = atoi(arg); if (dirty_io_scheds < 0 || @@ -1055,9 +1066,9 @@ early_init(int *argc, char **argv) /* erts_no_schedulers = (Uint) no_schedulers; #endif #ifdef ERTS_DIRTY_SCHEDULERS - erts_no_dirty_cpu_schedulers = dirty_cpu_scheds; - erts_no_dirty_cpu_schedulers_online = dirty_cpu_scheds_online; - erts_no_dirty_io_schedulers = dirty_io_scheds; + erts_no_dirty_cpu_schedulers = no_dirty_cpu_schedulers = dirty_cpu_scheds; + no_dirty_cpu_schedulers_online = dirty_cpu_scheds_online; + erts_no_dirty_io_schedulers = no_dirty_io_schedulers = dirty_io_scheds; #endif erts_early_init_scheduling(no_schedulers); diff --git a/erts/emulator/beam/erl_nif.c b/erts/emulator/beam/erl_nif.c index d467d129e8..40860e141c 100644 --- a/erts/emulator/beam/erl_nif.c +++ b/erts/emulator/beam/erl_nif.c @@ -1237,6 +1237,19 @@ static void steal_resource_type(ErlNifResourceType* type) } } +/* The opened_rt_list is used by enif_open_resource_type() + * in order to rollback "creates" and "take-overs" in case the load fails. 
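With this change a successful enif_open_resource_type() is provisional: the create or takeover is parked on opened_rt_list and only spliced into the global resource_type_list by commit_opened_resource_types() once load/upgrade/reload succeeds, or discarded by rollback_opened_resource_types() on failure (both appear further down in this hunk). A minimal sketch of the calling side, with the names my_res and my_res_dtor invented for the example:

#include "erl_nif.h"

static ErlNifResourceType* my_res;

static void my_res_dtor(ErlNifEnv* env, void* obj)
{
    /* free whatever the resource owns */
}

static int load(ErlNifEnv* env, void** priv_data, ERL_NIF_TERM load_info)
{
    ErlNifResourceFlags tried;
    my_res = enif_open_resource_type(env, NULL, "my_res", my_res_dtor,
                                     ERL_NIF_RT_CREATE | ERL_NIF_RT_TAKEOVER,
                                     &tried);
    /* a nonzero return vetoes the load, which now triggers
     * rollback_opened_resource_types() in load_nif_2() */
    return my_res == NULL;
}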
+ */ +struct opened_resource_type +{ + struct opened_resource_type* next; + + ErlNifResourceFlags op; + ErlNifResourceType* type; + ErlNifResourceDtor* new_dtor; +}; +static struct opened_resource_type* opened_rt_list = NULL; + ErlNifResourceType* enif_open_resource_type(ErlNifEnv* env, const char* module_str, @@ -1258,22 +1271,21 @@ enif_open_resource_type(ErlNifEnv* env, if (type == NULL) { if (flags & ERL_NIF_RT_CREATE) { type = erts_alloc(ERTS_ALC_T_NIF, - sizeof(struct enif_resource_type_t)); - type->dtor = dtor; + sizeof(struct enif_resource_type_t)); type->module = module_am; type->name = name_am; erts_refc_init(&type->refc, 1); - type->owner = env->mod_nif; - type->prev = &resource_type_list; - type->next = resource_type_list.next; - type->next->prev = type; - type->prev->next = type; op = ERL_NIF_RT_CREATE; + #ifdef DEBUG + type->dtor = (void*)1; + type->owner = (void*)2; + type->prev = (void*)3; + type->next = (void*)4; + #endif } } else { - if (flags & ERL_NIF_RT_TAKEOVER) { - steal_resource_type(type); + if (flags & ERL_NIF_RT_TAKEOVER) { op = ERL_NIF_RT_TAKEOVER; } else { @@ -1281,12 +1293,13 @@ enif_open_resource_type(ErlNifEnv* env, } } if (type != NULL) { - type->owner = env->mod_nif; - type->dtor = dtor; - if (type->dtor != NULL) { - erts_refc_inc(&type->owner->rt_dtor_cnt, 1); - } - erts_refc_inc(&type->owner->rt_cnt, 1); + struct opened_resource_type* ort = erts_alloc(ERTS_ALC_T_TMP, + sizeof(struct opened_resource_type)); + ort->op = op; + ort->type = type; + ort->new_dtor = dtor; + ort->next = opened_rt_list; + opened_rt_list = ort; } if (tried != NULL) { *tried = op; @@ -1294,6 +1307,51 @@ enif_open_resource_type(ErlNifEnv* env, return type; } +static void commit_opened_resource_types(struct erl_module_nif* lib) +{ + while (opened_rt_list) { + struct opened_resource_type* ort = opened_rt_list; + + ErlNifResourceType* type = ort->type; + + if (ort->op == ERL_NIF_RT_CREATE) { + type->prev = &resource_type_list; + type->next = resource_type_list.next; + type->next->prev = type; + type->prev->next = type; + } + else { /* ERL_NIF_RT_TAKEOVER */ + steal_resource_type(type); + } + + type->owner = lib; + type->dtor = ort->new_dtor; + + if (type->dtor != NULL) { + erts_refc_inc(&lib->rt_dtor_cnt, 1); + } + erts_refc_inc(&lib->rt_cnt, 1); + + opened_rt_list = ort->next; + erts_free(ERTS_ALC_T_TMP, ort); + } +} + +static void rollback_opened_resource_types(void) +{ + while (opened_rt_list) { + struct opened_resource_type* ort = opened_rt_list; + + if (ort->op == ERL_NIF_RT_CREATE) { + erts_free(ERTS_ALC_T_NIF, ort->type); + } + + opened_rt_list = ort->next; + erts_free(ERTS_ALC_T_TMP, ort); + } +} + + static void nif_resource_dtor(Binary* bin) { ErlNifResource* resource = (ErlNifResource*) ERTS_MAGIC_BIN_DATA(bin); @@ -1319,6 +1377,8 @@ void* enif_alloc_resource(ErlNifResourceType* type, size_t size) { Binary* bin = erts_create_magic_binary(SIZEOF_ErlNifResource(size), &nif_resource_dtor); ErlNifResource* resource = ERTS_MAGIC_BIN_DATA(bin); + + ASSERT(type->owner && type->next && type->prev); /* not allowed in load/upgrade */ resource->type = type; erts_refc_inc(&bin->refc, 1); #ifdef DEBUG @@ -1510,6 +1570,13 @@ enif_schedule_dirty_nif(ErlNifEnv* env, int flags, a = erts_smp_atomic32_read_acqb(&proc->state); while (1) { n = state = a; + /* + * clear any current dirty flags and dirty queue indicators, + * in case the application is shifting a job from one type + * of dirty scheduler to the other + */ + n &= ~(ERTS_PSFLG_DIRTY_CPU_PROC|ERTS_PSFLG_DIRTY_IO_PROC + 
|ERTS_PSFLG_DIRTY_CPU_PROC_IN_Q|ERTS_PSFLG_DIRTY_IO_PROC_IN_Q); if (chkflgs == ERL_NIF_DIRTY_JOB_CPU_BOUND) n |= ERTS_PSFLG_DIRTY_CPU_PROC; else @@ -1540,22 +1607,15 @@ enif_schedule_dirty_nif_finalizer(ErlNifEnv* env, ERL_NIF_TERM result, ERL_NIF_TERM (*fp)(ErlNifEnv*, ERL_NIF_TERM)) { #ifdef USE_THREADS - erts_aint32_t state, n, a; Process* proc = env->proc; Eterm* reg = ERTS_PROC_GET_SCHDATA(proc)->x_reg_array; Export* ep; - a = erts_smp_atomic32_read_acqb(&proc->state); - while (1) { - n = state = a; - if (!(n & (ERTS_PSFLG_DIRTY_CPU_PROC_IN_Q|ERTS_PSFLG_DIRTY_IO_PROC_IN_Q))) - break; - n &= ~(ERTS_PSFLG_DIRTY_CPU_PROC|ERTS_PSFLG_DIRTY_IO_PROC - |ERTS_PSFLG_DIRTY_CPU_PROC_IN_Q|ERTS_PSFLG_DIRTY_IO_PROC_IN_Q); - a = erts_smp_atomic32_cmpxchg_mb(&proc->state, n, state); - if (a == state) - break; - } + erts_smp_atomic32_read_band_mb(&proc->state, + ~(ERTS_PSFLG_DIRTY_CPU_PROC + |ERTS_PSFLG_DIRTY_IO_PROC + |ERTS_PSFLG_DIRTY_CPU_PROC_IN_Q + |ERTS_PSFLG_DIRTY_IO_PROC_IN_Q)); if (!(ep = ERTS_PROC_GET_DIRTY_SCHED_TRAP_EXPORT(proc))) alloc_proc_psd(proc, &ep); ERTS_VBUMP_ALL_REDS(proc); @@ -2040,9 +2100,15 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2) lib->entry = entry; erts_refc_init(&lib->rt_cnt, 0); erts_refc_init(&lib->rt_dtor_cnt, 0); + ASSERT(opened_rt_list == NULL); lib->mod = mod; env.mod_nif = lib; - if (mod->curr.nif != NULL) { /* Reload */ + if (mod->curr.nif != NULL) { /*************** Reload ******************/ + /* + * Repeated load_nif calls from same Erlang module instance ("reload") + * is deprecated and was only ment as a development feature not to + * be used in production systems. (See warning below) + */ int k; lib->priv_data = mod->curr.nif->priv_data; @@ -2074,6 +2140,7 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2) ret = load_nif_error(BIF_P, reload, "Library reload-call unsuccessful."); } else { + commit_opened_resource_types(lib); mod->curr.nif->entry = NULL; /* to prevent 'unload' callback */ erts_unload_nif(mod->curr.nif); reload_warning = 1; @@ -2081,7 +2148,7 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2) } else { lib->priv_data = NULL; - if (mod->old.nif != NULL) { /* Upgrade */ + if (mod->old.nif != NULL) { /**************** Upgrade ***************/ void* prev_old_data = mod->old.nif->priv_data; if (entry->upgrade == NULL) { ret = load_nif_error(BIF_P, upgrade, "Upgrade not supported by this NIF library."); @@ -2094,17 +2161,18 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2) mod->old.nif->priv_data = prev_old_data; ret = load_nif_error(BIF_P, upgrade, "Library upgrade-call unsuccessful."); } - /*else if (mod->old_nif->priv_data != prev_old_data) { - refresh_cached_nif_data(mod->old_code, mod->old_nif); - }*/ + else + commit_opened_resource_types(lib); } - else if (entry->load != NULL) { /* Initial load */ + else if (entry->load != NULL) { /********* Initial load ***********/ erts_pre_nif(&env, BIF_P, lib); veto = entry->load(&env, &lib->priv_data, BIF_ARG_2); erts_post_nif(&env); if (veto) { ret = load_nif_error(BIF_P, "load", "Library load-call unsuccessful."); } + else + commit_opened_resource_types(lib); } } if (ret == am_ok) { @@ -2133,6 +2201,7 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2) } else { error: + rollback_opened_resource_types(); ASSERT(ret != am_ok); if (lib != NULL) { erts_free(ERTS_ALC_T_NIF, lib); diff --git a/erts/emulator/beam/erl_nif.h b/erts/emulator/beam/erl_nif.h index 7613446f64..c12ba4d554 100644 --- a/erts/emulator/beam/erl_nif.h +++ b/erts/emulator/beam/erl_nif.h @@ -172,8 +172,8 @@ typedef ErlDrvThreadOpts ErlNifThreadOpts; #ifdef ERL_NIF_DIRTY_SCHEDULER_SUPPORT typedef 
enum { - ERL_NIF_DIRTY_JOB_CPU_BOUND = 1, - ERL_NIF_DIRTY_JOB_IO_BOUND = 2 + ERL_NIF_DIRTY_JOB_CPU_BOUND = ERL_DRV_DIRTY_JOB_CPU_BOUND, + ERL_NIF_DIRTY_JOB_IO_BOUND = ERL_DRV_DIRTY_JOB_IO_BOUND }ErlNifDirtyTaskFlags; #endif diff --git a/erts/emulator/beam/erl_process.c b/erts/emulator/beam/erl_process.c index 83856ab983..37e1d07107 100644 --- a/erts/emulator/beam/erl_process.c +++ b/erts/emulator/beam/erl_process.c @@ -153,7 +153,6 @@ int erts_sched_balance_util = 0; Uint erts_no_schedulers; #ifdef ERTS_DIRTY_SCHEDULERS Uint erts_no_dirty_cpu_schedulers; -Uint erts_no_dirty_cpu_schedulers_online; Uint erts_no_dirty_io_schedulers; #endif @@ -193,6 +192,13 @@ static ErtsAuxWorkData *aux_thread_aux_work_data; #define ERTS_SCHDLR_SSPND_CHNG_SET(VAL, OLD_VAL) \ erts_smp_atomic32_set_nob(&schdlr_sspnd.changing, (VAL)) +#ifdef ERTS_DIRTY_SCHEDULERS +#define ERTS_SCHDLR_SSPND_DIRTY_CPU_CHNG_SET(VAL, OLD_VAL) \ + erts_smp_atomic32_set_nob(&schdlr_sspnd.dirty_cpu_changing, (VAL)) +#define ERTS_SCHDLR_SSPND_DIRTY_IO_CHNG_SET(VAL, OLD_VAL) \ + erts_smp_atomic32_set_nob(&schdlr_sspnd.dirty_io_changing, (VAL)) +#endif + #else #define ERTS_SCHDLR_SSPND_CHNG_SET(VAL, OLD_VAL) \ @@ -203,6 +209,23 @@ do { \ ASSERT(old_val__ == (OLD_VAL)); \ } while (0) +#ifdef ERTS_DIRTY_SCHEDULERS +#define ERTS_SCHDLR_SSPND_DIRTY_CPU_CHNG_SET(VAL, OLD_VAL) \ +do { \ + erts_aint32_t old_val__; \ + old_val__ = erts_smp_atomic32_xchg_nob(&schdlr_sspnd.dirty_cpu_changing, \ + (VAL)); \ + ASSERT(old_val__ == (OLD_VAL)); \ +} while (0) +#define ERTS_SCHDLR_SSPND_DIRTY_IO_CHNG_SET(VAL, OLD_VAL) \ +do { \ + erts_aint32_t old_val__; \ + old_val__ = erts_smp_atomic32_xchg_nob(&schdlr_sspnd.dirty_io_changing, \ + (VAL)); \ + ASSERT(old_val__ == (OLD_VAL)); \ +} while (0) +#endif + #endif @@ -212,11 +235,29 @@ static struct { int online; int curr_online; int wait_curr_online; +#ifdef ERTS_DIRTY_SCHEDULERS + int dirty_cpu_online; + int dirty_cpu_curr_online; + int dirty_cpu_wait_curr_online; + int dirty_io_online; + int dirty_io_curr_online; + int dirty_io_wait_curr_online; +#endif erts_smp_atomic32_t changing; erts_smp_atomic32_t active; +#ifdef ERTS_DIRTY_SCHEDULERS + erts_smp_atomic32_t dirty_cpu_changing; + erts_smp_atomic32_t dirty_cpu_active; + erts_smp_atomic32_t dirty_io_changing; + erts_smp_atomic32_t dirty_io_active; +#endif struct { int ongoing; long wait_active; +#ifdef ERTS_DIRTY_SCHEDULERS + long dirty_cpu_wait_active; + long dirty_io_wait_active; +#endif ErtsProcList *procs; } msb; /* Multi Scheduling Block */ } schdlr_sspnd; @@ -1311,6 +1352,9 @@ static ERTS_INLINE void haw_thr_prgr_current_check_progress(ErtsAuxWorkData *awdp) { ErtsThrPrgrVal current = awdp->current_thr_prgr; +#ifdef ERTS_DIRTY_SCHEDULERS + ASSERT(!awdp->esdp || !ERTS_SCHEDULER_IS_DIRTY(awdp->esdp)); +#endif if (current != ERTS_THR_PRGR_INVALID && !erts_thr_progress_equal(current, erts_thr_progress_current())) { /* @@ -1327,6 +1371,10 @@ handle_delayed_aux_work_wakeup(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, in { int jix, max_jix; +#ifdef ERTS_DIRTY_SCHEDULERS + ASSERT(!awdp->esdp || !ERTS_SCHEDULER_IS_DIRTY(awdp->esdp)); +#endif + ASSERT(awdp->delayed_wakeup.next != ERTS_DELAYED_WAKEUP_INFINITY); if (!waiting && awdp->delayed_wakeup.next > awdp->esdp->reductions) @@ -1482,6 +1530,9 @@ handle_misc_aux_work_thr_prgr(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, int waiting) { +#ifdef ERTS_DIRTY_SCHEDULERS + ASSERT(!awdp->esdp || !ERTS_SCHEDULER_IS_DIRTY(awdp->esdp)); +#endif if (!erts_thr_progress_has_reached_this(haw_thr_prgr_current(awdp), 
awdp->misc.thr_prgr)) return aux_work & ~ERTS_SSI_AUX_WORK_MISC_THR_PRGR; @@ -1566,6 +1617,9 @@ handle_async_ready(ErtsAuxWorkData *awdp, int waiting) { ErtsSchedulerSleepInfo *ssi = awdp->ssi; +#ifdef ERTS_DIRTY_SCHEDULERS + ASSERT(!awdp->esdp || !ERTS_SCHEDULER_IS_DIRTY(awdp->esdp)); +#endif unset_aux_work_flags(ssi, ERTS_SSI_AUX_WORK_ASYNC_READY); if (erts_check_async_ready(awdp->async_ready.queue)) { if (set_aux_work_flags(ssi, ERTS_SSI_AUX_WORK_ASYNC_READY) @@ -1590,6 +1644,9 @@ handle_async_ready_clean(ErtsAuxWorkData *awdp, { void *thr_prgr_p; +#ifdef ERTS_DIRTY_SCHEDULERS + ASSERT(!awdp->esdp || !ERTS_SCHEDULER_IS_DIRTY(awdp->esdp)); +#endif #ifdef ERTS_SMP if (awdp->async_ready.need_thr_prgr && !erts_thr_progress_has_reached_this(haw_thr_prgr_current(awdp), @@ -1627,6 +1684,9 @@ handle_fix_alloc(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, int waiting) ErtsSchedulerSleepInfo *ssi = awdp->ssi; erts_aint32_t res; +#ifdef ERTS_DIRTY_SCHEDULERS + ASSERT(!awdp->esdp || !ERTS_SCHEDULER_IS_DIRTY(awdp->esdp)); +#endif unset_aux_work_flags(ssi, (ERTS_SSI_AUX_WORK_FIX_ALLOC_LOWER_LIM | ERTS_SSI_AUX_WORK_FIX_ALLOC_DEALLOC)); aux_work &= ~(ERTS_SSI_AUX_WORK_FIX_ALLOC_LOWER_LIM @@ -1660,11 +1720,7 @@ erts_alloc_ensure_handle_delayed_dealloc_call(int ix) { #ifdef DEBUG ErtsSchedulerData *esdp = erts_get_scheduler_data(); -#ifdef ERTS_DIRTY_SCHEDULERS - if (esdp && ERTS_SCHEDULER_IS_DIRTY(esdp)) - return; -#endif - ASSERT(!esdp || ix == (int) esdp->no); + ASSERT(!esdp || (ERTS_SCHEDULER_IS_DIRTY(esdp) || ix == (int) esdp->no)); #endif set_aux_work_flags_wakeup_nob(ERTS_SCHED_SLEEP_INFO_IX(ix-1), ERTS_SSI_AUX_WORK_DD); @@ -1678,6 +1734,9 @@ handle_delayed_dealloc(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, int waitin ErtsThrPrgrVal wakeup = ERTS_THR_PRGR_INVALID; int more_work = 0; +#ifdef ERTS_DIRTY_SCHEDULERS + ASSERT(!awdp->esdp || !ERTS_SCHEDULER_IS_DIRTY(awdp->esdp)); +#endif unset_aux_work_flags(ssi, ERTS_SSI_AUX_WORK_DD); erts_alloc_scheduler_handle_delayed_dealloc((void *) awdp->esdp, &need_thr_progress, @@ -1717,6 +1776,9 @@ handle_delayed_dealloc_thr_prgr(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, i ErtsThrPrgrVal wakeup = ERTS_THR_PRGR_INVALID; ErtsThrPrgrVal current = haw_thr_prgr_current(awdp); +#ifdef ERTS_DIRTY_SCHEDULERS + ASSERT(!awdp->esdp || !ERTS_SCHEDULER_IS_DIRTY(awdp->esdp)); +#endif if (!erts_thr_progress_has_reached_this(current, awdp->dd.thr_prgr)) return aux_work & ~ERTS_SSI_AUX_WORK_DD_THR_PRGR; @@ -1764,6 +1826,9 @@ handle_thr_prgr_later_op(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, int wait int lops; ErtsThrPrgrVal current = haw_thr_prgr_current(awdp); +#ifdef ERTS_DIRTY_SCHEDULERS + ASSERT(!awdp->esdp || !ERTS_SCHEDULER_IS_DIRTY(awdp->esdp)); +#endif for (lops = 0; lops < ERTS_MAX_THR_PRGR_LATER_OPS; lops++) { ErtsThrPrgrLaterOp *lop = awdp->later_op.first; if (!erts_thr_progress_has_reached_this(current, lop->later)) @@ -1922,6 +1987,14 @@ erts_smp_notify_check_children_needed(void) for (i = 0; i < erts_no_schedulers; i++) set_aux_work_flags_wakeup_nob(ERTS_SCHED_SLEEP_INFO_IX(i), ERTS_SSI_AUX_WORK_CHECK_CHILDREN); +#ifdef ERTS_DIRTY_SCHEDULERS + for (i = 0; i < erts_no_dirty_cpu_schedulers; i++) + set_aux_work_flags_wakeup_nob(ERTS_DIRTY_CPU_SCHED_SLEEP_INFO_IX(i), + ERTS_SSI_AUX_WORK_CHECK_CHILDREN); + for (i = 0; i < erts_no_dirty_io_schedulers; i++) + set_aux_work_flags_wakeup_nob(ERTS_DIRTY_IO_SCHED_SLEEP_INFO_IX(i), + ERTS_SSI_AUX_WORK_CHECK_CHILDREN); +#endif } static ERTS_INLINE erts_aint32_t @@ -2714,15 +2787,15 @@ scheduler_wait(int *fcalls, 
ErtsSchedulerData *esdp, ErtsRunQueue *rq) while (1) { - aux_work = ERTS_SCHEDULER_IS_DIRTY(esdp) ? 0 : - erts_atomic32_read_acqb(&ssi->aux_work); + aux_work = erts_atomic32_read_acqb(&ssi->aux_work); if (aux_work) { - if (!thr_prgr_active) { + if (!ERTS_SCHEDULER_IS_DIRTY(esdp) && !thr_prgr_active) { erts_thr_progress_active(esdp, thr_prgr_active = 1); sched_wall_time_change(esdp, 1); } aux_work = handle_aux_work(&esdp->aux_work_data, aux_work, 1); - if (aux_work && erts_thr_progress_update(esdp)) + if (aux_work && !ERTS_SCHEDULER_IS_DIRTY(esdp) + && erts_thr_progress_update(esdp)) erts_thr_progress_leader_update(esdp); } @@ -3012,7 +3085,7 @@ wake_scheduler(ErtsRunQueue *rq) #ifdef ERTS_DIRTY_SCHEDULERS static void -wake_dirty_scheduler(ErtsRunQueue *rq) +wake_dirty_schedulers(ErtsRunQueue *rq, int one) { ErtsSchedulerSleepInfo *ssi; ErtsSchedulerSleepList *sl; @@ -3022,9 +3095,27 @@ wake_dirty_scheduler(ErtsRunQueue *rq) sl = &rq->sleepers; erts_smp_spin_lock(&sl->lock); ssi = sl->list; - if (!ssi) + if (!ssi) { erts_smp_spin_unlock(&sl->lock); - else { + if (one) + wake_scheduler(rq); + } else if (one) { + erts_aint32_t flgs; + if (ssi->prev) + ssi->prev->next = ssi->next; + else { + ASSERT(sl->list == ssi); + sl->list = ssi->next; + } + if (ssi->next) + ssi->next->prev = ssi->prev; + + erts_smp_spin_unlock(&sl->lock); + + ERTS_THR_MEMORY_BARRIER; + flgs = ssi_flags_set_wake(ssi); + erts_sched_finish_poke(ssi, flgs); + } else { sl->list = NULL; erts_smp_spin_unlock(&sl->lock); @@ -3176,7 +3267,7 @@ smp_notify_inc_runq(ErtsRunQueue *runq) if (runq) { #ifdef ERTS_DIRTY_SCHEDULERS if (ERTS_RUNQ_IX_IS_DIRTY(runq->ix)) - wake_dirty_scheduler(runq); + wake_dirty_schedulers(runq, 1); else #endif wake_scheduler(runq); @@ -3474,6 +3565,7 @@ suspend_run_queue(ErtsRunQueue *rq) } static void scheduler_ix_resume_wake(Uint ix); +static void scheduler_ssi_resume_wake(ErtsSchedulerSleepInfo *ssi); static ERTS_INLINE void resume_run_queue(ErtsRunQueue *rq) @@ -3500,7 +3592,10 @@ resume_run_queue(ErtsRunQueue *rq) erts_smp_runq_unlock(rq); - scheduler_ix_resume_wake(rq->ix); +#ifdef ERTS_DIRTY_SCHEDULERS + if (!ERTS_RUNQ_IX_IS_DIRTY(rq->ix)) +#endif + scheduler_ix_resume_wake(rq->ix); } typedef struct { @@ -3531,20 +3626,28 @@ evacuate_run_queue(ErtsRunQueue *rq, int prio_q; ErtsRunQueue *to_rq; ErtsMigrationPaths *mps; - ErtsMigrationPath *mp; + ErtsMigrationPath *mp = NULL; ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq)); (void) ERTS_RUNQ_FLGS_UNSET(rq, ERTS_RUNQ_FLG_PROTECTED); - mps = erts_get_migration_paths_managed(); - mp = &mps->mpath[rq->ix]; +#ifdef ERTS_DIRTY_SCHEDULERS + if (!ERTS_RUNQ_IX_IS_DIRTY(rq->ix)) +#endif + { + mps = erts_get_migration_paths_managed(); + mp = &mps->mpath[rq->ix]; + } /* Evacuate scheduled misc ops */ if (rq->misc.start) { ErtsMiscOpList *start, *end; +#ifdef ERTS_DIRTY_SCHEDULERS + ASSERT(!ERTS_RUNQ_IX_IS_DIRTY(rq->ix)); +#endif to_rq = mp->misc_evac_runq; if (!to_rq) return; @@ -3573,6 +3676,9 @@ evacuate_run_queue(ErtsRunQueue *rq, if (rq->ports.start) { Port *prt; +#ifdef ERTS_DIRTY_SCHEDULERS + ASSERT(!ERTS_RUNQ_IX_IS_DIRTY(rq->ix)); +#endif to_rq = mp->prio[ERTS_PORT_PRIO_LEVEL].runq; if (!to_rq) return; @@ -3608,15 +3714,26 @@ evacuate_run_queue(ErtsRunQueue *rq, erts_aint32_t state; Process *proc; int notify = 0; +#ifdef ERTS_DIRTY_SCHEDULERS + int requeue; +#endif to_rq = NULL; - if (!mp->prio[prio_q].runq) - return; - if (prio_q == PRIORITY_NORMAL && !mp->prio[PRIORITY_LOW].runq) - return; +#ifdef ERTS_DIRTY_SCHEDULERS + if 
(!ERTS_RUNQ_IX_IS_DIRTY(rq->ix)) +#endif + { + if (!mp->prio[prio_q].runq) + return; + if (prio_q == PRIORITY_NORMAL && !mp->prio[PRIORITY_LOW].runq) + return; + } proc = dequeue_process(rq, prio_q, &state); while (proc) { +#ifdef ERTS_DIRTY_SCHEDULERS + requeue = 1; +#endif if (ERTS_PSFLG_BOUND & state) { /* Bound processes get stuck here... */ proc->next = NULL; @@ -3625,12 +3742,43 @@ evacuate_run_queue(ErtsRunQueue *rq, else sbpp->first = proc; sbpp->last = proc; +#ifdef ERTS_DIRTY_SCHEDULERS + requeue = 0; +#endif + } +#ifdef ERTS_DIRTY_SCHEDULERS + else if (state & ERTS_PSFLG_DIRTY_CPU_PROC_IN_Q) { + erts_aint32_t old; + old = erts_smp_atomic32_read_band_nob(&proc->state, + ~(ERTS_PSFLG_DIRTY_CPU_PROC + | ERTS_PSFLG_DIRTY_CPU_PROC_IN_Q)); + /* assert that no other dirty flags are set */ + ASSERT(!(old & (ERTS_PSFLG_DIRTY_IO_PROC|ERTS_PSFLG_DIRTY_IO_PROC_IN_Q))); + } else if (state & ERTS_PSFLG_DIRTY_IO_PROC_IN_Q) { + erts_aint32_t old; + old = erts_smp_atomic32_read_band_nob(&proc->state, + ~(ERTS_PSFLG_DIRTY_IO_PROC + | ERTS_PSFLG_DIRTY_IO_PROC_IN_Q)); + /* assert that no other dirty flags are set */ + ASSERT(!(old & (ERTS_PSFLG_DIRTY_CPU_PROC|ERTS_PSFLG_DIRTY_CPU_PROC_IN_Q))); } + if (requeue) { +#else else { +#endif int prio = (int) ERTS_PSFLGS_GET_PRQ_PRIO(state); erts_smp_runq_unlock(rq); - to_rq = mp->prio[prio].runq; +#ifdef ERTS_DIRTY_SCHEDULERS + if (ERTS_RUNQ_IX_IS_DIRTY(rq->ix)) + /* + * dirty run queues evacuate only to run + * queue 0 during multi-scheduling blocking + */ + to_rq = ERTS_RUNQ_IX(0); + else +#endif + to_rq = mp->prio[prio].runq; RUNQ_SET_RQ(&proc->run_queue, to_rq); erts_smp_runq_lock(to_rq); @@ -4744,13 +4892,20 @@ wakeup_other_check(ErtsRunQueue *rq, Uint32 flags) rq->wakeup_other += (left_len*wo_reds + ERTS_WAKEUP_OTHER_FIXED_INC); if (rq->wakeup_other > wakeup_other.limit) { - int empty_rqs = - erts_smp_atomic32_read_acqb(&no_empty_run_queues); - if (flags & ERTS_RUNQ_FLG_PROTECTED) - (void) ERTS_RUNQ_FLGS_UNSET(rq, ERTS_RUNQ_FLG_PROTECTED); - if (empty_rqs != 0) - wake_scheduler_on_empty_runq(rq); - rq->wakeup_other = 0; +#ifdef ERTS_DIRTY_SCHEDULERS + if (ERTS_RUNQ_IX_IS_DIRTY(rq->ix) && rq->waiting) + wake_dirty_schedulers(rq, 1); + else +#endif + { + int empty_rqs = + erts_smp_atomic32_read_acqb(&no_empty_run_queues); + if (flags & ERTS_RUNQ_FLG_PROTECTED) + (void) ERTS_RUNQ_FLGS_UNSET(rq, ERTS_RUNQ_FLG_PROTECTED); + if (empty_rqs != 0) + wake_scheduler_on_empty_runq(rq); + rq->wakeup_other = 0; + } } } rq->wakeup_other_reds = 0; @@ -5037,8 +5192,12 @@ erts_sched_set_wake_cleanup_threshold(char *str) static void init_aux_work_data(ErtsAuxWorkData *awdp, ErtsSchedulerData *esdp, char *dawwp) { - if (!esdp || ERTS_SCHEDULER_IS_DIRTY(esdp)) + if (!esdp) awdp->sched_id = 0; +#ifdef ERTS_DIRTY_SCHEDULERS + else if (ERTS_SCHEDULER_IS_DIRTY(esdp)) + awdp->sched_id = (int) ERTS_DIRTY_SCHEDULER_NO(esdp); +#endif else awdp->sched_id = (int) esdp->no; awdp->esdp = esdp; @@ -5104,11 +5263,11 @@ init_scheduler_data(ErtsSchedulerData* esdp, int num, #ifdef ERTS_DIRTY_SCHEDULERS if (ERTS_RUNQ_IX_IS_DIRTY(runq->ix)) { esdp->no = 0; - esdp->dirty_no = (Uint) num; + ERTS_DIRTY_SCHEDULER_NO(esdp) = (Uint) num; } else { esdp->no = (Uint) num; - esdp->dirty_no = 0; + ERTS_DIRTY_SCHEDULER_NO(esdp) = 0; } #else esdp->no = (Uint) num; @@ -5139,7 +5298,12 @@ init_scheduler_data(ErtsSchedulerData* esdp, int num, } void -erts_init_scheduling(int no_schedulers, int no_schedulers_online) +erts_init_scheduling(int no_schedulers, int no_schedulers_online +#ifdef 
ERTS_DIRTY_SCHEDULERS + , int no_dirty_cpu_schedulers, int no_dirty_cpu_schedulers_online, + int no_dirty_io_schedulers +#endif + ) { int ix, n, no_ssi; char *daww_ptr; @@ -5161,6 +5325,12 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online) ASSERT(no_schedulers_online <= no_schedulers); ASSERT(no_schedulers_online >= 1); ASSERT(no_schedulers >= 1); +#ifdef ERTS_DIRTY_SCHEDULERS + ASSERT(no_dirty_cpu_schedulers <= no_schedulers); + ASSERT(no_dirty_cpu_schedulers >= 1); + ASSERT(no_dirty_cpu_schedulers_online <= no_schedulers_online); + ASSERT(no_dirty_cpu_schedulers_online >= 1); +#endif /* Create and initialize run queues */ @@ -5197,10 +5367,9 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online) #ifdef ERTS_DIRTY_SCHEDULERS #ifdef ERTS_SMP - if (ERTS_RUNQ_IX_IS_DIRTY(ix)) { + if (ERTS_RUNQ_IX_IS_DIRTY(ix)) erts_smp_spinlock_init(&rq->sleepers.lock, "dirty_run_queue_sleep_list"); - rq->sleepers.list = NULL; - } + rq->sleepers.list = NULL; #endif #endif @@ -5264,6 +5433,10 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online) n = (int) no_schedulers; erts_no_schedulers = n; +#ifdef ERTS_DIRTY_SCHEDULERS + erts_no_dirty_cpu_schedulers = no_dirty_cpu_schedulers; + erts_no_dirty_io_schedulers = no_dirty_io_schedulers; +#endif /* Create and initialize scheduler sleep info */ #ifdef ERTS_SMP @@ -5295,21 +5468,21 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online) aligned_dirty_cpu_sched_sleep_info = erts_alloc_permanent_cache_aligned( ERTS_ALC_T_SCHDLR_SLP_INFO, - erts_no_dirty_cpu_schedulers*sizeof(ErtsAlignedSchedulerSleepInfo)); - for (ix = 0; ix < erts_no_dirty_cpu_schedulers; ix++) { + no_dirty_cpu_schedulers*sizeof(ErtsAlignedSchedulerSleepInfo)); + for (ix = 0; ix < no_dirty_cpu_schedulers; ix++) { ErtsSchedulerSleepInfo *ssi = &aligned_dirty_cpu_sched_sleep_info[ix].ssi; erts_smp_atomic32_init_nob(&ssi->flags, 0); - ssi->event = NULL; /* initialized in sched_thread_func */ + ssi->event = NULL; /* initialized in sched_dirty_cpu_thread_func */ erts_atomic32_init_nob(&ssi->aux_work, 0); } aligned_dirty_io_sched_sleep_info = erts_alloc_permanent_cache_aligned( ERTS_ALC_T_SCHDLR_SLP_INFO, - erts_no_dirty_io_schedulers*sizeof(ErtsAlignedSchedulerSleepInfo)); - for (ix = 0; ix < erts_no_dirty_io_schedulers; ix++) { + no_dirty_io_schedulers*sizeof(ErtsAlignedSchedulerSleepInfo)); + for (ix = 0; ix < no_dirty_io_schedulers; ix++) { ErtsSchedulerSleepInfo *ssi = &aligned_dirty_io_sched_sleep_info[ix].ssi; erts_smp_atomic32_init_nob(&ssi->flags, 0); - ssi->event = NULL; /* initialized in sched_thread_func */ + ssi->event = NULL; /* initialized in sched_dirty_io_thread_func */ erts_atomic32_init_nob(&ssi->aux_work, 0); } #endif @@ -5342,8 +5515,8 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online) erts_aligned_dirty_cpu_scheduler_data = erts_alloc_permanent_cache_aligned( ERTS_ALC_T_SCHDLR_DATA, - erts_no_dirty_cpu_schedulers*sizeof(ErtsAlignedSchedulerData)); - for (ix = 0; ix < erts_no_dirty_cpu_schedulers; ix++) { + no_dirty_cpu_schedulers*sizeof(ErtsAlignedSchedulerData)); + for (ix = 0; ix < no_dirty_cpu_schedulers; ix++) { ErtsSchedulerData *esdp = ERTS_DIRTY_CPU_SCHEDULER_IX(ix); init_scheduler_data(esdp, ix+1, ERTS_DIRTY_CPU_SCHED_SLEEP_INFO_IX(ix), ERTS_DIRTY_CPU_RUNQ, NULL, 0); @@ -5351,8 +5524,8 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online) erts_aligned_dirty_io_scheduler_data = erts_alloc_permanent_cache_aligned( ERTS_ALC_T_SCHDLR_DATA, - 
erts_no_dirty_io_schedulers*sizeof(ErtsAlignedSchedulerData)); - for (ix = 0; ix < erts_no_dirty_io_schedulers; ix++) { + no_dirty_io_schedulers*sizeof(ErtsAlignedSchedulerData)); + for (ix = 0; ix < no_dirty_io_schedulers; ix++) { ErtsSchedulerData *esdp = ERTS_DIRTY_IO_SCHEDULER_IX(ix); init_scheduler_data(esdp, ix+1, ERTS_DIRTY_IO_SCHED_SLEEP_INFO_IX(ix), ERTS_DIRTY_IO_RUNQ, NULL, 0); @@ -5382,6 +5555,16 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online) schdlr_sspnd.curr_online = no_schedulers; schdlr_sspnd.msb.ongoing = 0; erts_smp_atomic32_init_nob(&schdlr_sspnd.active, no_schedulers); +#ifdef ERTS_DIRTY_SCHEDULERS + erts_smp_atomic32_init_nob(&schdlr_sspnd.dirty_cpu_changing, 0); + schdlr_sspnd.dirty_cpu_online = no_dirty_cpu_schedulers_online; + schdlr_sspnd.dirty_cpu_curr_online = no_dirty_cpu_schedulers; + erts_smp_atomic32_init_nob(&schdlr_sspnd.dirty_cpu_active, no_dirty_cpu_schedulers); + erts_smp_atomic32_init_nob(&schdlr_sspnd.dirty_io_changing, 0); + schdlr_sspnd.dirty_io_online = no_dirty_io_schedulers; + schdlr_sspnd.dirty_io_curr_online = no_dirty_io_schedulers; + erts_smp_atomic32_init_nob(&schdlr_sspnd.dirty_io_active, no_dirty_io_schedulers); +#endif schdlr_sspnd.msb.procs = NULL; init_no_runqs(no_schedulers_online, no_schedulers_online); balance_info.last_active_runqs = no_schedulers; @@ -5407,6 +5590,21 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online) schdlr_sspnd.curr_online *= 2; /* Boot strapping... */ ERTS_SCHDLR_SSPND_CHNG_SET((ERTS_SCHDLR_SSPND_CHNG_ONLN | ERTS_SCHDLR_SSPND_CHNG_WAITER), 0); +#ifdef ERTS_DIRTY_SCHEDULERS + schdlr_sspnd.dirty_cpu_wait_curr_online = no_dirty_cpu_schedulers_online; + schdlr_sspnd.dirty_cpu_curr_online *= 2; + ERTS_SCHDLR_SSPND_DIRTY_CPU_CHNG_SET((ERTS_SCHDLR_SSPND_CHNG_ONLN + | ERTS_SCHDLR_SSPND_CHNG_WAITER), 0); + for (ix = no_dirty_cpu_schedulers_online; ix < no_dirty_cpu_schedulers; ix++) { + ErtsSchedulerData* esdp = ERTS_DIRTY_CPU_SCHEDULER_IX(ix); + erts_smp_atomic32_read_bor_nob(&esdp->ssi->flags, ERTS_SSI_FLG_SUSPENDED); + } + + schdlr_sspnd.dirty_io_wait_curr_online = no_dirty_io_schedulers; + schdlr_sspnd.dirty_io_curr_online *= 2; + ERTS_SCHDLR_SSPND_DIRTY_IO_CHNG_SET((ERTS_SCHDLR_SSPND_CHNG_ONLN + | ERTS_SCHDLR_SSPND_CHNG_WAITER), 0); +#endif erts_smp_atomic32_init_nob(&doing_sys_schedule, 0); @@ -5525,9 +5723,16 @@ free_proxy_proc(Process *proxy) erts_free(ERTS_ALC_T_PROC, proxy); } +#define ERTS_ENQUEUE_NOT 0 +#define ERTS_ENQUEUE_NORMAL_QUEUE 1 +#ifdef ERTS_DIRTY_SCHEDULERS +#define ERTS_ENQUEUE_DIRTY_CPU_QUEUE 2 +#define ERTS_ENQUEUE_DIRTY_IO_QUEUE 3 +#endif static ERTS_INLINE int -check_enqueue_in_prio_queue(erts_aint32_t *prq_prio_p, +check_enqueue_in_prio_queue(Process *c_p, + erts_aint32_t *prq_prio_p, erts_aint32_t *newp, erts_aint32_t actual) { @@ -5539,56 +5744,105 @@ check_enqueue_in_prio_queue(erts_aint32_t *prq_prio_p, *prq_prio_p = aprio; #ifdef ERTS_DIRTY_SCHEDULERS - if (!(actual & (ERTS_PSFLG_DIRTY_CPU_PROC|ERTS_PSFLG_DIRTY_IO_PROC))) { + if (actual & (ERTS_PSFLG_DIRTY_CPU_PROC|ERTS_PSFLG_DIRTY_IO_PROC)) { + /* + * If we have system tasks of a priority higher + * or equal to the user priority, we enqueue + * on ordinary run-queue and take care of + * those system tasks first. 
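The qmask test that follows isolates the lowest set bit with qmask & -qmask; with ERTS's queue-bit layout (MAX in the lowest bit up to LOW in the highest), that bit names the highest-priority non-empty system-task queue. The mapping as a standalone sketch, with the bit values assumed here rather than pulled from erl_process.h:

enum { MAX_BIT = 1, HIGH_BIT = 2, NORMAL_BIT = 4, LOW_BIT = 8 };
enum { PRIORITY_MAX, PRIORITY_HIGH, PRIORITY_NORMAL, PRIORITY_LOW };

static int highest_pending_prio(unsigned qmask)
{
    switch (qmask & -qmask) {  /* keep only the lowest set bit */
    case MAX_BIT:    return PRIORITY_MAX;
    case HIGH_BIT:   return PRIORITY_HIGH;
    case NORMAL_BIT: return PRIORITY_NORMAL;
    case LOW_BIT:    return PRIORITY_LOW;
    default:         return PRIORITY_LOW + 1;  /* no queued system task */
    }
}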
+ */ + if (actual & ERTS_PSFLG_ACTIVE_SYS) { + erts_aint32_t uprio, stprio, qmask; + uprio = (actual >> ERTS_PSFLGS_USR_PRIO_OFFSET) & ERTS_PSFLGS_PRIO_MASK; + if (aprio < uprio) + goto enqueue_normal_runq; /* system tasks with higher prio */ + erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_STATUS); + qmask = c_p->sys_task_qs->qmask; + erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_STATUS); + switch (qmask & -qmask) { + case MAX_BIT: + stprio = PRIORITY_MAX; + break; + case HIGH_BIT: + stprio = PRIORITY_HIGH; + break; + case NORMAL_BIT: + stprio = PRIORITY_NORMAL; + break; + case LOW_BIT: + stprio = PRIORITY_LOW; + break; + default: + stprio = PRIORITY_LOW+1; + break; + } + if (stprio <= uprio) + goto enqueue_normal_runq; /* system tasks with higher prio */ + } + + /* Enqueue in dirty run queue if not already enqueued */ + if (actual & (ERTS_PSFLG_DIRTY_CPU_PROC_IN_Q|ERTS_PSFLG_DIRTY_IO_PROC_IN_Q)) + return ERTS_ENQUEUE_NOT; /* already in queue */ + if (actual & ERTS_PSFLG_DIRTY_CPU_PROC) { + *newp |= ERTS_PSFLG_DIRTY_CPU_PROC_IN_Q; + if (actual & ERTS_PSFLG_IN_RUNQ) + return -ERTS_ENQUEUE_DIRTY_CPU_QUEUE; /* use proxy */ + *newp |= ERTS_PSFLG_IN_RUNQ; + return ERTS_ENQUEUE_DIRTY_CPU_QUEUE; + } + *newp |= ERTS_PSFLG_DIRTY_IO_PROC_IN_Q; + if (actual & ERTS_PSFLG_IN_RUNQ) + return -ERTS_ENQUEUE_DIRTY_IO_QUEUE; /* use proxy */ + *newp |= ERTS_PSFLG_IN_RUNQ; + return ERTS_ENQUEUE_DIRTY_IO_QUEUE; + } + + enqueue_normal_runq: #endif - max_qbit = (actual >> ERTS_PSFLGS_IN_PRQ_MASK_OFFSET) & ERTS_PSFLGS_QMASK; - max_qbit |= 1 << ERTS_PSFLGS_QMASK_BITS; - max_qbit &= -max_qbit; - /* - * max_qbit now either contain bit set for highest prio queue or a bit - * out of range (which will have a value larger than valid range). - */ + max_qbit = (actual >> ERTS_PSFLGS_IN_PRQ_MASK_OFFSET) & ERTS_PSFLGS_QMASK; + max_qbit |= 1 << ERTS_PSFLGS_QMASK_BITS; + max_qbit &= -max_qbit; + /* + * max_qbit now either contain bit set for highest prio queue or a bit + * out of range (which will have a value larger than valid range). + */ - if (qbit >= max_qbit) - return 0; /* Already queued in higher or equal prio */ + if (qbit >= max_qbit) + return ERTS_ENQUEUE_NOT; /* Already queued in higher or equal prio */ - /* Need to enqueue (if already enqueued, it is in lower prio) */ - *newp |= qbit << ERTS_PSFLGS_IN_PRQ_MASK_OFFSET; + /* Need to enqueue (if already enqueued, it is in lower prio) */ + *newp |= qbit << ERTS_PSFLGS_IN_PRQ_MASK_OFFSET; - if ((actual & (ERTS_PSFLG_IN_RUNQ|ERTS_PSFLGS_USR_PRIO_MASK)) - != (aprio << ERTS_PSFLGS_USR_PRIO_OFFSET)) { - /* - * Process struct already enqueued, or actual prio not - * equal to user prio, i.e., enqueue using proxy. - */ - return -1; - } -#ifdef ERTS_DIRTY_SCHEDULERS - } else { - if (actual & ERTS_PSFLG_DIRTY_CPU_PROC) - *newp |= ERTS_PSFLG_DIRTY_CPU_PROC_IN_Q; - else - *newp |= ERTS_PSFLG_DIRTY_IO_PROC_IN_Q; + if ((actual & (ERTS_PSFLG_IN_RUNQ|ERTS_PSFLGS_USR_PRIO_MASK)) + != (aprio << ERTS_PSFLGS_USR_PRIO_OFFSET)) { + /* + * Process struct already enqueued, or actual prio not + * equal to user prio, i.e., enqueue using proxy. + */ + return -ERTS_ENQUEUE_NORMAL_QUEUE; } -#endif /* * Enqueue using process struct. */ *newp &= ~ERTS_PSFLGS_PRQ_PRIO_MASK; *newp |= ERTS_PSFLG_IN_RUNQ | (aprio << ERTS_PSFLGS_PRQ_PRIO_OFFSET); - return 1; + return ERTS_ENQUEUE_NORMAL_QUEUE; } /* - * scheduler_out_process() return with c_rq locked. + * schedule_out_process() return with c_rq locked. 
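check_enqueue_in_prio_queue() now packs two decisions into one return value: the magnitude picks one of the ERTS_ENQUEUE_* queue kinds defined above, and a negative sign means the process must be enqueued via a proxy struct because its own struct is already in a run queue. A sketch of consuming that convention, with the constants restated locally:

enum {
    ENQUEUE_NOT = 0,
    ENQUEUE_NORMAL_QUEUE = 1,
    ENQUEUE_DIRTY_CPU_QUEUE = 2,
    ENQUEUE_DIRTY_IO_QUEUE = 3
};

static void act_on_enqueue(int enqueue)
{
    int use_proxy = enqueue < 0;             /* mirrors the enqueue < 0 tests */
    switch (use_proxy ? -enqueue : enqueue) {
    case ENQUEUE_NOT:             /* leave the process out of all queues */  break;
    case ENQUEUE_NORMAL_QUEUE:    /* erts_get_runq_proc() + migration */     break;
    case ENQUEUE_DIRTY_CPU_QUEUE: /* ERTS_DIRTY_CPU_RUNQ */                  break;
    case ENQUEUE_DIRTY_IO_QUEUE:  /* ERTS_DIRTY_IO_RUNQ */                   break;
    }
}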
*/ static ERTS_INLINE int schedule_out_process(ErtsRunQueue *c_rq, erts_aint32_t state, Process *p, Process *proxy) { erts_aint32_t a, e, n, enq_prio = -1; - int res = 0; int enqueue; /* < 0 -> use proxy */ + Process* sched_p; + ErtsRunQueue* runq; +#ifdef ERTS_SMP + int check_emigration_need; +#endif a = state; @@ -5597,20 +5851,20 @@ schedule_out_process(ErtsRunQueue *c_rq, erts_aint32_t state, Process *p, Proces ASSERT(a & (ERTS_PSFLG_RUNNING|ERTS_PSFLG_RUNNING_SYS)); - enqueue = 0; + enqueue = ERTS_ENQUEUE_NOT; n &= ~(ERTS_PSFLG_RUNNING|ERTS_PSFLG_RUNNING_SYS); if (a & ERTS_PSFLG_ACTIVE_SYS || (a & (ERTS_PSFLG_ACTIVE|ERTS_PSFLG_SUSPENDED)) == ERTS_PSFLG_ACTIVE) { - enqueue = check_enqueue_in_prio_queue(&enq_prio, &n, a); + enqueue = check_enqueue_in_prio_queue(p, &enq_prio, &n, a); } a = erts_smp_atomic32_cmpxchg_mb(&p->state, n, e); if (a == e) break; } - if (!enqueue) { - + switch (enqueue) { + case ERTS_ENQUEUE_NOT: if (erts_system_profile_flags.runnable_procs) { if (!(a & ERTS_PSFLG_ACTIVE_SYS) @@ -5623,60 +5877,76 @@ schedule_out_process(ErtsRunQueue *c_rq, erts_aint32_t state, Process *p, Proces if (proxy) free_proxy_proc(proxy); - } - else { - Process *sched_p; - ErtsRunQueue *runq; - ASSERT(!(n & ERTS_PSFLG_SUSPENDED) || (n & ERTS_PSFLG_ACTIVE_SYS)); + erts_smp_runq_lock(c_rq); + return 0; #ifdef ERTS_DIRTY_SCHEDULERS #ifdef ERTS_SMP - if (ERTS_PSFLG_DIRTY_CPU_PROC & a) - runq = ERTS_DIRTY_CPU_RUNQ; - else if (ERTS_PSFLG_DIRTY_IO_PROC & a) - runq = ERTS_DIRTY_IO_RUNQ; - else + case ERTS_ENQUEUE_DIRTY_CPU_QUEUE: + case -ERTS_ENQUEUE_DIRTY_CPU_QUEUE: + runq = ERTS_DIRTY_CPU_RUNQ; + ASSERT(ERTS_SCHEDULER_IS_DIRTY_CPU(runq->scheduler)); +#ifdef ERTS_SMP + check_emigration_need = 0; +#endif + break; + + case ERTS_ENQUEUE_DIRTY_IO_QUEUE: + case -ERTS_ENQUEUE_DIRTY_IO_QUEUE: + runq = ERTS_DIRTY_IO_RUNQ; + ASSERT(ERTS_SCHEDULER_IS_DIRTY_IO(runq->scheduler)); +#ifdef ERTS_SMP + check_emigration_need = 0; +#endif + break; #endif #endif - runq = erts_get_runq_proc(p); - if (enqueue < 0) - sched_p = make_proxy_proc(proxy, p, enq_prio); - else { - sched_p = p; - if (proxy) - free_proxy_proc(proxy); - } + default: + ASSERT(enqueue == ERTS_ENQUEUE_NORMAL_QUEUE + || enqueue == -ERTS_ENQUEUE_NORMAL_QUEUE); + runq = erts_get_runq_proc(p); #ifdef ERTS_SMP - if (!(ERTS_PSFLG_BOUND & n) -#ifdef ERTS_DIRTY_SCHEDULERS - && !(n & (ERTS_PSFLG_DIRTY_CPU_PROC_IN_Q|ERTS_PSFLG_DIRTY_IO_PROC_IN_Q)) + check_emigration_need = !(ERTS_PSFLG_BOUND & n); #endif - ) { - ErtsRunQueue *new_runq = erts_check_emigration_need(runq, enq_prio); - if (new_runq) { - RUNQ_SET_RQ(&sched_p->run_queue, new_runq); - runq = new_runq; - } + break; + } + + ASSERT(!(n & ERTS_PSFLG_SUSPENDED) || (n & ERTS_PSFLG_ACTIVE_SYS)); + + if (enqueue < 0) + sched_p = make_proxy_proc(proxy, p, enq_prio); + else { + sched_p = p; + if (proxy) + free_proxy_proc(proxy); + } + +#ifdef ERTS_SMP + if (check_emigration_need) { + ErtsRunQueue *new_runq = erts_check_emigration_need(runq, enq_prio); + if (new_runq) { + RUNQ_SET_RQ(&sched_p->run_queue, new_runq); + runq = new_runq; } + } #endif - ASSERT(runq); - res = 1; - erts_smp_runq_lock(runq); + ASSERT(runq); - /* Enqueue the process */ - enqueue_process(runq, (int) enq_prio, sched_p); + erts_smp_runq_lock(runq); - if (runq == c_rq) - return res; - erts_smp_runq_unlock(runq); - smp_notify_inc_runq(runq); - } + /* Enqueue the process */ + enqueue_process(runq, (int) enq_prio, sched_p); + + if (runq == c_rq) + return 1; + erts_smp_runq_unlock(runq); + smp_notify_inc_runq(runq); 
erts_smp_runq_lock(c_rq); - return res; + return 1; } static ERTS_INLINE void @@ -5732,7 +6002,7 @@ change_proc_schedule_state(Process *p, erts_aint32_t e; n = e = a; - enqueue = 0; + enqueue = ERTS_ENQUEUE_NOT; if (a & ERTS_PSFLG_FREE) break; /* We don't want to schedule free processes... */ @@ -5753,13 +6023,13 @@ change_proc_schedule_state(Process *p, * process may be in a run queue via proxy, need * further inspection... */ - enqueue = check_enqueue_in_prio_queue(enq_prio_p, &n, a); + enqueue = check_enqueue_in_prio_queue(p, enq_prio_p, &n, a); } a = erts_smp_atomic32_cmpxchg_mb(&p->state, n, e); if (a == e) break; - if (enqueue == 0 && n == a) + if (enqueue == ERTS_ENQUEUE_NOT && n == a) break; } @@ -5793,7 +6063,7 @@ schedule_process(Process *p, erts_aint32_t in_state) ERTS_PSFLG_ACTIVE, &state, &enq_prio); - if (enqueue) + if (enqueue != ERTS_ENQUEUE_NOT) add2runq(enqueue > 0 ? p : make_proxy_proc(NULL, p, enq_prio), state, enq_prio); @@ -5820,14 +6090,14 @@ schedule_process_sys_task(Process *p, erts_aint32_t state, Process *proxy) if (a & ERTS_PSFLG_FREE) return; /* We don't want to schedule free processes... */ - enqueue = 0; + enqueue = ERTS_ENQUEUE_NOT; n |= ERTS_PSFLG_ACTIVE_SYS; if (!(a & (ERTS_PSFLG_RUNNING|ERTS_PSFLG_RUNNING_SYS))) - enqueue = check_enqueue_in_prio_queue(&enq_prio, &n, a); + enqueue = check_enqueue_in_prio_queue(p, &enq_prio, &n, a); a = erts_smp_atomic32_cmpxchg_mb(&p->state, n, e); if (a == e) break; - if (a == n && !enqueue) + if (a == n && enqueue == ERTS_ENQUEUE_NOT) goto cleanup; } @@ -5843,7 +6113,7 @@ schedule_process_sys_task(Process *p, erts_aint32_t state, Process *proxy) } - if (enqueue) { + if (enqueue != ERTS_ENQUEUE_NOT) { Process *sched_p; if (enqueue > 0) sched_p = p; @@ -5966,6 +6236,12 @@ static void scheduler_ix_resume_wake(Uint ix) { ErtsSchedulerSleepInfo *ssi = ERTS_SCHED_SLEEP_INFO_IX(ix); + scheduler_ssi_resume_wake(ssi); +} + +static void +scheduler_ssi_resume_wake(ErtsSchedulerSleepInfo *ssi) +{ erts_aint32_t xflgs = (ERTS_SSI_FLG_SLEEPING | ERTS_SSI_FLG_TSE_SLEEPING | ERTS_SSI_FLG_WAITING @@ -6056,12 +6332,20 @@ sched_set_suspended_sleeptype(ErtsSchedulerSleepInfo *ssi) } } +#ifdef ERTS_DIRTY_SCHEDULERS + static void suspend_scheduler(ErtsSchedulerData *esdp) { erts_aint32_t flgs; erts_aint32_t changing; +#ifdef ERTS_DIRTY_SCHEDULERS + long no = (long) (ERTS_SCHEDULER_IS_DIRTY(esdp) + ? ERTS_DIRTY_SCHEDULER_NO(esdp) + : esdp->no); +#else long no = (long) esdp->no; +#endif ErtsSchedulerSleepInfo *ssi = esdp->ssi; long active_schedulers; int curr_online = 1; @@ -6069,21 +6353,305 @@ suspend_scheduler(ErtsSchedulerData *esdp) erts_aint32_t aux_work; int thr_prgr_active = 1; ErtsStuckBoundProcesses sbp = {NULL, NULL}; + int* ss_onlinep; + int* ss_curr_onlinep; + int* ss_wait_curr_onlinep; + long* ss_wait_activep; + long ss_wait_active_target; + erts_smp_atomic32_t* ss_changingp; + erts_smp_atomic32_t* ss_activep; /* * Schedulers may be suspended in two different ways: * - A scheduler may be suspended since it is not online. * All schedulers with scheduler ids greater than - * schdlr_sspnd.online are suspended. + * schdlr_sspnd.online are suspended; same for dirty + * schedulers and schdlr_sspnd.dirty_cpu_online and + * schdlr_sspnd.dirty_io_online. * - Multi scheduling is blocked. All schedulers except the - * scheduler with scheduler id 1 are suspended. + * scheduler with scheduler id 1 are suspended, and all + * dirty CPU and dirty I/O schedulers are suspended. * * Regardless of why a scheduler is suspended, it ends up here. 
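Instead of duplicating the suspend/wait loop per scheduler type, the rewritten suspend_scheduler() below binds ss_onlinep, ss_curr_onlinep, ss_wait_activep and friends to one of three parallel bookkeeping sets up front and then runs one generic loop. The shape of that pointer-selection pattern, reduced to a sketch with invented struct and variable names:

struct sched_counts { int online, curr_online, wait_curr_online; };

static struct sched_counts normal_sc, dirty_cpu_sc, dirty_io_sc;

static struct sched_counts *select_counts(int is_dirty, int is_io)
{
    if (!is_dirty)
        return &normal_sc;          /* schdlr_sspnd.online etc. */
    return is_io ? &dirty_io_sc     /* schdlr_sspnd.dirty_io_* */
                 : &dirty_cpu_sc;   /* schdlr_sspnd.dirty_cpu_* */
}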
*/ + ASSERT(ERTS_SCHEDULER_IS_DIRTY(esdp) || no != 1); + #ifdef ERTS_DIRTY_SCHEDULERS - ASSERT(!ERTS_SCHEDULER_IS_DIRTY(esdp)); + if (ERTS_SCHEDULER_IS_DIRTY(esdp)) { + if (erts_smp_mtx_trylock(&schdlr_sspnd.mtx) == EBUSY) { + erts_smp_runq_unlock(esdp->run_queue); + erts_smp_mtx_lock(&schdlr_sspnd.mtx); + erts_smp_runq_lock(esdp->run_queue); + } + if (ongoing_multi_scheduling_block()) + evacuate_run_queue(esdp->run_queue, &sbp); + } else +#endif + evacuate_run_queue(esdp->run_queue, &sbp); + + erts_smp_runq_unlock(esdp->run_queue); + +#ifdef ERTS_DIRTY_SCHEDULERS + if (!ERTS_SCHEDULER_IS_DIRTY(esdp)) #endif + { + erts_sched_check_cpu_bind_prep_suspend(esdp); + + if (erts_system_profile_flags.scheduler) + profile_scheduler(make_small(esdp->no), am_inactive); + + sched_wall_time_change(esdp, 0); + + erts_smp_mtx_lock(&schdlr_sspnd.mtx); + } + + flgs = sched_prep_spin_suspended(ssi, ERTS_SSI_FLG_SUSPENDED); + if (flgs & ERTS_SSI_FLG_SUSPENDED) { + +#ifdef ERTS_DIRTY_SCHEDULERS + if (ERTS_SCHEDULER_IS_DIRTY(esdp)) { + if (ERTS_RUNQ_IS_DIRTY_CPU_RUNQ(esdp->run_queue)) { + active_schedulers = erts_smp_atomic32_dec_read_nob(&schdlr_sspnd.dirty_cpu_active); + ASSERT(active_schedulers >= 0); + changing = erts_smp_atomic32_read_nob(&schdlr_sspnd.dirty_cpu_changing); + ss_onlinep = &schdlr_sspnd.dirty_cpu_online; + ss_curr_onlinep = &schdlr_sspnd.dirty_cpu_curr_online; + ss_wait_curr_onlinep = &schdlr_sspnd.dirty_cpu_wait_curr_online; + ss_changingp = &schdlr_sspnd.dirty_cpu_changing; + ss_wait_activep = &schdlr_sspnd.msb.dirty_cpu_wait_active; + ss_activep = &schdlr_sspnd.dirty_cpu_active; + } else { + active_schedulers = erts_smp_atomic32_dec_read_nob(&schdlr_sspnd.dirty_io_active); + ASSERT(active_schedulers >= 0); + changing = erts_smp_atomic32_read_nob(&schdlr_sspnd.dirty_io_changing); + ss_onlinep = &schdlr_sspnd.dirty_io_online; + ss_curr_onlinep = &schdlr_sspnd.dirty_io_curr_online; + ss_wait_curr_onlinep = &schdlr_sspnd.dirty_io_wait_curr_online; + ss_changingp = &schdlr_sspnd.dirty_io_changing; + ss_wait_activep = &schdlr_sspnd.msb.dirty_io_wait_active; + ss_activep = &schdlr_sspnd.dirty_io_active; + } + ss_wait_active_target = 0; + } + else +#endif + { + active_schedulers = erts_smp_atomic32_dec_read_nob(&schdlr_sspnd.active); + ASSERT(active_schedulers >= 1); + changing = erts_smp_atomic32_read_nob(&schdlr_sspnd.changing); + ss_onlinep = &schdlr_sspnd.online; + ss_curr_onlinep = &schdlr_sspnd.curr_online; + ss_wait_curr_onlinep = &schdlr_sspnd.wait_curr_online; + ss_changingp = &schdlr_sspnd.changing; + ss_wait_activep = &schdlr_sspnd.msb.wait_active; + ss_activep = &schdlr_sspnd.active; + ss_wait_active_target = 1; + } + if (changing & ERTS_SCHDLR_SSPND_CHNG_MSB) { + if (active_schedulers == *ss_wait_activep) + wake = 1; + if (active_schedulers == ss_wait_active_target) { + changing = erts_smp_atomic32_read_band_nob(ss_changingp, + ~ERTS_SCHDLR_SSPND_CHNG_MSB); + changing &= ~ERTS_SCHDLR_SSPND_CHNG_MSB; + } + } + + while (1) { + if (changing & ERTS_SCHDLR_SSPND_CHNG_ONLN) { + int changed = 0; + if (no > *ss_onlinep && curr_online) { + (*ss_curr_onlinep)--; + curr_online = 0; + changed = 1; + } + else if (no <= *ss_onlinep && !curr_online) { + (*ss_curr_onlinep)++; + curr_online = 1; + changed = 1; + } + if (changed + && *ss_curr_onlinep == *ss_wait_curr_onlinep) + wake = 1; + if (*ss_onlinep == *ss_curr_onlinep) { + changing = erts_smp_atomic32_read_band_nob(ss_changingp, + ~ERTS_SCHDLR_SSPND_CHNG_ONLN); + changing &= ~ERTS_SCHDLR_SSPND_CHNG_ONLN; + } + } + + if (wake) { + 
erts_smp_cnd_signal(&schdlr_sspnd.cnd); + wake = 0; + } + + if (curr_online && !ongoing_multi_scheduling_block()) { + flgs = erts_smp_atomic32_read_acqb(&ssi->flags); + if (!(flgs & ERTS_SSI_FLG_SUSPENDED)) + break; + } + erts_smp_mtx_unlock(&schdlr_sspnd.mtx); + + while (1) { + erts_aint32_t qmask; + erts_aint32_t flgs; + + qmask = (ERTS_RUNQ_FLGS_GET(esdp->run_queue) + & ERTS_RUNQ_FLGS_QMASK); + aux_work = erts_atomic32_read_acqb(&ssi->aux_work); + if (aux_work|qmask) { + if (!ERTS_SCHEDULER_IS_DIRTY(esdp) && !thr_prgr_active) { + erts_thr_progress_active(esdp, thr_prgr_active = 1); + sched_wall_time_change(esdp, 1); + } + if (aux_work) + aux_work = handle_aux_work(&esdp->aux_work_data, + aux_work, + 1); + + if (!ERTS_SCHEDULER_IS_DIRTY(esdp) && + (aux_work && erts_thr_progress_update(esdp))) + erts_thr_progress_leader_update(esdp); + if (qmask) { +#ifdef ERTS_DIRTY_SCHEDULERS + if (ERTS_SCHEDULER_IS_DIRTY(esdp)) { + erts_smp_mtx_lock(&schdlr_sspnd.mtx); + erts_smp_runq_lock(esdp->run_queue); + if (ongoing_multi_scheduling_block()) + evacuate_run_queue(esdp->run_queue, &sbp); + erts_smp_runq_unlock(esdp->run_queue); + erts_smp_mtx_unlock(&schdlr_sspnd.mtx); + } else +#endif + { + erts_smp_runq_lock(esdp->run_queue); + evacuate_run_queue(esdp->run_queue, &sbp); + erts_smp_runq_unlock(esdp->run_queue); + } + } + } + + if (!aux_work) { +#ifdef ERTS_DIRTY_SCHEDULERS + if (!ERTS_SCHEDULER_IS_DIRTY(esdp)) +#endif + { + if (thr_prgr_active) { + erts_thr_progress_active(esdp, thr_prgr_active = 0); + sched_wall_time_change(esdp, 0); + } + erts_thr_progress_prepare_wait(esdp); + } + flgs = sched_spin_suspended(ssi, + ERTS_SCHED_SUSPEND_SLEEP_SPINCOUNT); + if (flgs == (ERTS_SSI_FLG_SLEEPING + | ERTS_SSI_FLG_WAITING + | ERTS_SSI_FLG_SUSPENDED)) { + flgs = sched_set_suspended_sleeptype(ssi); + if (flgs == (ERTS_SSI_FLG_SLEEPING + | ERTS_SSI_FLG_TSE_SLEEPING + | ERTS_SSI_FLG_WAITING + | ERTS_SSI_FLG_SUSPENDED)) { + int res; + + do { + res = erts_tse_wait(ssi->event); + } while (res == EINTR); + } + } +#ifdef ERTS_DIRTY_SCHEDULERS + if (!ERTS_SCHEDULER_IS_DIRTY(esdp)) +#endif + erts_thr_progress_finalize_wait(esdp); + } + + flgs = sched_prep_spin_suspended(ssi, (ERTS_SSI_FLG_WAITING + | ERTS_SSI_FLG_SUSPENDED)); + if (!(flgs & ERTS_SSI_FLG_SUSPENDED)) + break; + changing = erts_smp_atomic32_read_nob(ss_changingp); + if (changing & ~ERTS_SCHDLR_SSPND_CHNG_WAITER) + break; + } + + erts_smp_mtx_lock(&schdlr_sspnd.mtx); + changing = erts_smp_atomic32_read_nob(ss_changingp); + } + + active_schedulers = erts_smp_atomic32_inc_read_nob(ss_activep); + changing = erts_smp_atomic32_read_nob(ss_changingp); + if ((changing & ERTS_SCHDLR_SSPND_CHNG_MSB) + && *ss_onlinep == active_schedulers) { + erts_smp_atomic32_read_band_nob(ss_changingp, + ~ERTS_SCHDLR_SSPND_CHNG_MSB); + } + +#ifdef ERTS_DIRTY_SCHEDULERS + if (!ERTS_SCHEDULER_IS_DIRTY(esdp)) +#endif + ASSERT(no <= *ss_onlinep); + ASSERT(!ongoing_multi_scheduling_block()); + + } + + erts_smp_mtx_unlock(&schdlr_sspnd.mtx); + + ASSERT(curr_online); + +#ifdef ERTS_DIRTY_SCHEDULERS + if (!ERTS_SCHEDULER_IS_DIRTY(esdp)) +#endif + { + if (erts_system_profile_flags.scheduler) + profile_scheduler(make_small(esdp->no), am_active); + + if (!thr_prgr_active) { + erts_thr_progress_active(esdp, thr_prgr_active = 1); + sched_wall_time_change(esdp, 1); + } + } + + erts_smp_runq_lock(esdp->run_queue); + non_empty_runq(esdp->run_queue); + +#ifdef ERTS_DIRTY_SCHEDULERS + if (!ERTS_SCHEDULER_IS_DIRTY(esdp)) +#endif + { + schedule_bound_processes(esdp->run_queue, &sbp); + + 
erts_sched_check_cpu_bind_post_suspend(esdp); + } +} + +#else /* !ERTS_DIRTY_SCHEDULERS */ + +static void +suspend_scheduler(ErtsSchedulerData *esdp) +{ + erts_aint32_t flgs; + erts_aint32_t changing; + long no = (long) esdp->no; + ErtsSchedulerSleepInfo *ssi = esdp->ssi; + long active_schedulers; + int curr_online = 1; + int wake = 0; + erts_aint32_t aux_work; + int thr_prgr_active = 1; + ErtsStuckBoundProcesses sbp = {NULL, NULL}; + + /* + * Schedulers may be suspended in two different ways: + * - A scheduler may be suspended since it is not online. + * All schedulers with scheduler ids greater than + * schdlr_sspnd.online are suspended. + * - Multi scheduling is blocked. All schedulers except the + * scheduler with scheduler id 1 are suspended. + * + * Regardless of why a scheduler is suspended, it ends up here. + */ + ASSERT(no != 1); evacuate_run_queue(esdp->run_queue, &sbp); @@ -6247,6 +6815,8 @@ suspend_scheduler(ErtsSchedulerData *esdp) erts_sched_check_cpu_bind_post_suspend(esdp); } +#endif + ErtsSchedSuspendResult erts_schedulers_state(Uint *total, Uint *online, @@ -6258,40 +6828,315 @@ erts_schedulers_state(Uint *total, { int res = ERTS_SCHDLR_SSPND_EINVAL; erts_aint32_t changing; - if (total) { - ASSERT(online); - ASSERT(active); - erts_smp_mtx_lock(&schdlr_sspnd.mtx); - changing = erts_smp_atomic32_read_nob(&schdlr_sspnd.changing); - if (yield_allowed && (changing & ~ERTS_SCHDLR_SSPND_CHNG_WAITER)) - res = ERTS_SCHDLR_SSPND_YIELD_RESTART; - else { - *active = *online = schdlr_sspnd.online; - if (ongoing_multi_scheduling_block()) - *active = 1; - res = ERTS_SCHDLR_SSPND_DONE; - } - erts_smp_mtx_unlock(&schdlr_sspnd.mtx); - *total = erts_no_schedulers; + erts_smp_mtx_lock(&schdlr_sspnd.mtx); + changing = erts_smp_atomic32_read_nob(&schdlr_sspnd.changing); +#ifdef ERTS_DIRTY_SCHEDULERS + changing |= (erts_smp_atomic32_read_nob(&schdlr_sspnd.dirty_cpu_changing) + | erts_smp_atomic32_read_nob(&schdlr_sspnd.dirty_io_changing)); +#endif + if (yield_allowed && (changing & ~ERTS_SCHDLR_SSPND_CHNG_WAITER)) + res = ERTS_SCHDLR_SSPND_YIELD_RESTART; + else { + if (active) + *active = schdlr_sspnd.online; + if (online) + *online = schdlr_sspnd.online; + if (ongoing_multi_scheduling_block() && active) + *active = 1; +#ifdef ERTS_DIRTY_SCHEDULERS + if (dirty_cpu_online) + *dirty_cpu_online = schdlr_sspnd.dirty_cpu_online; +#endif + res = ERTS_SCHDLR_SSPND_DONE; } + erts_smp_mtx_unlock(&schdlr_sspnd.mtx); + if (total) + *total = erts_no_schedulers; #ifdef ERTS_DIRTY_SCHEDULERS if (dirty_cpu) *dirty_cpu = erts_no_dirty_cpu_schedulers; - if (dirty_cpu_online) - *dirty_cpu_online = erts_no_dirty_cpu_schedulers_online; if (dirty_io) *dirty_io = erts_no_dirty_io_schedulers; -#else - if (dirty_cpu) - *dirty_cpu = 0; - if (dirty_cpu_online) - *dirty_cpu_online = 0; - if (dirty_io) - *dirty_io = 0; #endif return res; } +#ifdef ERTS_DIRTY_SCHEDULERS + +ErtsSchedSuspendResult +erts_set_schedulers_online(Process *p, + ErtsProcLocks plocks, + Sint new_no, + Sint *old_no +#ifdef ERTS_DIRTY_SCHEDULERS + , int dirty_only +#endif + ) +{ + ErtsSchedulerData *esdp; + int ix, res = -1, no, have_unlocked_plocks, end_wait; + erts_aint32_t changing = 0; +#ifdef ERTS_DIRTY_SCHEDULERS + ErtsSchedulerSleepInfo* ssi; + int dirty_no, change_dirty; +#endif + + if (new_no < 1) + return ERTS_SCHDLR_SSPND_EINVAL; +#ifdef ERTS_DIRTY_SCHEDULERS + else if (dirty_only && erts_no_dirty_cpu_schedulers < new_no) + return ERTS_SCHDLR_SSPND_EINVAL; +#endif + else if (erts_no_schedulers < new_no) + return ERTS_SCHDLR_SSPND_EINVAL; + 
+ esdp = ERTS_PROC_GET_SCHDATA(p); + end_wait = 0; + + erts_smp_mtx_lock(&schdlr_sspnd.mtx); + + have_unlocked_plocks = 0; + no = (int) new_no; + +#ifdef ERTS_DIRTY_SCHEDULERS + ASSERT(schdlr_sspnd.dirty_cpu_online <= erts_no_dirty_cpu_schedulers); + if (dirty_only) { + if (no > schdlr_sspnd.online) { + erts_smp_mtx_unlock(&schdlr_sspnd.mtx); + return ERTS_SCHDLR_SSPND_EINVAL; + } + dirty_no = no; + } else { + /* + * Adjust the number of dirty CPU schedulers online relative to the + * adjustment made to the number of normal schedulers online. + */ + int total_pct = erts_no_dirty_cpu_schedulers*100/erts_no_schedulers; + int onln_pct = no*total_pct/schdlr_sspnd.online; + dirty_no = schdlr_sspnd.dirty_cpu_online*onln_pct/100; + if (dirty_no == 0) + dirty_no = 1; + ASSERT(dirty_no <= erts_no_dirty_cpu_schedulers); + } +#endif + changing = erts_smp_atomic32_read_nob(&schdlr_sspnd.changing); +#ifdef ERTS_DIRTY_SCHEDULERS + changing |= erts_smp_atomic32_read_nob(&schdlr_sspnd.dirty_cpu_changing); +#endif + if (changing) { + res = ERTS_SCHDLR_SSPND_YIELD_RESTART; + } + else { + int online = *old_no = schdlr_sspnd.online; +#ifdef ERTS_DIRTY_SCHEDULERS + int dirty_online = schdlr_sspnd.dirty_cpu_online; + + if (dirty_only) { + *old_no = schdlr_sspnd.dirty_cpu_online; + if (dirty_no == schdlr_sspnd.dirty_cpu_online) { + res = ERTS_SCHDLR_SSPND_DONE; + } + change_dirty = 1; + } else { +#endif + if (no == schdlr_sspnd.online) { +#ifdef ERTS_DIRTY_SCHEDULERS + dirty_only = 1; + if (dirty_no == schdlr_sspnd.dirty_cpu_online) +#endif + res = ERTS_SCHDLR_SSPND_DONE; +#ifdef ERTS_DIRTY_SCHEDULERS + else + change_dirty = 1; +#endif + } +#ifdef ERTS_DIRTY_SCHEDULERS + else + change_dirty = (dirty_no != schdlr_sspnd.dirty_cpu_online); + } +#endif + if (res == -1) + { + int increase = (no > online); +#ifdef ERTS_DIRTY_SCHEDULERS + if (!dirty_only) { +#endif + ERTS_SCHDLR_SSPND_CHNG_SET((ERTS_SCHDLR_SSPND_CHNG_ONLN + | ERTS_SCHDLR_SSPND_CHNG_WAITER), 0); + schdlr_sspnd.online = no; +#ifdef ERTS_DIRTY_SCHEDULERS + } else + increase = (dirty_no > dirty_online); + if (change_dirty) { + ERTS_SCHDLR_SSPND_DIRTY_CPU_CHNG_SET((ERTS_SCHDLR_SSPND_CHNG_ONLN + | ERTS_SCHDLR_SSPND_CHNG_WAITER), 0); + schdlr_sspnd.dirty_cpu_online = dirty_no; + } +#endif + if (increase) { + int ix; +#ifdef ERTS_DIRTY_SCHEDULERS + if (!dirty_only) { +#endif + schdlr_sspnd.wait_curr_online = no; + if (ongoing_multi_scheduling_block()) { + for (ix = online; ix < no; ix++) + erts_sched_poke(ERTS_SCHED_SLEEP_INFO_IX(ix)); + } + else { + if (plocks) { + have_unlocked_plocks = 1; + erts_smp_proc_unlock(p, plocks); + } + change_no_used_runqs(no); + + for (ix = online; ix < no; ix++) + resume_run_queue(ERTS_RUNQ_IX(ix)); + + for (ix = no; ix < erts_no_run_queues; ix++) + suspend_run_queue(ERTS_RUNQ_IX(ix)); + } +#ifdef ERTS_DIRTY_SCHEDULERS + } + if (change_dirty) { + schdlr_sspnd.dirty_cpu_wait_curr_online = dirty_no; + ASSERT(schdlr_sspnd.dirty_cpu_curr_online != + schdlr_sspnd.dirty_cpu_wait_curr_online); + if (ongoing_multi_scheduling_block()) { + for (ix = dirty_online; ix < dirty_no; ix++) { + ssi = ERTS_DIRTY_CPU_SCHED_SLEEP_INFO_IX(ix); + erts_sched_poke(ssi); + } + } else { + for (ix = dirty_online; ix < dirty_no; ix++) { + ssi = ERTS_DIRTY_CPU_SCHED_SLEEP_INFO_IX(ix); + scheduler_ssi_resume_wake(ssi); + erts_smp_atomic32_read_band_nob(&ssi->flags, + ~ERTS_SSI_FLG_SUSPENDED); + } + wake_dirty_schedulers(ERTS_DIRTY_CPU_RUNQ, 0); + } + } +#endif + res = ERTS_SCHDLR_SSPND_DONE; + } + else /* if (no < online) */ { +#ifdef ERTS_DIRTY_SCHEDULERS + 
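/* Editor's annotation, not in the original patch: the decrease path below
+ * is the inverse of the increase above -- surplus dirty CPU schedulers get
+ * ERTS_SSI_FLG_SUSPENDED set on their sleep info and are then woken so
+ * they park themselves in suspend_scheduler(). */
+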
if (change_dirty) { + schdlr_sspnd.dirty_cpu_wait_curr_online = dirty_no; + ASSERT(schdlr_sspnd.dirty_cpu_curr_online != + schdlr_sspnd.dirty_cpu_wait_curr_online); + if (ongoing_multi_scheduling_block()) { + for (ix = dirty_no; ix < dirty_online; ix++) { + ssi = ERTS_DIRTY_CPU_SCHED_SLEEP_INFO_IX(ix); + erts_sched_poke(ssi); + } + } else { + for (ix = dirty_no; ix < dirty_online; ix++) { + ssi = ERTS_DIRTY_CPU_SCHED_SLEEP_INFO_IX(ix); + erts_smp_atomic32_read_bor_nob(&ssi->flags, + ERTS_SSI_FLG_SUSPENDED); + } + wake_dirty_schedulers(ERTS_DIRTY_CPU_RUNQ, 0); + } + } + if (dirty_only) { + res = ERTS_SCHDLR_SSPND_DONE; + } + else +#endif + { + if (p->scheduler_data->no <= no) { + res = ERTS_SCHDLR_SSPND_DONE; + schdlr_sspnd.wait_curr_online = no; + } + else { + /* + * Yield! Current process needs to migrate + * before bif returns. + */ + res = ERTS_SCHDLR_SSPND_YIELD_DONE; + schdlr_sspnd.wait_curr_online = no+1; + } + + if (ongoing_multi_scheduling_block()) { + for (ix = no; ix < online; ix++) + erts_sched_poke(ERTS_SCHED_SLEEP_INFO_IX(ix)); + } + else { + if (plocks) { + have_unlocked_plocks = 1; + erts_smp_proc_unlock(p, plocks); + } + + change_no_used_runqs(no); + for (ix = no; ix < erts_no_run_queues; ix++) + suspend_run_queue(ERTS_RUNQ_IX(ix)); + + for (ix = no; ix < online; ix++) { + ErtsRunQueue *rq = ERTS_RUNQ_IX(ix); + wake_scheduler(rq); + } + } + } + } + +#ifdef ERTS_DIRTY_SCHEDULERS + if (change_dirty) { + while (schdlr_sspnd.dirty_cpu_curr_online != schdlr_sspnd.dirty_cpu_wait_curr_online) + erts_smp_cnd_wait(&schdlr_sspnd.cnd, &schdlr_sspnd.mtx); + ERTS_SCHDLR_SSPND_DIRTY_CPU_CHNG_SET(0, ERTS_SCHDLR_SSPND_CHNG_WAITER); + erts_smp_atomic32_read_band_nob(&schdlr_sspnd.dirty_cpu_changing, + ~ERTS_SCHDLR_SSPND_CHNG_WAITER); + } + if (!dirty_only) +#endif + { + if (schdlr_sspnd.curr_online != schdlr_sspnd.wait_curr_online) { + erts_smp_mtx_unlock(&schdlr_sspnd.mtx); + if (plocks && !have_unlocked_plocks) { + have_unlocked_plocks = 1; + erts_smp_proc_unlock(p, plocks); + } + erts_thr_progress_active(esdp, 0); + erts_thr_progress_prepare_wait(esdp); + end_wait = 1; + erts_smp_mtx_lock(&schdlr_sspnd.mtx); + } + + while (schdlr_sspnd.curr_online != schdlr_sspnd.wait_curr_online) + erts_smp_cnd_wait(&schdlr_sspnd.cnd, &schdlr_sspnd.mtx); + + ASSERT(res != ERTS_SCHDLR_SSPND_DONE + ? 
(ERTS_SCHDLR_SSPND_CHNG_WAITER + & erts_smp_atomic32_read_nob(&schdlr_sspnd.changing)) + : (ERTS_SCHDLR_SSPND_CHNG_WAITER + == erts_smp_atomic32_read_nob(&schdlr_sspnd.changing))); + erts_smp_atomic32_read_band_nob(&schdlr_sspnd.changing, + ~ERTS_SCHDLR_SSPND_CHNG_WAITER); + } + } + } + + erts_smp_mtx_unlock(&schdlr_sspnd.mtx); +#ifdef ERTS_DIRTY_SCHEDULERS + ASSERT(schdlr_sspnd.dirty_cpu_online <= schdlr_sspnd.online); + if (!dirty_only) +#endif + { + if (end_wait) { + erts_thr_progress_finalize_wait(esdp); + erts_thr_progress_active(esdp, 1); + } + if (have_unlocked_plocks) + erts_smp_proc_lock(p, plocks); + } + + return res; +} + +#else /* !ERTS_DIRTY_SCHEDULERS */ + ErtsSchedSuspendResult erts_set_schedulers_online(Process *p, ErtsProcLocks plocks, @@ -6380,10 +7225,6 @@ erts_set_schedulers_online(Process *p, ErtsRunQueue *rq = ERTS_RUNQ_IX(ix); wake_scheduler(rq); } -#ifdef ERTS_DIRTY_SCHEDULERS - wake_dirty_scheduler(ERTS_DIRTY_CPU_RUNQ); - wake_dirty_scheduler(ERTS_DIRTY_IO_RUNQ); -#endif } } @@ -6424,15 +7265,24 @@ erts_set_schedulers_online(Process *p, return res; } +#endif + ErtsSchedSuspendResult erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int all) { - int ix, res, have_unlocked_plocks = 0; + int ix, res, have_unlocked_plocks = 0, online; erts_aint32_t changing; ErtsProcList *plp; +#ifdef ERTS_DIRTY_SCHEDULERS + ErtsSchedulerSleepInfo* ssi; +#endif erts_smp_mtx_lock(&schdlr_sspnd.mtx); changing = erts_smp_atomic32_read_nob(&schdlr_sspnd.changing); +#ifdef ERTS_DIRTY_SCHEDULERS + changing |= (erts_smp_atomic32_read_nob(&schdlr_sspnd.dirty_cpu_changing) + | erts_smp_atomic32_read_nob(&schdlr_sspnd.dirty_io_changing)); +#endif if (changing) { res = ERTS_SCHDLR_SSPND_YIELD_RESTART; /* Yield */ } @@ -6442,10 +7292,13 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int all) erts_proclist_store_last(&schdlr_sspnd.msb.procs, plp); p->flags |= F_HAVE_BLCKD_MSCHED; ASSERT(erts_smp_atomic32_read_nob(&schdlr_sspnd.active) == 1); +#ifdef ERTS_DIRTY_SCHEDULERS + ASSERT(erts_smp_atomic32_read_nob(&schdlr_sspnd.dirty_cpu_active) == 0); + ASSERT(erts_smp_atomic32_read_nob(&schdlr_sspnd.dirty_io_active) == 0); +#endif ASSERT(p->scheduler_data->no == 1); res = ERTS_SCHDLR_SSPND_DONE_MSCHED_BLOCKED; - } - else { + } else { int online = schdlr_sspnd.online; p->flags |= F_HAVE_BLCKD_MSCHED; if (plocks) { @@ -6457,6 +7310,35 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int all) if (online == 1) { res = ERTS_SCHDLR_SSPND_DONE_MSCHED_BLOCKED; ASSERT(erts_smp_atomic32_read_nob(&schdlr_sspnd.active) == 1); +#ifdef ERTS_DIRTY_SCHEDULERS + ASSERT(erts_smp_atomic32_read_nob(&schdlr_sspnd.dirty_cpu_active) == 1); + ASSERT(!(erts_smp_atomic32_read_nob(&ERTS_DIRTY_CPU_SCHED_SLEEP_INFO_IX(0)->flags) + & ERTS_SSI_FLG_SUSPENDED)); + schdlr_sspnd.msb.dirty_cpu_wait_active = 0; + ERTS_SCHDLR_SSPND_DIRTY_CPU_CHNG_SET((ERTS_SCHDLR_SSPND_CHNG_MSB + | ERTS_SCHDLR_SSPND_CHNG_WAITER), 0); + ssi = ERTS_DIRTY_CPU_SCHED_SLEEP_INFO_IX(0); + erts_smp_atomic32_read_bor_nob(&ssi->flags, ERTS_SSI_FLG_SUSPENDED); + wake_dirty_schedulers(ERTS_DIRTY_CPU_RUNQ, 0); + while (erts_smp_atomic32_read_nob(&schdlr_sspnd.dirty_cpu_active) + != schdlr_sspnd.msb.dirty_cpu_wait_active) + erts_smp_cnd_wait(&schdlr_sspnd.cnd, &schdlr_sspnd.mtx); + ERTS_SCHDLR_SSPND_DIRTY_CPU_CHNG_SET(0, ERTS_SCHDLR_SSPND_CHNG_WAITER); + + schdlr_sspnd.msb.dirty_io_wait_active = 0; + ERTS_SCHDLR_SSPND_DIRTY_IO_CHNG_SET((ERTS_SCHDLR_SSPND_CHNG_MSB + | ERTS_SCHDLR_SSPND_CHNG_WAITER), 0); 
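+ /* Editor's annotation, not in the original patch: the dirty I/O handshake
+  * below mirrors the dirty CPU one just above -- raise ERTS_SSI_FLG_SUSPENDED
+  * on each dirty I/O scheduler's sleep info, wake them so they park in
+  * suspend_scheduler(), then wait on schdlr_sspnd.cnd until dirty_io_active
+  * has dropped to the wait target (zero here). */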
+ for (ix = 0; ix < erts_no_dirty_io_schedulers; ix++) { + ssi = ERTS_DIRTY_IO_SCHED_SLEEP_INFO_IX(ix); + erts_smp_atomic32_read_bor_nob(&ssi->flags, + ERTS_SSI_FLG_SUSPENDED); + } + wake_dirty_schedulers(ERTS_DIRTY_IO_RUNQ, 0); + while (erts_smp_atomic32_read_nob(&schdlr_sspnd.dirty_io_active) + != schdlr_sspnd.msb.dirty_io_wait_active) + erts_smp_cnd_wait(&schdlr_sspnd.cnd, &schdlr_sspnd.mtx); + ERTS_SCHDLR_SSPND_DIRTY_IO_CHNG_SET(0, ERTS_SCHDLR_SSPND_CHNG_WAITER); +#endif ASSERT(p->scheduler_data->no == 1); } else { @@ -6475,6 +7357,37 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int all) schdlr_sspnd.msb.wait_active = 2; } +#ifdef ERTS_DIRTY_SCHEDULERS + schdlr_sspnd.msb.dirty_cpu_wait_active = 0; + ERTS_SCHDLR_SSPND_DIRTY_CPU_CHNG_SET((ERTS_SCHDLR_SSPND_CHNG_MSB + | ERTS_SCHDLR_SSPND_CHNG_WAITER), 0); + for (ix = 0; ix < erts_no_dirty_cpu_schedulers; ix++) { + ssi = ERTS_DIRTY_CPU_SCHED_SLEEP_INFO_IX(ix); + erts_smp_atomic32_read_bor_nob(&ssi->flags, + ERTS_SSI_FLG_SUSPENDED); + } + wake_dirty_schedulers(ERTS_DIRTY_CPU_RUNQ, 0); + while (erts_smp_atomic32_read_nob(&schdlr_sspnd.dirty_cpu_active) + != schdlr_sspnd.msb.dirty_cpu_wait_active) + erts_smp_cnd_wait(&schdlr_sspnd.cnd, &schdlr_sspnd.mtx); + ERTS_SCHDLR_SSPND_DIRTY_CPU_CHNG_SET(0, ERTS_SCHDLR_SSPND_CHNG_WAITER); + ASSERT(schdlr_sspnd.dirty_cpu_curr_online == schdlr_sspnd.dirty_cpu_online); + + schdlr_sspnd.msb.dirty_io_wait_active = 0; + ERTS_SCHDLR_SSPND_DIRTY_IO_CHNG_SET((ERTS_SCHDLR_SSPND_CHNG_MSB + | ERTS_SCHDLR_SSPND_CHNG_WAITER), 0); + for (ix = 0; ix < erts_no_dirty_io_schedulers; ix++) { + ssi = ERTS_DIRTY_IO_SCHED_SLEEP_INFO_IX(ix); + erts_smp_atomic32_read_bor_nob(&ssi->flags, + ERTS_SSI_FLG_SUSPENDED); + } + wake_dirty_schedulers(ERTS_DIRTY_IO_RUNQ, 0); + while (erts_smp_atomic32_read_nob(&schdlr_sspnd.dirty_io_active) + != schdlr_sspnd.msb.dirty_io_wait_active) + erts_smp_cnd_wait(&schdlr_sspnd.cnd, &schdlr_sspnd.mtx); + ERTS_SCHDLR_SSPND_DIRTY_IO_CHNG_SET(0, ERTS_SCHDLR_SSPND_CHNG_WAITER); + ASSERT(schdlr_sspnd.dirty_io_curr_online == schdlr_sspnd.dirty_io_online); +#endif change_no_used_runqs(1); for (ix = 1; ix < erts_no_run_queues; ix++) suspend_run_queue(ERTS_RUNQ_IX(ix)); @@ -6515,6 +7428,7 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int all) erts_smp_mtx_lock(&schdlr_sspnd.mtx); } + ASSERT(res != ERTS_SCHDLR_SSPND_DONE_MSCHED_BLOCKED ? 
(ERTS_SCHDLR_SSPND_CHNG_WAITER & erts_smp_atomic32_read_nob(&schdlr_sspnd.changing)) @@ -6555,12 +7469,12 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int all) p->flags &= ~F_HAVE_BLCKD_MSCHED; schdlr_sspnd.msb.ongoing = 0; if (schdlr_sspnd.online == 1) { - /* No schedulers to resume */ + /* No normal schedulers to resume */ ASSERT(erts_smp_atomic32_read_nob(&schdlr_sspnd.active) == 1); ERTS_SCHDLR_SSPND_CHNG_SET(0, ERTS_SCHDLR_SSPND_CHNG_MSB); } else { - int online = schdlr_sspnd.online; + online = schdlr_sspnd.online; if (plocks) { have_unlocked_plocks = 1; erts_smp_proc_unlock(p, plocks); @@ -6575,6 +7489,27 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int all) for (ix = online; ix < erts_no_run_queues; ix++) suspend_run_queue(ERTS_RUNQ_IX(ix)); } +#ifdef ERTS_DIRTY_SCHEDULERS + ERTS_SCHDLR_SSPND_DIRTY_CPU_CHNG_SET(ERTS_SCHDLR_SSPND_CHNG_MSB, 0); + schdlr_sspnd.msb.dirty_cpu_wait_active = schdlr_sspnd.dirty_cpu_online; + for (ix = 0; ix < schdlr_sspnd.dirty_cpu_online; ix++) { + ssi = ERTS_DIRTY_CPU_SCHED_SLEEP_INFO_IX(ix); + scheduler_ssi_resume_wake(ssi); + erts_smp_atomic32_read_band_nob(&ssi->flags, + ~ERTS_SSI_FLG_SUSPENDED); + } + wake_dirty_schedulers(ERTS_DIRTY_CPU_RUNQ, 0); + + ERTS_SCHDLR_SSPND_DIRTY_IO_CHNG_SET(ERTS_SCHDLR_SSPND_CHNG_MSB, 0); + schdlr_sspnd.msb.dirty_io_wait_active = erts_no_dirty_io_schedulers; + for (ix = 0; ix < erts_no_dirty_io_schedulers; ix++) { + ssi = ERTS_DIRTY_IO_SCHED_SLEEP_INFO_IX(ix); + scheduler_ssi_resume_wake(ssi); + erts_smp_atomic32_read_band_nob(&ssi->flags, + ~ERTS_SSI_FLG_SUSPENDED); + } + wake_dirty_schedulers(ERTS_DIRTY_IO_RUNQ, 0); +#endif res = ERTS_SCHDLR_SSPND_DONE; } } @@ -6701,7 +7636,11 @@ sched_thread_func(void *vesdp) erts_smp_atomic32_read_band_nob(&schdlr_sspnd.changing, ~ERTS_SCHDLR_SSPND_CHNG_ONLN); if (no != 1) +#ifdef ERTS_DIRTY_SCHEDULERS + erts_smp_cnd_broadcast(&schdlr_sspnd.cnd); +#else erts_smp_cnd_signal(&schdlr_sspnd.cnd); +#endif } if (no == 1) { @@ -6738,7 +7677,9 @@ sched_dirty_cpu_thread_func(void *vesdp) { ErtsThrPrgrCallbacks callbacks; ErtsSchedulerData *esdp = vesdp; - Uint no = esdp->dirty_no; + Uint no = ERTS_DIRTY_SCHEDULER_NO(esdp); + ERTS_DIRTY_SCHEDULER_TYPE(esdp) = ERTS_DIRTY_CPU_SCHEDULER; + ASSERT(no != 0); ERTS_DIRTY_CPU_SCHED_SLEEP_INFO_IX(no-1)->event = erts_tse_fetch(); callbacks.arg = (void *) esdp->ssi; callbacks.wakeup = thr_prgr_wakeup; @@ -6766,6 +7707,24 @@ sched_dirty_cpu_thread_func(void *vesdp) #endif erts_thread_init_float(); + erts_smp_mtx_lock(&schdlr_sspnd.mtx); + ASSERT(erts_smp_atomic32_read_nob(&schdlr_sspnd.dirty_cpu_changing) + & ERTS_SCHDLR_SSPND_CHNG_ONLN); + + if (--schdlr_sspnd.dirty_cpu_curr_online == schdlr_sspnd.dirty_cpu_wait_curr_online) { + erts_smp_atomic32_read_band_nob(&schdlr_sspnd.dirty_cpu_changing, + ~ERTS_SCHDLR_SSPND_CHNG_ONLN); + if (no != 1) + erts_smp_cnd_broadcast(&schdlr_sspnd.cnd); + } + + if (no == 1) { + while (schdlr_sspnd.dirty_cpu_curr_online != schdlr_sspnd.dirty_cpu_wait_curr_online) + erts_smp_cnd_wait(&schdlr_sspnd.cnd, &schdlr_sspnd.mtx); + ERTS_SCHDLR_SSPND_DIRTY_CPU_CHNG_SET(0, ERTS_SCHDLR_SSPND_CHNG_WAITER); + } + erts_smp_mtx_unlock(&schdlr_sspnd.mtx); + process_main(); /* No schedulers should *ever* terminate */ erl_exit(ERTS_ABORT_EXIT, @@ -6779,7 +7738,9 @@ sched_dirty_io_thread_func(void *vesdp) { ErtsThrPrgrCallbacks callbacks; ErtsSchedulerData *esdp = vesdp; - Uint no = esdp->dirty_no; + Uint no = ERTS_DIRTY_SCHEDULER_NO(esdp); + ERTS_DIRTY_SCHEDULER_TYPE(esdp) = 
ERTS_DIRTY_IO_SCHEDULER; + ASSERT(no != 0); ERTS_DIRTY_IO_SCHED_SLEEP_INFO_IX(no-1)->event = erts_tse_fetch(); callbacks.arg = (void *) esdp->ssi; callbacks.wakeup = thr_prgr_wakeup; @@ -6807,6 +7768,24 @@ sched_dirty_io_thread_func(void *vesdp) #endif erts_thread_init_float(); + erts_smp_mtx_lock(&schdlr_sspnd.mtx); + ASSERT(erts_smp_atomic32_read_nob(&schdlr_sspnd.dirty_io_changing) + & ERTS_SCHDLR_SSPND_CHNG_ONLN); + + if (--schdlr_sspnd.dirty_io_curr_online == schdlr_sspnd.dirty_io_wait_curr_online) { + erts_smp_atomic32_read_band_nob(&schdlr_sspnd.dirty_io_changing, + ~ERTS_SCHDLR_SSPND_CHNG_ONLN); + if (no != 1) + erts_smp_cnd_broadcast(&schdlr_sspnd.cnd); + } + + if (no == 1) { + while (schdlr_sspnd.dirty_io_curr_online != schdlr_sspnd.dirty_io_wait_curr_online) + erts_smp_cnd_wait(&schdlr_sspnd.cnd, &schdlr_sspnd.mtx); + ERTS_SCHDLR_SSPND_DIRTY_IO_CHNG_SET(0, ERTS_SCHDLR_SSPND_CHNG_WAITER); + } + erts_smp_mtx_unlock(&schdlr_sspnd.mtx); + process_main(); /* No schedulers should *ever* terminate */ erl_exit(ERTS_ABORT_EXIT, @@ -8058,7 +9037,6 @@ Process *schedule(Process *p, int calls) || flags & ERTS_RUNQ_FLG_NONEMPTY); if (flags & (ERTS_RUNQ_FLG_CHK_CPU_BIND|ERTS_RUNQ_FLG_SUSPENDED)) { - if (flags & ERTS_RUNQ_FLG_SUSPENDED) { suspend_scheduler(esdp); flags = ERTS_RUNQ_FLGS_GET_NOB(rq); @@ -8069,10 +9047,17 @@ Process *schedule(Process *p, int calls) erts_sched_check_cpu_bind(esdp); } } +#ifdef ERTS_DIRTY_SCHEDULERS + else if (ERTS_SCHEDULER_IS_DIRTY(esdp) + && (erts_smp_atomic32_read_acqb(&esdp->ssi->flags) + & ERTS_SSI_FLG_SUSPENDED)) + suspend_scheduler(esdp); +#endif - if (!ERTS_SCHEDULER_IS_DIRTY(esdp)) { + { erts_aint32_t aux_work; - int leader_update = erts_thr_progress_update(esdp); + int leader_update = ERTS_SCHEDULER_IS_DIRTY(esdp) ? 
0 + : erts_thr_progress_update(esdp); aux_work = erts_atomic32_read_acqb(&esdp->ssi->aux_work); if (aux_work | leader_update | ERTS_SCHED_FAIR) { erts_smp_runq_unlock(rq); @@ -8085,7 +9070,8 @@ Process *schedule(Process *p, int calls) erts_smp_runq_lock(rq); } - ERTS_SMP_LC_ASSERT(!erts_thr_progress_is_blocking()); + ERTS_SMP_LC_ASSERT(ERTS_SCHEDULER_IS_DIRTY(esdp) + || !erts_thr_progress_is_blocking()); } ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq)); @@ -8100,6 +9086,17 @@ Process *schedule(Process *p, int calls) flags = ERTS_RUNQ_FLGS_GET_NOB(rq); +#ifdef ERTS_DIRTY_SCHEDULERS + if (ERTS_RUNQ_IX_IS_DIRTY(rq->ix) && rq->halt_in_progress) { + /* + * TODO: if halt in progress, need to put the dirty scheduler + * to sleep somewhere around here to prevent it from picking up + * new work + */ + } + else +#endif + if ((!(flags & ERTS_RUNQ_FLGS_QMASK) && !rq->misc.start) || (rq->halt_in_progress && ERTS_EMPTY_RUNQ_PORTS(rq))) { /* Prepare for scheduler wait */ @@ -8248,11 +9245,15 @@ Process *schedule(Process *p, int calls) + ERTS_PSFLGS_IN_PRQ_MASK_OFFSET)); #ifdef ERTS_DIRTY_SCHEDULERS - /* if a non-dirty scheduler picks up a process marked as already being - in a dirty run queue, just drop it and go get another process */ - if (state & (ERTS_PSFLG_DIRTY_CPU_PROC_IN_Q|ERTS_PSFLG_DIRTY_IO_PROC_IN_Q) && - !ERTS_SCHEDULER_IS_DIRTY(esdp)) - goto pick_next_process; + ASSERT((state & (ERTS_PSFLG_DIRTY_CPU_PROC|ERTS_PSFLG_DIRTY_IO_PROC)) != + (ERTS_PSFLG_DIRTY_CPU_PROC|ERTS_PSFLG_DIRTY_IO_PROC)); + if (state & (ERTS_PSFLG_DIRTY_CPU_PROC|ERTS_PSFLG_DIRTY_IO_PROC)) { + ASSERT((ERTS_SCHEDULER_IS_DIRTY_CPU(esdp) && (state & ERTS_PSFLG_DIRTY_CPU_PROC)) || + (ERTS_SCHEDULER_IS_DIRTY_IO(esdp) && (state & ERTS_PSFLG_DIRTY_IO_PROC))); + if (!ERTS_SCHEDULER_IS_DIRTY(esdp) && !(state & ERTS_PSFLG_ACTIVE_SYS)) + goto pick_next_process; + state &= ~(ERTS_PSFLG_DIRTY_CPU_PROC_IN_Q|ERTS_PSFLG_DIRTY_IO_PROC_IN_Q); + } #endif if (!(state & ERTS_PSFLG_PROXY)) @@ -8388,7 +9389,11 @@ Process *schedule(Process *p, int calls) if (state & ERTS_PSFLG_RUNNING_SYS) { reds -= execute_sys_tasks(p, &state, reds); - if (reds <= 0) { + if (reds <= 0 +#ifdef ERTS_DIRTY_SCHEDULERS + || (state & (ERTS_PSFLG_DIRTY_CPU_PROC|ERTS_PSFLG_DIRTY_IO_PROC)) +#endif + ) { p->fcalls = reds; goto sched_out_proc; } @@ -10153,7 +11158,7 @@ save_pending_exiter(Process *p) erts_smp_runq_unlock(rq); #ifdef ERTS_DIRTY_SCHEDULERS if (ERTS_RUNQ_IX_IS_DIRTY(rq->ix)) - wake_dirty_scheduler(rq); + wake_dirty_schedulers(rq, 0); else #endif wake_scheduler(rq); @@ -11214,6 +12219,10 @@ void erl_halt(int code) if (-1 == erts_smp_atomic32_cmpxchg_acqb(&erts_halt_progress, erts_no_schedulers, -1)) { +#ifdef ERTS_DIRTY_SCHEDULERS + ERTS_DIRTY_CPU_RUNQ->halt_in_progress = 1; + ERTS_DIRTY_IO_RUNQ->halt_in_progress = 1; +#endif erts_halt_code = code; notify_reap_ports_relb(); } diff --git a/erts/emulator/beam/erl_process.h b/erts/emulator/beam/erl_process.h index a86ac65aa2..ed6dadbffa 100644 --- a/erts/emulator/beam/erl_process.h +++ b/erts/emulator/beam/erl_process.h @@ -109,7 +109,6 @@ extern int erts_sched_balance_util; extern Uint erts_no_schedulers; #ifdef ERTS_DIRTY_SCHEDULERS extern Uint erts_no_dirty_cpu_schedulers; -extern Uint erts_no_dirty_cpu_schedulers_online; extern Uint erts_no_dirty_io_schedulers; #endif extern Uint erts_no_run_queues; @@ -544,6 +543,21 @@ typedef struct { #endif } ErtsAuxWorkData; +#ifdef ERTS_DIRTY_SCHEDULERS +typedef enum { + ERTS_DIRTY_CPU_SCHEDULER, + ERTS_DIRTY_IO_SCHEDULER +} ErtsDirtySchedulerType; + +typedef union { + 
struct { + ErtsDirtySchedulerType type: 1; + unsigned num: 31; + } s; + Uint no; +} ErtsDirtySchedId; +#endif + struct ErtsSchedulerData_ { /* * Keep X registers first (so we get as many low @@ -570,7 +584,7 @@ struct ErtsSchedulerData_ { Process *current_process; Uint no; /* Scheduler number for normal schedulers */ #ifdef ERTS_DIRTY_SCHEDULERS - Uint dirty_no; /* Scheduler number for dirty schedulers */ + ErtsDirtySchedId dirty_no; /* Scheduler number for dirty schedulers */ #endif Port *current_port; ErtsRunQueue *run_queue; @@ -1299,6 +1313,8 @@ extern struct erts_system_profile_flags_t erts_system_profile_flags; &erts_aligned_run_queues[(IX)].runq) #define ERTS_DIRTY_CPU_RUNQ (&erts_aligned_run_queues[-1].runq) #define ERTS_DIRTY_IO_RUNQ (&erts_aligned_run_queues[-2].runq) +#define ERTS_RUNQ_IS_DIRTY_CPU_RUNQ(RQ) ((RQ)->ix == -1) +#define ERTS_RUNQ_IS_DIRTY_IO_RUNQ(RQ) ((RQ)->ix == -2) #else #define ERTS_RUNQ_IX_IS_DIRTY(IX) 0 #endif @@ -1312,21 +1328,37 @@ extern struct erts_system_profile_flags_t erts_system_profile_flags; #define ERTS_DIRTY_IO_SCHEDULER_IX(IX) \ (ASSERT(0 <= (IX) && (IX) < erts_no_dirty_io_schedulers), \ &erts_aligned_dirty_io_scheduler_data[(IX)].esd) +#define ERTS_DIRTY_SCHEDULER_NO(ESDP) \ + ((ESDP)->dirty_no.s.num) +#define ERTS_DIRTY_SCHEDULER_TYPE(ESDP) \ + ((ESDP)->dirty_no.s.type) #ifdef ERTS_SMP #define ERTS_SCHEDULER_IS_DIRTY(ESDP) \ - ((ESDP)->dirty_no != 0) + ((ESDP)->dirty_no.s.num != 0) +#define ERTS_SCHEDULER_IS_DIRTY_CPU(ESDP) \ + ((ESDP)->dirty_no.s.type == 0) +#define ERTS_SCHEDULER_IS_DIRTY_IO(ESDP) \ + ((ESDP)->dirty_no.s.type == 1) #else #define ERTS_SCHEDULER_IS_DIRTY(ESDP) 0 +#define ERTS_SCHEDULER_IS_DIRTY_CPU(ESDP) 0 +#define ERTS_SCHEDULER_IS_DIRTY_IO(ESDP) 0 #endif #else #define ERTS_RUNQ_IX_IS_DIRTY(IX) 0 #define ERTS_SCHEDULER_IS_DIRTY(ESDP) 0 +#define ERTS_SCHEDULER_IS_DIRTY_CPU(ESDP) 0 +#define ERTS_SCHEDULER_IS_DIRTY_IO(ESDP) 0 #endif void erts_pre_init_process(void); void erts_late_init_process(void); void erts_early_init_scheduling(int); -void erts_init_scheduling(int, int); +void erts_init_scheduling(int, int +#ifdef ERTS_DIRTY_SCHEDULERS + , int, int, int +#endif + ); int erts_set_gc_state(Process *c_p, int enable); Eterm erts_sched_wall_time_request(Process *c_p, int set, int enable); @@ -1533,7 +1565,11 @@ ErtsSchedSuspendResult erts_set_schedulers_online(Process *p, ErtsProcLocks plocks, Sint new_no, - Sint *old_no); + Sint *old_no +#ifdef ERTS_DIRTY_SCHEDULERS + , int dirty_only +#endif + ); ErtsSchedSuspendResult erts_block_multi_scheduling(Process *, ErtsProcLocks, int, int); int erts_is_multi_scheduling_blocked(void); diff --git a/erts/emulator/beam/erl_utils.h b/erts/emulator/beam/erl_utils.h index 4e61429cd1..0807649ea1 100644 --- a/erts/emulator/beam/erl_utils.h +++ b/erts/emulator/beam/erl_utils.h @@ -202,17 +202,18 @@ int eq(Eterm, Eterm); #define EQ(x,y) (((x) == (y)) || (is_not_both_immed((x),(y)) && eq((x),(y)))) #if HALFWORD_HEAP -Sint cmp_rel_opt(Eterm, Eterm*, Eterm, Eterm*, int); -#define cmp_rel(A,A_BASE,B,B_BASE) cmp_rel_opt(A,A_BASE,B,B_BASE,0) -#define cmp_rel_term(A,A_BASE,B,B_BASE) cmp_rel_opt(A,A_BASE,B,B_BASE,1) -#define CMP(A,B) cmp_rel_opt(A,NULL,B,NULL,0) -#define CMP_TERM(A,B) cmp_rel_opt(A,NULL,B,NULL,1) +Sint erts_cmp_rel_opt(Eterm, Eterm*, Eterm, Eterm*, int); +#define cmp_rel(A,A_BASE,B,B_BASE) erts_cmp_rel_opt(A,A_BASE,B,B_BASE,0) +#define cmp_rel_term(A,A_BASE,B,B_BASE) erts_cmp_rel_opt(A,A_BASE,B,B_BASE,1) +#define CMP(A,B) erts_cmp_rel_opt(A,NULL,B,NULL,0) +#define CMP_TERM(A,B) 
erts_cmp_rel_opt(A,NULL,B,NULL,1) #else -Sint cmp(Eterm, Eterm, int); -#define cmp_rel(A,A_BASE,B,B_BASE) cmp(A,B,0) -#define cmp_rel_term(A,A_BASE,B,B_BASE) cmp(A,B,1) -#define CMP(A,B) cmp(A,B,0) -#define CMP_TERM(A,B) cmp(A,B,1) +Sint cmp(Eterm, Eterm); +Sint erts_cmp(Eterm, Eterm, int); +#define cmp_rel(A,A_BASE,B,B_BASE) erts_cmp(A,B,0) +#define cmp_rel_term(A,A_BASE,B,B_BASE) erts_cmp(A,B,1) +#define CMP(A,B) erts_cmp(A,B,0) +#define CMP_TERM(A,B) erts_cmp(A,B,1) #endif #define cmp_lt(a,b) (CMP((a),(b)) < 0) diff --git a/erts/emulator/beam/external.c b/erts/emulator/beam/external.c index a4cc3435c3..9fb2dbd8bf 100644 --- a/erts/emulator/beam/external.c +++ b/erts/emulator/beam/external.c @@ -1169,6 +1169,7 @@ typedef struct { Eterm* hp_end; int remaining_n; char* remaining_bytes; + Eterm* maps_head; } B2TDecodeContext; typedef struct { @@ -1486,6 +1487,7 @@ static Eterm binary_to_term_int(Process* p, Uint32 flags, Eterm bin, Binary* con ctx->u.dc.hp_start = HAlloc(p, ctx->heap_size); ctx->u.dc.hp = ctx->u.dc.hp_start; ctx->u.dc.hp_end = ctx->u.dc.hp_start + ctx->heap_size; + ctx->u.dc.maps_head = NULL; ctx->state = B2TDecode; /*fall through*/ case B2TDecode: @@ -2878,7 +2880,7 @@ dec_term(ErtsDistExternal *edep, Eterm** hpp, byte* ep, ErlOffHeap* off_heap, int n; ErtsAtomEncoding char_enc; register Eterm* hp; /* Please don't take the address of hp */ - Eterm *maps_head = NULL; /* for validation of maps */ + Eterm *maps_head; /* for validation of maps */ Eterm* next; SWord reds; @@ -2888,6 +2890,7 @@ dec_term(ErtsDistExternal *edep, Eterm** hpp, byte* ep, ErlOffHeap* off_heap, next = ctx->u.dc.next; ep = ctx->u.dc.ep; hpp = &ctx->u.dc.hp; + maps_head = ctx->u.dc.maps_head; if (ctx->state != B2TDecode) { int n_limit = reds; @@ -2968,6 +2971,7 @@ dec_term(ErtsDistExternal *edep, Eterm** hpp, byte* ep, ErlOffHeap* off_heap, reds = ERTS_SWORD_MAX; next = objp; *next = (Eterm) (UWord) NULL; + maps_head = NULL; } hp = *hpp; @@ -3780,6 +3784,7 @@ dec_term_atom_common: ctx->u.dc.ep = ep; ctx->u.dc.next = next; ctx->u.dc.hp = hp; + ctx->u.dc.maps_head = maps_head; ctx->reds = 0; return NULL; } diff --git a/erts/emulator/beam/ops.tab b/erts/emulator/beam/ops.tab index f35997efee..73630fda8e 100644 --- a/erts/emulator/beam/ops.tab +++ b/erts/emulator/beam/ops.tab @@ -1469,21 +1469,20 @@ apply_last I P # Map instructions in R17. 
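# Editor's annotation, not part of the patch: update_map_assoc and
# update_map_exact now take an 's' source operand, so the transforms below
# first move any Src that the 's' pattern does not match into a scratch
# x register.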
# -# put_map Fail Src Dst Live Size Rest=* => jump Fail -# is_map Fail Src => jump Fail -# has_map_field Fail Src Key => jump Fail -# get_map_element Fail Src Key Dst => jump Fail - put_map_assoc F n Dst Live Size Rest=* => new_map F Dst Live Size Rest -put_map_exact F n Dst Live Size Rest=* => new_map F Dst Live Size Rest -put_map_assoc F Src Dst Live Size Rest=* => \ +put_map_assoc F Src=s Dst Live Size Rest=* => \ update_map_assoc F Src Dst Live Size Rest -put_map_exact F Src Dst Live Size Rest=* => \ +put_map_assoc F Src Dst Live Size Rest=* => \ + move Src x | update_map_assoc F x Dst Live Size Rest +put_map_exact F n Dst Live Size Rest=* => new_map F Dst Live Size Rest +put_map_exact F Src=s Dst Live Size Rest=* => \ update_map_exact F Src Dst Live Size Rest +put_map_exact F Src Dst Live Size Rest=* => \ + move Src x | update_map_exact F x Dst Live Size Rest new_map j d I I -update_map_assoc j d d I I -update_map_exact j d d I I +update_map_assoc j s d I I +update_map_exact j s d I I is_map Fail cq => jump Fail @@ -1492,6 +1491,15 @@ is_map f r is_map f x is_map f y +## Transform has_map_field(s) #{ K1 := _, K2 := _ } + +has_map_field/3 + +has_map_fields Fail Src Size=u==1 Rest=* => gen_has_map_field(Fail,Src,Size,Rest) +has_map_fields Fail Src Size Rest=* => i_has_map_fields Fail Src Size Rest + +i_has_map_fields f s I + has_map_field Fail Src=rxy Key=arxy => i_has_map_field Fail Src Key has_map_field Fail Src Key => move Key x | i_has_map_field Fail Src x @@ -1509,6 +1517,15 @@ i_has_map_field f r y i_has_map_field f x y i_has_map_field f y y +## Transform get_map_elements(s) #{ K1 := V1, K2 := V2 } + +get_map_element/4 + +get_map_elements Fail Src=rxy Size=u==2 Rest=* => gen_get_map_element(Fail,Src,Size,Rest) +get_map_elements Fail Src Size Rest=* => i_get_map_elements Fail Src Size Rest + +i_get_map_elements f s I + get_map_element Fail Src=rxy Key=ax Dst => i_get_map_element Fail Src Key Dst get_map_element Fail Src=rxy Key=rycq Dst => \ move Key x | i_get_map_element Fail Src x Dst diff --git a/erts/emulator/beam/utils.c b/erts/emulator/beam/utils.c index d92909c673..5b43d25e3c 100644 --- a/erts/emulator/beam/utils.c +++ b/erts/emulator/beam/utils.c @@ -2425,14 +2425,24 @@ static int cmp_atoms(Eterm a, Eterm b) bb->name+3, bb->len-3); } -/* cmp(Eterm a, Eterm b, int exact) +#if !HALFWORD_HEAP +/* cmp(Eterm a, Eterm b) + * For compatibility with HiPE - arith-based compare. + */ +Sint cmp(Eterm a, Eterm b) +{ + return erts_cmp(a, b, 0); +} +#endif + +/* erts_cmp(Eterm a, Eterm b, int exact) * exact = 1 -> term-based compare * exact = 0 -> arith-based compare */ #if HALFWORD_HEAP -Sint cmp_rel_opt(Eterm a, Eterm* a_base, Eterm b, Eterm* b_base, int exact) +Sint erts_cmp_rel_opt(Eterm a, Eterm* a_base, Eterm b, Eterm* b_base, int exact) #else -Sint cmp(Eterm a, Eterm b, int exact) +Sint erts_cmp(Eterm a, Eterm b, int exact) #endif { DECLARE_WSTACK(stack); diff --git a/erts/emulator/drivers/common/gzio.c b/erts/emulator/drivers/common/gzio.c index 9a9e297fca..ef539f8f9b 100644 --- a/erts/emulator/drivers/common/gzio.c +++ b/erts/emulator/drivers/common/gzio.c @@ -74,15 +74,15 @@ typedef struct gz_stream { int transparent; /* 1 if input file is not a .gz file */ char mode; /* 'w' or 'r' */ int position; /* Position (for seek) */ - int (*destroy)OF((struct gz_stream*)); /* Function to destroy + int (*destroy)(struct gz_stream*); /* Function to destroy * this structure. 
*/ } gz_stream; -local ErtsGzFile gz_open OF((const char *path, const char *mode)); -local int get_byte OF((gz_stream *s)); -local void check_header OF((gz_stream *s)); -local int destroy OF((gz_stream *s)); -local uLong getLong OF((gz_stream *s)); +local ErtsGzFile gz_open (const char *path, const char *mode); +local int get_byte (gz_stream *s); +local void check_header (gz_stream *s); +local int destroy (gz_stream *s); +local uLong getLong (gz_stream *s); #ifdef UNIX /* diff --git a/erts/emulator/sys/unix/sys.c b/erts/emulator/sys/unix/sys.c index a5294ad84e..865cb50a56 100644 --- a/erts/emulator/sys/unix/sys.c +++ b/erts/emulator/sys/unix/sys.c @@ -1,7 +1,7 @@ /* * %CopyrightBegin% * - * Copyright Ericsson AB 1996-2013. All Rights Reserved. + * Copyright Ericsson AB 1996-2014. All Rights Reserved. * * The contents of this file are subject to the Erlang Public License, * Version 1.1, (the "License"); you may not use this file except in @@ -2561,7 +2561,7 @@ void *erts_sys_aligned_alloc(UWord alignment, UWord size) #ifdef HAVE_POSIX_MEMALIGN void *ptr = NULL; int error; - ASSERT(alignment && (alignment & ~alignment) == 0); /* power of 2 */ + ASSERT(alignment && (alignment & (alignment-1)) == 0); /* power of 2 */ error = posix_memalign(&ptr, (size_t) alignment, (size_t) size); #if HAVE_ERTS_MSEG if (error || !ptr) { @@ -2584,7 +2584,7 @@ void *erts_sys_aligned_alloc(UWord alignment, UWord size) void erts_sys_aligned_free(UWord alignment, void *ptr) { - ASSERT(alignment && (alignment & ~alignment) == 0); /* power of 2 */ + ASSERT(alignment && (alignment & (alignment-1)) == 0); /* power of 2 */ free(ptr); } diff --git a/erts/emulator/sys/win32/sys.c b/erts/emulator/sys/win32/sys.c index 296da90c6c..0ded6b274e 100755 --- a/erts/emulator/sys/win32/sys.c +++ b/erts/emulator/sys/win32/sys.c @@ -1,7 +1,7 @@ /* * %CopyrightBegin% * - * Copyright Ericsson AB 1996-2013. All Rights Reserved. + * Copyright Ericsson AB 1996-2014. All Rights Reserved. * * The contents of this file are subject to the Erlang Public License, * Version 1.1, (the "License"); you may not use this file except in @@ -2757,7 +2757,7 @@ void erts_sys_free(ErtsAlcType_t t, void *x, void *p) void *erts_sys_aligned_alloc(UWord alignment, UWord size) { void *ptr; - ASSERT(alignment && (alignment & ~alignment) == 0); /* power of 2 */ + ASSERT(alignment && (alignment & (alignment-1)) == 0); /* power of 2 */ ptr = _aligned_malloc((size_t) size, (size_t) alignment); ASSERT(!ptr || (((UWord) ptr) & (alignment - 1)) == 0); return ptr; @@ -2765,14 +2765,14 @@ void *erts_sys_aligned_alloc(UWord alignment, UWord size) void erts_sys_aligned_free(UWord alignment, void *ptr) { - ASSERT(alignment && (alignment & ~alignment) == 0); /* power of 2 */ + ASSERT(alignment && (alignment & (alignment-1)) == 0); /* power of 2 */ _aligned_free(ptr); } void *erts_sys_aligned_realloc(UWord alignment, void *ptr, UWord size, UWord old_size) { void *new_ptr; - ASSERT(alignment && (alignment & ~alignment) == 0); /* power of 2 */ + ASSERT(alignment && (alignment & (alignment-1)) == 0); /* power of 2 */ new_ptr = _aligned_realloc(ptr, (size_t) size, (size_t) alignment); ASSERT(!new_ptr || (((UWord) new_ptr) & (alignment - 1)) == 0); return new_ptr; diff --git a/erts/emulator/test/driver_SUITE.erl b/erts/emulator/test/driver_SUITE.erl index 06211406b4..c62bc0c454 100644 --- a/erts/emulator/test/driver_SUITE.erl +++ b/erts/emulator/test/driver_SUITE.erl @@ -1,7 +1,7 @@ %% %% %CopyrightBegin% %% -%% Copyright Ericsson AB 1997-2013. All Rights Reserved. 
+%% Copyright Ericsson AB 1997-2014. All Rights Reserved. %% %% The contents of this file are subject to the Erlang Public License, %% Version 1.1, (the "License"); you may not use this file except in @@ -1945,6 +1945,14 @@ otp_9302(Config) when is_list(Config) -> end. thr_free_drv(Config) when is_list(Config) -> + case erlang:system_info(threads) of + false -> + {skipped, "No thread support"}; + true -> + thr_free_drv_do(Config) + end. + +thr_free_drv_do(Config) -> ?line Path = ?config(data_dir, Config), ?line erl_ddll:start(), ?line ok = load_driver(Path, thr_free_drv), diff --git a/erts/emulator/test/driver_SUITE_data/otp_9302_drv.c b/erts/emulator/test/driver_SUITE_data/otp_9302_drv.c index 7c144d20cf..ad29d17f06 100644 --- a/erts/emulator/test/driver_SUITE_data/otp_9302_drv.c +++ b/erts/emulator/test/driver_SUITE_data/otp_9302_drv.c @@ -1,7 +1,7 @@ /* * %CopyrightBegin% * - * Copyright Ericsson AB 2011-2013. All Rights Reserved. + * Copyright Ericsson AB 2011-2014. All Rights Reserved. * * The contents of this file are subject to the Erlang Public License, * Version 1.1, (the "License"); you may not use this file except in @@ -94,7 +94,7 @@ DRIVER_INIT(otp_9302_drv) static void stop(ErlDrvData drv_data) { Otp9302Data *data = (Otp9302Data *) drv_data; - if (!data->smp) + if (data->msgq.mtx) erl_drv_mutex_destroy(data->msgq.mtx); driver_free(data); } @@ -114,13 +114,16 @@ static ErlDrvData start(ErlDrvPort port, driver_system_info(&sys_info, sizeof(ErlDrvSysInfo)); data->smp = sys_info.smp_support; + data->msgq.mtx = NULL; if (!data->smp) { data->msgq.start = NULL; data->msgq.end = NULL; - data->msgq.mtx = erl_drv_mutex_create(""); - if (!data->msgq.mtx) { - driver_free(data); - return ERL_DRV_ERROR_GENERAL; + if (sys_info.thread_support) { + data->msgq.mtx = erl_drv_mutex_create(""); + if (!data->msgq.mtx) { + driver_free(data); + return ERL_DRV_ERROR_GENERAL; + } } } @@ -143,19 +146,22 @@ static void enqueue_reply(Otp9302AsyncData *adata) Otp9302MsgQ *msgq = adata->msgq; adata->next = NULL; adata->refc++; - erl_drv_mutex_lock(msgq->mtx); + if (msgq->mtx) + erl_drv_mutex_lock(msgq->mtx); if (msgq->end) msgq->end->next = adata; else msgq->end = msgq->start = adata; msgq->end = adata; - erl_drv_mutex_unlock(msgq->mtx); + if (msgq->mtx) + erl_drv_mutex_unlock(msgq->mtx); } static void dequeue_replies(Otp9302AsyncData *adata) { Otp9302MsgQ *msgq = adata->msgq; - erl_drv_mutex_lock(msgq->mtx); + if (msgq->mtx) + erl_drv_mutex_lock(msgq->mtx); if (--adata->refc == 0) driver_free(adata); while (msgq->start) { @@ -166,7 +172,8 @@ static void dequeue_replies(Otp9302AsyncData *adata) driver_free(adata); } msgq->start = msgq->end = NULL; - erl_drv_mutex_unlock(msgq->mtx); + if (msgq->mtx) + erl_drv_mutex_unlock(msgq->mtx); } static void async_invoke(void *data) diff --git a/erts/emulator/test/map_SUITE.erl b/erts/emulator/test/map_SUITE.erl index 31c1486f1c..8cc5621181 100644 --- a/erts/emulator/test/map_SUITE.erl +++ b/erts/emulator/test/map_SUITE.erl @@ -254,7 +254,15 @@ t_update_exact(Config) when is_list(Config) -> M2 = M0#{3.0:=new}, #{1:=a,2:=b,3.0:=new,4:=d,5:=e} = M2, M2 = M0#{3.0=>wrong,3.0:=new}, - M2 = M0#{3=>wrong,3.0:=new}, + true = M2 =/= M0#{3=>right,3.0:=new}, + #{ 3 := right, 3.0 := new } = M0#{3=>right,3.0:=new}, + + M3 = id(#{ 1 => val}), + #{1 := update2,1.0 := new_val4} = M3#{ + 1.0 => new_val1, 1 := update, 1=> update3, + 1 := update2, 1.0 := new_val2, 1.0 => new_val3, + 1.0 => new_val4 }, + %% Errors cases. 
{'EXIT',{badarg,_}} = (catch M0#{nonexisting:=val}), diff --git a/erts/emulator/test/nif_SUITE.erl b/erts/emulator/test/nif_SUITE.erl index bcc1f9e5af..a854d3f05b 100644 --- a/erts/emulator/test/nif_SUITE.erl +++ b/erts/emulator/test/nif_SUITE.erl @@ -850,6 +850,138 @@ resource_takeover(Config) when is_list(Config) -> ?line ok = forget_resource(AN4), ?line [] = nif_mod_call_history(), + + %% + %% Test rollback after failed upgrade of same lib-version + %% + + {A5,BinA5} = make_resource(2, Holder, "A5"), + {NA5,BinNA5} = make_resource(0, Holder, "NA5"), + {AN5,_BinAN5} = make_resource(1, Holder, "AN5"), + + {A6,BinA6} = make_resource(2, Holder, "A6"), + {NA6,BinNA6} = make_resource(0, Holder, "NA6"), + {AN6,_BinAN6} = make_resource(1, Holder, "AN6"), + + {module,nif_mod} = erlang:load_module(nif_mod,ModBin), + undefined = nif_mod:lib_version(), + {error,{upgrade,_}} = + nif_mod:load_nif_lib(Config, 1, + [{resource_type, 4, ?RT_TAKEOVER, "resource_type_A",resource_dtor_B, + ?RT_TAKEOVER}, + {resource_type, 4, ?RT_TAKEOVER bor ?RT_CREATE, "resource_type_null_A",null, + ?RT_TAKEOVER}, + {resource_type, 4, ?RT_TAKEOVER, "resource_type_A_null",resource_dtor_A, + ?RT_TAKEOVER}, + {resource_type, 4, ?RT_CREATE, "Mr Pink", resource_dtor_A, + ?RT_CREATE}, + + {return, 1} % FAIL + ]), + + undefined = nif_mod:lib_version(), + [{upgrade,1,5,105}] = nif_mod_call_history(), + + %% Make sure dtor was not changed (from A to B) + ok = forget_resource(A5), + [{{resource_dtor_A_v1,BinA5},1,6,106}] = nif_mod_call_history(), + + %% Make sure dtor was not nullified (from A to null) + ok = forget_resource(NA5), + [{{resource_dtor_A_v1,BinNA5},1,7,107}] = nif_mod_call_history(), + + %% Make sure dtor was not added (from null to A) + ok = forget_resource(AN5), + [] = nif_mod_call_history(), + + %% + %% Test rollback after failed upgrade of other lib-version + %% + + {error,{upgrade,_}} = + nif_mod:load_nif_lib(Config, 2, + [{resource_type, 4, ?RT_TAKEOVER, "resource_type_A",resource_dtor_B, + ?RT_TAKEOVER}, + {resource_type, 4, ?RT_TAKEOVER bor ?RT_CREATE, "resource_type_null_A",null, + ?RT_TAKEOVER}, + {resource_type, 4, ?RT_TAKEOVER, "resource_type_A_null",resource_dtor_A, + ?RT_TAKEOVER}, + {resource_type, null, ?RT_TAKEOVER, "Mr Pink", resource_dtor_A, + ?RT_TAKEOVER}, + {resource_type, 4, ?RT_CREATE, "Mr Pink", resource_dtor_A, + ?RT_CREATE}, + + {return, 1} % FAIL + ]), + + undefined = nif_mod:lib_version(), + [{upgrade,2,_,_}] = nif_mod_call_history(), + + %% Make sure dtor was not changed (from A to B) + ok = forget_resource(A6), + [{{resource_dtor_A_v1,BinA6},1,_,_}] = nif_mod_call_history(), + + %% Make sure dtor was not nullified (from A to null) + ok = forget_resource(NA6), + [{{resource_dtor_A_v1,BinNA6},1,_,_}] = nif_mod_call_history(), + + %% Make sure dtor was not added (from null to A) + ok = forget_resource(AN6), + [] = nif_mod_call_history(), + + %% + %% Test rollback after failed initial load + false = code:purge(nif_mod), + [{unload,1,_,_}] = nif_mod_call_history(), + true = code:delete(nif_mod), + false = code:purge(nif_mod), + [] = nif_mod_call_history(), + + + {module,nif_mod} = erlang:load_module(nif_mod,ModBin), + undefined = nif_mod:lib_version(), + {error,{load,_}} = + nif_mod:load_nif_lib(Config, 1, + [{resource_type, null, ?RT_TAKEOVER, "resource_type_A",resource_dtor_A, + ?RT_TAKEOVER}, + {resource_type, 4, ?RT_TAKEOVER bor ?RT_CREATE, "resource_type_null_A",null, + ?RT_CREATE}, + {resource_type, 4, ?RT_CREATE, "resource_type_A_null",resource_dtor_A, + ?RT_CREATE}, +
{resource_type, 4, ?RT_CREATE, "Mr Pink", resource_dtor_A, + ?RT_CREATE}, + + {return, 1} % FAIL + ]), + + undefined = nif_mod:lib_version(), + ok = nif_mod:load_nif_lib(Config, 1, + [{resource_type, null, ?RT_TAKEOVER, "resource_type_A",resource_dtor_A, + ?RT_TAKEOVER}, + {resource_type, 0, ?RT_TAKEOVER bor ?RT_CREATE, "resource_type_null_A", + resource_dtor_A, ?RT_CREATE}, + + {resource_type, 1, ?RT_CREATE, "resource_type_A_null", null, + ?RT_CREATE}, + {resource_type, null, ?RT_TAKEOVER, "Mr Pink", resource_dtor_A, + ?RT_TAKEOVER}, + + {return, 0} % SUCCESS + ]), + + ?line hold_nif_mod_priv_data(nif_mod:get_priv_data_ptr()), + ?line [{load,1,1,101},{get_priv_data_ptr,1,2,102}] = nif_mod_call_history(), + + {NA7,BinNA7} = make_resource(0, Holder, "NA7"), + {AN7,BinAN7} = make_resource(1, Holder, "AN7"), + + ok = forget_resource(NA7), + [{{resource_dtor_A_v1,BinNA7},1,_,_}] = nif_mod_call_history(), + + ok = forget_resource(AN7), + [] = nif_mod_call_history(), + ?line true = lists:member(?MODULE, erlang:system_info(taints)), ?line true = lists:member(nif_mod, erlang:system_info(taints)), ?line verify_tmpmem(TmpMem), @@ -1406,7 +1538,7 @@ dirty_nif(Config) when is_list(Config) -> {skipped,"No dirty scheduler support"} end. -next_msg(Pid) -> +next_msg(_Pid) -> receive M -> M after 100 -> diff --git a/erts/emulator/test/nif_SUITE_data/nif_SUITE.c b/erts/emulator/test/nif_SUITE_data/nif_SUITE.c index b550d1f16d..160f4843ad 100644 --- a/erts/emulator/test/nif_SUITE_data/nif_SUITE.c +++ b/erts/emulator/test/nif_SUITE_data/nif_SUITE.c @@ -1477,7 +1477,6 @@ static ERL_NIF_TERM consume_timeslice_nif(ErlNifEnv* env, int argc, const ERL_NI { int percent; char atom[10]; - int do_repeat; if (!enif_get_int(env, argv[0], &percent) || !enif_get_atom(env, argv[1], atom, sizeof(atom), ERL_NIF_LATIN1)) { diff --git a/erts/emulator/test/nif_SUITE_data/nif_mod.c b/erts/emulator/test/nif_SUITE_data/nif_mod.c index e32d10057c..55a0d2ac4f 100644 --- a/erts/emulator/test/nif_SUITE_data/nif_mod.c +++ b/erts/emulator/test/nif_SUITE_data/nif_mod.c @@ -1,7 +1,7 @@ /* * %CopyrightBegin% * - * Copyright Ericsson AB 2009-2010. All Rights Reserved. + * Copyright Ericsson AB 2009-2014. All Rights Reserved. 
* * The contents of this file are subject to the Erlang Public License, * Version 1.1, (the "License"); you may not use this file except in @@ -41,6 +41,7 @@ static ERL_NIF_TERM am_null; static ERL_NIF_TERM am_resource_type; static ERL_NIF_TERM am_resource_dtor_A; static ERL_NIF_TERM am_resource_dtor_B; +static ERL_NIF_TERM am_return; static NifModPrivData* priv_data(ErlNifEnv* env) { @@ -54,6 +55,7 @@ static void init(ErlNifEnv* env) am_resource_type = enif_make_atom(env, "resource_type"); am_resource_dtor_A = enif_make_atom(env, "resource_dtor_A"); am_resource_dtor_B = enif_make_atom(env, "resource_dtor_B"); + am_return = enif_make_atom(env, "return"); } static void add_call_with_arg(ErlNifEnv* env, NifModPrivData* data, const char* func_name, @@ -105,19 +107,15 @@ static void resource_dtor_B(ErlNifEnv* env, void* a) } /* {resource_type, Ix|null, ErlNifResourceFlags in, "TypeName", dtor(A|B|null), ErlNifResourceFlags out}*/ -static void open_resource_type(ErlNifEnv* env, ERL_NIF_TERM op_tpl) +static void open_resource_type(ErlNifEnv* env, const ERL_NIF_TERM* arr) { NifModPrivData* data = priv_data(env); - const ERL_NIF_TERM* arr; - int arity; char rt_name[30]; union { ErlNifResourceFlags e; int i; } flags, exp_res, got_res; unsigned ix; ErlNifResourceDtor* dtor; ErlNifResourceType* got_ptr; - CHECK(enif_get_tuple(env, op_tpl, &arity, &arr)); - CHECK(arity == 6); CHECK(enif_is_identical(arr[0], am_resource_type)); CHECK(enif_get_int(env, arr[2], &flags.i)); CHECK(enif_get_string(env, arr[3], rt_name, sizeof(rt_name), ERL_NIF_LATIN1) > 0); @@ -147,18 +145,32 @@ static void open_resource_type(ErlNifEnv* env, ERL_NIF_TERM op_tpl) CHECK(got_res.e == exp_res.e); } -static void do_load_info(ErlNifEnv* env, ERL_NIF_TERM load_info) +static void do_load_info(ErlNifEnv* env, ERL_NIF_TERM load_info, int* retvalp) { NifModPrivData* data = priv_data(env); ERL_NIF_TERM head, tail; unsigned ix; + for (ix=0; ix<RT_MAX; ix++) { data->rt_arr[ix] = NULL; } for (head = load_info; enif_get_list_cell(env, head, &head, &tail); head = tail) { - - open_resource_type(env, head); + const ERL_NIF_TERM* arr; + int arity; + + CHECK(enif_get_tuple(env, head, &arity, &arr)); + switch (arity) { + case 6: + open_resource_type(env, arr); + break; + case 2: + CHECK(arr[0] == am_return); + CHECK(enif_get_int(env, arr[1], retvalp)); + break; + default: + CHECK(0); + } } CHECK(enif_is_empty_list(env, head)); } @@ -166,6 +178,7 @@ static void do_load_info(ErlNifEnv* env, ERL_NIF_TERM load_info) static int load(ErlNifEnv* env, void** priv, ERL_NIF_TERM load_info) { NifModPrivData* data; + int retval = 0; init(env); data = (NifModPrivData*) enif_alloc(sizeof(NifModPrivData)); @@ -177,38 +190,41 @@ static int load(ErlNifEnv* env, void** priv, ERL_NIF_TERM load_info) add_call(env, data, "load"); - do_load_info(env, load_info); - data->calls = 0; - return 0; + do_load_info(env, load_info, &retval); + if (retval) + NifModPrivData_release(data); + return retval; } static int reload(ErlNifEnv* env, void** priv, ERL_NIF_TERM load_info) { NifModPrivData* data = (NifModPrivData*) *priv; + int retval = 0; init(env); add_call(env, data, "reload"); - do_load_info(env, load_info); - return 0; + do_load_info(env, load_info, &retval); + return retval; } static int upgrade(ErlNifEnv* env, void** priv, void** old_priv_data, ERL_NIF_TERM load_info) { NifModPrivData* data = (NifModPrivData*) *old_priv_data; + int retval = 0; init(env); add_call(env, data, "upgrade"); data->ref_cnt++; *priv = *old_priv_data; - do_load_info(env, load_info); + 
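/* Editor's annotation, not in the original patch: the load_info list handled
+  * below may now end in a {return, N} tuple; do_load_info() stores N through
+  * retvalp, letting the suite force load/reload/upgrade to fail and exercise
+  * the rollback paths tested in nif_SUITE. */
+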
diff --git a/erts/emulator/test/nif_SUITE_data/nif_mod.h b/erts/emulator/test/nif_SUITE_data/nif_mod.h
index cd0ecf4b54..fb14fee815 100644
--- a/erts/emulator/test/nif_SUITE_data/nif_mod.h
+++ b/erts/emulator/test/nif_SUITE_data/nif_mod.h
@@ -14,7 +14,6 @@ typedef struct call_info_t
 typedef struct
 {
     ErlNifMutex* mtx;
-    int calls;
     int ref_cnt;
     CallInfo* call_history;
     ErlNifResourceType* rt_arr[RT_MAX];
@@ -28,6 +27,11 @@ typedef struct
 	enif_mutex_unlock((NMPD)->mtx); \
 	if (is_last) { \
 	    enif_mutex_destroy((NMPD)->mtx); \
+	    while ((NMPD)->call_history) { \
+		CallInfo* next = (NMPD)->call_history->next; \
+		enif_free((NMPD)->call_history); \
+		(NMPD)->call_history = next; \
+	    } \
 	    enif_free((NMPD)); \
 	} \
     }while (0)
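The macro change above plugs a leak: the CallInfo list was previously orphaned when the last reference to the private data went away. Written as a standalone function rather than macro lines, the added loop is the standard pop-and-free list drain (a sketch; CallInfo and its next field follow nif_mod.h):

    /* Drain and free a CallInfo list, leaving the head pointer NULL. */
    static void free_call_history(CallInfo** histp)
    {
        while (*histp) {
            CallInfo* next = (*histp)->next;
            enif_free(*histp);
            *histp = next;
        }
    }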
diff --git a/erts/emulator/test/scheduler_SUITE.erl b/erts/emulator/test/scheduler_SUITE.erl
index 6a43e2b0e7..3906471f87 100644
--- a/erts/emulator/test/scheduler_SUITE.erl
+++ b/erts/emulator/test/scheduler_SUITE.erl
@@ -54,6 +54,7 @@
 	 sbt_cmd/1,
 	 scheduler_threads/1,
 	 scheduler_suspend/1,
+	 dirty_scheduler_threads/1,
 	 reader_groups/1]).
 
 -define(DEFAULT_TIMEOUT, ?t:minutes(15)).
@@ -68,6 +69,7 @@ all() ->
      equal_and_high_with_part_time_max,
      equal_with_high, equal_with_high_max, bound_process,
      {group, scheduler_bind}, scheduler_threads, scheduler_suspend,
+     dirty_scheduler_threads,
      reader_groups].
 
 groups() ->
@@ -1092,6 +1094,55 @@ scheduler_threads(Config) when is_list(Config) ->
     end,
     ok.
 
+dirty_scheduler_threads(Config) when is_list(Config) ->
+    SmpSupport = erlang:system_info(smp_support),
+    try
+	erlang:system_info(dirty_cpu_schedulers),
+	dirty_scheduler_threads_test(Config, SmpSupport)
+    catch
+	error:badarg ->
+	    {skipped, "No dirty scheduler support"}
+    end.
+
+dirty_scheduler_threads_test(Config, SmpSupport) ->
+    {Sched, SchedOnln, _} = get_dsstate(Config, ""),
+    {HalfSched, HalfSchedOnln} = case SmpSupport of
+				     false -> {1,1};
+				     true ->
+					 {Sched div 2,
+					  SchedOnln div 2}
+				 end,
+    Cmd1 = "+SDcpu "++integer_to_list(HalfSched)++":"++
+	integer_to_list(HalfSchedOnln),
+    {HalfSched, HalfSchedOnln, _} = get_dsstate(Config, Cmd1),
+    {HalfSched, HalfSchedOnln, _} = get_dsstate(Config, "+SDPcpu 50:50"),
+    IOSched = 20,
+    {_, _, IOSched} = get_dsstate(Config, "+SDio "++integer_to_list(IOSched)),
+    {ok, Node} = start_node(Config, ""),
+    [ok] = mcall(Node, [fun() -> dirty_schedulers_online_test() end]),
+    ok.
+
+dirty_schedulers_online_test() ->
+    dirty_schedulers_online_test(erlang:system_info(smp_support)).
+dirty_schedulers_online_test(false) -> ok;
+dirty_schedulers_online_test(true) ->
+    dirty_schedulers_online_smp_test(erlang:system_info(schedulers_online)).
+dirty_schedulers_online_smp_test(SchedOnln) when SchedOnln < 4 -> ok;
+dirty_schedulers_online_smp_test(SchedOnln) ->
+    DirtyCPUSchedOnln = erlang:system_info(dirty_cpu_schedulers_online),
+    SchedOnln = DirtyCPUSchedOnln,
+    HalfSchedOnln = SchedOnln div 2,
+    SchedOnln = erlang:system_flag(schedulers_online, HalfSchedOnln),
+    HalfDirtyCPUSchedOnln = DirtyCPUSchedOnln div 2,
+    HalfDirtyCPUSchedOnln = erlang:system_flag(schedulers_online, SchedOnln),
+    DirtyCPUSchedOnln = erlang:system_flag(dirty_cpu_schedulers_online,
+					   HalfDirtyCPUSchedOnln),
+    HalfDirtyCPUSchedOnln = erlang:system_info(dirty_cpu_schedulers_online),
+    QrtrDirtyCPUSchedOnln = HalfDirtyCPUSchedOnln div 2,
+    SchedOnln = erlang:system_flag(schedulers_online, HalfSchedOnln),
+    QrtrDirtyCPUSchedOnln = erlang:system_info(dirty_cpu_schedulers_online),
+    ok.
+
 get_sstate(Config, Cmd) ->
     {ok, Node} = start_node(Config, Cmd),
     [SState] = mcall(Node, [fun () ->
@@ -1100,6 +1151,20 @@ get_sstate(Config, Cmd) ->
     stop_node(Node),
     SState.
 
+get_dsstate(Config, Cmd) ->
+    {ok, Node} = start_node(Config, Cmd),
+    [DSCPU] = mcall(Node, [fun () ->
+				   erlang:system_info(dirty_cpu_schedulers)
+			   end]),
+    [DSCPUOnln] = mcall(Node, [fun () ->
+				       erlang:system_info(dirty_cpu_schedulers_online)
+			       end]),
+    [DSIO] = mcall(Node, [fun () ->
+				  erlang:system_info(dirty_io_schedulers)
+			  end]),
+    stop_node(Node),
+    {DSCPU, DSCPUOnln, DSIO}.
+
 scheduler_suspend(Config) when is_list(Config) ->
     ?line Dog = ?t:timetrap(?t:minutes(5)),
     ?line lists:foreach(fun (S) -> scheduler_suspend_test(Config, S) end,
@@ -1171,16 +1236,40 @@ sst2_loop(N) ->
     erlang:system_flag(multi_scheduling, unblock),
     sst2_loop(N-1).
 
-sst3_loop(_S, 0) ->
-    ok;
 sst3_loop(S, N) ->
+    try erlang:system_info(dirty_cpu_schedulers) of
+	DS ->
+	    sst3_loop_with_dirty_schedulers(S, DS, N)
+    catch
+	error:badarg ->
+	    sst3_loop_normal_schedulers_only(S, N)
+    end.
+
+sst3_loop_normal_schedulers_only(_S, 0) ->
+    ok;
+sst3_loop_normal_schedulers_only(S, N) ->
+    erlang:system_flag(schedulers_online, (S div 2)+1),
+    erlang:system_flag(schedulers_online, 1),
+    erlang:system_flag(schedulers_online, (S div 2)+1),
+    erlang:system_flag(schedulers_online, S),
+    erlang:system_flag(schedulers_online, 1),
+    erlang:system_flag(schedulers_online, S),
+    sst3_loop_normal_schedulers_only(S, N-1).
+
+sst3_loop_with_dirty_schedulers(_S, _DS, 0) ->
+    ok;
+sst3_loop_with_dirty_schedulers(S, DS, N) ->
     erlang:system_flag(schedulers_online, (S div 2)+1),
+    erlang:system_flag(dirty_cpu_schedulers_online, (DS div 2)+1),
     erlang:system_flag(schedulers_online, 1),
     erlang:system_flag(schedulers_online, (S div 2)+1),
+    erlang:system_flag(dirty_cpu_schedulers_online, 1),
     erlang:system_flag(schedulers_online, S),
+    erlang:system_flag(dirty_cpu_schedulers_online, DS),
     erlang:system_flag(schedulers_online, 1),
     erlang:system_flag(schedulers_online, S),
-    sst3_loop(S, N-1).
+    erlang:system_flag(dirty_cpu_schedulers_online, DS),
+    sst3_loop_with_dirty_schedulers(S, DS, N-1).
 
 reader_groups(Config) when is_list(Config) ->
     %% White box testing. These results are correct, but other results
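The suite above detects dirty scheduler support by catching the badarg that erlang:system_info(dirty_cpu_schedulers) raises on emulators built without it. A native library can run an equivalent probe from C; the sketch below assumes the OTP 17 ErlNifSysInfo layout, where the dirty_scheduler_support field is an addition of this same release, so treat the field name as an assumption rather than a settled API:

    /* Sketch: runtime probe for dirty scheduler support from a NIF.
     * Assumes ErlNifSysInfo (an alias of ErlDrvSysInfo) carries the
     * dirty_scheduler_support field added for OTP 17. */
    #include "erl_nif.h"

    static int have_dirty_schedulers(void)
    {
        ErlNifSysInfo si;
        enif_system_info(&si, sizeof(si));
        return si.dirty_scheduler_support;
    }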
diff --git a/erts/emulator/utils/make_version b/erts/emulator/utils/make_version
index 02b68f2b39..0ba1c77930 100755
--- a/erts/emulator/utils/make_version
+++ b/erts/emulator/utils/make_version
@@ -39,10 +39,10 @@ if ($ARGV[0] eq '-o') {
 }
 
 my $release = shift;
-defined $release or die "No release specified";
+defined $release or die "No otp release specified";
 
-my $correction_package = shift;
-defined $correction_package or die "No correction package specified";
+my $otp_version = shift;
+defined $otp_version or die "No otp version specified";
 
 my $version = shift;
 defined $version or die "No version name specified";
@@ -56,7 +56,7 @@ open(FILE, ">$outputfile") or die "Can't create $outputfile: $!";
 print FILE <<EOF;
 /* This file was created by 'make_version' -- don't modify. */
 #define ERLANG_OTP_RELEASE "$release"
-#define ERLANG_OTP_CORRECTION_PACKAGE "$correction_package"
+#define ERLANG_OTP_VERSION "$otp_version"
 #define ERLANG_VERSION "$version"
 #define ERLANG_COMPILE_DATE "$time_str"
 #define ERLANG_ARCHITECTURE "$architecture"
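With this change the generated erl_version.h carries the full OTP version string under ERLANG_OTP_VERSION instead of a separate correction-package macro. A hypothetical consumer of the generated header could read the macros directly; the banner format below is illustrative, not erts's actual format string:

    /* Hypothetical use of the macros make_version now emits. */
    #include <stdio.h>
    #include "erl_version.h"   /* generated by utils/make_version */

    static void print_banner(void)
    {
        printf("Erlang/OTP %s [erts-%s] (%s)\n",
               ERLANG_OTP_VERSION, ERLANG_VERSION, ERLANG_ARCHITECTURE);
    }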