From 6a6c72801e6fb021851b4f2591b03ed136a27363 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bj=C3=B6rn=20Gustavsson?= Date: Fri, 17 Jun 2016 12:54:26 +0200 Subject: beam_makeops: Separate static information from counters The counters are only used in the special 'icount' emulator. We will save some memory by not including the counters in the OpEntry. It will also make it possible to make opc 'const'. --- erts/emulator/beam/erl_bif_info.c | 2 +- erts/emulator/beam/erl_vm.h | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) (limited to 'erts/emulator/beam') diff --git a/erts/emulator/beam/erl_bif_info.c b/erts/emulator/beam/erl_bif_info.c index 3fb866733c..e5d3f38ce4 100644 --- a/erts/emulator/beam/erl_bif_info.c +++ b/erts/emulator/beam/erl_bif_info.c @@ -2394,7 +2394,7 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1) ERTS_ATOM_ENC_LATIN1, 1), erts_bld_uint(hpp, hszp, - opc[i].count)), + erts_instr_count[i])), res); } diff --git a/erts/emulator/beam/erl_vm.h b/erts/emulator/beam/erl_vm.h index f97716d030..22c8e8ee12 100644 --- a/erts/emulator/beam/erl_vm.h +++ b/erts/emulator/beam/erl_vm.h @@ -146,12 +146,13 @@ typedef struct op_entry { int sz; /* Number of loaded words. */ char* pack; /* Instructions for packing engine. */ char* sign; /* Signature string. */ - unsigned count; /* Number of times executed. */ } OpEntry; extern OpEntry opc[]; /* Description of all instructions. */ extern int num_instructions; /* Number of instruction in opc[]. */ +extern Uint erts_instr_count[]; + /* some constants for various table sizes etc */ #define ATOM_TEXT_SIZE 32768 /* Increment for allocating atom text space */ -- cgit v1.2.3 From 644cc778e9770b847700dca41a0c3ebe20da64df Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bj=C3=B6rn=20Gustavsson?= Date: Fri, 17 Jun 2016 05:23:08 +0200 Subject: beam_makeops: Save some memory by making loader tables 'const' Before: $ size bin/x86_64-unknown-linux-gnu/beam.smp text data bss dec hex filename 3080982 188369 158472 3427823 344def bin/x86_64-unknown-linux-gnu/beam.smp After: $ size bin/x86_64-unknown-linux-gnu/beam.smp text data bss dec hex filename 3164694 104657 158472 3427823 344def bin/x86_64-unknown-linux-gnu/beam.smp --- erts/emulator/beam/beam_load.c | 4 ++-- erts/emulator/beam/beam_load.h | 2 +- erts/emulator/beam/erl_vm.h | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) (limited to 'erts/emulator/beam') diff --git a/erts/emulator/beam/beam_load.c b/erts/emulator/beam/beam_load.c index 0c2743beb2..4ca2c6a13f 100644 --- a/erts/emulator/beam/beam_load.c +++ b/erts/emulator/beam/beam_load.c @@ -4860,7 +4860,7 @@ transform_engine(LoaderState* st) { Uint op; int ap; /* Current argument. */ - Uint* restart; /* Where to restart if current match fails. */ + const Uint* restart; /* Where to restart if current match fails. */ GenOpArg var[TE_MAX_VARS]; /* Buffer for variables.
*/ GenOpArg* rest_args = NULL; int num_rest_args = 0; @@ -4869,7 +4869,7 @@ transform_engine(LoaderState* st) GenOp* instr; GenOp* first = st->genop; GenOp* keep = NULL; - Uint* pc; + const Uint* pc; static Uint restart_fail[1] = {TOP_fail}; ASSERT(gen_opc[first->op].transform != -1); diff --git a/erts/emulator/beam/beam_load.h b/erts/emulator/beam/beam_load.h index fd2dd97fee..92a9abddbf 100644 --- a/erts/emulator/beam/beam_load.h +++ b/erts/emulator/beam/beam_load.h @@ -35,7 +35,7 @@ typedef struct gen_op_entry { int transform; } GenOpEntry; -extern GenOpEntry gen_opc[]; +extern const GenOpEntry gen_opc[]; #ifdef NO_JUMP_TABLE #define BeamOp(Op) (Op) diff --git a/erts/emulator/beam/erl_vm.h b/erts/emulator/beam/erl_vm.h index 22c8e8ee12..60c2349f36 100644 --- a/erts/emulator/beam/erl_vm.h +++ b/erts/emulator/beam/erl_vm.h @@ -148,8 +148,8 @@ typedef struct op_entry { char* sign; /* Signature string. */ } OpEntry; -extern OpEntry opc[]; /* Description of all instructions. */ -extern int num_instructions; /* Number of instruction in opc[]. */ +extern const OpEntry opc[]; /* Description of all instructions. */ +extern const int num_instructions; /* Number of instruction in opc[]. */ extern Uint erts_instr_count[]; -- cgit v1.2.3 From f0f4e72c8ec5c08993ff84d4eac5c48897a09657 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bj=C3=B6rn=20Gustavsson?= Date: Mon, 20 Jun 2016 13:02:59 +0200 Subject: Simplify creation of new GC BIFs Add the BIF type "gcbif" in bif.tab for defining GC BIFs. That will eliminate some of the hand-written administrative code for handling GC BIFs, saving the developer's time. --- erts/emulator/beam/beam_emu.c | 31 +++--------- erts/emulator/beam/beam_load.c | 111 +++++++++++++---------------------------- erts/emulator/beam/bif.tab | 38 +++++++------- erts/emulator/beam/global.h | 12 ----- 4 files changed, 61 insertions(+), 131 deletions(-) (limited to 'erts/emulator/beam') diff --git a/erts/emulator/beam/beam_emu.c b/erts/emulator/beam/beam_emu.c index 4716460a6b..c9fb5a3022 100644 --- a/erts/emulator/beam/beam_emu.c +++ b/erts/emulator/beam/beam_emu.c @@ -5428,31 +5428,14 @@ void erts_dirty_process_main(ErtsSchedulerData *esdp) static BifFunction translate_gc_bif(void* gcf) { - if (gcf == erts_gc_length_1) { - return length_1; - } else if (gcf == erts_gc_size_1) { - return size_1; - } else if (gcf == erts_gc_bit_size_1) { - return bit_size_1; - } else if (gcf == erts_gc_byte_size_1) { - return byte_size_1; - } else if (gcf == erts_gc_map_size_1) { - return map_size_1; - } else if (gcf == erts_gc_abs_1) { - return abs_1; - } else if (gcf == erts_gc_float_1) { - return float_1; - } else if (gcf == erts_gc_round_1) { - return round_1; - } else if (gcf == erts_gc_trunc_1) { - return round_1; - } else if (gcf == erts_gc_binary_part_2) { - return binary_part_2; - } else if (gcf == erts_gc_binary_part_3) { - return binary_part_3; - } else { - erts_exit(ERTS_ERROR_EXIT, "bad gc bif"); + const ErtsGcBif* p; + + for (p = erts_gc_bifs; p->bif != 0; p++) { + if (p->gc_bif == gcf) { + return p->bif; + } } + erts_exit(ERTS_ERROR_EXIT, "bad gc bif"); } /* diff --git a/erts/emulator/beam/beam_load.c b/erts/emulator/beam/beam_load.c index 0c2743beb2..897b0f7e6a 100644 --- a/erts/emulator/beam/beam_load.c +++ b/erts/emulator/beam/beam_load.c @@ -4013,60 +4013,52 @@ gen_make_fun2(LoaderState* stp, GenOpArg idx) op->next = NULL; return op; } + +static GenOp* +translate_gc_bif(LoaderState* stp, GenOp* op, GenOpArg Bif) +{ + const ErtsGcBif* p; + BifFunction bf; + + bf = stp->import[Bif.val].bf; + 
for (p = erts_gc_bifs; p->bif != 0; p++) { + if (p->bif == bf) { + op->a[1].type = TAG_u; + op->a[1].val = (BeamInstr) p->gc_bif; + return op; + } + } + + op->op = genop_unsupported_guard_bif_3; + op->arity = 3; + op->a[0].type = TAG_a; + op->a[0].val = stp->import[Bif.val].module; + op->a[1].type = TAG_a; + op->a[1].val = stp->import[Bif.val].function; + op->a[2].type = TAG_u; + op->a[2].val = stp->import[Bif.val].arity; + return op; +} + /* - * Rewrite gc_bifs with one parameter (the common case). Utilized - * in ops.tab to rewrite instructions calling bif's in guards - * to use a garbage collecting implementation. + * Rewrite gc_bifs with one parameter (the common case). */ static GenOp* gen_guard_bif1(LoaderState* stp, GenOpArg Fail, GenOpArg Live, GenOpArg Bif, GenOpArg Src, GenOpArg Dst) { GenOp* op; - BifFunction bf; NEW_GENOP(stp, op); op->next = NULL; - bf = stp->import[Bif.val].bf; - /* The translations here need to have a reverse counterpart in - beam_emu.c:translate_gc_bif for error handling to work properly. */ - if (bf == length_1) { - op->a[1].val = (BeamInstr) (void *) erts_gc_length_1; - } else if (bf == size_1) { - op->a[1].val = (BeamInstr) (void *) erts_gc_size_1; - } else if (bf == bit_size_1) { - op->a[1].val = (BeamInstr) (void *) erts_gc_bit_size_1; - } else if (bf == byte_size_1) { - op->a[1].val = (BeamInstr) (void *) erts_gc_byte_size_1; - } else if (bf == map_size_1) { - op->a[1].val = (BeamInstr) (void *) erts_gc_map_size_1; - } else if (bf == abs_1) { - op->a[1].val = (BeamInstr) (void *) erts_gc_abs_1; - } else if (bf == float_1) { - op->a[1].val = (BeamInstr) (void *) erts_gc_float_1; - } else if (bf == round_1) { - op->a[1].val = (BeamInstr) (void *) erts_gc_round_1; - } else if (bf == trunc_1) { - op->a[1].val = (BeamInstr) (void *) erts_gc_trunc_1; - } else { - op->op = genop_unsupported_guard_bif_3; - op->arity = 3; - op->a[0].type = TAG_a; - op->a[0].val = stp->import[Bif.val].module; - op->a[1].type = TAG_a; - op->a[1].val = stp->import[Bif.val].function; - op->a[2].type = TAG_u; - op->a[2].val = stp->import[Bif.val].arity; - return op; - } op->op = genop_i_gc_bif1_5; op->arity = 5; op->a[0] = Fail; - op->a[1].type = TAG_u; + /* op->a[1] is set by translate_gc_bif() */ op->a[2] = Src; op->a[3] = Live; op->a[4] = Dst; - return op; + return translate_gc_bif(stp, op, Bif); } /* @@ -4077,35 +4069,18 @@ gen_guard_bif2(LoaderState* stp, GenOpArg Fail, GenOpArg Live, GenOpArg Bif, GenOpArg S1, GenOpArg S2, GenOpArg Dst) { GenOp* op; - BifFunction bf; NEW_GENOP(stp, op); op->next = NULL; - bf = stp->import[Bif.val].bf; - /* The translations here need to have a reverse counterpart in - beam_emu.c:translate_gc_bif for error handling to work properly. 
*/ - if (bf == binary_part_2) { - op->a[1].val = (BeamInstr) (void *) erts_gc_binary_part_2; - } else { - op->op = genop_unsupported_guard_bif_3; - op->arity = 3; - op->a[0].type = TAG_a; - op->a[0].val = stp->import[Bif.val].module; - op->a[1].type = TAG_a; - op->a[1].val = stp->import[Bif.val].function; - op->a[2].type = TAG_u; - op->a[2].val = stp->import[Bif.val].arity; - return op; - } op->op = genop_i_gc_bif2_6; op->arity = 6; op->a[0] = Fail; - op->a[1].type = TAG_u; + /* op->a[1] is set by translate_gc_bif() */ op->a[2] = Live; op->a[3] = S1; op->a[4] = S2; op->a[5] = Dst; - return op; + return translate_gc_bif(stp, op, Bif); } /* @@ -4116,37 +4091,19 @@ gen_guard_bif3(LoaderState* stp, GenOpArg Fail, GenOpArg Live, GenOpArg Bif, GenOpArg S1, GenOpArg S2, GenOpArg S3, GenOpArg Dst) { GenOp* op; - BifFunction bf; NEW_GENOP(stp, op); op->next = NULL; - bf = stp->import[Bif.val].bf; - /* The translations here need to have a reverse counterpart in - beam_emu.c:translate_gc_bif for error handling to work properly. */ - if (bf == binary_part_3) { - op->a[1].val = (BeamInstr) (void *) erts_gc_binary_part_3; - } else { - op->op = genop_unsupported_guard_bif_3; - op->arity = 3; - op->a[0].type = TAG_a; - op->a[0].val = stp->import[Bif.val].module; - op->a[1].type = TAG_a; - op->a[1].val = stp->import[Bif.val].function; - op->a[2].type = TAG_u; - op->a[2].val = stp->import[Bif.val].arity; - return op; - } op->op = genop_ii_gc_bif3_7; op->arity = 7; op->a[0] = Fail; - op->a[1].type = TAG_u; + /* op->a[1] is set by translate_gc_bif() */ op->a[2] = Live; op->a[3] = S1; op->a[4] = S2; op->a[5] = S3; op->a[6] = Dst; - op->next = NULL; - return op; + return translate_gc_bif(stp, op, Bif); } static GenOp* diff --git a/erts/emulator/beam/bif.tab b/erts/emulator/beam/bif.tab index 065018514a..978bcd4bba 100644 --- a/erts/emulator/beam/bif.tab +++ b/erts/emulator/beam/bif.tab @@ -23,22 +23,24 @@ # # Lines starting with '#' are ignored. # -# ::= "bif" * | "ubif" * +# ::= "bif" * | +# "ubif" * | +# "gcbif" * # ::= ":" "/" # -# "ubif" is an unwrapped bif, i.e. a bif without a trace wrapper, -# or rather; the trace entry point in the export entry is the same -# as the normal entry point, and no trace wrapper is generated. +# ubif: Use for operators and guard BIFs that never build anything +# on the heap (such as tuple_size/1) and operators. # -# Important: Use "ubif" for guard BIFs and operators; use "bif" for ordinary BIFs. +# gcbif: Use for guard BIFs that may build on the heap (such as abs/1). +# +# bif: Use for all other BIFs. # # Add new BIFs to the end of the file. # -# Note: Guards BIFs require special support in the compiler (to be able to actually -# call them from within a guard). +# Note: Guards BIFs usually require special support in the compiler. 
# -ubif erlang:abs/1 +gcbif erlang:abs/1 bif erlang:adler32/1 bif erlang:adler32/2 bif erlang:adler32_combine/3 @@ -62,7 +64,7 @@ bif erlang:exit/1 bif erlang:exit/2 bif erlang:external_size/1 bif erlang:external_size/2 -ubif erlang:float/1 +gcbif erlang:float/1 bif erlang:float_to_list/1 bif erlang:float_to_list/2 bif erlang:fun_info/2 @@ -79,7 +81,7 @@ bif erlang:phash2/2 ubif erlang:hd/1 bif erlang:integer_to_list/1 bif erlang:is_alive/0 -ubif erlang:length/1 +gcbif erlang:length/1 bif erlang:link/1 bif erlang:list_to_atom/1 bif erlang:list_to_binary/1 @@ -126,10 +128,10 @@ bif erlang:processes/0 bif erlang:put/2 bif erlang:register/2 bif erlang:registered/0 -ubif erlang:round/1 +gcbif erlang:round/1 ubif erlang:self/0 bif erlang:setelement/3 -ubif erlang:size/1 +gcbif erlang:size/1 bif erlang:spawn/3 bif erlang:spawn_link/3 bif erlang:split_binary/2 @@ -139,7 +141,7 @@ bif erlang:term_to_binary/2 bif erlang:throw/1 bif erlang:time/0 ubif erlang:tl/1 -ubif erlang:trunc/1 +gcbif erlang:trunc/1 bif erlang:tuple_to_list/1 bif erlang:universaltime/0 bif erlang:universaltime_to_localtime/1 @@ -461,8 +463,8 @@ bif erlang:list_to_existing_atom/1 # ubif erlang:is_bitstring/1 ubif erlang:tuple_size/1 -ubif erlang:byte_size/1 -ubif erlang:bit_size/1 +gcbif erlang:byte_size/1 +gcbif erlang:bit_size/1 bif erlang:list_to_bitstring/1 bif erlang:bitstring_to_list/1 @@ -514,8 +516,8 @@ bif erlang:binary_to_term/2 # # The searching/splitting/substituting thingies # -ubif erlang:binary_part/2 -ubif erlang:binary_part/3 +gcbif erlang:binary_part/2 +gcbif erlang:binary_part/3 bif binary:compile_pattern/1 bif binary:match/2 @@ -607,7 +609,7 @@ bif os:unsetenv/1 bif re:inspect/2 ubif erlang:is_map/1 -ubif erlang:map_size/1 +gcbif erlang:map_size/1 bif maps:to_list/1 bif maps:find/2 bif maps:get/2 diff --git a/erts/emulator/beam/global.h b/erts/emulator/beam/global.h index f3d4ac56cd..961260041f 100644 --- a/erts/emulator/beam/global.h +++ b/erts/emulator/beam/global.h @@ -1427,18 +1427,6 @@ Eterm erts_gc_bor(Process* p, Eterm* reg, Uint live); Eterm erts_gc_bxor(Process* p, Eterm* reg, Uint live); Eterm erts_gc_bnot(Process* p, Eterm* reg, Uint live); -Eterm erts_gc_length_1(Process* p, Eterm* reg, Uint live); -Eterm erts_gc_size_1(Process* p, Eterm* reg, Uint live); -Eterm erts_gc_bit_size_1(Process* p, Eterm* reg, Uint live); -Eterm erts_gc_byte_size_1(Process* p, Eterm* reg, Uint live); -Eterm erts_gc_map_size_1(Process* p, Eterm* reg, Uint live); -Eterm erts_gc_abs_1(Process* p, Eterm* reg, Uint live); -Eterm erts_gc_float_1(Process* p, Eterm* reg, Uint live); -Eterm erts_gc_round_1(Process* p, Eterm* reg, Uint live); -Eterm erts_gc_trunc_1(Process* p, Eterm* reg, Uint live); -Eterm erts_gc_binary_part_3(Process* p, Eterm* reg, Uint live); -Eterm erts_gc_binary_part_2(Process* p, Eterm* reg, Uint live); - Uint erts_current_reductions(Process* current, Process *p); int erts_print_system_version(int to, void *arg, Process *c_p); -- cgit v1.2.3 From eb9766ca5d507d8cb8e1448780f96ba1cb45ff36 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bj=C3=B6rn=20Gustavsson?= Date: Wed, 22 Jun 2016 12:57:56 +0200 Subject: beam_debug: Improve the disassembly of gc_bif instructions Using the translation table in erts_gc_bifs[], we can now print out the name of GC BIFs, instead of just the pointer value. 
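The erts_gc_bifs[] table referenced here, and used by translate_gc_bif() in beam_emu.c and the gen_guard_bif1/2/3() loader functions above, is generated by beam_makeops from the "gcbif" entries in bif.tab and is not itself part of the hunks in this series. A minimal sketch of its assumed shape, using only names that appear in the patches above (the exact generated code may differ):

    /* Pairs an exported guard BIF with its garbage-collecting implementation.
     * The callers scan it with "for (p = erts_gc_bifs; p->bif != 0; p++)",
     * so the table ends with a null entry. */
    typedef struct {
        BifFunction bif;      /* e.g. abs_1, as declared by bif.tab        */
        BifFunction gc_bif;   /* e.g. erts_gc_abs_1, version that may GC   */
    } ErtsGcBif;

    const ErtsGcBif erts_gc_bifs[] = {
        {abs_1,   erts_gc_abs_1},
        {float_1, erts_gc_float_1},
        /* ... one entry per "gcbif" line in bif.tab ... */
        {NULL,    NULL}
    };

With a single generated table like this, the loader, the error-handling translation in beam_emu.c, and the disassembler change below all stay in sync automatically when a new "gcbif" line is added to bif.tab.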
--- erts/emulator/beam/beam_debug.c | 55 ++++++++++++++++++++++++++++++----------- 1 file changed, 41 insertions(+), 14 deletions(-) (limited to 'erts/emulator/beam') diff --git a/erts/emulator/beam/beam_debug.c b/erts/emulator/beam/beam_debug.c index a4ad3e7886..8f47ed4d9d 100644 --- a/erts/emulator/beam/beam_debug.c +++ b/erts/emulator/beam/beam_debug.c @@ -51,6 +51,7 @@ void dbg_bt(Process* p, Eterm* sp); void dbg_where(BeamInstr* addr, Eterm x0, Eterm* reg); static int print_op(int to, void *to_arg, int op, int size, BeamInstr* addr); +static void print_bif_name(int to, void* to_arg, BifFunction bif); BIF_RETTYPE erts_debug_same_2(BIF_ALIST_2) @@ -520,7 +521,27 @@ print_op(int to, void *to_arg, int op, int size, BeamInstr* addr) break; case 'I': /* Untagged integer. */ case 't': - erts_print(to, to_arg, "%d", *ap); + switch (op) { + case op_i_gc_bif1_jIsId: + case op_i_gc_bif2_jIIssd: + case op_i_gc_bif3_jIIssd: + { + const ErtsGcBif* p; + BifFunction gcf = (BifFunction) *ap; + for (p = erts_gc_bifs; p->bif != 0; p++) { + if (p->gc_bif == gcf) { + print_bif_name(to, to_arg, p->bif); + break; + } + } + if (p->bif == 0) { + erts_print(to, to_arg, "%d", (Uint)gcf); + } + break; + } + default: + erts_print(to, to_arg, "%d", *ap); + } ap++; break; case 'f': /* Destination label */ @@ -560,19 +581,7 @@ print_op(int to, void *to_arg, int op, int size, BeamInstr* addr) case 'F': /* Function definition */ break; case 'b': - for (i = 0; i < BIF_SIZE; i++) { - BifFunction bif = (BifFunction) *ap; - if (bif == bif_table[i].f) { - break; - } - } - if (i == BIF_SIZE) { - erts_print(to, to_arg, "b(%d)", (Uint) *ap); - } else { - Eterm name = bif_table[i].name; - unsigned arity = bif_table[i].arity; - erts_print(to, to_arg, "%T/%u", name, arity); - } + print_bif_name(to, to_arg, (BifFunction) *ap); ap++; break; case 'P': /* Byte offset into tuple (see beam_load.c) */ @@ -731,3 +740,21 @@ print_op(int to, void *to_arg, int op, int size, BeamInstr* addr) return size; } + +static void print_bif_name(int to, void* to_arg, BifFunction bif) +{ + int i; + + for (i = 0; i < BIF_SIZE; i++) { + if (bif == bif_table[i].f) { + break; + } + } + if (i == BIF_SIZE) { + erts_print(to, to_arg, "b(%d)", (Uint) bif); + } else { + Eterm name = bif_table[i].name; + unsigned arity = bif_table[i].arity; + erts_print(to, to_arg, "%T/%u", name, arity); + } +} -- cgit v1.2.3 From a2359927ca4e7c315e9849deec3375b1f2ae4170 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bj=C3=B6rn-Egil=20Dahlberg?= Date: Tue, 26 Jul 2016 10:19:31 +0200 Subject: erts: Remove the need for copying of literals * Literals are not copied between processes for messages or spawn Increases performance of message sent and processes spawned when literals are involved in messages or arguments. --- erts/emulator/beam/copy.c | 80 ++++++++++++++++++++++++++++------------ erts/emulator/beam/erl_gc.c | 8 ---- erts/emulator/beam/erl_message.c | 19 ++++++---- erts/emulator/beam/erl_process.c | 10 +++-- erts/emulator/beam/global.h | 20 ++++++---- 5 files changed, 88 insertions(+), 49 deletions(-) (limited to 'erts/emulator/beam') diff --git a/erts/emulator/beam/copy.c b/erts/emulator/beam/copy.c index ccc4cbad43..7d3777f37e 100644 --- a/erts/emulator/beam/copy.c +++ b/erts/emulator/beam/copy.c @@ -40,7 +40,8 @@ static void move_one_frag(Eterm** hpp, ErlHeapFragment*, ErlOffHeap*, int); /* * Copy object "obj" to process p. 
*/ -Eterm copy_object_x(Eterm obj, Process* to, Uint extra) { +Eterm copy_object_x(Eterm obj, Process* to, Uint extra) +{ if (!is_immed(obj)) { Uint size = size_object(obj); Eterm* hp = HAllocX(to, size, extra); @@ -70,7 +71,12 @@ Eterm copy_object_x(Eterm obj, Process* to, Uint extra) { * Return the "flat" size of the object. */ -Uint size_object(Eterm obj) +#define in_literal_purge_area(PTR) \ + (lit_purge_ptr && ( \ + (lit_purge_ptr <= (PTR) && \ + (PTR) < (lit_purge_ptr + lit_purge_sz)))) + +Uint size_object_x(Eterm obj, Eterm *lit_purge_ptr, Uint lit_purge_sz, Uint litopt) { Uint sum = 0; Eterm* ptr; @@ -78,7 +84,6 @@ Uint size_object(Eterm obj) #ifdef DEBUG Eterm mypid = erts_get_current_pid(); #endif - DECLARE_ESTACK(s); VERBOSE(DEBUG_SHCOPY, ("[pid=%T] size_object %p\n", mypid, obj)); @@ -86,17 +91,25 @@ Uint size_object(Eterm obj) for (;;) { switch (primary_tag(obj)) { case TAG_PRIMARY_LIST: - sum += 2; ptr = list_val(obj); + if (litopt && erts_is_literal(obj,ptr) && !in_literal_purge_area(ptr)) { + goto pop_next; + } + sum += 2; obj = *ptr++; if (!IS_CONST(obj)) { ESTACK_PUSH(s, obj); - } + } obj = *ptr; break; case TAG_PRIMARY_BOXED: { - Eterm hdr = *boxed_val(obj); + Eterm hdr; + ptr = boxed_val(obj); + if (litopt && erts_is_literal(obj,ptr) && !in_literal_purge_area(ptr)) { + goto pop_next; + } + hdr = *ptr; ASSERT(is_header(hdr)); switch (hdr & _TAG_HEADER_MASK) { case ARITYVAL_SUBTAG: @@ -279,10 +292,6 @@ do { \ #define COUNT_OFF_HEAP (0) -#define IN_LITERAL_PURGE_AREA(info, ptr) \ - ((info)->range_ptr && ( \ - (info)->range_ptr <= (ptr) && \ - (ptr) < ((info)->range_ptr + (info)->range_sz))) /* * Return the real size of an object and find sharing information * This currently returns the same as erts_debug:size/1. @@ -599,7 +608,8 @@ cleanup: /* * Copy a structure to a heap. 
*/ -Eterm copy_struct_x(Eterm obj, Uint sz, Eterm** hpp, ErlOffHeap* off_heap, Uint *bsz) +Eterm copy_struct_x(Eterm obj, Uint sz, Eterm** hpp, ErlOffHeap* off_heap, Uint *bsz, + Eterm *lit_purge_ptr, Uint lit_purge_sz, Uint litopt) { char* hstart; Uint hsize; @@ -651,7 +661,6 @@ Eterm copy_struct_x(Eterm obj, Uint sz, Eterm** hpp, ErlOffHeap* off_heap, Uint L_copy: while (hp != htop) { obj = *hp; - switch (primary_tag(obj)) { case TAG_PRIMARY_IMMED1: hp++; @@ -667,6 +676,10 @@ Eterm copy_struct_x(Eterm obj, Uint sz, Eterm** hpp, ErlOffHeap* off_heap, Uint L_copy_list: tailp = argp; + if (litopt && erts_is_literal(obj,objp) && !in_literal_purge_area(objp)) { + *tailp = obj; + goto L_copy; + } for (;;) { tp = tailp; elem = CAR(objp); @@ -674,18 +687,23 @@ Eterm copy_struct_x(Eterm obj, Uint sz, Eterm** hpp, ErlOffHeap* off_heap, Uint hbot -= 2; CAR(hbot) = elem; tailp = &CDR(hbot); - } - else { + } else { CAR(htop) = elem; tailp = &CDR(htop); htop += 2; } *tp = make_list(tailp - 1); obj = CDR(objp); + if (!is_list(obj)) { break; } objp = list_val(obj); + + if (litopt && erts_is_literal(obj,objp) && !in_literal_purge_area(objp)) { + *tailp = obj; + goto L_copy; + } } switch (primary_tag(obj)) { case TAG_PRIMARY_IMMED1: *tailp = obj; goto L_copy; @@ -695,7 +713,7 @@ Eterm copy_struct_x(Eterm obj, Uint sz, Eterm** hpp, ErlOffHeap* off_heap, Uint "%s, line %d: Internal error in copy_struct: 0x%08x\n", __FILE__, __LINE__,obj); } - + case TAG_PRIMARY_BOXED: if (ErtsInArea(boxed_val(obj),hstart,hsize)) { hp++; @@ -705,6 +723,10 @@ Eterm copy_struct_x(Eterm obj, Uint sz, Eterm** hpp, ErlOffHeap* off_heap, Uint L_copy_boxed: objp = boxed_val(obj); + if (litopt && erts_is_literal(obj,objp) && !in_literal_purge_area(objp)) { + *argp = obj; + break; + } hdr = *objp; switch (hdr & _TAG_HEADER_MASK) { case ARITYVAL_SUBTAG: @@ -765,7 +787,7 @@ Eterm copy_struct_x(Eterm obj, Uint sz, Eterm** hpp, ErlOffHeap* off_heap, Uint extra_bytes = 1; } else { extra_bytes = 0; - } + } real_size = size+extra_bytes; objp = binary_val(real_bin); if (thing_subtag(*objp) == HEAP_BINARY_SUBTAG) { @@ -780,7 +802,7 @@ Eterm copy_struct_x(Eterm obj, Uint sz, Eterm** hpp, ErlOffHeap* off_heap, Uint } else { ProcBin* from = (ProcBin *) objp; ProcBin* to; - + ASSERT(thing_subtag(*objp) == REFC_BINARY_SUBTAG); if (from->flags) { erts_emasculate_writable_binary(from); @@ -900,6 +922,12 @@ Eterm copy_struct_x(Eterm obj, Uint sz, Eterm** hpp, ErlOffHeap* off_heap, Uint *bsz = hend - hbot; } else { #ifdef DEBUG + if (!eq(org_obj, res)) { + erts_exit(ERTS_ABORT_EXIT, + "Internal error in copy_struct() when copying %T:" + " not equal to copy %T\n", + org_obj, res); + } if (htop != hbot) erts_exit(ERTS_ABORT_EXIT, "Internal error in copy_struct() when copying %T:" @@ -1036,6 +1064,8 @@ Uint copy_shared_calculate(Eterm obj, erts_shcopy_t *info) Uint e; unsigned sz; Eterm* ptr; + Eterm *lit_purge_ptr = info->lit_purge_ptr; + Uint lit_purge_sz = info->lit_purge_sz; #ifdef DEBUG Eterm mypid = erts_get_current_pid(); #endif @@ -1081,7 +1111,7 @@ Uint copy_shared_calculate(Eterm obj, erts_shcopy_t *info) /* off heap list pointers are copied verbatim */ if (erts_is_literal(obj,ptr)) { VERBOSE(DEBUG_SHCOPY, ("[pid=%T] bypassed copying %p is %T\n", mypid, ptr, obj)); - if (IN_LITERAL_PURGE_AREA(info,ptr)) + if (in_literal_purge_area(ptr)) info->literal_size += size_object(obj); goto pop_next; } @@ -1132,7 +1162,7 @@ Uint copy_shared_calculate(Eterm obj, erts_shcopy_t *info) /* off heap pointers to boxes are copied verbatim */ if 
(erts_is_literal(obj,ptr)) { VERBOSE(DEBUG_SHCOPY, ("[pid=%T] bypassed copying %p is %T\n", mypid, ptr, obj)); - if (IN_LITERAL_PURGE_AREA(info,ptr)) + if (in_literal_purge_area(ptr)) info->literal_size += size_object(obj); goto pop_next; } @@ -1298,6 +1328,8 @@ Uint copy_shared_perform(Eterm obj, Uint size, erts_shcopy_t *info, Eterm* resp; Eterm *hbot, *hend; unsigned remaining; + Eterm *lit_purge_ptr = info->lit_purge_ptr; + Uint lit_purge_sz = info->lit_purge_sz; #ifdef DEBUG Eterm mypid = erts_get_current_pid(); Eterm saved_obj = obj; @@ -1347,11 +1379,11 @@ Uint copy_shared_perform(Eterm obj, Uint size, erts_shcopy_t *info, ptr = list_val(obj); /* off heap list pointers are copied verbatim */ if (erts_is_literal(obj,ptr)) { - if (!IN_LITERAL_PURGE_AREA(info,ptr)) { + if (!in_literal_purge_area(ptr)) { *resp = obj; } else { Uint bsz = 0; - *resp = copy_struct_x(obj, hbot - hp, &hp, off_heap, &bsz); + *resp = copy_struct_x(obj, hbot - hp, &hp, off_heap, &bsz, NULL, 0, 0); /* copy literal */ hbot -= bsz; } goto cleanup_next; @@ -1415,11 +1447,11 @@ Uint copy_shared_perform(Eterm obj, Uint size, erts_shcopy_t *info, ptr = boxed_val(obj); /* off heap pointers to boxes are copied verbatim */ if (erts_is_literal(obj,ptr)) { - if (!IN_LITERAL_PURGE_AREA(info,ptr)) { + if (!in_literal_purge_area(ptr)) { *resp = obj; } else { Uint bsz = 0; - *resp = copy_struct_x(obj, hbot - hp, &hp, off_heap, &bsz); + *resp = copy_struct_x(obj, hbot - hp, &hp, off_heap, &bsz, NULL, 0, 0); /* copy literal */ hbot -= bsz; } goto cleanup_next; @@ -1923,7 +1955,7 @@ move_one_frag(Eterm** hpp, ErlHeapFragment* frag, ErlOffHeap* off_heap, int lite if (is_header(val)) { struct erl_off_heap_header* hdr = (struct erl_off_heap_header*)hp; ASSERT(ptr + header_arity(val) < end); - MOVE_BOXED(ptr, val, hp, &dummy_ref); + MOVE_BOXED(ptr, val, hp, &dummy_ref); switch (val & _HEADER_SUBTAG_MASK) { case REFC_BINARY_SUBTAG: case FUN_SUBTAG: diff --git a/erts/emulator/beam/erl_gc.c b/erts/emulator/beam/erl_gc.c index a224383493..22a7d430c8 100644 --- a/erts/emulator/beam/erl_gc.c +++ b/erts/emulator/beam/erl_gc.c @@ -2155,26 +2155,18 @@ copy_one_frag(Eterm** hpp, ErlOffHeap* off_heap, *hp++ = val; break; case TAG_PRIMARY_LIST: -#ifdef SHCOPY_SEND if (erts_is_literal(val,list_val(val))) { *hp++ = val; } else { *hp++ = offset_ptr(val, offs); } -#else - *hp++ = offset_ptr(val, offs); -#endif break; case TAG_PRIMARY_BOXED: -#ifdef SHCOPY_SEND if (erts_is_literal(val,boxed_val(val))) { *hp++ = val; } else { *hp++ = offset_ptr(val, offs); } -#else - *hp++ = offset_ptr(val, offs); -#endif break; case TAG_PRIMARY_HEADER: *hp++ = val; diff --git a/erts/emulator/beam/erl_message.c b/erts/emulator/beam/erl_message.c index 91e06cde2d..14ee07a304 100644 --- a/erts/emulator/beam/erl_message.c +++ b/erts/emulator/beam/erl_message.c @@ -696,6 +696,9 @@ erts_send_message(Process* sender, erts_aint32_t receiver_state; #ifdef SHCOPY_SEND erts_shcopy_t info; +#else + Eterm *lit_purge_ptr = erts_clrange.ptr; + Uint lit_purge_sz = erts_clrange.sz; #endif #ifdef USE_VM_PROBES @@ -725,7 +728,7 @@ erts_send_message(Process* sender, */ if (have_seqtrace(stoken)) { seq_trace_update_send(sender); - seq_trace_output(stoken, message, SEQ_TRACE_SEND, + seq_trace_output(stoken, message, SEQ_TRACE_SEND, receiver->common.id, sender); seq_trace_size = 6; /* TUPLE5 */ } @@ -741,7 +744,7 @@ erts_send_message(Process* sender, INITIALIZE_SHCOPY(info); msize = copy_shared_calculate(message, &info); #else - msize = size_object(message); + msize = 
size_object_litopt(message, lit_purge_ptr, lit_purge_sz); #endif mp = erts_alloc_message_heap_state(receiver, &receiver_state, @@ -760,7 +763,8 @@ erts_send_message(Process* sender, DESTROY_SHCOPY(info); #else if (is_not_immed(message)) - message = copy_struct(message, msize, &hp, ohp); + message = copy_struct_litopt(message, msize, &hp, ohp, + lit_purge_ptr, lit_purge_sz); #endif if (is_immed(stoken)) token = stoken; @@ -796,7 +800,7 @@ erts_send_message(Process* sender, INITIALIZE_SHCOPY(info); msize = copy_shared_calculate(message, &info); #else - msize = size_object(message); + msize = size_object_litopt(message, lit_purge_ptr, lit_purge_sz); #endif mp = erts_alloc_message_heap_state(receiver, &receiver_state, @@ -810,7 +814,8 @@ erts_send_message(Process* sender, DESTROY_SHCOPY(info); #else if (is_not_immed(message)) - message = copy_struct(message, msize, &hp, ohp); + message = copy_struct_litopt(message, msize, &hp, ohp, + lit_purge_ptr, lit_purge_sz); #endif } #ifdef USE_VM_PROBES @@ -998,8 +1003,8 @@ erts_move_messages_off_heap(Process *c_p) hp = hfrag->mem; if (is_not_immed(ERL_MESSAGE_TERM(mp))) ERL_MESSAGE_TERM(mp) = copy_struct(ERL_MESSAGE_TERM(mp), - msg_sz, &hp, - &hfrag->off_heap); + msg_sz, &hp, + &hfrag->off_heap); if (is_not_immed(ERL_MESSAGE_TOKEN(mp))) ERL_MESSAGE_TOKEN(mp) = copy_struct(ERL_MESSAGE_TOKEN(mp), token_sz, &hp, diff --git a/erts/emulator/beam/erl_process.c b/erts/emulator/beam/erl_process.c index b5d8c5bc75..d6037e9f5d 100644 --- a/erts/emulator/beam/erl_process.c +++ b/erts/emulator/beam/erl_process.c @@ -11099,6 +11099,9 @@ erl_create_process(Process* parent, /* Parent of process (default group leader). #ifdef SHCOPY_SPAWN erts_shcopy_t info; INITIALIZE_SHCOPY(info); +#else + Eterm *lit_purge_ptr = erts_clrange.ptr; + Uint lit_purge_sz = erts_clrange.sz; #endif erts_smp_proc_lock(parent, ERTS_PROC_LOCKS_ALL_MINOR); @@ -11157,7 +11160,7 @@ erl_create_process(Process* parent, /* Parent of process (default group leader). #ifdef SHCOPY_SPAWN arg_size = copy_shared_calculate(args, &info); #else - arg_size = size_object(args); + arg_size = size_object_litopt(args, lit_purge_ptr, lit_purge_sz); #endif heap_need = arg_size; @@ -11181,7 +11184,7 @@ erl_create_process(Process* parent, /* Parent of process (default group leader). } p->schedule_count = 0; ASSERT(p->min_heap_size == erts_next_heap_size(p->min_heap_size, 0)); - + p->u.initial[INITIAL_MOD] = mod; p->u.initial[INITIAL_FUN] = func; p->u.initial[INITIAL_ARI] = (Uint) arity; @@ -11239,7 +11242,8 @@ erl_create_process(Process* parent, /* Parent of process (default group leader). 
p->arg_reg[2] = copy_shared_perform(args, arg_size, &info, &p->htop, &p->off_heap); DESTROY_SHCOPY(info); #else - p->arg_reg[2] = copy_struct(args, arg_size, &p->htop, &p->off_heap); + p->arg_reg[2] = copy_struct_litopt(args, arg_size, &p->htop, &p->off_heap, + lit_purge_ptr, lit_purge_sz); #endif p->arity = 3; diff --git a/erts/emulator/beam/global.h b/erts/emulator/beam/global.h index bf2e50e52f..67ce0d0723 100644 --- a/erts/emulator/beam/global.h +++ b/erts/emulator/beam/global.h @@ -1085,8 +1085,8 @@ typedef struct { Eterm* shtable_start; ErtsAlcType_t shtable_alloc_type; Uint literal_size; - Eterm *range_ptr; - Uint range_sz; + Eterm *lit_purge_ptr; + Uint lit_purge_sz; } erts_shcopy_t; #define INITIALIZE_SHCOPY(info) \ do { \ info.bitstore_start = info.bitstore_default; \ info.shtable_start = info.shtable_default; \ info.literal_size = 0; \ - info.range_ptr = erts_clrange.ptr; \ - info.range_sz = erts_clrange.sz; \ + info.lit_purge_ptr = erts_clrange.ptr; \ + info.lit_purge_sz = erts_clrange.sz; \ } while(0) #define DESTROY_SHCOPY(info) \ do { \ @@ -1116,15 +1116,21 @@ do { \ Eterm copy_object_x(Eterm, Process*, Uint); #define copy_object(Term, Proc) copy_object_x(Term,Proc,0) -Uint size_object(Eterm); +Uint size_object_x(Eterm,Eterm*,Uint,Uint); +#define size_object(Term) size_object_x(Term,NULL,0,0) +#define size_object_litopt(Term,LitPtr,LitSz) size_object_x(Term,LitPtr,LitSz,1) + Uint copy_shared_calculate(Eterm, erts_shcopy_t*); Eterm copy_shared_perform(Eterm, Uint, erts_shcopy_t*, Eterm**, ErlOffHeap*); Uint size_shared(Eterm); -Eterm copy_struct_x(Eterm, Uint, Eterm**, ErlOffHeap*, Uint* bsz); +Eterm copy_struct_x(Eterm, Uint, Eterm**, ErlOffHeap*, Uint* bsz, Eterm *lit_ptr, Uint lit_sz, Uint litopt); #define copy_struct(Obj,Sz,HPP,OH) \ - copy_struct_x(Obj,Sz,HPP,OH,NULL) + copy_struct_x(Obj,Sz,HPP,OH,NULL,NULL,0,0) +#define copy_struct_litopt(Obj,Sz,HPP,OH,LitPtr,LitSz) \ + copy_struct_x(Obj,Sz,HPP,OH,NULL,LitPtr,LitSz,1) + Eterm copy_shallow(Eterm*, Uint, Eterm**, ErlOffHeap*); void erts_move_multi_frags(Eterm** hpp, ErlOffHeap*, ErlHeapFragment* first, -- cgit v1.2.3 From a9368b3c1ee78a3c83fcff83590a3d94ea4ddbe0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bj=C3=B6rn-Egil=20Dahlberg?= Date: Wed, 27 Jul 2016 17:00:32 +0200 Subject: erts: Fix literal size calculation in check_process_code We want to know the total size of literals of all heap fragments. Previously lit_sz was overwritten on every loop iteration, so only the size of the last fragment was counted.
--- erts/emulator/beam/beam_bif_load.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'erts/emulator/beam') diff --git a/erts/emulator/beam/beam_bif_load.c b/erts/emulator/beam/beam_bif_load.c index 15e878ba65..2a165bed00 100644 --- a/erts/emulator/beam/beam_bif_load.c +++ b/erts/emulator/beam/beam_bif_load.c @@ -873,12 +873,12 @@ check_process_code(Process* rp, Module* modp, Uint flags, int *redsp, int fcalls continue; { ErlHeapFragment *hf; - Uint lit_sz; + Uint lit_sz = 0; for (hf=hfrag; hf; hf = hf->next) { if (check_mod_funs(rp, &hfrag->off_heap, mod_start, mod_size)) return am_true; - lit_sz = hfrag_literal_size(&hf->mem[0], &hf->mem[hf->used_size], - literals, lit_bsize); + lit_sz += hfrag_literal_size(&hf->mem[0], &hf->mem[hf->used_size], + literals, lit_bsize); } if (lit_sz > 0) { ErlHeapFragment *bp = new_message_buffer(lit_sz); -- cgit v1.2.3 From 13ec43880b8565a5ddd7da590c36a520d4ff4b26 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bj=C3=B6rn-Egil=20Dahlberg?= Date: Fri, 29 Jul 2016 12:19:11 +0200 Subject: erts: Don't copy literals in enif_send --- erts/emulator/beam/erl_nif.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) (limited to 'erts/emulator/beam') diff --git a/erts/emulator/beam/erl_nif.c b/erts/emulator/beam/erl_nif.c index c6127a4967..e1944fff29 100644 --- a/erts/emulator/beam/erl_nif.c +++ b/erts/emulator/beam/erl_nif.c @@ -627,7 +627,9 @@ int enif_send(ErlNifEnv* env, const ErlNifPid* to_pid, MBUF(&menv->phony_proc) = NULL; } } else { - Uint sz = size_object(msg); + Eterm *lit_purge_ptr = erts_clrange.ptr; + Uint lit_purge_sz = erts_clrange.sz; + Uint sz = size_object_litopt(msg, lit_purge_ptr, lit_purge_sz); ErlOffHeap *ohp; Eterm *hp; if (env && !env->tracee) { @@ -649,7 +651,8 @@ int enif_send(ErlNifEnv* env, const ErlNifPid* to_pid, ohp = &bp->off_heap; } } - msg = copy_struct(msg, sz, &hp, ohp); + msg = copy_struct_litopt(msg, sz, &hp, ohp, + lit_purge_ptr, lit_purge_sz); } ERL_MESSAGE_TERM(mp) = msg; -- cgit v1.2.3 From dbea5c4023ec6ac59bb1988711532682a032536d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bj=C3=B6rn-Egil=20Dahlberg?= Date: Fri, 29 Jul 2016 15:59:16 +0200 Subject: erts: No need to literal opt message token --- erts/emulator/beam/erl_gc.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'erts/emulator/beam') diff --git a/erts/emulator/beam/erl_gc.c b/erts/emulator/beam/erl_gc.c index 22a7d430c8..cddcc847a3 100644 --- a/erts/emulator/beam/erl_gc.c +++ b/erts/emulator/beam/erl_gc.c @@ -2252,10 +2252,12 @@ move_msgq_to_heap(Process *p) ASSERT(mp->data.dist_ext->heap_size >= 0); if (is_not_nil(ERL_MESSAGE_TOKEN(mp))) { bp = erts_dist_ext_trailer(mp->data.dist_ext); + /* Tokens does not use literal optimization */ ERL_MESSAGE_TOKEN(mp) = copy_struct(ERL_MESSAGE_TOKEN(mp), bp->used_size, - &factory.hp, - factory.off_heap); + &factory.hp, + factory.off_heap); + erts_cleanup_offheap(&bp->off_heap); } ERL_MESSAGE_TERM(mp) = erts_decode_dist_ext(&factory, -- cgit v1.2.3 From e52fa24451306080be39a35444cf52210982b41a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bj=C3=B6rn-Egil=20Dahlberg?= Date: Fri, 29 Jul 2016 17:16:53 +0200 Subject: erts: Refactor literal purge area arguments --- erts/emulator/beam/copy.c | 14 ++++++++------ erts/emulator/beam/erl_message.c | 14 ++++++-------- erts/emulator/beam/erl_nif.c | 10 +++++----- erts/emulator/beam/erl_process.c | 9 ++++----- erts/emulator/beam/global.h | 25 ++++++++++++++++++------- 5 files changed, 41 insertions(+), 31 deletions(-) (limited to 'erts/emulator/beam') 
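In short, this refactor folds the loose (lit_purge_ptr, lit_purge_sz, litopt) argument triple into a single erts_literal_area_t descriptor. A condensed sketch of the resulting caller pattern, assembled from the erl_message.c and erl_nif.c hunks below (an illustrative fragment only, not an additional change; msg, sz, hp and ohp come from the surrounding send code):

    erts_literal_area_t litarea;
    INITIALIZE_LITERAL_PURGE_AREA(litarea);  /* snapshot of erts_clrange.ptr/sz */

    /* Literals outside the purge area are neither counted nor copied;
     * the copy keeps references to them instead. */
    sz  = size_object_litopt(msg, &litarea);
    msg = copy_struct_litopt(msg, sz, &hp, ohp, &litarea);

Calling through the plain size_object()/copy_struct() macros passes a NULL literal area and keeps the old behaviour of copying literals along with everything else.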
diff --git a/erts/emulator/beam/copy.c b/erts/emulator/beam/copy.c index 7d3777f37e..c4dcd6a3cc 100644 --- a/erts/emulator/beam/copy.c +++ b/erts/emulator/beam/copy.c @@ -76,16 +76,17 @@ Eterm copy_object_x(Eterm obj, Process* to, Uint extra) (lit_purge_ptr <= (PTR) && \ (PTR) < (lit_purge_ptr + lit_purge_sz)))) -Uint size_object_x(Eterm obj, Eterm *lit_purge_ptr, Uint lit_purge_sz, Uint litopt) +Uint size_object_x(Eterm obj, erts_literal_area_t *litopt) { Uint sum = 0; Eterm* ptr; int arity; + Eterm *lit_purge_ptr = litopt ? litopt->lit_purge_ptr : NULL; + Uint lit_purge_sz = litopt ? litopt->lit_purge_sz : 0; #ifdef DEBUG Eterm mypid = erts_get_current_pid(); #endif DECLARE_ESTACK(s); - VERBOSE(DEBUG_SHCOPY, ("[pid=%T] size_object %p\n", mypid, obj)); for (;;) { @@ -608,8 +609,7 @@ cleanup: /* * Copy a structure to a heap. */ -Eterm copy_struct_x(Eterm obj, Uint sz, Eterm** hpp, ErlOffHeap* off_heap, Uint *bsz, - Eterm *lit_purge_ptr, Uint lit_purge_sz, Uint litopt) +Eterm copy_struct_x(Eterm obj, Uint sz, Eterm** hpp, ErlOffHeap* off_heap, Uint *bsz, erts_literal_area_t *litopt) { char* hstart; Uint hsize; @@ -626,6 +626,8 @@ Eterm copy_struct_x(Eterm obj, Uint sz, Eterm** hpp, ErlOffHeap* off_heap, Uint Eterm hdr; Eterm *hend; int i; + Eterm *lit_purge_ptr = litopt ? litopt->lit_purge_ptr : NULL; + Uint lit_purge_sz = litopt ? litopt->lit_purge_sz : 0; #ifdef DEBUG Eterm org_obj = obj; Uint org_sz = sz; @@ -1383,7 +1385,7 @@ Uint copy_shared_perform(Eterm obj, Uint size, erts_shcopy_t *info, *resp = obj; } else { Uint bsz = 0; - *resp = copy_struct_x(obj, hbot - hp, &hp, off_heap, &bsz, NULL, 0, 0); /* copy literal */ + *resp = copy_struct_x(obj, hbot - hp, &hp, off_heap, &bsz, NULL); /* copy literal */ hbot -= bsz; } goto cleanup_next; @@ -1451,7 +1453,7 @@ Uint copy_shared_perform(Eterm obj, Uint size, erts_shcopy_t *info, *resp = obj; } else { Uint bsz = 0; - *resp = copy_struct_x(obj, hbot - hp, &hp, off_heap, &bsz, NULL, 0, 0); /* copy literal */ + *resp = copy_struct_x(obj, hbot - hp, &hp, off_heap, &bsz, NULL); /* copy literal */ hbot -= bsz; } goto cleanup_next; diff --git a/erts/emulator/beam/erl_message.c b/erts/emulator/beam/erl_message.c index 14ee07a304..e9f0586edd 100644 --- a/erts/emulator/beam/erl_message.c +++ b/erts/emulator/beam/erl_message.c @@ -697,8 +697,8 @@ erts_send_message(Process* sender, #ifdef SHCOPY_SEND erts_shcopy_t info; #else - Eterm *lit_purge_ptr = erts_clrange.ptr; - Uint lit_purge_sz = erts_clrange.sz; + erts_literal_area_t litarea; + INITIALIZE_LITERAL_PURGE_AREA(litarea); #endif #ifdef USE_VM_PROBES @@ -744,7 +744,7 @@ erts_send_message(Process* sender, INITIALIZE_SHCOPY(info); msize = copy_shared_calculate(message, &info); #else - msize = size_object_litopt(message, lit_purge_ptr, lit_purge_sz); + msize = size_object_litopt(message, &litarea); #endif mp = erts_alloc_message_heap_state(receiver, &receiver_state, @@ -763,8 +763,7 @@ erts_send_message(Process* sender, DESTROY_SHCOPY(info); #else if (is_not_immed(message)) - message = copy_struct_litopt(message, msize, &hp, ohp, - lit_purge_ptr, lit_purge_sz); + message = copy_struct_litopt(message, msize, &hp, ohp, &litarea); #endif if (is_immed(stoken)) token = stoken; @@ -800,7 +799,7 @@ erts_send_message(Process* sender, INITIALIZE_SHCOPY(info); msize = copy_shared_calculate(message, &info); #else - msize = size_object_litopt(message, lit_purge_ptr, lit_purge_sz); + msize = size_object_litopt(message, &litarea); #endif mp = erts_alloc_message_heap_state(receiver, &receiver_state, @@ -814,8 
+813,7 @@ erts_send_message(Process* sender, DESTROY_SHCOPY(info); #else if (is_not_immed(message)) - message = copy_struct_litopt(message, msize, &hp, ohp, - lit_purge_ptr, lit_purge_sz); + message = copy_struct_litopt(message, msize, &hp, ohp, &litarea); #endif } #ifdef USE_VM_PROBES diff --git a/erts/emulator/beam/erl_nif.c b/erts/emulator/beam/erl_nif.c index e1944fff29..6bd27d11f4 100644 --- a/erts/emulator/beam/erl_nif.c +++ b/erts/emulator/beam/erl_nif.c @@ -627,11 +627,12 @@ int enif_send(ErlNifEnv* env, const ErlNifPid* to_pid, MBUF(&menv->phony_proc) = NULL; } } else { - Eterm *lit_purge_ptr = erts_clrange.ptr; - Uint lit_purge_sz = erts_clrange.sz; - Uint sz = size_object_litopt(msg, lit_purge_ptr, lit_purge_sz); + erts_literal_area_t litarea; ErlOffHeap *ohp; Eterm *hp; + Uint sz; + INITIALIZE_LITERAL_PURGE_AREA(litarea); + sz = size_object_litopt(msg, &litarea); if (env && !env->tracee) { flush_env(env); mp = erts_alloc_message_heap(rp, &rp_locks, sz, &hp, &ohp); @@ -651,8 +652,7 @@ int enif_send(ErlNifEnv* env, const ErlNifPid* to_pid, ohp = &bp->off_heap; } } - msg = copy_struct_litopt(msg, sz, &hp, ohp, - lit_purge_ptr, lit_purge_sz); + msg = copy_struct_litopt(msg, sz, &hp, ohp, &litarea); } ERL_MESSAGE_TERM(mp) = msg; diff --git a/erts/emulator/beam/erl_process.c b/erts/emulator/beam/erl_process.c index d6037e9f5d..85c37e08be 100644 --- a/erts/emulator/beam/erl_process.c +++ b/erts/emulator/beam/erl_process.c @@ -11100,8 +11100,8 @@ erl_create_process(Process* parent, /* Parent of process (default group leader). erts_shcopy_t info; INITIALIZE_SHCOPY(info); #else - Eterm *lit_purge_ptr = erts_clrange.ptr; - Uint lit_purge_sz = erts_clrange.sz; + erts_literal_area_t litarea; + INITIALIZE_LITERAL_PURGE_AREA(litarea); #endif erts_smp_proc_lock(parent, ERTS_PROC_LOCKS_ALL_MINOR); @@ -11160,7 +11160,7 @@ erl_create_process(Process* parent, /* Parent of process (default group leader). #ifdef SHCOPY_SPAWN arg_size = copy_shared_calculate(args, &info); #else - arg_size = size_object_litopt(args, lit_purge_ptr, lit_purge_sz); + arg_size = size_object_litopt(args, &litarea); #endif heap_need = arg_size; @@ -11242,8 +11242,7 @@ erl_create_process(Process* parent, /* Parent of process (default group leader). 
p->arg_reg[2] = copy_shared_perform(args, arg_size, &info, &p->htop, &p->off_heap); DESTROY_SHCOPY(info); #else - p->arg_reg[2] = copy_struct_litopt(args, arg_size, &p->htop, &p->off_heap, - lit_purge_ptr, lit_purge_sz); + p->arg_reg[2] = copy_struct_litopt(args, arg_size, &p->htop, &p->off_heap, &litarea); #endif p->arity = 3; diff --git a/erts/emulator/beam/global.h b/erts/emulator/beam/global.h index 67ce0d0723..1423973739 100644 --- a/erts/emulator/beam/global.h +++ b/erts/emulator/beam/global.h @@ -1113,23 +1113,34 @@ do { \ } while(0) /* copy.c */ +typedef struct { + Eterm *lit_purge_ptr; + Uint lit_purge_sz; +} erts_literal_area_t; + +#define INITIALIZE_LITERAL_PURGE_AREA(Area) \ + do { \ + (Area).lit_purge_ptr = erts_clrange.ptr; \ + (Area).lit_purge_sz = erts_clrange.sz; \ + } while(0) + Eterm copy_object_x(Eterm, Process*, Uint); #define copy_object(Term, Proc) copy_object_x(Term,Proc,0) -Uint size_object_x(Eterm,Eterm*,Uint,Uint); -#define size_object(Term) size_object_x(Term,NULL,0,0) -#define size_object_litopt(Term,LitPtr,LitSz) size_object_x(Term,LitPtr,LitSz,1) +Uint size_object_x(Eterm, erts_literal_area_t*); +#define size_object(Term) size_object_x(Term,NULL) +#define size_object_litopt(Term,LitArea) size_object_x(Term,LitArea) Uint copy_shared_calculate(Eterm, erts_shcopy_t*); Eterm copy_shared_perform(Eterm, Uint, erts_shcopy_t*, Eterm**, ErlOffHeap*); Uint size_shared(Eterm); -Eterm copy_struct_x(Eterm, Uint, Eterm**, ErlOffHeap*, Uint* bsz, Eterm *lit_ptr, Uint lit_sz, Uint litopt); +Eterm copy_struct_x(Eterm, Uint, Eterm**, ErlOffHeap*, Uint*, erts_literal_area_t*); #define copy_struct(Obj,Sz,HPP,OH) \ - copy_struct_x(Obj,Sz,HPP,OH,NULL,NULL,0,0) -#define copy_struct_litopt(Obj,Sz,HPP,OH,LitPtr,LitSz) \ - copy_struct_x(Obj,Sz,HPP,OH,NULL,LitPtr,LitSz,1) + copy_struct_x(Obj,Sz,HPP,OH,NULL,NULL) +#define copy_struct_litopt(Obj,Sz,HPP,OH,LitArea) \ + copy_struct_x(Obj,Sz,HPP,OH,NULL,LitArea) Eterm copy_shallow(Eterm*, Uint, Eterm**, ErlOffHeap*); -- cgit v1.2.3 From 58d8a3ac4d491c8cd962ac0839a56cd1a0e339f9 Mon Sep 17 00:00:00 2001 From: Dmytro Lytovchenko Date: Mon, 13 Jun 2016 12:05:55 +0200 Subject: Option to erlang:garbage_collect to request minor (generational) GC Note: Minor GC option is a hint, and GC may still decide to run fullsweep. 
Test case for major and minor gc on self Test case for major and minor gs on some other process + async gc test check docs fix --- erts/emulator/beam/atom.names | 2 ++ erts/emulator/beam/bif.c | 19 ++++++++++++++++++- erts/emulator/beam/bif.tab | 1 + erts/emulator/beam/erl_process.c | 23 ++++++++++++++++------- erts/emulator/beam/ops.tab | 4 ++++ 5 files changed, 41 insertions(+), 8 deletions(-) (limited to 'erts/emulator/beam') diff --git a/erts/emulator/beam/atom.names b/erts/emulator/beam/atom.names index badd69856e..f88aeba49f 100644 --- a/erts/emulator/beam/atom.names +++ b/erts/emulator/beam/atom.names @@ -361,6 +361,7 @@ atom long_schedule atom low atom Lt='<' atom machine +atom major atom match atom match_limit atom match_limit_recursion @@ -388,6 +389,7 @@ atom microstate_accounting atom milli_seconds atom min_heap_size atom min_bin_vheap_size +atom minor atom minor_version atom Minus='-' atom module diff --git a/erts/emulator/beam/bif.c b/erts/emulator/beam/bif.c index d9048065c8..84eab5f651 100644 --- a/erts/emulator/beam/bif.c +++ b/erts/emulator/beam/bif.c @@ -3869,7 +3869,24 @@ BIF_RETTYPE garbage_collect_0(BIF_ALIST_0) { FLAGS(BIF_P) |= F_NEED_FULLSWEEP; erts_garbage_collect(BIF_P, 0, NULL, 0); - BIF_RET(am_true); + return am_true; +} + +/* + * Pass atom 'minor' for relaxed generational GC run. This is only + * recommendation, major run may still be chosen by VM. + * Pass atom 'major' for default behaviour - major GC run (fullsweep) + */ +BIF_RETTYPE +erts_internal_garbage_collect_1(BIF_ALIST_1) +{ + switch (BIF_ARG_1) { + case am_minor: break; + case am_major: FLAGS(BIF_P) |= F_NEED_FULLSWEEP; break; + default: BIF_ERROR(BIF_P, BADARG); + } + erts_garbage_collect(BIF_P, 0, NULL, 0); + return am_true; } /**********************************************************************/ diff --git a/erts/emulator/beam/bif.tab b/erts/emulator/beam/bif.tab index 978bcd4bba..5bb7db5d47 100644 --- a/erts/emulator/beam/bif.tab +++ b/erts/emulator/beam/bif.tab @@ -69,6 +69,7 @@ bif erlang:float_to_list/1 bif erlang:float_to_list/2 bif erlang:fun_info/2 bif erlang:garbage_collect/0 +bif erts_internal:garbage_collect/1 bif erlang:get/0 bif erlang:get/1 bif erlang:get_keys/1 diff --git a/erts/emulator/beam/erl_process.c b/erts/emulator/beam/erl_process.c index 85c37e08be..a0c5e247d4 100644 --- a/erts/emulator/beam/erl_process.c +++ b/erts/emulator/beam/erl_process.c @@ -446,7 +446,8 @@ int erts_system_profile_ts_type = ERTS_TRACE_FLG_NOW_TIMESTAMP; #endif typedef enum { - ERTS_PSTT_GC, /* Garbage Collect */ + ERTS_PSTT_GC_MAJOR, /* Garbage Collect: Fullsweep */ + ERTS_PSTT_GC_MINOR, /* Garbage Collect: Generational */ ERTS_PSTT_CPC, /* Check Process Code */ ERTS_PSTT_COHMQ, /* Change off heap message queue */ ERTS_PSTT_FTMQ /* Flush trace msg queue */ @@ -10358,7 +10359,8 @@ execute_sys_tasks(Process *c_p, erts_aint32_t *statep, int in_reds) break; switch (st->type) { - case ERTS_PSTT_GC: + case ERTS_PSTT_GC_MAJOR: + case ERTS_PSTT_GC_MINOR: if (c_p->flags & F_DISABLE_GC) { save_gc_task(c_p, st, st_prio); st = NULL; @@ -10366,7 +10368,9 @@ execute_sys_tasks(Process *c_p, erts_aint32_t *statep, int in_reds) } else { if (!garbage_collected) { - FLAGS(c_p) |= F_NEED_FULLSWEEP; + if (st->type == ERTS_PSTT_GC_MAJOR) { + FLAGS(c_p) |= F_NEED_FULLSWEEP; + } reds -= scheduler_gc_proc(c_p, reds); garbage_collected = 1; } @@ -10442,7 +10446,8 @@ cleanup_sys_tasks(Process *c_p, erts_aint32_t in_state, int in_reds) break; switch (st->type) { - case ERTS_PSTT_GC: + case ERTS_PSTT_GC_MAJOR: + case 
ERTS_PSTT_GC_MINOR: st_res = am_false; break; case ERTS_PSTT_CPC: @@ -10555,9 +10560,13 @@ erts_internal_request_system_task_3(BIF_ALIST_3) switch (req_type) { case am_garbage_collect: - st->type = ERTS_PSTT_GC; - noproc_res = am_false; - if (!rp) + switch (st->arg[0]) { + case am_minor: st->type = ERTS_PSTT_GC_MINOR; break; + case am_major: st->type = ERTS_PSTT_GC_MAJOR; break; + default: goto badarg; + } + noproc_res = am_false; + if (!rp) goto noproc; break; diff --git a/erts/emulator/beam/ops.tab b/erts/emulator/beam/ops.tab index 879daaca0a..801d43e957 100644 --- a/erts/emulator/beam/ops.tab +++ b/erts/emulator/beam/ops.tab @@ -811,6 +811,10 @@ call_ext u==0 Bif=u$bif:erlang:garbage_collect/0 => i_call_ext Bif call_ext_last u==0 Bif=u$bif:erlang:garbage_collect/0 D => i_call_ext_last Bif D call_ext_only u==0 Bif=u$bif:erlang:garbage_collect/0 => i_call_ext_only Bif +call_ext u==1 Bif=u$bif:erts_internal:garbage_collect/1 => i_call_ext Bif +call_ext_last u==1 Bif=u$bif:erts_internal:garbage_collect/1 D => i_call_ext_last Bif D +call_ext_only u==1 Bif=u$bif:erts_internal:garbage_collect/1 => i_call_ext_only Bif + # # put/2 and erase/1 must be able to do garbage collection, so we must call # them like functions. -- cgit v1.2.3 From 6cba7d35d3322db8acc25c45889c2b03f1b2f4c2 Mon Sep 17 00:00:00 2001 From: Sverker Eriksson Date: Fri, 19 Aug 2016 20:16:20 +0200 Subject: erts: Add ErtsContainerStruct_ for array members that otherwise may produce warning from compilers that think T* and T[] are incompatible types (?). --- erts/emulator/beam/sys.h | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'erts/emulator/beam') diff --git a/erts/emulator/beam/sys.h b/erts/emulator/beam/sys.h index dfe82cab44..2d0628f70e 100644 --- a/erts/emulator/beam/sys.h +++ b/erts/emulator/beam/sys.h @@ -99,6 +99,10 @@ #define ErtsContainerStruct(ptr, type, member) \ ((type *)((char *)(1 ? (ptr) : &((type *)0)->member) - offsetof(type, member))) +/* Use this variant when the member is an array */ +#define ErtsContainerStruct_(ptr, type, memberv) \ + ((type *)((char *)(1 ? 
(ptr) : ((type *)0)->memberv) - offsetof(type, memberv))) + #if defined (__WIN32__) # include "erl_win_sys.h" #else -- cgit v1.2.3 From 1b4a59c405e6bd3532921d5c534e2264bb05b2eb Mon Sep 17 00:00:00 2001 From: Rickard Green Date: Mon, 29 Aug 2016 19:21:53 +0200 Subject: Remove old purge strategy --- erts/emulator/beam/beam_bif_load.c | 324 ++---------------------------------- erts/emulator/beam/bif.tab | 2 +- erts/emulator/beam/erl_bif_info.c | 21 --- erts/emulator/beam/erl_init.c | 2 - erts/emulator/beam/erl_lock_check.c | 2 - erts/emulator/beam/erl_process.c | 11 -- erts/emulator/beam/global.h | 8 +- erts/emulator/beam/ops.tab | 8 +- 8 files changed, 16 insertions(+), 362 deletions(-) (limited to 'erts/emulator/beam') diff --git a/erts/emulator/beam/beam_bif_load.c b/erts/emulator/beam/beam_bif_load.c index 5c28fb1a98..10759c1f57 100644 --- a/erts/emulator/beam/beam_bif_load.c +++ b/erts/emulator/beam/beam_bif_load.c @@ -61,7 +61,7 @@ ErtsLiteralArea *erts_copy_literal_area = NULL; #ifdef ERTS_DIRTY_SCHEDULERS Process *erts_dirty_process_code_checker; #endif -#ifdef ERTS_NEW_PURGE_STRATEGY + Process *erts_literal_area_collector = NULL; typedef struct ErtsLiteralAreaRef_ ErtsLiteralAreaRef; @@ -75,10 +75,9 @@ struct { ErtsLiteralAreaRef *first; ErtsLiteralAreaRef *last; } release_literal_areas; -#endif static void set_default_trace_pattern(Eterm module); -static Eterm check_process_code(Process* rp, Module* modp, Uint flags, int *redsp, int fcalls); +static Eterm check_process_code(Process* rp, Module* modp, int *redsp, int fcalls); static void delete_code(Module* modp); static int any_heap_ref_ptrs(Eterm* start, Eterm* end, char* mod_start, Uint mod_size); static int any_heap_refs(Eterm* start, Eterm* end, char* mod_start, Uint mod_size); @@ -107,11 +106,9 @@ init_purge_state(void) void erts_beam_bif_load_init(void) { -#ifdef ERTS_NEW_PURGE_STRATEGY erts_smp_mtx_init(&release_literal_areas.mtx, "release_literal_areas"); release_literal_areas.first = NULL; release_literal_areas.last = NULL; -#endif init_purge_state(); } @@ -539,7 +536,7 @@ check_old_code_1(BIF_ALIST_1) } Eterm -erts_check_process_code(Process *c_p, Eterm module, Uint flags, int *redsp, int fcalls) +erts_check_process_code(Process *c_p, Eterm module, int *redsp, int fcalls) { Module* modp; Eterm res; @@ -554,31 +551,23 @@ erts_check_process_code(Process *c_p, Eterm module, Uint flags, int *redsp, int if (!modp) return am_false; erts_rlock_old_code(code_ix); - res = (!modp->old.code_hdr ? am_false : - check_process_code(c_p, modp, flags, redsp, fcalls)); + res = (!modp->old.code_hdr + ? 
am_false + : check_process_code(c_p, modp, redsp, fcalls)); erts_runlock_old_code(code_ix); return res; } -BIF_RETTYPE erts_internal_check_process_code_2(BIF_ALIST_2) +BIF_RETTYPE erts_internal_check_process_code_1(BIF_ALIST_1) { int reds = 0; - Uint flags; Eterm res; if (is_not_atom(BIF_ARG_1)) goto badarg; - if (is_not_small(BIF_ARG_2)) - goto badarg; - - flags = unsigned_val(BIF_ARG_2); - if (flags & ~ERTS_CPC_ALL) { - goto badarg; - } - - res = erts_check_process_code(BIF_P, BIF_ARG_1, flags, &reds, BIF_P->fcalls); + res = erts_check_process_code(BIF_P, BIF_ARG_1, &reds, BIF_P->fcalls); ASSERT(is_value(res)); @@ -614,7 +603,7 @@ BIF_RETTYPE erts_internal_check_dirty_process_code_2(BIF_ALIST_2) if (!rp) BIF_RET(am_false); - res = erts_check_process_code(rp, BIF_ARG_2, 0, &reds, BIF_P->fcalls); + res = erts_check_process_code(rp, BIF_ARG_2, &reds, BIF_P->fcalls); if (BIF_P != rp) erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_MAIN); @@ -857,32 +846,12 @@ set_default_trace_pattern(Eterm module) } } -#ifndef ERTS_NEW_PURGE_STRATEGY - -static ERTS_INLINE int -check_mod_funs(Process *p, ErlOffHeap *off_heap, char *area, size_t area_size) -{ - struct erl_off_heap_header* oh; - for (oh = off_heap->first; oh; oh = oh->next) { - if (thing_subtag(oh->thing_word) == FUN_SUBTAG) { - ErlFunThing* funp = (ErlFunThing*) oh; - if (ErtsInArea(funp->fe->address, area, area_size)) - return !0; - } - } - return 0; -} - -#endif - static Uint hfrag_literal_size(Eterm* start, Eterm* end, char* lit_start, Uint lit_size); static void hfrag_literal_copy(Eterm **hpp, ErlOffHeap *ohp, Eterm *start, Eterm *end, char *lit_start, Uint lit_size); -#ifdef ERTS_NEW_PURGE_STRATEGY - Eterm erts_proc_copy_literal_area(Process *c_p, int *redsp, int fcalls, int gc_allowed) { @@ -1044,7 +1013,7 @@ literal_gc: } static Eterm -check_process_code(Process* rp, Module* modp, Uint flags, int *redsp, int fcalls) +check_process_code(Process* rp, Module* modp, int *redsp, int fcalls) { BeamInstr* start; char* mod_start; @@ -1109,253 +1078,6 @@ check_process_code(Process* rp, Module* modp, Uint flags, int *redsp, int fcalls return am_false; } -#else /* !ERTS_NEW_PURGE_STRATEGY, i.e, old style purge... */ - -static Eterm -check_process_code(Process* rp, Module* modp, Uint flags, int *redsp, int fcalls) -{ - BeamInstr* start; - char* literals; - Uint lit_bsize; - char* mod_start; - Uint mod_size; - Eterm* sp; - int done_gc = 0; - int need_gc = 0; - ErtsMessage *msgp; - ErlHeapFragment *hfrag; - -#define ERTS_ORDINARY_GC__ (1 << 0) -#define ERTS_LITERAL_GC__ (1 << 1) - - /* - * Pick up limits for the module. - */ - start = (BeamInstr*) modp->old.code_hdr; - mod_start = (char *) start; - mod_size = modp->old.code_length; - - /* - * Check if current instruction or continuation pointer points into module. - */ - if (ErtsInArea(rp->i, mod_start, mod_size) - || ErtsInArea(rp->cp, mod_start, mod_size)) { - return am_true; - } - - /* - * Check all continuation pointers stored on the stack. - */ - for (sp = rp->stop; sp < STACK_START(rp); sp++) { - if (is_CP(*sp) && ErtsInArea(cp_val(*sp), mod_start, mod_size)) { - return am_true; - } - } - - /* - * Check all continuation pointers stored in stackdump - * and clear exception stackdump if there is a pointer - * to the module. 
- */ - if (rp->ftrace != NIL) { - struct StackTrace *s; - ASSERT(is_list(rp->ftrace)); - s = (struct StackTrace *) big_val(CDR(list_val(rp->ftrace))); - if ((s->pc && ErtsInArea(s->pc, mod_start, mod_size)) || - (s->current && ErtsInArea(s->current, mod_start, mod_size))) { - rp->freason = EXC_NULL; - rp->fvalue = NIL; - rp->ftrace = NIL; - } else { - int i; - for (i = 0; i < s->depth; i++) { - if (ErtsInArea(s->trace[i], mod_start, mod_size)) { - rp->freason = EXC_NULL; - rp->fvalue = NIL; - rp->ftrace = NIL; - break; - } - } - } - } - - if (rp->flags & F_DISABLE_GC) { - /* - * Cannot proceed. Process has disabled gc in order to - * safely leave inconsistent data on the heap and/or - * off heap lists. Need to wait for gc to be enabled - * again. - */ - return THE_NON_VALUE; - } - - /* - * Message queue can contains funs, and may contain - * literals. If we got references to this module from the message - * queue. - * - * If a literal is in the message queue we maka an explicit copy of - * and attach it to the heap fragment. Each message needs to be - * self contained, we cannot save the literal in the old_heap or - * any other heap than the message it self. - */ - - erts_smp_proc_lock(rp, ERTS_PROC_LOCK_MSGQ); - ERTS_SMP_MSGQ_MV_INQ2PRIVQ(rp); - erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_MSGQ); - - if (modp->old.code_hdr->literal_area) { - literals = (char*) modp->old.code_hdr->literal_area->start; - lit_bsize = (char*) modp->old.code_hdr->literal_area->end - literals; - } - else { - literals = NULL; - lit_bsize = 0; - } - - for (msgp = rp->msg.first; msgp; msgp = msgp->next) { - if (msgp->data.attached == ERTS_MSG_COMBINED_HFRAG) - hfrag = &msgp->hfrag; - else if (is_value(ERL_MESSAGE_TERM(msgp)) && msgp->data.heap_frag) - hfrag = msgp->data.heap_frag; - else - continue; - { - ErlHeapFragment *hf; - Uint lit_sz = 0; - for (hf=hfrag; hf; hf = hf->next) { - if (check_mod_funs(rp, &hfrag->off_heap, mod_start, mod_size)) - return am_true; - lit_sz += hfrag_literal_size(&hf->mem[0], &hf->mem[hf->used_size], - literals, lit_bsize); - } - if (lit_sz > 0) { - ErlHeapFragment *bp = new_message_buffer(lit_sz); - Eterm *hp = bp->mem; - - for (hf=hfrag; hf; hf = hf->next) { - hfrag_literal_copy(&hp, &bp->off_heap, - &hf->mem[0], &hf->mem[hf->used_size], - literals, lit_bsize); - hfrag=hf; - } - /* link new hfrag last */ - ASSERT(hfrag->next == NULL); - hfrag->next = bp; - bp->next = NULL; - } - } - } - - while (1) { - - /* Check heap, stack etc... 
*/ - if (check_mod_funs(rp, &rp->off_heap, mod_start, mod_size)) - goto try_gc; - if (any_heap_ref_ptrs(&rp->fvalue, &rp->fvalue+1, literals, lit_bsize)) { - rp->freason = EXC_NULL; - rp->fvalue = NIL; - rp->ftrace = NIL; - } - if (any_heap_ref_ptrs(rp->stop, rp->hend, literals, lit_bsize)) - goto try_literal_gc; -#ifdef HIPE - if (nstack_any_heap_ref_ptrs(rp, literals, lit_bsize)) - goto try_literal_gc; -#endif - if (any_heap_refs(rp->heap, rp->htop, literals, lit_bsize)) - goto try_literal_gc; - if (any_heap_refs(rp->old_heap, rp->old_htop, literals, lit_bsize)) - goto try_literal_gc; - - /* Check dictionary */ - if (rp->dictionary) { - Eterm* start = ERTS_PD_START(rp->dictionary); - Eterm* end = start + ERTS_PD_SIZE(rp->dictionary); - - if (any_heap_ref_ptrs(start, end, literals, lit_bsize)) - goto try_literal_gc; - } - - /* Check heap fragments */ - for (hfrag = rp->mbuf; hfrag; hfrag = hfrag->next) { - Eterm *hp, *hp_end; - /* Off heap lists should already have been moved into process */ - ASSERT(!check_mod_funs(rp, &hfrag->off_heap, mod_start, mod_size)); - - hp = &hfrag->mem[0]; - hp_end = &hfrag->mem[hfrag->used_size]; - if (any_heap_refs(hp, hp_end, literals, lit_bsize)) - goto try_literal_gc; - } - - /* - * Message buffer fragments (matched messages) - * - off heap lists should already have been moved into - * process off heap structure. - * - Check for literals - */ - for (msgp = rp->msg_frag; msgp; msgp = msgp->next) { - hfrag = erts_message_to_heap_frag(msgp); - for (; hfrag; hfrag = hfrag->next) { - Eterm *hp, *hp_end; - ASSERT(!check_mod_funs(rp, &hfrag->off_heap, mod_start, mod_size)); - - hp = &hfrag->mem[0]; - hp_end = &hfrag->mem[hfrag->used_size]; - - if (any_heap_refs(hp, hp_end, literals, lit_bsize)) - goto try_literal_gc; - } - } - - return am_false; - - try_literal_gc: - need_gc |= ERTS_LITERAL_GC__; - - try_gc: - need_gc |= ERTS_ORDINARY_GC__; - - if ((done_gc & need_gc) == need_gc) - return am_true; - - if (!(flags & ERTS_CPC_ALLOW_GC)) - return am_aborted; - - need_gc &= ~done_gc; - - /* - * Try to get rid of literals by by garbage collecting. - * Clear both fvalue and ftrace. - */ - - rp->freason = EXC_NULL; - rp->fvalue = NIL; - rp->ftrace = NIL; - - if (need_gc & ERTS_ORDINARY_GC__) { - FLAGS(rp) |= F_NEED_FULLSWEEP; - *redsp += erts_garbage_collect_nobump(rp, 0, rp->arg_reg, rp->arity, fcalls); - done_gc |= ERTS_ORDINARY_GC__; - } - if (need_gc & ERTS_LITERAL_GC__) { - struct erl_off_heap_header* oh; - oh = modp->old.code_hdr->literal_area->off_heap; - *redsp += lit_bsize / 64; /* Need, better value... 
*/ - erts_garbage_collect_literals(rp, (Eterm*)literals, lit_bsize, oh); - done_gc |= ERTS_LITERAL_GC__; - } - need_gc = 0; - } - -#undef ERTS_ORDINARY_GC__ -#undef ERTS_LITERAL_GC__ - -} - -#endif /* !ERTS_NEW_PURGE_STRATEGY */ - static int any_heap_ref_ptrs(Eterm* start, Eterm* end, char* mod_start, Uint mod_size) { @@ -1484,8 +1206,6 @@ hfrag_literal_copy(Eterm **hpp, ErlOffHeap *ohp, } } -#ifdef ERTS_NEW_PURGE_STRATEGY - ErtsThrPrgrLaterOp later_literal_area_switch; #ifdef ERTS_SMP @@ -1499,13 +1219,8 @@ complete_literal_area_switch(void *unused) } #endif -#endif /* ERTS_NEW_PURGE_STRATEGY */ - BIF_RETTYPE erts_internal_release_literal_area_switch_0(BIF_ALIST_0) { -#ifndef ERTS_NEW_PURGE_STRATEGY - BIF_ERROR(BIF_P, EXC_NOTSUP); -#else ErtsLiteralAreaRef *la_ref; if (BIF_P != erts_literal_area_collector) @@ -1544,7 +1259,6 @@ BIF_RETTYPE erts_internal_release_literal_area_switch_0(BIF_ALIST_0) ERTS_BIF_YIELD_RETURN(BIF_P, am_true); #endif -#endif /* ERTS_NEW_PURGE_STRATEGY */ } void @@ -1722,10 +1436,6 @@ BIF_RETTYPE erts_internal_purge_module_2(BIF_ALIST_2) code = (BeamInstr*) modp->old.code_hdr; end = (BeamInstr *)((char *)code + modp->old.code_length); erts_fun_purge_prepare(code, end); -#if !defined(ERTS_NEW_PURGE_STRATEGY) - ASSERT(!erts_copy_literal_area); - erts_copy_literal_area = modp->old.code_hdr->literal_area; -#endif } erts_runlock_old_code(code_ix); } @@ -1765,10 +1475,6 @@ BIF_RETTYPE erts_internal_purge_module_2(BIF_ALIST_2) erts_fun_purge_abort_prepare(purge_state.funs, purge_state.fe_ix); -#if !defined(ERTS_NEW_PURGE_STRATEGY) - ASSERT(erts_copy_literal_area); - erts_copy_literal_area = NULL; -#endif #ifndef ERTS_SMP erts_fun_purge_abort_finalize(purge_state.funs, purge_state.fe_ix); finalize_purge_operation(BIF_P, 0); @@ -1877,14 +1583,6 @@ BIF_RETTYPE erts_internal_purge_module_2(BIF_ALIST_2) finalize_purge_operation(BIF_P, ret == am_true); -#if !defined(ERTS_NEW_PURGE_STRATEGY) - - ASSERT(erts_copy_literal_area == literals); - erts_copy_literal_area = NULL; - erts_release_literal_area(literals); - -#else /* ERTS_NEW_PURGE_STRATEGY */ - if (literals) { ErtsLiteralAreaRef *ref; ref = erts_alloc(ERTS_ALC_T_LITERAL_REF, @@ -1908,8 +1606,6 @@ BIF_RETTYPE erts_internal_purge_module_2(BIF_ALIST_2) BIF_P->common.id); } -#endif /* ERTS_NEW_PURGE_STRATEGY */ - return ret; } diff --git a/erts/emulator/beam/bif.tab b/erts/emulator/beam/bif.tab index ea66f165a0..7a35c02934 100644 --- a/erts/emulator/beam/bif.tab +++ b/erts/emulator/beam/bif.tab @@ -164,7 +164,7 @@ bif erts_internal:port_connect/2 bif erts_internal:request_system_task/3 bif erts_internal:request_system_task/4 -bif erts_internal:check_process_code/2 +bif erts_internal:check_process_code/1 bif erts_internal:map_to_tuple_keys/1 bif erts_internal:term_type/1 diff --git a/erts/emulator/beam/erl_bif_info.c b/erts/emulator/beam/erl_bif_info.c index 107c0c01d9..cb7278696f 100644 --- a/erts/emulator/beam/erl_bif_info.c +++ b/erts/emulator/beam/erl_bif_info.c @@ -2883,27 +2883,6 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1) BIF_RET(AM_tag); #endif } - else if (ERTS_IS_ATOM_STR("check_process_code",BIF_ARG_1)) { - Eterm terms[3]; - Sint length = 1; - Uint sz = 0; - Eterm *hp, res; - DECL_AM(direct_references); - - terms[0] = AM_direct_references; -#if !defined(ERTS_NEW_PURGE_STRATEGY) - { - DECL_AM(indirect_references); - terms[1] = AM_indirect_references; - terms[2] = am_copy_literals; - length = 3; - } -#endif - erts_bld_list(NULL, &sz, length, terms); - hp = HAlloc(BIF_P, sz); - res = erts_bld_list(&hp, NULL, length, terms); 
- BIF_RET(res); - } BIF_ERROR(BIF_P, BADARG); } diff --git a/erts/emulator/beam/erl_init.c b/erts/emulator/beam/erl_init.c index 781bf024dd..fb85bbff34 100644 --- a/erts/emulator/beam/erl_init.c +++ b/erts/emulator/beam/erl_init.c @@ -2264,7 +2264,6 @@ erl_start(int argc, char **argv) ASSERT(erts_code_purger && erts_code_purger->common.id == pid); erts_proc_inc_refc(erts_code_purger); -#ifdef ERTS_NEW_PURGE_STRATEGY pid = erl_system_process_otp(otp_ring0_pid, "erts_literal_area_collector"); erts_literal_area_collector = (Process *) erts_ptab_pix2intptr_ddrb(&erts_proc, @@ -2272,7 +2271,6 @@ erl_start(int argc, char **argv) ASSERT(erts_literal_area_collector && erts_literal_area_collector->common.id == pid); erts_proc_inc_refc(erts_literal_area_collector); -#endif #ifdef ERTS_DIRTY_SCHEDULERS pid = erl_system_process_otp(otp_ring0_pid, "erts_dirty_process_code_checker"); diff --git a/erts/emulator/beam/erl_lock_check.c b/erts/emulator/beam/erl_lock_check.c index 06266363b5..356f5ca71e 100644 --- a/erts/emulator/beam/erl_lock_check.c +++ b/erts/emulator/beam/erl_lock_check.c @@ -113,9 +113,7 @@ static erts_lc_lock_order_t erts_lock_order[] = { { "export_tab", NULL }, { "fun_tab", NULL }, { "environ", NULL }, -#ifdef ERTS_NEW_PURGE_STRATEGY { "release_literal_areas", NULL }, -#endif #endif { "efile_drv", "address" }, { "drv_ev_state_grow", NULL, }, diff --git a/erts/emulator/beam/erl_process.c b/erts/emulator/beam/erl_process.c index fa8290028a..91eb82af7d 100644 --- a/erts/emulator/beam/erl_process.c +++ b/erts/emulator/beam/erl_process.c @@ -448,9 +448,7 @@ int erts_system_profile_ts_type = ERTS_TRACE_FLG_NOW_TIMESTAMP; typedef enum { ERTS_PSTT_GC, /* Garbage Collect */ ERTS_PSTT_CPC, /* Check Process Code */ -#ifdef ERTS_NEW_PURGE_STRATEGY ERTS_PSTT_CLA, /* Copy Literal Area */ -#endif ERTS_PSTT_COHMQ, /* Change off heap message queue */ ERTS_PSTT_FTMQ /* Flush trace msg queue */ } ErtsProcSysTaskType; @@ -10430,7 +10428,6 @@ execute_sys_tasks(Process *c_p, erts_aint32_t *statep, int in_reds) fcalls = reds - CONTEXT_REDS; st_res = erts_check_process_code(c_p, st->arg[0], - unsigned_val(st->arg[1]), &cpc_reds, fcalls); reds -= cpc_reds; @@ -10441,7 +10438,6 @@ execute_sys_tasks(Process *c_p, erts_aint32_t *statep, int in_reds) } break; } -#ifdef ERTS_NEW_PURGE_STRATEGY case ERTS_PSTT_CLA: { int fcalls; int cla_reds = 0; @@ -10461,7 +10457,6 @@ execute_sys_tasks(Process *c_p, erts_aint32_t *statep, int in_reds) } break; } -#endif case ERTS_PSTT_COHMQ: reds -= erts_complete_off_heap_message_queue_change(c_p); st_res = am_true; @@ -10516,11 +10511,9 @@ cleanup_sys_tasks(Process *c_p, erts_aint32_t in_state, int in_reds) case ERTS_PSTT_COHMQ: st_res = am_false; break; -#ifdef ERTS_NEW_PURGE_STRATEGY case ERTS_PSTT_CLA: st_res = am_ok; break; -#endif #ifdef ERTS_SMP case ERTS_PSTT_FTMQ: reds -= erts_flush_trace_messages(c_p, ERTS_PROC_LOCK_MAIN); @@ -10695,8 +10688,6 @@ request_system_task(Process *c_p, Eterm requester, Eterm target, case am_check_process_code: if (is_not_atom(st->arg[0])) goto badarg; - if (is_not_small(st->arg[1]) || (unsigned_val(st->arg[1]) & ~ERTS_CPC_ALL)) - goto badarg; noproc_res = am_false; st->type = ERTS_PSTT_CPC; if (!rp) @@ -10712,7 +10703,6 @@ request_system_task(Process *c_p, Eterm requester, Eterm target, #endif break; -#ifdef ERTS_NEW_PURGE_STRATEGY case am_copy_literals: if (st->arg[0] != am_true && st->arg[0] != am_false) goto badarg; @@ -10721,7 +10711,6 @@ request_system_task(Process *c_p, Eterm requester, Eterm target, if (!rp) goto noproc; break; -#endif 
default: goto badarg; diff --git a/erts/emulator/beam/global.h b/erts/emulator/beam/global.h index d7dd6371b7..0fd9ab5e58 100644 --- a/erts/emulator/beam/global.h +++ b/erts/emulator/beam/global.h @@ -1000,12 +1000,8 @@ Eterm erl_send(Process *p, Eterm to, Eterm msg); Eterm erl_is_function(Process* p, Eterm arg1, Eterm arg2); /* beam_bif_load.c */ -#define ERTS_CPC_ALLOW_GC (1 << 0) -#define ERTS_CPC_ALL ERTS_CPC_ALLOW_GC -Eterm erts_check_process_code(Process *c_p, Eterm module, Uint flags, int *redsp, int fcalls); -#ifdef ERTS_NEW_PURGE_STRATEGY +Eterm erts_check_process_code(Process *c_p, Eterm module, int *redsp, int fcalls); Eterm erts_proc_copy_literal_area(Process *c_p, int *redsp, int fcalls, int gc_allowed); -#endif typedef struct ErtsLiteralArea_ { struct erl_off_heap_header *off_heap; @@ -1017,9 +1013,7 @@ typedef struct ErtsLiteralArea_ { (sizeof(ErtsLiteralArea) + sizeof(Eterm)*((N) - 1)) extern ErtsLiteralArea *erts_copy_literal_area; -#ifdef ERTS_NEW_PURGE_STRATEGY extern Process *erts_literal_area_collector; -#endif #ifdef ERTS_DIRTY_SCHEDULERS extern Process *erts_dirty_process_code_checker; #endif diff --git a/erts/emulator/beam/ops.tab b/erts/emulator/beam/ops.tab index 879daaca0a..b467c5a9b6 100644 --- a/erts/emulator/beam/ops.tab +++ b/erts/emulator/beam/ops.tab @@ -792,14 +792,14 @@ allocate_init t I y ################################################################# # -# The BIFs erts_internal:check_process_code/2 must be called like a function, +# The BIFs erts_internal:check_process_code/1 must be called like a function, # to ensure that c_p->i (program counter) is set correctly (an ordinary # BIF call doesn't set it). # -call_ext u==2 Bif=u$bif:erts_internal:check_process_code/2 => i_call_ext Bif -call_ext_last u==2 Bif=u$bif:erts_internal:check_process_code/2 D => i_call_ext_last Bif D -call_ext_only u==2 Bif=u$bif:erts_internal:check_process_code/2 => i_call_ext_only Bif +call_ext u==1 Bif=u$bif:erts_internal:check_process_code/1 => i_call_ext Bif +call_ext_last u==1 Bif=u$bif:erts_internal:check_process_code/1 D => i_call_ext_last Bif D +call_ext_only u==1 Bif=u$bif:erts_internal:check_process_code/1 => i_call_ext_only Bif # # The BIFs erlang:garbage_collect/0 must be called like a function, -- cgit v1.2.3 From d5ba65f0aa25768c1af7a1639eb73298b029eaf3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bj=C3=B6rn=20Gustavsson?= Date: Tue, 23 Aug 2016 07:46:43 +0200 Subject: Slightly reduce code size of process_main() clang will generate smaller code if we avoid reassigning the reg and freg arrays. That can be easily arranged by passing them as arguments to process_main(). --- erts/emulator/beam/beam_emu.c | 10 ++++------ erts/emulator/beam/erl_init.c | 2 +- erts/emulator/beam/erl_process.c | 3 ++- erts/emulator/beam/global.h | 2 +- 4 files changed, 8 insertions(+), 9 deletions(-) (limited to 'erts/emulator/beam') diff --git a/erts/emulator/beam/beam_emu.c b/erts/emulator/beam/beam_emu.c index c704323bb2..0ba06058a5 100644 --- a/erts/emulator/beam/beam_emu.c +++ b/erts/emulator/beam/beam_emu.c @@ -1078,7 +1078,7 @@ static Eterm make_arglist(Process* c_p, Eterm* reg, int a); void init_emulator(void) { - process_main(); + process_main(0, 0); } /* @@ -1225,7 +1225,7 @@ init_emulator(void) * the instructions' C labels to the loader. * The second call starts execution of BEAM code. This call never returns. 
*/ -void process_main(void) +void process_main(Eterm * x_reg_array, FloatDef* f_reg_array) { static int init_done = 0; Process* c_p = NULL; @@ -1237,7 +1237,7 @@ void process_main(void) /* Pointer to X registers: x(1)..x(N); reg[0] is used when doing GC, * in all other cases x0 is used. */ - register Eterm* reg REG_xregs = NULL; + register Eterm* reg REG_xregs = x_reg_array; /* * Top of heap (next free location); grows upwards. @@ -1264,7 +1264,7 @@ void process_main(void) * X registers and floating point registers are located in * scheduler specific data. */ - register FloatDef *freg; + register FloatDef *freg = f_reg_array; /* * For keeping the negative old value of 'reds' when call saving is active. @@ -1350,8 +1350,6 @@ void process_main(void) start_time_i = c_p->i; } - reg = erts_proc_sched_data(c_p)->x_reg_array; - freg = erts_proc_sched_data(c_p)->f_reg_array; ERL_BITS_RELOAD_STATEP(c_p); { int reds; diff --git a/erts/emulator/beam/erl_init.c b/erts/emulator/beam/erl_init.c index 781bf024dd..2f68ece37b 100644 --- a/erts/emulator/beam/erl_init.c +++ b/erts/emulator/beam/erl_init.c @@ -2302,7 +2302,7 @@ erl_start(int argc, char **argv) #endif set_main_stack_size(); erts_sched_init_time_sup(esdp); - process_main(); + process_main(esdp->x_reg_array, esdp->f_reg_array); } #endif } diff --git a/erts/emulator/beam/erl_process.c b/erts/emulator/beam/erl_process.c index bcbb9b3115..b577ae95b0 100644 --- a/erts/emulator/beam/erl_process.c +++ b/erts/emulator/beam/erl_process.c @@ -8169,7 +8169,8 @@ sched_thread_func(void *vesdp) ERTS_VERIFY_UNUSED_TEMP_ALLOC(NULL); #endif - process_main(); + process_main(esdp->x_reg_array, esdp->f_reg_array); + /* No schedulers should *ever* terminate */ erts_exit(ERTS_ABORT_EXIT, "Scheduler thread number %beu terminated\n", diff --git a/erts/emulator/beam/global.h b/erts/emulator/beam/global.h index d7dd6371b7..3030a095a8 100644 --- a/erts/emulator/beam/global.h +++ b/erts/emulator/beam/global.h @@ -1200,7 +1200,7 @@ void print_pass_through(int, byte*, int); /* beam_emu.c */ int catchlevel(Process*); void init_emulator(void); -void process_main(void); +void process_main(Eterm* x_reg_array, FloatDef* f_reg_array); void erts_dirty_process_main(ErtsSchedulerData *); Eterm build_stacktrace(Process* c_p, Eterm exc); Eterm expand_error_value(Process* c_p, Uint freason, Eterm Value); -- cgit v1.2.3 From 986d32a62b20c32338dac4dfd27c141c8f9be0fe Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bj=C3=B6rn=20Gustavsson?= Date: Mon, 20 Jun 2016 13:25:04 +0200 Subject: Implement the new ceil/1 and floor/1 guard BIFs Implement as ceil/1 and floor/1 as new guard BIFs (essentially part of Erlang language). They are guard BIFs because trunc/1 is a guard BIF. It would be strange to have trunc/1 as a part of the language, but not ceil/1 and floor/1. 
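For reference, the rounding behaviour the new BIFs follow is that of the C
library functions of the same name: floor/1 rounds towards negative infinity
and ceil/1 towards positive infinity, while the existing trunc/1 rounds
towards zero. (The BIFs additionally pass integer arguments through unchanged
and convert float results to Erlang integers via double_to_integer, as seen in
the diff below.) A minimal standalone sketch of those semantics, not emulator
code, built with -lm:

    #include <math.h>
    #include <stdio.h>

    int main(void)
    {
        double x = -2.5;
        printf("floor(%.1f) = %.1f\n", x, floor(x)); /* -3.0, towards -inf */
        printf("ceil(%.1f)  = %.1f\n", x, ceil(x));  /* -2.0, towards +inf */
        printf("trunc(%.1f) = %.1f\n", x, trunc(x)); /* -2.0, towards zero */
        return 0;
    }
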
--- erts/emulator/beam/bif.tab | 7 ++++ erts/emulator/beam/erl_bif_guard.c | 65 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 72 insertions(+) (limited to 'erts/emulator/beam') diff --git a/erts/emulator/beam/bif.tab b/erts/emulator/beam/bif.tab index 7a35c02934..67eae35dec 100644 --- a/erts/emulator/beam/bif.tab +++ b/erts/emulator/beam/bif.tab @@ -658,6 +658,13 @@ bif erlang:has_prepared_code_on_load/1 bif maps:take/2 +# +# New in 20.0 +# + +gcbif erlang:floor/1 +gcbif erlang:ceil/1 + # # Obsolete # diff --git a/erts/emulator/beam/erl_bif_guard.c b/erts/emulator/beam/erl_bif_guard.c index b42d2dc28b..458315f293 100644 --- a/erts/emulator/beam/erl_bif_guard.c +++ b/erts/emulator/beam/erl_bif_guard.c @@ -141,6 +141,39 @@ BIF_RETTYPE trunc_1(BIF_ALIST_1) BIF_RET(res); } +BIF_RETTYPE floor_1(BIF_ALIST_1) +{ + Eterm res; + FloatDef f; + + if (is_not_float(BIF_ARG_1)) { + if (is_integer(BIF_ARG_1)) + BIF_RET(BIF_ARG_1); + BIF_ERROR(BIF_P, BADARG); + } + GET_DOUBLE(BIF_ARG_1, f); + res = double_to_integer(BIF_P, floor(f.fd)); + BIF_RET(res); +} + +BIF_RETTYPE ceil_1(BIF_ALIST_1) +{ + Eterm res; + FloatDef f; + + /* check arg */ + if (is_not_float(BIF_ARG_1)) { + if (is_integer(BIF_ARG_1)) + BIF_RET(BIF_ARG_1); + BIF_ERROR(BIF_P, BADARG); + } + /* get the float */ + GET_DOUBLE(BIF_ARG_1, f); + + res = double_to_integer(BIF_P, ceil(f.fd)); + BIF_RET(res); +} + BIF_RETTYPE round_1(BIF_ALIST_1) { Eterm res; @@ -621,6 +654,38 @@ Eterm erts_gc_trunc_1(Process* p, Eterm* reg, Uint live) reg, live); } +Eterm erts_gc_floor_1(Process* p, Eterm* reg, Uint live) +{ + Eterm arg; + FloatDef f; + + arg = reg[live]; + if (is_not_float(arg)) { + if (is_integer(arg)) { + return arg; + } + BIF_ERROR(p, BADARG); + } + GET_DOUBLE(arg, f); + return gc_double_to_integer(p, floor(f.fd), reg, live); +} + +Eterm erts_gc_ceil_1(Process* p, Eterm* reg, Uint live) +{ + Eterm arg; + FloatDef f; + + arg = reg[live]; + if (is_not_float(arg)) { + if (is_integer(arg)) { + return arg; + } + BIF_ERROR(p, BADARG); + } + GET_DOUBLE(arg, f); + return gc_double_to_integer(p, ceil(f.fd), reg, live); +} + static Eterm gc_double_to_integer(Process* p, double x, Eterm* reg, Uint live) { -- cgit v1.2.3 From 6d40cfd77f1d2f1e1403e4b41c0b53ae6499ea11 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bj=C3=B6rn=20Gustavsson?= Date: Mon, 27 Jun 2016 12:54:04 +0200 Subject: Add math:floor/1 and math:ceil/1 Add math:floor/1 and math:ceil/1 to avoid unnecessary conversions in floating point expressions. That is, instead of having to write float(floor(X)) as part of a floating point expressions, we can write simply math:floor(X). 
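The gain here is that the libm functions these BIFs map onto already return
doubles, so the result can stay a float term with no integer (bignum)
conversion step. The wrapper sketched below is illustrative only; the real
math_call_1 helper's definition is not part of this patch, and its argument
checking and error handling are omitted here:

    #include <math.h>
    #include <stdio.h>

    typedef double (*unary_math_fn)(double);

    /* Illustrative stand-in for a math_call_1-style wrapper: the result
     * stays a double, e.g. floor(3.7) gives 3.0 rather than the integer 3. */
    static double call_unary(unary_math_fn fn, double arg)
    {
        return fn(arg);
    }

    int main(void)
    {
        printf("%.1f %.1f\n",
               call_unary(floor, 3.7),   /* 3.0 */
               call_unary(ceil,  3.7));  /* 4.0 */
        return 0;
    }
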
--- erts/emulator/beam/bif.tab | 2 ++ erts/emulator/beam/erl_math.c | 10 ++++++++-- 2 files changed, 10 insertions(+), 2 deletions(-) (limited to 'erts/emulator/beam') diff --git a/erts/emulator/beam/bif.tab b/erts/emulator/beam/bif.tab index 67eae35dec..0bd80ea684 100644 --- a/erts/emulator/beam/bif.tab +++ b/erts/emulator/beam/bif.tab @@ -664,6 +664,8 @@ bif maps:take/2 gcbif erlang:floor/1 gcbif erlang:ceil/1 +bif math:floor/1 +bif math:ceil/1 # # Obsolete diff --git a/erts/emulator/beam/erl_math.c b/erts/emulator/beam/erl_math.c index fc0aaed18a..990fa63bd4 100644 --- a/erts/emulator/beam/erl_math.c +++ b/erts/emulator/beam/erl_math.c @@ -247,6 +247,12 @@ BIF_RETTYPE math_pow_2(BIF_ALIST_2) return math_call_2(BIF_P, pow, BIF_ARG_1, BIF_ARG_2); } +BIF_RETTYPE math_ceil_1(BIF_ALIST_1) +{ + return math_call_1(BIF_P, ceil, BIF_ARG_1); +} - - +BIF_RETTYPE math_floor_1(BIF_ALIST_1) +{ + return math_call_1(BIF_P, floor, BIF_ARG_1); +} -- cgit v1.2.3 From 4f8071d81aa0690caa3adc734d73a1cb004ad808 Mon Sep 17 00:00:00 2001 From: Sverker Eriksson Date: Mon, 19 Sep 2016 14:34:57 +0200 Subject: erts: Remove deprecated nif 'reload' feature and instead let erlang:load_nif/2 return {error, {reload, _}} before even trying to load the library if a NIF library has already been successfully loaded for the calling module instance. --- erts/emulator/beam/erl_nif.c | 121 ++++++++++++------------------------------- 1 file changed, 33 insertions(+), 88 deletions(-) (limited to 'erts/emulator/beam') diff --git a/erts/emulator/beam/erl_nif.c b/erts/emulator/beam/erl_nif.c index b53d3d4c19..7442e99cb3 100644 --- a/erts/emulator/beam/erl_nif.c +++ b/erts/emulator/beam/erl_nif.c @@ -3145,7 +3145,6 @@ static ErlNifFunc* next_func(ErlNifEntry* entry, int* incrp, ErlNifFunc* func) BIF_RETTYPE load_nif_2(BIF_ALIST_2) { static const char bad_lib[] = "bad_lib"; - static const char reload[] = "reload"; static const char upgrade[] = "upgrade"; char* lib_name = NULL; void* handle = NULL; @@ -3162,7 +3161,6 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2) Eterm ret = am_ok; int veto; struct erl_module_nif* lib = NULL; - int reload_warning = 0; struct erl_module_instance* this_mi; struct erl_module_instance* prev_mi; @@ -3222,8 +3220,12 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2) this_mi = module_p->on_load; } - if (init_func == NULL && - (err=erts_sys_ddll_open(lib_name, &handle, &errdesc)) != ERL_DE_NO_ERROR) { + if (this_mi->nif != NULL) { + ret = load_nif_error(BIF_P,"reload","NIF library already loaded" + " (reload disallowed since OTP 20)."); + } + else if (init_func == NULL && + (err=erts_sys_ddll_open(lib_name, &handle, &errdesc)) != ERL_DE_NO_ERROR) { const char slogan[] = "Failed to load NIF library"; if (strstr(errdesc.str, lib_name) != NULL) { ret = load_nif_error(BIF_P, "load_failed", "%s: '%s'", slogan, errdesc.str); @@ -3312,7 +3314,7 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2) goto error; } - /* Call load, reload or upgrade: + /* Call load or upgrade: */ @@ -3324,82 +3326,33 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2) ASSERT(opened_rt_list == NULL); lib->mod = module_p; env.mod_nif = lib; - if (this_mi->nif != NULL) { /*************** Reload ******************/ - /* - * Repeated load_nif calls from same Erlang module instance ("reload") - * is deprecated and was only ment as a development feature not to - * be used in production systems. 
(See warning below) - */ - int k, old_incr = 0; - ErlNifFunc* old_func; - lib->priv_data = this_mi->nif->priv_data; - - ASSERT(this_mi->nif->entry != NULL); - if (entry->reload == NULL) { - ret = load_nif_error(BIF_P,reload,"Reload not supported by this NIF library."); - goto error; - } - /* Check that no NIF is removed */ - old_func = this_mi->nif->entry->funcs; - for (k=0; k < this_mi->nif->entry->num_of_funcs; k++) { - int incr = 0; - ErlNifFunc* f = entry->funcs; - for (i=0; i < entry->num_of_funcs; i++) { - if (old_func->arity == f->arity - && sys_strcmp(old_func->name, f->name) == 0) { - break; - } - f = next_func(entry, &incr, f); - } - if (i == entry->num_of_funcs) { - ret = load_nif_error(BIF_P,reload,"Reloaded library missing " - "function %T:%s/%u\r\n", mod_atom, - old_func->name, old_func->arity); - goto error; - } - old_func = next_func(this_mi->nif->entry, &old_incr, old_func); - } - erts_pre_nif(&env, BIF_P, lib, NULL); - veto = entry->reload(&env, &lib->priv_data, BIF_ARG_2); - erts_post_nif(&env); - if (veto) { - ret = load_nif_error(BIF_P, reload, "Library reload-call unsuccessful."); - } - else { - commit_opened_resource_types(lib); - this_mi->nif->entry = NULL; /* to prevent 'unload' callback */ - erts_unload_nif(this_mi->nif); - reload_warning = 1; - } + + lib->priv_data = NULL; + if (prev_mi->nif != NULL) { /**************** Upgrade ***************/ + void* prev_old_data = prev_mi->nif->priv_data; + if (entry->upgrade == NULL) { + ret = load_nif_error(BIF_P, upgrade, "Upgrade not supported by this NIF library."); + goto error; + } + erts_pre_nif(&env, BIF_P, lib, NULL); + veto = entry->upgrade(&env, &lib->priv_data, &prev_mi->nif->priv_data, BIF_ARG_2); + erts_post_nif(&env); + if (veto) { + prev_mi->nif->priv_data = prev_old_data; + ret = load_nif_error(BIF_P, upgrade, "Library upgrade-call unsuccessful."); + } + else + commit_opened_resource_types(lib); } - else { - lib->priv_data = NULL; - if (prev_mi->nif != NULL) { /**************** Upgrade ***************/ - void* prev_old_data = prev_mi->nif->priv_data; - if (entry->upgrade == NULL) { - ret = load_nif_error(BIF_P, upgrade, "Upgrade not supported by this NIF library."); - goto error; - } - erts_pre_nif(&env, BIF_P, lib, NULL); - veto = entry->upgrade(&env, &lib->priv_data, &prev_mi->nif->priv_data, BIF_ARG_2); - erts_post_nif(&env); - if (veto) { - prev_mi->nif->priv_data = prev_old_data; - ret = load_nif_error(BIF_P, upgrade, "Library upgrade-call unsuccessful."); - } - else - commit_opened_resource_types(lib); - } - else if (entry->load != NULL) { /********* Initial load ***********/ - erts_pre_nif(&env, BIF_P, lib, NULL); - veto = entry->load(&env, &lib->priv_data, BIF_ARG_2); - erts_post_nif(&env); - if (veto) { - ret = load_nif_error(BIF_P, "load", "Library load-call unsuccessful."); - } - else - commit_opened_resource_types(lib); - } + else if (entry->load != NULL) { /********* Initial load ***********/ + erts_pre_nif(&env, BIF_P, lib, NULL); + veto = entry->load(&env, &lib->priv_data, BIF_ARG_2); + erts_post_nif(&env); + if (veto) { + ret = load_nif_error(BIF_P, "load", "Library load-call unsuccessful."); + } + else + commit_opened_resource_types(lib); } if (ret == am_ok) { /* @@ -3458,14 +3411,6 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2) erts_release_code_write_permission(); erts_free(ERTS_ALC_T_TMP, lib_name); - if (reload_warning) { - erts_dsprintf_buf_t* dsbufp = erts_create_logger_dsbuf(); - erts_dsprintf(dsbufp, - "Repeated calls to erlang:load_nif from module '%T'.\n\n" - "The NIF reload mechanism is 
deprecated and must not " - "be used in production systems.\n", mod_atom); - erts_send_warning_to_logger(BIF_P->group_leader, dsbufp); - } BIF_RET(ret); } -- cgit v1.2.3 From f1ffa5e90e5eecaac890e876760932d1bb1d9c86 Mon Sep 17 00:00:00 2001 From: Sverker Eriksson Date: Mon, 19 Sep 2016 15:47:18 +0200 Subject: erts: Add ErtsSizeofMember macro (in case it matters) --- erts/emulator/beam/sys.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'erts/emulator/beam') diff --git a/erts/emulator/beam/sys.h b/erts/emulator/beam/sys.h index 2d0628f70e..16e6c33367 100644 --- a/erts/emulator/beam/sys.h +++ b/erts/emulator/beam/sys.h @@ -103,6 +103,8 @@ #define ErtsContainerStruct_(ptr, type, memberv) \ ((type *)((char *)(1 ? (ptr) : ((type *)0)->memberv) - offsetof(type, memberv))) +#define ErtsSizeofMember(type, member) sizeof(((type *)0)->member) + #if defined (__WIN32__) # include "erl_win_sys.h" #else -- cgit v1.2.3 From ff7eb12d002afbffacae5429bd9bb0819aa2d146 Mon Sep 17 00:00:00 2001 From: Sverker Eriksson Date: Wed, 1 Jun 2016 16:19:04 +0200 Subject: erts: Remove unnecessary access of 'is_resizing' in tables without write_concurrency and remove it totally #ifndef ERTS_SMP --- erts/emulator/beam/erl_db_hash.c | 21 ++++++++++----------- erts/emulator/beam/erl_db_hash.h | 2 +- 2 files changed, 11 insertions(+), 12 deletions(-) (limited to 'erts/emulator/beam') diff --git a/erts/emulator/beam/erl_db_hash.c b/erts/emulator/beam/erl_db_hash.c index 12ae086b31..16d97f05ee 100644 --- a/erts/emulator/beam/erl_db_hash.c +++ b/erts/emulator/beam/erl_db_hash.c @@ -670,8 +670,8 @@ int db_create_hash(Process *p, DbTable *tbl) tb->nsegs = NSEG_1; tb->nslots = SEGSZ; - erts_smp_atomic_init_nob(&tb->is_resizing, 0); #ifdef ERTS_SMP + erts_smp_atomic_init_nob(&tb->is_resizing, 0); if (tb->common.type & DB_FINE_LOCKED) { erts_smp_rwmtx_opt_t rwmtx_opt = ERTS_SMP_RWMTX_OPT_DEFAULT_INITER; int i; @@ -2604,23 +2604,22 @@ static Eterm build_term_list(Process* p, HashDbTerm* ptr1, HashDbTerm* ptr2, static ERTS_INLINE int begin_resizing(DbTableHash* tb) { +#ifdef ERTS_SMP if (DB_USING_FINE_LOCKING(tb)) - return !erts_smp_atomic_xchg_acqb(&tb->is_resizing, 1); - else { - if (erts_smp_atomic_read_nob(&tb->is_resizing)) - return 0; - erts_smp_atomic_set_nob(&tb->is_resizing, 1); - return 1; - } + return !erts_atomic_xchg_acqb(&tb->is_resizing, 1); + else + ERTS_LC_ASSERT(erts_lc_rwmtx_is_rwlocked(&tb->common.rwlock)); +#endif + return 1; } static ERTS_INLINE void done_resizing(DbTableHash* tb) { +#ifdef ERTS_SMP if (DB_USING_FINE_LOCKING(tb)) - erts_smp_atomic_set_relb(&tb->is_resizing, 0); - else - erts_smp_atomic_set_nob(&tb->is_resizing, 0); + erts_atomic_set_relb(&tb->is_resizing, 0); +#endif } /* Grow table with one new bucket. 
diff --git a/erts/emulator/beam/erl_db_hash.h b/erts/emulator/beam/erl_db_hash.h index e654363cd5..081ff8fafc 100644 --- a/erts/emulator/beam/erl_db_hash.h +++ b/erts/emulator/beam/erl_db_hash.h @@ -60,8 +60,8 @@ typedef struct db_table_hash { /* List of slots where elements have been deleted while table was fixed */ erts_smp_atomic_t fixdel; /* (FixedDeletion*) */ erts_smp_atomic_t nactive; /* Number of "active" slots */ - erts_smp_atomic_t is_resizing; /* grow/shrink in progress */ #ifdef ERTS_SMP + erts_smp_atomic_t is_resizing; /* grow/shrink in progress */ DbTableHashFineLocks* locks; #endif #ifdef VALGRIND -- cgit v1.2.3 From abc2f4fdae2d62b5d2843082dbb4437595973b38 Mon Sep 17 00:00:00 2001 From: Sverker Eriksson Date: Thu, 23 Jun 2016 19:30:37 +0200 Subject: erts: Redesign ets with separate segment tables * Keep it simple(r) * To prepare for both dynamic sized segments and segtabs --- erts/emulator/beam/erl_db.h | 2 - erts/emulator/beam/erl_db_hash.c | 246 +++++++++++++++------------------------ erts/emulator/beam/erl_db_hash.h | 4 +- 3 files changed, 97 insertions(+), 155 deletions(-) (limited to 'erts/emulator/beam') diff --git a/erts/emulator/beam/erl_db.h b/erts/emulator/beam/erl_db.h index 1d26c49652..2f3c4a8e1b 100644 --- a/erts/emulator/beam/erl_db.h +++ b/erts/emulator/beam/erl_db.h @@ -267,7 +267,5 @@ erts_db_free_nt(ErtsAlcType_t type, void *ptr, Uint size) #endif /* #if ERTS_GLB_INLINE_INCL_FUNC_DEF */ -#undef ERTS_DB_ALC_MEM_UPDATE_ - #endif /* #if defined(ERTS_WANT_DB_INTERNAL__) && !defined(ERTS_HAVE_DB_INTERNAL__) */ diff --git a/erts/emulator/beam/erl_db_hash.c b/erts/emulator/beam/erl_db_hash.c index 16d97f05ee..82f03e458a 100644 --- a/erts/emulator/beam/erl_db_hash.c +++ b/erts/emulator/beam/erl_db_hash.c @@ -84,14 +84,6 @@ #include "erl_db_hash.h" -#ifdef MYDEBUG /* Will fail test case ets_SUITE:memory */ -# define IF_DEBUG(x) x -# define MY_ASSERT(x) ASSERT(x) -#else -# define IF_DEBUG(x) -# define MY_ASSERT(x) -#endif - /* * The following symbols can be manipulated to "tune" the linear hash array */ @@ -102,7 +94,7 @@ #define SEGSZ (1 << SEGSZ_EXP) #define SEGSZ_MASK (SEGSZ-1) -#define NSEG_1 2 /* Size of first segment table (must be at least 2) */ +#define NSEG_1 (ErtsSizeofMember(DbTableHash,first_segtab) / sizeof(struct segment*)) #define NSEG_2 256 /* Size of second segment table */ #define NSEG_INC 128 /* Number of segments to grow after that */ @@ -318,27 +310,21 @@ struct mp_info { /* A table segment */ struct segment { - HashDbTerm* buckets[SEGSZ]; -#ifdef MYDEBUG - int is_ext_segment; -#endif + HashDbTerm* buckets[1]; }; +#define SIZEOF_SEGMENT(N) \ + (offsetof(struct segment,buckets) + sizeof(HashDbTerm*)*(N)) -/* A segment that also contains a segment table */ -struct ext_segment { - struct segment s; /* The segment itself. 
Must be first */ - +/* An extended segment table */ +struct ext_segtab { + ErtsThrPrgrLaterOp lop; struct segment** prev_segtab; /* Used when table is shrinking */ - int nsegs; /* Size of segtab */ + int prev_nsegs; /* Size of prev_segtab */ + int nsegs; /* Size of this segtab */ struct segment* segtab[1]; /* The segment table */ }; -#define SIZEOF_EXTSEG(NSEGS) \ - (offsetof(struct ext_segment,segtab) + sizeof(struct segment*)*(NSEGS)) - -#if defined(DEBUG) || defined(VALGRIND) -# define EXTSEG(SEGTAB_PTR) \ - ((struct ext_segment*) (((char*)(SEGTAB_PTR)) - offsetof(struct ext_segment,segtab))) -#endif +#define SIZEOF_EXT_SEGTAB(NSEGS) \ + (offsetof(struct ext_segtab,segtab) + sizeof(struct segment*)*(NSEGS)) static ERTS_INLINE void SET_SEGTAB(DbTableHash* tb, @@ -348,41 +334,14 @@ static ERTS_INLINE void SET_SEGTAB(DbTableHash* tb, erts_smp_atomic_set_wb(&tb->segtab, (erts_aint_t) segtab); else erts_smp_atomic_set_nob(&tb->segtab, (erts_aint_t) segtab); -#ifdef VALGRIND - tb->top_ptr_to_segment_with_active_segtab = EXTSEG(segtab); -#endif } -/* How the table segments relate to each other: - - ext_segment: ext_segment: "plain" segment - #=================# #================# #=============# - | bucket[0] |<--+ +------->| bucket[256] | +->| bucket[512] | - | bucket[1] | | | | [257] | | | [513] | - : : | | : : | : : - | bucket[255] | | | | [511] | | | [767] | - |-----------------| | | |----------------| | #=============# - | prev_segtab=NULL| | | +--<---prev_segtab | | - | nsegs = 2 | | | | | nsegs = 256 | | -+->| segtab[0] -->-------+---|---|--<---segtab[0] |<-+ | -| | segtab[1] -->-----------+---|--<---segtab[1] | | | -| #=================# | | segtab[2] -->-----|--+ ext_segment: -| | : : | #================# -+----------------<---------------+ | segtab[255] ->----|----->| bucket[255*256]| - #================# | | | - | : : - | |----------------| - +----<---prev_segtab | - : : -*/ - /* ** Forward decl's (static functions) */ -static struct ext_segment* alloc_ext_seg(DbTableHash* tb, unsigned seg_ix, - struct segment** old_segtab); +static struct ext_segtab* alloc_ext_segtab(DbTableHash* tb, unsigned seg_ix); static int alloc_seg(DbTableHash *tb); static int free_seg(DbTableHash *tb, int free_records); static HashDbTerm* next(DbTableHash *tb, Uint *iptr, erts_smp_rwmtx_t** lck_ptr, @@ -666,9 +625,13 @@ int db_create_hash(Process *p, DbTable *tbl) erts_smp_atomic_init_nob(&tb->nactive, SEGSZ); erts_smp_atomic_init_nob(&tb->fixdel, (erts_aint_t)NULL); erts_smp_atomic_init_nob(&tb->segtab, (erts_aint_t)NULL); - SET_SEGTAB(tb, alloc_ext_seg(tb,0,NULL)->segtab); + SET_SEGTAB(tb, tb->first_segtab); tb->nsegs = NSEG_1; tb->nslots = SEGSZ; + tb->first_segtab[0] = (struct segment*) erts_db_alloc(ERTS_ALC_T_DB_SEG, + (DbTable *) tb, + SIZEOF_SEGMENT(SEGSZ)); + sys_memset(tb->first_segtab[0], 0, SIZEOF_SEGMENT(SEGSZ)); #ifdef ERTS_SMP erts_smp_atomic_init_nob(&tb->is_resizing, 0); @@ -2414,34 +2377,29 @@ static int analyze_pattern(DbTableHash *tb, Eterm pattern, return DB_ERROR_NONE; } -static struct ext_segment* alloc_ext_seg(DbTableHash* tb, unsigned seg_ix, - struct segment** old_segtab) +static struct ext_segtab* alloc_ext_segtab(DbTableHash* tb, unsigned seg_ix) { - int nsegs; - struct ext_segment* eseg; + struct segment** old_segtab = SEGTAB(tb); + int nsegs = 0; + struct ext_segtab* est; + ASSERT(seg_ix >= NSEG_1); switch (seg_ix) { - case 0: nsegs = NSEG_1; break; - case 1: nsegs = NSEG_2; break; - default: nsegs = seg_ix + NSEG_INC; break; - } - eseg = (struct ext_segment*) 
erts_db_alloc_fnf(ERTS_ALC_T_DB_SEG, - (DbTable *) tb, - SIZEOF_EXTSEG(nsegs)); - ASSERT(eseg != NULL); - sys_memset(&eseg->s, 0, sizeof(struct segment)); - IF_DEBUG(eseg->s.is_ext_segment = 1); - eseg->prev_segtab = old_segtab; - eseg->nsegs = nsegs; - if (old_segtab) { - ASSERT(nsegs > tb->nsegs); - sys_memcpy(eseg->segtab, old_segtab, tb->nsegs*sizeof(struct segment*)); - } + case NSEG_1: nsegs = NSEG_2; break; + default: nsegs = seg_ix + NSEG_INC; break; + } + ASSERT(nsegs > tb->nsegs); + est = (struct ext_segtab*) erts_db_alloc(ERTS_ALC_T_DB_SEG, + (DbTable *) tb, + SIZEOF_EXT_SEGTAB(nsegs)); + est->nsegs = nsegs; + est->prev_segtab = old_segtab; + est->prev_nsegs = tb->nsegs; + sys_memcpy(est->segtab, old_segtab, tb->nsegs*sizeof(struct segment*)); #ifdef DEBUG - sys_memset(&eseg->segtab[seg_ix], 0, (nsegs-seg_ix)*sizeof(struct segment*)); + sys_memset(&est->segtab[seg_ix], 0, (nsegs-seg_ix)*sizeof(struct segment*)); #endif - eseg->segtab[seg_ix] = &eseg->s; - return eseg; + return est; } /* Extend table with one new segment @@ -2449,36 +2407,32 @@ static struct ext_segment* alloc_ext_seg(DbTableHash* tb, unsigned seg_ix, static int alloc_seg(DbTableHash *tb) { int seg_ix = tb->nslots >> SEGSZ_EXP; - - if (seg_ix+1 == tb->nsegs) { /* New segtab needed (extended segment) */ - struct segment** segtab = SEGTAB(tb); - struct ext_segment* seg = alloc_ext_seg(tb, seg_ix, segtab); - if (seg == NULL) return 0; - segtab[seg_ix] = &seg->s; - /* We don't use the new segtab until next call (see "shrink race") */ - } - else { /* Just a new plain segment */ - struct segment** segtab; - if (seg_ix == tb->nsegs) { /* Time to start use segtab from last call */ - struct ext_segment* eseg; - eseg = (struct ext_segment*) SEGTAB(tb)[seg_ix-1]; - MY_ASSERT(eseg!=NULL && eseg->s.is_ext_segment); - SET_SEGTAB(tb, eseg->segtab); - tb->nsegs = eseg->nsegs; - } - ASSERT(seg_ix < tb->nsegs); - segtab = SEGTAB(tb); - ASSERT(segtab[seg_ix] == NULL); - segtab[seg_ix] = (struct segment*) erts_db_alloc_fnf(ERTS_ALC_T_DB_SEG, - (DbTable *) tb, - sizeof(struct segment)); - if (segtab[seg_ix] == NULL) return 0; - sys_memset(segtab[seg_ix], 0, sizeof(struct segment)); - } + struct segment** segtab; + + if (seg_ix == tb->nsegs) { /* New segtab needed */ + struct ext_segtab* est = alloc_ext_segtab(tb, seg_ix); + SET_SEGTAB(tb, est->segtab); + tb->nsegs = est->nsegs; + } + ASSERT(seg_ix < tb->nsegs); + segtab = SEGTAB(tb); + segtab[seg_ix] = (struct segment*) erts_db_alloc(ERTS_ALC_T_DB_SEG, + (DbTable *) tb, + SIZEOF_SEGMENT(SEGSZ)); + sys_memset(segtab[seg_ix], 0, SIZEOF_SEGMENT(SEGSZ)); tb->nslots += SEGSZ; return 1; } +#ifdef ERTS_SMP +static void dealloc_ext_segtab(void* lop_data) +{ + struct ext_segtab* est = (struct ext_segtab*) lop_data; + + erts_free(ERTS_ALC_T_DB_SEG, est); +} +#endif + /* Shrink table by freeing the top segment ** free_records: 1=free any records in segment, 0=assume segment is empty */ @@ -2486,18 +2440,17 @@ static int free_seg(DbTableHash *tb, int free_records) { const int seg_ix = (tb->nslots >> SEGSZ_EXP) - 1; struct segment** const segtab = SEGTAB(tb); - struct ext_segment* const top = (struct ext_segment*) segtab[seg_ix]; - int bytes; + struct segment* const segp = segtab[seg_ix]; int nrecords = 0; - ASSERT(top != NULL); + ASSERT(segp != NULL); #ifndef DEBUG if (free_records) #endif { int i; for (i=0; is.buckets[i]; + HashDbTerm* p = segp->buckets[i]; while(p != 0) { HashDbTerm* nxt = p->next; ASSERT(free_records); /* segment not empty as assumed? 
*/ @@ -2507,53 +2460,46 @@ static int free_seg(DbTableHash *tb, int free_records) } } } - - /* The "shrink race": - * We must avoid deallocating an extended segment while its segtab may - * still be used by other threads. - * The trick is to stop use a segtab one call earlier. That is, stop use - * a segtab when the segment above it is deallocated. When the segtab is - * later deallocated, it has not been used for a very long time. - * It is even theoretically safe as we have by then rehashed the entire - * segment, seizing *all* locks, so there cannot exist any retarded threads - * still hanging in BUCKET macro with an old segtab pointer. - * For this to work, we must of course allocate a new segtab one call - * earlier in alloc_seg() as well. And this is also the reason why - * the minimum size of the first segtab is 2 and not 1 (NSEG_1). - */ - if (seg_ix == tb->nsegs-1 || seg_ix==0) { /* Dealloc extended segment */ - MY_ASSERT(top->s.is_ext_segment); - ASSERT(segtab != top->segtab || seg_ix==0); - bytes = SIZEOF_EXTSEG(top->nsegs); - } - else { /* Dealloc plain segment */ - struct ext_segment* newtop = (struct ext_segment*) segtab[seg_ix-1]; - MY_ASSERT(!top->s.is_ext_segment); - - if (segtab == newtop->segtab) { /* New top segment is extended */ - MY_ASSERT(newtop->s.is_ext_segment); - if (newtop->prev_segtab != NULL) { - /* Time to use a smaller segtab */ - SET_SEGTAB(tb, newtop->prev_segtab); - tb->nsegs = seg_ix; - ASSERT(tb->nsegs == EXTSEG(SEGTAB(tb))->nsegs); - } - else { - ASSERT(NSEG_1 > 2 && seg_ix==1); - } - } - bytes = sizeof(struct segment); + if (seg_ix >= NSEG_1) { + struct ext_segtab* est = ErtsContainerStruct_(segtab,struct ext_segtab,segtab); + + if (seg_ix == est->prev_nsegs) { /* Dealloc extended segtab */ + ASSERT(est->prev_segtab != NULL); + SET_SEGTAB(tb, est->prev_segtab); + tb->nsegs = est->prev_nsegs; + +#ifdef ERTS_SMP + if (!tb->common.is_thread_safe) { + /* + * Table is doing a graceful shrink operation and we must avoid + * deallocating this segtab while it may still be read by other + * threads. Schedule deallocation with thread progress to make + * sure no lingering threads are still hanging in BUCKET macro + * with an old segtab pointer. + */ + Uint sz = SIZEOF_EXT_SEGTAB(est->nsegs); + ASSERT(sz == ERTS_ALC_DBG_BLK_SZ(est)); + ERTS_DB_ALC_MEM_UPDATE_(tb, sz, 0); + erts_schedule_thr_prgr_later_cleanup_op(dealloc_ext_segtab, + est, + &est->lop, + sz); + } + else +#endif + erts_db_free(ERTS_ALC_T_DB_SEG, (DbTable*)tb, est, + SIZEOF_EXT_SEGTAB(est->nsegs)); + } + else { + + } } + erts_db_free(ERTS_ALC_T_DB_SEG, (DbTable *)tb, segp, SIZEOF_SEGMENT(SEGSZ)); - erts_db_free(ERTS_ALC_T_DB_SEG, (DbTable *)tb, - (void*)top, bytes); #ifdef DEBUG - if (seg_ix > 0) { - segtab[seg_ix] = NULL; - } else { - SET_SEGTAB(tb, NULL); - } + if (seg_ix < tb->nsegs) + SEGTAB(tb)[seg_ix] = NULL; #endif tb->nslots -= SEGSZ; ASSERT(tb->nslots >= 0); diff --git a/erts/emulator/beam/erl_db_hash.h b/erts/emulator/beam/erl_db_hash.h index 081ff8fafc..2d9b5e308a 100644 --- a/erts/emulator/beam/erl_db_hash.h +++ b/erts/emulator/beam/erl_db_hash.h @@ -51,6 +51,7 @@ typedef struct db_table_hash { DbTableCommon common; erts_smp_atomic_t segtab; /* The segment table (struct segment**) */ + struct segment* first_segtab[1]; erts_smp_atomic_t szm; /* current size mask. 
*/ /* SMP: nslots and nsegs are protected by is_resizing or table write lock */ @@ -64,9 +65,6 @@ typedef struct db_table_hash { erts_smp_atomic_t is_resizing; /* grow/shrink in progress */ DbTableHashFineLocks* locks; #endif -#ifdef VALGRIND - struct ext_segment* top_ptr_to_segment_with_active_segtab; -#endif } DbTableHash; -- cgit v1.2.3 From 0d7a001039dbcab096397f27213b518113a9e5d0 Mon Sep 17 00:00:00 2001 From: Sverker Eriksson Date: Wed, 24 Aug 2016 16:28:53 +0200 Subject: erts: Enable a smaller first hash segment for ets --- erts/emulator/beam/erl_db_hash.c | 67 ++++++++++++++++++++++++---------------- 1 file changed, 41 insertions(+), 26 deletions(-) (limited to 'erts/emulator/beam') diff --git a/erts/emulator/beam/erl_db_hash.c b/erts/emulator/beam/erl_db_hash.c index 82f03e458a..2d24f438ca 100644 --- a/erts/emulator/beam/erl_db_hash.c +++ b/erts/emulator/beam/erl_db_hash.c @@ -89,10 +89,20 @@ */ #define CHAIN_LEN 6 /* Medium bucket chain len */ -/* Number of slots per segment */ -#define SEGSZ_EXP 8 -#define SEGSZ (1 << SEGSZ_EXP) -#define SEGSZ_MASK (SEGSZ-1) +/* +** We want the first mandatory segment to be small (to reduce minimal footprint) +** and larger extra segments (to reduce number of alloc/free calls). +*/ + +/* Number of slots in first segment */ +#define FIRST_SEGSZ_EXP 8 +#define FIRST_SEGSZ (1 << FIRST_SEGSZ_EXP) +#define FIRST_SEGSZ_MASK (FIRST_SEGSZ - 1) + +/* Number of slots per extra segment */ +#define EXT_SEGSZ_EXP 11 +#define EXT_SEGSZ (1 << EXT_SEGSZ_EXP) +#define EXT_SEGSZ_MASK (EXT_SEGSZ-1) #define NSEG_1 (ErtsSizeofMember(DbTableHash,first_segtab) / sizeof(struct segment*)) #define NSEG_2 256 /* Size of second segment table */ @@ -115,7 +125,9 @@ #define NACTIVE(tb) ((int)erts_smp_atomic_read_nob(&(tb)->nactive)) #define NITEMS(tb) ((int)erts_smp_atomic_read_nob(&(tb)->common.nitems)) -#define BUCKET(tb, i) SEGTAB(tb)[(i) >> SEGSZ_EXP]->buckets[(i) & SEGSZ_MASK] +#define SLOT_IX_TO_SEG_IX(i) (((i)+(EXT_SEGSZ-FIRST_SEGSZ)) >> EXT_SEGSZ_EXP) + +#define BUCKET(tb, i) SEGTAB(tb)[SLOT_IX_TO_SEG_IX(i)]->buckets[(i) & EXT_SEGSZ_MASK] /* * When deleting a table, the number of records to delete. 
@@ -422,7 +434,7 @@ db_finalize_dbterm_hash(int cret, DbUpdateHandle* handle); static ERTS_INLINE void try_shrink(DbTableHash* tb) { int nactive = NACTIVE(tb); - if (nactive > SEGSZ && NITEMS(tb) < (nactive * CHAIN_LEN) + if (nactive > FIRST_SEGSZ && NITEMS(tb) < (nactive * CHAIN_LEN) && !IS_FIXED(tb)) { shrink(tb, nactive); } @@ -621,17 +633,17 @@ int db_create_hash(Process *p, DbTable *tbl) { DbTableHash *tb = &tbl->hash; - erts_smp_atomic_init_nob(&tb->szm, SEGSZ_MASK); - erts_smp_atomic_init_nob(&tb->nactive, SEGSZ); + erts_smp_atomic_init_nob(&tb->szm, FIRST_SEGSZ_MASK); + erts_smp_atomic_init_nob(&tb->nactive, FIRST_SEGSZ); erts_smp_atomic_init_nob(&tb->fixdel, (erts_aint_t)NULL); erts_smp_atomic_init_nob(&tb->segtab, (erts_aint_t)NULL); SET_SEGTAB(tb, tb->first_segtab); tb->nsegs = NSEG_1; - tb->nslots = SEGSZ; + tb->nslots = FIRST_SEGSZ; tb->first_segtab[0] = (struct segment*) erts_db_alloc(ERTS_ALC_T_DB_SEG, (DbTable *) tb, - SIZEOF_SEGMENT(SEGSZ)); - sys_memset(tb->first_segtab[0], 0, SIZEOF_SEGMENT(SEGSZ)); + SIZEOF_SEGMENT(FIRST_SEGSZ)); + sys_memset(tb->first_segtab[0], 0, SIZEOF_SEGMENT(FIRST_SEGSZ)); #ifdef ERTS_SMP erts_smp_atomic_init_nob(&tb->is_resizing, 0); @@ -649,7 +661,7 @@ int db_create_hash(Process *p, DbTable *tbl) erts_smp_rwmtx_init_opt_x(&tb->locks->lck_vec[i].lck, &rwmtx_opt, "db_hash_slot", make_small(i)); } - /* This important property is needed to guarantee that the buckets + /* This important property is needed to guarantee the two buckets * involved in a grow/shrink operation it protected by the same lock: */ ASSERT(erts_smp_atomic_read_nob(&tb->nactive) % DB_HASH_LOCK_CNT == 0); @@ -2213,12 +2225,12 @@ static int db_free_table_continue_hash(DbTable *tbl) done /= 2; while(tb->nslots != 0) { - free_seg(tb, 1); + done += 1 + EXT_SEGSZ/64 + free_seg(tb, 1); /* * If we have done enough work, get out here. 
*/ - if (++done >= (DELETE_RECORD_LIMIT / CHAIN_LEN / SEGSZ)) { + if (done >= DELETE_RECORD_LIMIT) { return 0; /* Not done */ } } @@ -2406,9 +2418,10 @@ static struct ext_segtab* alloc_ext_segtab(DbTableHash* tb, unsigned seg_ix) */ static int alloc_seg(DbTableHash *tb) { - int seg_ix = tb->nslots >> SEGSZ_EXP; + int seg_ix = SLOT_IX_TO_SEG_IX(tb->nslots); struct segment** segtab; + ASSERT(seg_ix > 0); if (seg_ix == tb->nsegs) { /* New segtab needed */ struct ext_segtab* est = alloc_ext_segtab(tb, seg_ix); SET_SEGTAB(tb, est->segtab); @@ -2418,9 +2431,9 @@ static int alloc_seg(DbTableHash *tb) segtab = SEGTAB(tb); segtab[seg_ix] = (struct segment*) erts_db_alloc(ERTS_ALC_T_DB_SEG, (DbTable *) tb, - SIZEOF_SEGMENT(SEGSZ)); - sys_memset(segtab[seg_ix], 0, SIZEOF_SEGMENT(SEGSZ)); - tb->nslots += SEGSZ; + SIZEOF_SEGMENT(EXT_SEGSZ)); + sys_memset(segtab[seg_ix], 0, SIZEOF_SEGMENT(EXT_SEGSZ)); + tb->nslots += EXT_SEGSZ; return 1; } @@ -2438,9 +2451,10 @@ static void dealloc_ext_segtab(void* lop_data) */ static int free_seg(DbTableHash *tb, int free_records) { - const int seg_ix = (tb->nslots >> SEGSZ_EXP) - 1; + const int seg_ix = SLOT_IX_TO_SEG_IX(tb->nslots) - 1; struct segment** const segtab = SEGTAB(tb); struct segment* const segp = segtab[seg_ix]; + Uint seg_sz; int nrecords = 0; ASSERT(segp != NULL); @@ -2448,8 +2462,8 @@ static int free_seg(DbTableHash *tb, int free_records) if (free_records) #endif { - int i; - for (i=0; ibuckets[i]; while(p != 0) { HashDbTerm* nxt = p->next; @@ -2495,13 +2509,14 @@ static int free_seg(DbTableHash *tb, int free_records) } } - erts_db_free(ERTS_ALC_T_DB_SEG, (DbTable *)tb, segp, SIZEOF_SEGMENT(SEGSZ)); + seg_sz = (seg_ix == 0) ? FIRST_SEGSZ : EXT_SEGSZ; + erts_db_free(ERTS_ALC_T_DB_SEG, (DbTable *)tb, segp, SIZEOF_SEGMENT(seg_sz)); #ifdef DEBUG if (seg_ix < tb->nsegs) SEGTAB(tb)[seg_ix] = NULL; #endif - tb->nslots -= SEGSZ; + tb->nslots -= seg_sz; ASSERT(tb->nslots >= 0); return nrecords; } @@ -2589,7 +2604,7 @@ static void grow(DbTableHash* tb, int nactive) /* Ensure that the slot nactive exists */ if (nactive == tb->nslots) { /* Time to get a new segment */ - ASSERT((nactive & SEGSZ_MASK) == 0); + ASSERT(((nactive-FIRST_SEGSZ) & EXT_SEGSZ_MASK) == 0); if (!alloc_seg(tb)) goto abort; } ASSERT(nactive < tb->nslots); @@ -2668,7 +2683,7 @@ static void shrink(DbTableHash* tb, int nactive) int dst_ix = src_ix & low_szm; ASSERT(dst_ix < src_ix); - ASSERT(nactive > SEGSZ); + ASSERT(nactive > FIRST_SEGSZ); lck = WLOCK_HASH(tb, dst_ix); /* Double check for racing table fixers */ if (!IS_FIXED(tb)) { @@ -2697,7 +2712,7 @@ static void shrink(DbTableHash* tb, int nactive) } WUNLOCK_HASH(lck); - if (tb->nslots - src_ix >= SEGSZ) { + if (tb->nslots - src_ix >= EXT_SEGSZ) { free_seg(tb, 0); } } -- cgit v1.2.3 From 92c98a138638541a710f17f21073b568362502f8 Mon Sep 17 00:00:00 2001 From: Sverker Eriksson Date: Wed, 24 Aug 2016 18:26:55 +0200 Subject: erts: Reduce ets hash load factor for faster lookup/insert/delete at the expense of about one word per object. 
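In effect this moves the grow/shrink thresholds from an average chain length
of about seven (the old CHAIN_LEN based limits) down to about one. Below is a
standalone sketch of the new decision logic; the macro values are taken from
the diff that follows, while the helper functions and example numbers are
illustrative, not emulator code:

    #include <stdio.h>

    #define GROW_LIMIT(NACTIVE)   ((NACTIVE) * 1)  /* was nactive * (CHAIN_LEN+1), CHAIN_LEN == 6 */
    #define SHRINK_LIMIT(NACTIVE) ((NACTIVE) / 2)  /* was nactive * CHAIN_LEN */

    /* Grow as soon as there are more items than active buckets. */
    static int should_grow(int nitems, int nactive, int is_fixed)
    {
        return nitems > GROW_LIMIT(nactive) && !is_fixed;
    }

    /* Shrink when the table is less than half full, but never below the
     * first mandatory segment. */
    static int should_shrink(int nitems, int nactive, int first_segsz, int is_fixed)
    {
        return nactive > first_segsz
            && nitems < SHRINK_LIMIT(nactive)
            && !is_fixed;
    }

    int main(void)
    {
        printf("%d %d\n",
               should_grow(1000, 800, 0),          /* 1: 1000 > 800 */
               should_shrink(300, 800, 256, 0));   /* 1: 300 < 400  */
        return 0;
    }
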
--- erts/emulator/beam/erl_db_hash.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) (limited to 'erts/emulator/beam') diff --git a/erts/emulator/beam/erl_db_hash.c b/erts/emulator/beam/erl_db_hash.c index 2d24f438ca..1752ec5191 100644 --- a/erts/emulator/beam/erl_db_hash.c +++ b/erts/emulator/beam/erl_db_hash.c @@ -87,7 +87,8 @@ /* * The following symbols can be manipulated to "tune" the linear hash array */ -#define CHAIN_LEN 6 /* Medium bucket chain len */ +#define GROW_LIMIT(NACTIVE) ((NACTIVE)*1) +#define SHRINK_LIMIT(NACTIVE) ((NACTIVE) / 2) /* ** We want the first mandatory segment to be small (to reduce minimal footprint) @@ -434,7 +435,7 @@ db_finalize_dbterm_hash(int cret, DbUpdateHandle* handle); static ERTS_INLINE void try_shrink(DbTableHash* tb) { int nactive = NACTIVE(tb); - if (nactive > FIRST_SEGSZ && NITEMS(tb) < (nactive * CHAIN_LEN) + if (nactive > FIRST_SEGSZ && NITEMS(tb) < SHRINK_LIMIT(nactive) && !IS_FIXED(tb)) { shrink(tb, nactive); } @@ -837,7 +838,7 @@ Lnew: WUNLOCK_HASH(lck); { int nactive = NACTIVE(tb); - if (nitems > nactive * (CHAIN_LEN+1) && !IS_FIXED(tb)) { + if (nitems > GROW_LIMIT(nactive) && !IS_FIXED(tb)) { grow(tb, nactive); } } @@ -2891,7 +2892,7 @@ db_finalize_dbterm_hash(int cret, DbUpdateHandle* handle) WUNLOCK_HASH(lck); nactive = NACTIVE(tb); - if (nitems > nactive * (CHAIN_LEN + 1) && !IS_FIXED(tb)) { + if (nitems > GROW_LIMIT(nactive) && !IS_FIXED(tb)) { grow(tb, nactive); } } else { -- cgit v1.2.3 From 25eb3fe353cb0f5c381107e43a865d3a312c8c25 Mon Sep 17 00:00:00 2001 From: Sverker Eriksson Date: Tue, 30 Aug 2016 11:49:20 +0200 Subject: erts: Suppress failed ETS memory checks due to the grow/shrink hysteresis of the meta tables --- erts/emulator/beam/erl_bif_info.c | 8 ++++++++ erts/emulator/beam/erl_db.c | 30 +++++++++++++++++++++++++++ erts/emulator/beam/erl_db.h | 2 ++ erts/emulator/beam/erl_db_hash.c | 43 ++++++++++++++++++++++++++++++++++++++- erts/emulator/beam/erl_db_hash.h | 2 ++ 5 files changed, 84 insertions(+), 1 deletion(-) (limited to 'erts/emulator/beam') diff --git a/erts/emulator/beam/erl_bif_info.c b/erts/emulator/beam/erl_bif_info.c index 3fb866733c..abf20a90e4 100644 --- a/erts/emulator/beam/erl_bif_info.c +++ b/erts/emulator/beam/erl_bif_info.c @@ -3547,6 +3547,10 @@ BIF_RETTYPE erts_debug_get_internal_state_1(BIF_ALIST_1) size_t words = (sizeof(DbTable) + sizeof(Uint) - 1)/sizeof(Uint); BIF_RET(make_small((Uint) words)); } + else if (ERTS_IS_ATOM_STR("DbTable_meta", BIF_ARG_1)) { + /* Used by ets_SUITE (stdlib) */ + BIF_RET(erts_ets_get_meta_state(BIF_P)); + } else if (ERTS_IS_ATOM_STR("check_io_debug", BIF_ARG_1)) { /* Used by driver_SUITE (emulator) */ Uint sz, *szp; @@ -4280,6 +4284,10 @@ BIF_RETTYPE erts_debug_set_internal_state_2(BIF_ALIST_2) } BIF_RET(am_ok); } + else if (ERTS_IS_ATOM_STR("DbTable_meta", BIF_ARG_1)) { + /* Used by ets_SUITE (stdlib) */ + BIF_RET(erts_ets_restore_meta_state(BIF_P, BIF_ARG_2)); + } } BIF_ERROR(BIF_P, BADARG); diff --git a/erts/emulator/beam/erl_db.c b/erts/emulator/beam/erl_db.c index bad34211a5..df4e34511f 100644 --- a/erts/emulator/beam/erl_db.c +++ b/erts/emulator/beam/erl_db.c @@ -3978,6 +3978,36 @@ erts_ets_colliding_names(Process* p, Eterm name, Uint cnt) return list; } +/* + * For testing only + * Retreive meta table size state + */ +Eterm erts_ets_get_meta_state(Process* p) +{ + Eterm* hp = HAlloc(p, 3); + return TUPLE2(hp, + erts_ets_hash_get_memstate(p, &meta_pid_to_tab->hash), + erts_ets_hash_get_memstate(p, &meta_pid_to_fixed_tab->hash)); +} +/* + * For testing 
only + * Restore a previously retrieved meta table size state. + * We do this to suppress failed memory checks + * caused by the hysteresis of meta tables grow/shrink limits. + */ +Eterm erts_ets_restore_meta_state(Process* p, Eterm meta_state) +{ + Eterm* tv; + Eterm* hp; + if (!is_tuple_arity(meta_state, 2)) + return am_badarg; + + tv = tuple_val(meta_state); + hp = HAlloc(p, 3); + return TUPLE2(hp, + erts_ets_hash_restore_memstate(&meta_pid_to_tab->hash, tv[1]), + erts_ets_hash_restore_memstate(&meta_pid_to_fixed_tab->hash, tv[2])); +} #ifdef HARDDEBUG /* Here comes some debug functions */ diff --git a/erts/emulator/beam/erl_db.h b/erts/emulator/beam/erl_db.h index 2f3c4a8e1b..f7eb3dc45c 100644 --- a/erts/emulator/beam/erl_db.h +++ b/erts/emulator/beam/erl_db.h @@ -90,6 +90,8 @@ extern Export ets_select_continue_exp; extern erts_smp_atomic_t erts_ets_misc_mem_size; Eterm erts_ets_colliding_names(Process*, Eterm name, Uint cnt); +Eterm erts_ets_get_meta_state(Process* p); +Eterm erts_ets_restore_meta_state(Process* p, Eterm target_state); Uint erts_db_get_max_tabs(void); diff --git a/erts/emulator/beam/erl_db_hash.c b/erts/emulator/beam/erl_db_hash.c index 1752ec5191..581b135233 100644 --- a/erts/emulator/beam/erl_db_hash.c +++ b/erts/emulator/beam/erl_db_hash.c @@ -2726,7 +2726,6 @@ static void shrink(DbTableHash* tb, int nactive) done_resizing(tb); } - /* Search a list of tuples for a matching key */ static HashDbTerm* search_list(DbTableHash* tb, Eterm key, @@ -2977,6 +2976,48 @@ void db_calc_stats_hash(DbTableHash* tb, DbHashStats* stats) stats->std_dev_expected = sqrt(stats->avg_chain_len * (1 - 1.0/NACTIVE(tb))); stats->kept_items = kept_items; } + +/* For testing only */ +Eterm erts_ets_hash_get_memstate(Process* p, DbTableHash* tb) +{ + Eterm seg_cnt; + while (!begin_resizing(tb)) + /*spinn*/; + + seg_cnt = make_small(SLOT_IX_TO_SEG_IX(tb->nslots)); + done_resizing(tb); + return seg_cnt; +} +/* For testing only */ +Eterm erts_ets_hash_restore_memstate(DbTableHash* tb, Eterm memstate) +{ + int seg_cnt, target; + int nactive; + + if (!is_small(memstate)) + return make_small(__LINE__); + + target = signed_val(memstate); + if (target < 1) + return make_small(__LINE__); + while (1) { + while (!begin_resizing(tb)) + /*spin*/; + seg_cnt = SLOT_IX_TO_SEG_IX(tb->nslots); + nactive = NACTIVE(tb); + done_resizing(tb); + + if (target == seg_cnt) + return am_ok; + if (IS_FIXED(tb)) + return make_small(__LINE__); + if (target < seg_cnt) + shrink(tb, nactive); + else + grow(tb, nactive); + } +} + #ifdef HARDDEBUG void db_check_table_hash(DbTable *tbl) diff --git a/erts/emulator/beam/erl_db_hash.h b/erts/emulator/beam/erl_db_hash.h index 2d9b5e308a..e209037878 100644 --- a/erts/emulator/beam/erl_db_hash.h +++ b/erts/emulator/beam/erl_db_hash.h @@ -107,5 +107,7 @@ typedef struct { }DbHashStats; void db_calc_stats_hash(DbTableHash* tb, DbHashStats*); +Eterm erts_ets_hash_get_memstate(Process*, DbTableHash* tb); +Eterm erts_ets_hash_restore_memstate(DbTableHash* tb, Eterm memstate); #endif /* _DB_HASH_H */ -- cgit v1.2.3 From fdc2f2b4a6f6314ae7c183dc4e39e19d05ac89e4 Mon Sep 17 00:00:00 2001 From: Sverker Eriksson Date: Fri, 2 Sep 2016 20:04:47 +0200 Subject: erts: Fix ets_SUITE:memory by simply asking for the size of struct ext_segtab --- erts/emulator/beam/erl_bif_info.c | 4 +++- erts/emulator/beam/erl_db_hash.c | 5 +++++ erts/emulator/beam/erl_db_hash.h | 1 + 3 files changed, 9 insertions(+), 1 deletion(-) (limited to 'erts/emulator/beam') diff --git a/erts/emulator/beam/erl_bif_info.c 
b/erts/emulator/beam/erl_bif_info.c index abf20a90e4..13391b7c67 100644 --- a/erts/emulator/beam/erl_bif_info.c +++ b/erts/emulator/beam/erl_bif_info.c @@ -3545,7 +3545,9 @@ BIF_RETTYPE erts_debug_get_internal_state_1(BIF_ALIST_1) else if (ERTS_IS_ATOM_STR("DbTable_words", BIF_ARG_1)) { /* Used by ets_SUITE (stdlib) */ size_t words = (sizeof(DbTable) + sizeof(Uint) - 1)/sizeof(Uint); - BIF_RET(make_small((Uint) words)); + Eterm* hp = HAlloc(BIF_P ,3); + BIF_RET(TUPLE2(hp, make_small((Uint) words), + erts_ets_hash_sizeof_ext_segtab())); } else if (ERTS_IS_ATOM_STR("DbTable_meta", BIF_ARG_1)) { /* Used by ets_SUITE (stdlib) */ diff --git a/erts/emulator/beam/erl_db_hash.c b/erts/emulator/beam/erl_db_hash.c index 581b135233..3f7e14d15d 100644 --- a/erts/emulator/beam/erl_db_hash.c +++ b/erts/emulator/beam/erl_db_hash.c @@ -2977,6 +2977,11 @@ void db_calc_stats_hash(DbTableHash* tb, DbHashStats* stats) stats->kept_items = kept_items; } +/* For testing only */ +Eterm erts_ets_hash_sizeof_ext_segtab(void) +{ + return make_small(((SIZEOF_EXT_SEGTAB(0)-1) / sizeof(UWord)) + 1); +} /* For testing only */ Eterm erts_ets_hash_get_memstate(Process* p, DbTableHash* tb) { diff --git a/erts/emulator/beam/erl_db_hash.h b/erts/emulator/beam/erl_db_hash.h index e209037878..11bd6aa32a 100644 --- a/erts/emulator/beam/erl_db_hash.h +++ b/erts/emulator/beam/erl_db_hash.h @@ -107,6 +107,7 @@ typedef struct { }DbHashStats; void db_calc_stats_hash(DbTableHash* tb, DbHashStats*); +Eterm erts_ets_hash_sizeof_ext_segtab(void); Eterm erts_ets_hash_get_memstate(Process*, DbTableHash* tb); Eterm erts_ets_hash_restore_memstate(DbTableHash* tb, Eterm memstate); -- cgit v1.2.3 From 1573603177f5ba641390c67b0515defb2786ecfb Mon Sep 17 00:00:00 2001 From: Sverker Eriksson Date: Tue, 6 Sep 2016 17:42:30 +0200 Subject: erts: Tweak ets grow/shrink to keep up at contention --- erts/emulator/beam/erl_db_hash.c | 287 +++++++++++++++++++++------------------ erts/emulator/beam/erl_db_hash.h | 8 +- 2 files changed, 161 insertions(+), 134 deletions(-) (limited to 'erts/emulator/beam') diff --git a/erts/emulator/beam/erl_db_hash.c b/erts/emulator/beam/erl_db_hash.c index 3f7e14d15d..698e4c301b 100644 --- a/erts/emulator/beam/erl_db_hash.c +++ b/erts/emulator/beam/erl_db_hash.c @@ -194,6 +194,7 @@ static ERTS_INLINE int add_fixed_deletion(DbTableHash* tb, int ix, #ifdef ERTS_SMP # define DB_HASH_LOCK_MASK (DB_HASH_LOCK_CNT-1) # define GET_LOCK(tb,hval) (&(tb)->locks->lck_vec[(hval) & DB_HASH_LOCK_MASK].lck) +# define GET_LOCK_MAYBE(tb,hval) ((tb)->common.is_thread_safe ? 
NULL : GET_LOCK(tb,hval)) /* Fine grained read lock */ static ERTS_INLINE erts_smp_rwmtx_t* RLOCK_HASH(DbTableHash* tb, HashValue hval) @@ -330,7 +331,9 @@ struct segment { /* An extended segment table */ struct ext_segtab { +#ifdef ERTS_SMP ErtsThrPrgrLaterOp lop; +#endif struct segment** prev_segtab; /* Used when table is shrinking */ int prev_nsegs; /* Size of prev_segtab */ int nsegs; /* Size of this segtab */ @@ -355,14 +358,14 @@ static ERTS_INLINE void SET_SEGTAB(DbTableHash* tb, ** Forward decl's (static functions) */ static struct ext_segtab* alloc_ext_segtab(DbTableHash* tb, unsigned seg_ix); -static int alloc_seg(DbTableHash *tb); +static void alloc_seg(DbTableHash *tb); static int free_seg(DbTableHash *tb, int free_records); static HashDbTerm* next(DbTableHash *tb, Uint *iptr, erts_smp_rwmtx_t** lck_ptr, HashDbTerm *list); static HashDbTerm* search_list(DbTableHash* tb, Eterm key, HashValue hval, HashDbTerm *list); -static void shrink(DbTableHash* tb, int nactive); -static void grow(DbTableHash* tb, int nactive); +static void shrink(DbTableHash* tb, int nitems); +static void grow(DbTableHash* tb, int nitems); static Eterm build_term_list(Process* p, HashDbTerm* ptr1, HashDbTerm* ptr2, Uint sz, DbTableHash*); static int analyze_pattern(DbTableHash *tb, Eterm pattern, @@ -435,9 +438,10 @@ db_finalize_dbterm_hash(int cret, DbUpdateHandle* handle); static ERTS_INLINE void try_shrink(DbTableHash* tb) { int nactive = NACTIVE(tb); - if (nactive > FIRST_SEGSZ && NITEMS(tb) < SHRINK_LIMIT(nactive) + int nitems = NITEMS(tb); + if (nactive > FIRST_SEGSZ && nitems < SHRINK_LIMIT(nactive) && !IS_FIXED(tb)) { - shrink(tb, nactive); + shrink(tb, nitems); } } @@ -839,7 +843,7 @@ Lnew: { int nactive = NACTIVE(tb); if (nitems > GROW_LIMIT(nactive) && !IS_FIXED(tb)) { - grow(tb, nactive); + grow(tb, nitems); } } CHECK_TABLES(); @@ -2417,7 +2421,7 @@ static struct ext_segtab* alloc_ext_segtab(DbTableHash* tb, unsigned seg_ix) /* Extend table with one new segment */ -static int alloc_seg(DbTableHash *tb) +static void alloc_seg(DbTableHash *tb) { int seg_ix = SLOT_IX_TO_SEG_IX(tb->nslots); struct segment** segtab; @@ -2435,7 +2439,6 @@ static int alloc_seg(DbTableHash *tb) SIZEOF_SEGMENT(EXT_SEGSZ)); sys_memset(segtab[seg_ix], 0, SIZEOF_SEGMENT(EXT_SEGSZ)); tb->nslots += EXT_SEGSZ; - return 1; } #ifdef ERTS_SMP @@ -2506,9 +2509,6 @@ static int free_seg(DbTableHash *tb, int free_records) erts_db_free(ERTS_ALC_T_DB_SEG, (DbTable*)tb, est, SIZEOF_EXT_SEGTAB(est->nsegs)); } - else { - - } } seg_sz = (seg_ix == 0) ? FIRST_SEGSZ : EXT_SEGSZ; erts_db_free(ERTS_ALC_T_DB_SEG, (DbTable *)tb, segp, SIZEOF_SEGMENT(seg_sz)); @@ -2584,85 +2584,93 @@ done_resizing(DbTableHash* tb) #endif } -/* Grow table with one new bucket. +/* Grow table with one or more new buckets. ** Allocate new segment if needed. 
*/ -static void grow(DbTableHash* tb, int nactive) +static void grow(DbTableHash* tb, int nitems) { HashDbTerm** pnext; HashDbTerm** to_pnext; HashDbTerm* p; erts_smp_rwmtx_t* lck; - int from_ix; + int nactive; + int from_ix, to_ix; int szm; + int loop_limit = 5; - if (!begin_resizing(tb)) - return; /* already in progress */ - if (NACTIVE(tb) != nactive) { - goto abort; /* already done (race) */ - } + do { + if (!begin_resizing(tb)) + return; /* already in progress */ + nactive = NACTIVE(tb); + if (nitems <= GROW_LIMIT(nactive)) { + goto abort; /* already done (race) */ + } - /* Ensure that the slot nactive exists */ - if (nactive == tb->nslots) { - /* Time to get a new segment */ - ASSERT(((nactive-FIRST_SEGSZ) & EXT_SEGSZ_MASK) == 0); - if (!alloc_seg(tb)) goto abort; - } - ASSERT(nactive < tb->nslots); + /* Ensure that the slot nactive exists */ + if (nactive == tb->nslots) { + /* Time to get a new segment */ + ASSERT(((nactive-FIRST_SEGSZ) & EXT_SEGSZ_MASK) == 0); + alloc_seg(tb); + } + ASSERT(nactive < tb->nslots); - szm = erts_smp_atomic_read_nob(&tb->szm); - if (nactive <= szm) { - from_ix = nactive & (szm >> 1); - } else { - ASSERT(nactive == szm+1); - from_ix = 0; - szm = (szm<<1) | 1; - } + szm = erts_smp_atomic_read_nob(&tb->szm); + if (nactive <= szm) { + from_ix = nactive & (szm >> 1); + } else { + ASSERT(nactive == szm+1); + from_ix = 0; + szm = (szm<<1) | 1; + } + to_ix = nactive; + + lck = WLOCK_HASH(tb, from_ix); + ERTS_SMP_ASSERT(lck == GET_LOCK_MAYBE(tb,to_ix)); + /* Now a final double check (with the from_ix lock held) + * that we did not get raced by a table fixer. + */ + if (IS_FIXED(tb)) { + WUNLOCK_HASH(lck); + goto abort; + } + erts_smp_atomic_set_nob(&tb->nactive, ++nactive); + if (from_ix == 0) { + if (DB_USING_FINE_LOCKING(tb)) + erts_smp_atomic_set_relb(&tb->szm, szm); + else + erts_smp_atomic_set_nob(&tb->szm, szm); + } + done_resizing(tb); - lck = WLOCK_HASH(tb, from_ix); - /* Now a final double check (with the from_ix lock held) - * that we did not get raced by a table fixer. - */ - if (IS_FIXED(tb)) { - WUNLOCK_HASH(lck); - goto abort; - } - erts_smp_atomic_inc_nob(&tb->nactive); - if (from_ix == 0) { - if (DB_USING_FINE_LOCKING(tb)) - erts_smp_atomic_set_relb(&tb->szm, szm); - else - erts_smp_atomic_set_nob(&tb->szm, szm); - } - done_resizing(tb); + /* Finally, let's split the bucket. We try to do it in a smart way + to keep link order and avoid unnecessary updates of next-pointers */ + pnext = &BUCKET(tb, from_ix); + p = *pnext; + to_pnext = &BUCKET(tb, to_ix); + while (p != NULL) { + if (p->hvalue == INVALID_HASH) { /* rare but possible with fine locking */ + *pnext = p->next; + free_term(tb, p); + p = *pnext; + } + else { + int ix = p->hvalue & szm; + if (ix != from_ix) { + ASSERT(ix == (from_ix ^ ((szm+1)>>1))); + *to_pnext = p; + /* Swap "from" and "to": */ + from_ix = ix; + to_pnext = pnext; + } + pnext = &p->next; + p = *pnext; + } + } + *to_pnext = NULL; + WUNLOCK_HASH(lck); - /* Finally, let's split the bucket. 
We try to do it in a smart way - to keep link order and avoid unnecessary updates of next-pointers */ - pnext = &BUCKET(tb, from_ix); - p = *pnext; - to_pnext = &BUCKET(tb, nactive); - while (p != NULL) { - if (p->hvalue == INVALID_HASH) { /* rare but possible with fine locking */ - *pnext = p->next; - free_term(tb, p); - p = *pnext; - } - else { - int ix = p->hvalue & szm; - if (ix != from_ix) { - ASSERT(ix == (from_ix ^ ((szm+1)>>1))); - *to_pnext = p; - /* Swap "from" and "to": */ - from_ix = ix; - to_pnext = pnext; - } - pnext = &p->next; - p = *pnext; - } - } - *to_pnext = NULL; + }while (--loop_limit && nitems > GROW_LIMIT(nactive)); - WUNLOCK_HASH(lck); return; abort: @@ -2673,56 +2681,75 @@ abort: /* Shrink table by joining top bucket. ** Remove top segment if it gets empty. */ -static void shrink(DbTableHash* tb, int nactive) -{ - if (!begin_resizing(tb)) - return; /* already in progress */ - if (NACTIVE(tb) == nactive) { - erts_smp_rwmtx_t* lck; - int src_ix = nactive - 1; - int low_szm = erts_smp_atomic_read_nob(&tb->szm) >> 1; - int dst_ix = src_ix & low_szm; - - ASSERT(dst_ix < src_ix); - ASSERT(nactive > FIRST_SEGSZ); - lck = WLOCK_HASH(tb, dst_ix); - /* Double check for racing table fixers */ - if (!IS_FIXED(tb)) { - HashDbTerm** src_bp = &BUCKET(tb, src_ix); - HashDbTerm** dst_bp = &BUCKET(tb, dst_ix); - HashDbTerm** bp = src_bp; - - /* Q: Why join lists by appending "dst" at the end of "src"? - A: Must step through "src" anyway to purge pseudo deleted. */ - while(*bp != NULL) { - if ((*bp)->hvalue == INVALID_HASH) { - HashDbTerm* deleted = *bp; - *bp = deleted->next; - free_term(tb, deleted); - } else { - bp = &(*bp)->next; - } - } - *bp = *dst_bp; - *dst_bp = *src_bp; - *src_bp = NULL; - - erts_smp_atomic_set_nob(&tb->nactive, src_ix); - if (dst_ix == 0) { - erts_smp_atomic_set_relb(&tb->szm, low_szm); - } - WUNLOCK_HASH(lck); - - if (tb->nslots - src_ix >= EXT_SEGSZ) { - free_seg(tb, 0); - } - } - else { - WUNLOCK_HASH(lck); - } +static void shrink(DbTableHash* tb, int nitems) +{ + HashDbTerm** src_bp; + HashDbTerm** dst_bp; + HashDbTerm** bp; + erts_smp_rwmtx_t* lck; + int src_ix, dst_ix, low_szm; + int nactive; + int loop_limit = 5; - } - /*else already done */ + do { + if (!begin_resizing(tb)) + return; /* already in progress */ + nactive = NACTIVE(tb); + if (!(nactive > FIRST_SEGSZ && nitems < SHRINK_LIMIT(nactive))) { + goto abort; /* already done (race) */ + } + src_ix = nactive - 1; + low_szm = erts_smp_atomic_read_nob(&tb->szm) >> 1; + dst_ix = src_ix & low_szm; + + ASSERT(dst_ix < src_ix); + ASSERT(nactive > FIRST_SEGSZ); + lck = WLOCK_HASH(tb, dst_ix); + ERTS_SMP_ASSERT(lck == GET_LOCK_MAYBE(tb,src_ix)); + /* Double check for racing table fixers */ + if (IS_FIXED(tb)) { + WUNLOCK_HASH(lck); + goto abort; + } + + src_bp = &BUCKET(tb, src_ix); + dst_bp = &BUCKET(tb, dst_ix); + bp = src_bp; + + /* + * We join lists by appending "dst" at the end of "src" + * as we must step through "src" anyway to purge pseudo deleted. 
+ */ + while(*bp != NULL) { + if ((*bp)->hvalue == INVALID_HASH) { + HashDbTerm* deleted = *bp; + *bp = deleted->next; + free_term(tb, deleted); + } else { + bp = &(*bp)->next; + } + } + *bp = *dst_bp; + *dst_bp = *src_bp; + *src_bp = NULL; + + nactive = src_ix; + erts_smp_atomic_set_nob(&tb->nactive, nactive); + if (dst_ix == 0) { + erts_smp_atomic_set_relb(&tb->szm, low_szm); + } + WUNLOCK_HASH(lck); + + if (tb->nslots - src_ix >= EXT_SEGSZ) { + free_seg(tb, 0); + } + done_resizing(tb); + + } while (--loop_limit + && nactive > FIRST_SEGSZ && nitems < SHRINK_LIMIT(nactive)); + return; + +abort: done_resizing(tb); } @@ -2892,7 +2919,7 @@ db_finalize_dbterm_hash(int cret, DbUpdateHandle* handle) nactive = NACTIVE(tb); if (nitems > GROW_LIMIT(nactive) && !IS_FIXED(tb)) { - grow(tb, nactive); + grow(tb, nitems); } } else { WUNLOCK_HASH(lck); @@ -2997,7 +3024,6 @@ Eterm erts_ets_hash_get_memstate(Process* p, DbTableHash* tb) Eterm erts_ets_hash_restore_memstate(DbTableHash* tb, Eterm memstate) { int seg_cnt, target; - int nactive; if (!is_small(memstate)) return make_small(__LINE__); @@ -3009,7 +3035,6 @@ Eterm erts_ets_hash_restore_memstate(DbTableHash* tb, Eterm memstate) while (!begin_resizing(tb)) /*spin*/; seg_cnt = SLOT_IX_TO_SEG_IX(tb->nslots); - nactive = NACTIVE(tb); done_resizing(tb); if (target == seg_cnt) @@ -3017,9 +3042,9 @@ Eterm erts_ets_hash_restore_memstate(DbTableHash* tb, Eterm memstate) if (IS_FIXED(tb)) return make_small(__LINE__); if (target < seg_cnt) - shrink(tb, nactive); + shrink(tb, 0); else - grow(tb, nactive); + grow(tb, INT_MAX); } } @@ -3031,7 +3056,7 @@ void db_check_table_hash(DbTable *tbl) HashDbTerm* list; int j; - for (j = 0; j < tb->nactive; j++) { + for (j = 0; j < NACTIVE(tb); j++) { if ((list = BUCKET(tb,j)) != 0) { while (list != 0) { if (!is_tuple(make_tuple(list->dbterm.tpl))) { diff --git a/erts/emulator/beam/erl_db_hash.h b/erts/emulator/beam/erl_db_hash.h index 11bd6aa32a..6d25c73549 100644 --- a/erts/emulator/beam/erl_db_hash.h +++ b/erts/emulator/beam/erl_db_hash.h @@ -50,17 +50,19 @@ typedef struct db_table_hash_fine_locks { typedef struct db_table_hash { DbTableCommon common; + /* SMP: szm and nactive are write-protected by is_resizing or table write lock */ + erts_smp_atomic_t szm; /* current size mask. */ + erts_smp_atomic_t nactive; /* Number of "active" slots */ + erts_smp_atomic_t segtab; /* The segment table (struct segment**) */ struct segment* first_segtab[1]; - erts_smp_atomic_t szm; /* current size mask. */ - + /* SMP: nslots and nsegs are protected by is_resizing or table write lock */ int nslots; /* Total number of slots */ int nsegs; /* Size of segment table */ /* List of slots where elements have been deleted while table was fixed */ erts_smp_atomic_t fixdel; /* (FixedDeletion*) */ - erts_smp_atomic_t nactive; /* Number of "active" slots */ #ifdef ERTS_SMP erts_smp_atomic_t is_resizing; /* grow/shrink in progress */ DbTableHashFineLocks* locks; -- cgit v1.2.3 From d100187bde4f3f555b4f3cd9561ec8c67a528895 Mon Sep 17 00:00:00 2001 From: Sverker Eriksson Date: Tue, 13 Sep 2016 18:28:09 +0200 Subject: erts: Unify reduction count for ets:select to be per object as the other select-variants and not per table slot. 
--- erts/emulator/beam/erl_db_hash.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'erts/emulator/beam') diff --git a/erts/emulator/beam/erl_db_hash.c b/erts/emulator/beam/erl_db_hash.c index 698e4c301b..40df1c5356 100644 --- a/erts/emulator/beam/erl_db_hash.c +++ b/erts/emulator/beam/erl_db_hash.c @@ -1494,6 +1494,7 @@ static int db_select_chunk_hash(Process *p, DbTable *tbl, match_list = CONS(hp, match_res, match_list); ++got; } + --num_left; } current = current->next; } @@ -1511,7 +1512,6 @@ static int db_select_chunk_hash(Process *p, DbTable *tbl, } } else { /* Key is variable */ - --num_left; if ((slot_ix=next_slot(tb,slot_ix,&lck)) == 0) { slot_ix = -1; -- cgit v1.2.3
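
The grow/shrink rework in the contention patch above replaces the old one-bucket-per-call resize with a bounded loop: claim the is_resizing flag, re-check the trigger condition in case another thread already resized, move one bucket, release the flag, and repeat at most five times (loop_limit) while the table is still over (or under) its limit. This lets a table keep up with sustained insert or delete pressure without letting any single operation spend unbounded time resizing. Below is a rough, standalone sketch of the growing half of that pattern only; struct table, maybe_grow, split_one_bucket, RESIZE_BATCH and the load factor of 5 are invented for illustration and are not ERTS names, and the real code additionally takes a per-bucket write lock and re-checks table fixation before each step, which is omitted here.

/*
 * Minimal sketch (not ERTS code) of a batched hash-table resize:
 * claim the resize flag, re-check the trigger, do one step, release,
 * and repeat a bounded number of times.
 */
#include <stdatomic.h>

#define GROW_LIMIT(nbuckets) ((nbuckets) * 5)  /* grow when avg chain length > 5 */
#define RESIZE_BATCH 5                         /* bound the work done per call */

struct table {
    atomic_int  nitems;        /* number of stored objects */
    atomic_int  nbuckets;      /* number of active buckets */
    atomic_flag is_resizing;   /* only one resizer at a time */
};

/* Redistribute the entries that move to the new bucket; details elided. */
static void split_one_bucket(struct table *tab, int new_bucket)
{
    (void)tab; (void)new_bucket;
}

static void maybe_grow(struct table *tab, int nitems)
{
    int budget = RESIZE_BATCH;

    do {
        if (atomic_flag_test_and_set(&tab->is_resizing))
            return;                            /* someone else is resizing */

        int nbuckets = atomic_load(&tab->nbuckets);
        if (nitems <= GROW_LIMIT(nbuckets)) {  /* re-check: we may have raced */
            atomic_flag_clear(&tab->is_resizing);
            return;
        }

        split_one_bucket(tab, nbuckets);       /* grow by exactly one bucket */
        atomic_store(&tab->nbuckets, nbuckets + 1);
        atomic_flag_clear(&tab->is_resizing);

        nitems = atomic_load(&tab->nitems);    /* writers may have kept inserting */
    } while (--budget && nitems > GROW_LIMIT(atomic_load(&tab->nbuckets)));
}

The batch bound plays the same role as loop_limit in the patch: even when the table is far from its target size, each insert or delete pays for at most a few resize steps, so operation latency stays predictable while the table converges over successive calls.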