Diffstat (limited to 'erts/emulator')
47 files changed, 465 insertions, 3730 deletions
diff --git a/erts/emulator/Makefile.in b/erts/emulator/Makefile.in
index 2efbe2d57e..6040308aa7 100644
--- a/erts/emulator/Makefile.in
+++ b/erts/emulator/Makefile.in
@@ -136,13 +136,6 @@ ENABLE_ALLOC_TYPE_VARS += smp nofrag
 M4FLAGS += -DERTS_SMP=1
 else
-ifeq ($(FLAVOR),hybrid)
-FLAVOR_MARKER=.hybrid
-FLAVOR_FLAGS=-DHYBRID
-ENABLE_ALLOC_TYPE_VARS += hybrid
-else
-
-
 # If flavor isn't one of the above, it *is* plain flavor...
 override FLAVOR=plain
 FLAVOR_MARKER=
@@ -151,7 +144,6 @@ ENABLE_ALLOC_TYPE_VARS += nofrag
 M4FLAGS +=
 endif
-endif
 
 TF_MARKER=$(TYPEMARKER)$(FLAVOR_MARKER)
 
@@ -748,7 +740,7 @@ RUN_OBJS = \
 	$(OBJDIR)/register.o $(OBJDIR)/break.o \
 	$(OBJDIR)/erl_async.o $(OBJDIR)/erl_lock_check.o \
 	$(OBJDIR)/erl_gc.o $(OBJDIR)/erl_lock_count.o \
-	$(OBJDIR)/erl_nmgc.o $(OBJDIR)/erl_posix_str.o \
+	$(OBJDIR)/erl_posix_str.o \
 	$(OBJDIR)/erl_bits.o $(OBJDIR)/erl_math.o \
 	$(OBJDIR)/erl_fun.o $(OBJDIR)/erl_bif_port.o \
 	$(OBJDIR)/erl_term.o $(OBJDIR)/erl_node_tables.o \
diff --git a/erts/emulator/beam/atom.names b/erts/emulator/beam/atom.names
index 78c566ed38..106fad030b 100644
--- a/erts/emulator/beam/atom.names
+++ b/erts/emulator/beam/atom.names
@@ -244,7 +244,6 @@ atom gather_sched_wall_time_result
 atom getting_linked
 atom getting_unlinked
 atom global
-atom global_heaps_size
 atom Gt='>'
 atom grun
 atom group_leader
@@ -259,7 +258,6 @@ atom hide
 atom high
 atom hipe_architecture
 atom http httph https http_response http_request http_header http_eoh http_error http_bin httph_bin
-atom hybrid
 atom id
 atom if_clause
 atom imports
diff --git a/erts/emulator/beam/beam_bif_load.c b/erts/emulator/beam/beam_bif_load.c
index 78a9d76a20..ada2d152b7 100644
--- a/erts/emulator/beam/beam_bif_load.c
+++ b/erts/emulator/beam/beam_bif_load.c
@@ -419,10 +419,8 @@ check_process_code(Process* rp, Module* modp)
     Uint mod_size;
     BeamInstr* end;
     Eterm* sp;
-#ifndef HYBRID /* FIND ME! */
     struct erl_off_heap_header* oh;
     int done_gc = 0;
-#endif
 
 #define INSIDE(a) (start <= (a) && (a) < end)
@@ -481,7 +479,6 @@ check_process_code(Process* rp, Module* modp)
      * See if there are funs that refer to the old version of the module.
      */
 
-#ifndef HYBRID /* FIND ME! */
 rescan:
     for (oh = MSO(rp).first; oh; oh = oh->next) {
 	if (thing_subtag(oh->thing_word) == FUN_SUBTAG) {
@@ -507,7 +504,6 @@ check_process_code(Process* rp, Module* modp)
 	    }
 	}
     }
-#endif
 
     /*
      * See if there are constants inside the module referenced by the process.
diff --git a/erts/emulator/beam/beam_emu.c b/erts/emulator/beam/beam_emu.c index 18a57931ae..6d3b15cd46 100644 --- a/erts/emulator/beam/beam_emu.c +++ b/erts/emulator/beam/beam_emu.c @@ -26,7 +26,6 @@ #include "erl_vm.h" #include "global.h" #include "erl_process.h" -#include "erl_nmgc.h" #include "error.h" #include "bif.h" #include "big.h" @@ -253,20 +252,6 @@ void** beam_ops; extern int count_instructions; #endif -#if defined(HYBRID) -#define SWAPIN \ - g_htop = global_htop; \ - g_hend = global_hend; \ - HTOP = HEAP_TOP(c_p); \ - E = c_p->stop - -#define SWAPOUT \ - global_htop = g_htop; \ - global_hend = g_hend; \ - HEAP_TOP(c_p) = HTOP; \ - c_p->stop = E - -#else #define SWAPIN \ HTOP = HEAP_TOP(c_p); \ E = c_p->stop @@ -294,8 +279,6 @@ extern int count_instructions; #define LIGHT_SWAPIN HTOP = HEAP_TOP(c_p) -#endif - #ifdef FORCE_HEAP_FRAGS # define HEAP_SPACE_VERIFIED(Words) do { \ c_p->space_verified = (Words); \ @@ -457,36 +440,6 @@ extern int count_instructions; CHECK_TERM(r(0)); \ } while (0) -#ifdef HYBRID -#ifdef INCREMENTAL -#define TestGlobalHeap(Nh, Live, hp) \ - do { \ - unsigned need = (Nh); \ - ASSERT(global_heap <= g_htop && g_htop <= global_hend); \ - SWAPOUT; \ - reg[0] = r(0); \ - FCALLS -= need; \ - (hp) = IncAlloc(c_p,need,reg,(Live)); \ - r(0) = reg[0]; \ - SWAPIN; \ - } while (0) -#else -#define TestGlobalHeap(Nh, Live, hp) \ - do { \ - unsigned need = (Nh); \ - ASSERT(global_heap <= g_htop && g_htop <= global_hend); \ - if (g_hend - g_htop < need) { \ - SWAPOUT; \ - reg[0] = r(0); \ - FCALLS -= erts_global_garbage_collect(c_p, need, reg, (Live)); \ - r(0) = reg[0]; \ - SWAPIN; \ - } \ - (hp) = global_htop; \ - } while (0) -#endif -#endif /* HYBRID */ - #define Init(N) make_blank(yb(N)) #define Init2(Y1, Y2) do { make_blank(Y1); make_blank(Y2); } while (0) @@ -1178,12 +1131,6 @@ void process_main(void) */ register Eterm* HTOP REG_htop = NULL; - -#ifdef HYBRID - Eterm *g_htop; - Eterm *g_hend; -#endif - /* Stack pointer. Grows downwards; points * to last item pushed (normally a saved * continuation pointer). @@ -6549,10 +6496,8 @@ new_fun(Process* p, Eterm* reg, ErlFunEntry* fe, int num_free) hp = funp->env; erts_refc_inc(&fe->refc, 2); funp->thing_word = HEADER_FUN; -#ifndef HYBRID /* FIND ME! 
*/ funp->next = MSO(p).first; MSO(p).first = (struct erl_off_heap_header*) funp; -#endif funp->fe = fe; funp->num_free = num_free; funp->creator = p->id; diff --git a/erts/emulator/beam/benchmark.c b/erts/emulator/beam/benchmark.c index 7fbf44a03c..7ac14b8e8b 100644 --- a/erts/emulator/beam/benchmark.c +++ b/erts/emulator/beam/benchmark.c @@ -33,17 +33,6 @@ unsigned long long messages_copied; unsigned long long messages_ego; unsigned long long minor_gc; unsigned long long major_gc; -#ifdef HYBRID -unsigned long long minor_global_gc; -unsigned long long major_global_gc; -unsigned long long gc_in_copy; -#ifdef INCREMENTAL -unsigned long long minor_gc_cycles; -unsigned long long major_gc_cycles; -unsigned long long minor_gc_stages; -unsigned long long major_gc_stages; -#endif -#endif #endif /* BM_COUNTERS */ #ifdef BM_TIMERS @@ -191,17 +180,6 @@ void init_benchmarking() messages_ego = 0; minor_gc = 0; major_gc = 0; -#ifdef HYBRID - minor_global_gc = 0; - major_global_gc = 0; - gc_in_copy = 0; -#ifdef INCREMENTAL - minor_gc_cycles = 0; - major_gc_cycles = 0; - minor_gc_stages = 0; - major_gc_stages = 0; -#endif -#endif #endif /* BM_COUNTERS */ #ifdef BM_HEAP_SIZES @@ -243,16 +221,6 @@ void save_statistics() erts_fprintf(file,"Number of processes spawned: %lld\n",processes_spawned); erts_fprintf(file,"Number of local minor GCs: %lld\n",minor_gc); erts_fprintf(file,"Number of local major GCs: %lld\n",major_gc); -#ifdef HYBRID - erts_fprintf(file,"Number of global minor GCs: %lld\n",minor_global_gc); - erts_fprintf(file,"Number of global major GCs: %lld\n",major_global_gc); -#ifdef INCREMENTAL - erts_fprintf(file,"Number of minor GC-cycles: %lld\n",minor_gc_cycles); - erts_fprintf(file,"Number of major GC-cycles: %lld\n",major_gc_cycles); - erts_fprintf(file,"Number of minor GC-stages: %lld\n",minor_gc_stages); - erts_fprintf(file,"Number of major GC-stages: %lld\n",major_gc_stages); -#endif -#endif erts_fprintf(file,"Number of messages sent: %lld\n",messages_sent); erts_fprintf(file,"Number of messages copied: %lld\n",messages_copied); erts_fprintf(file,"Number of messages sent to self: %lld\n",messages_ego); diff --git a/erts/emulator/beam/benchmark.h b/erts/emulator/beam/benchmark.h index eedb06a1b6..003e821bce 100644 --- a/erts/emulator/beam/benchmark.h +++ b/erts/emulator/beam/benchmark.h @@ -99,17 +99,6 @@ extern unsigned long long messages_copied; extern unsigned long long messages_ego; extern unsigned long long minor_gc; extern unsigned long long major_gc; -#ifdef HYBRID -extern unsigned long long minor_global_gc; -extern unsigned long long major_global_gc; -extern unsigned long long gc_in_copy; -#ifdef INCREMENTAL -extern unsigned long long minor_gc_cycles; -extern unsigned long long major_gc_cycles; -extern unsigned long long minor_gc_stages; -extern unsigned long long major_gc_stages; -#endif -#endif #define BM_COUNT(var) (var)++; diff --git a/erts/emulator/beam/bif.c b/erts/emulator/beam/bif.c index 39d4582435..fc00b42454 100644 --- a/erts/emulator/beam/bif.c +++ b/erts/emulator/beam/bif.c @@ -3517,22 +3517,6 @@ BIF_RETTYPE garbage_collect_0(BIF_ALIST_0) } /**********************************************************************/ -/* Perform garbage collection of the message area */ - -BIF_RETTYPE garbage_collect_message_area_0(BIF_ALIST_0) -{ -#if defined(HYBRID) && !defined(INCREMENTAL) - int reds = 0; - - FLAGS(BIF_P) |= F_NEED_FULLSWEEP; - reds = erts_global_garbage_collect(BIF_P, 0, NULL, 0); - BIF_RET2(am_true, reds); -#else - BIF_RET(am_false); -#endif -} - 
-/**********************************************************************/ /* Return a list of active ports */ BIF_RETTYPE ports_0(BIF_ALIST_0) diff --git a/erts/emulator/beam/bif.tab b/erts/emulator/beam/bif.tab index 8a85e102d1..797bce43ab 100644 --- a/erts/emulator/beam/bif.tab +++ b/erts/emulator/beam/bif.tab @@ -99,8 +99,6 @@ bif erlang:garbage_collect/0 bif 'erl.system':garbage_collect/0 ebif_garbage_collect_0 bif erlang:garbage_collect/1 bif 'erl.system':garbage_collect/1 ebif_garbage_collect_1 -bif erlang:garbage_collect_message_area/0 -bif 'erl.system':garbage_collect_message_area/0 ebif_garbage_collect_message_area_0 bif erlang:get/0 bif 'erl.lang.proc.pdict':get/0 ebif_get_0 bif erlang:get/1 diff --git a/erts/emulator/beam/copy.c b/erts/emulator/beam/copy.c index d7345c2f54..36eda04de2 100644 --- a/erts/emulator/beam/copy.c +++ b/erts/emulator/beam/copy.c @@ -26,30 +26,13 @@ #include "global.h" #include "erl_process.h" #include "erl_gc.h" -#include "erl_nmgc.h" #include "big.h" #include "erl_binary.h" #include "erl_bits.h" #include "dtrace-wrapper.h" -#ifdef HYBRID -MA_STACK_DECLARE(src); -MA_STACK_DECLARE(dst); -MA_STACK_DECLARE(offset); -#endif - static void move_one_frag(Eterm** hpp, Eterm* src, Uint src_sz, ErlOffHeap*); -void -init_copy(void) -{ -#ifdef HYBRID - MA_STACK_ALLOC(src); - MA_STACK_ALLOC(dst); - MA_STACK_ALLOC(offset); -#endif -} - /* * Copy object "obj" to process p. */ @@ -432,12 +415,10 @@ Eterm copy_struct(Eterm obj, Uint sz, Eterm** hpp, ErlOffHeap* off_heap) while (i--) { *htop++ = *objp++; } -#ifndef HYBRID /* FIND ME! */ funp = (ErlFunThing *) tp; funp->next = off_heap->first; off_heap->first = (struct erl_off_heap_header*) funp; erts_refc_inc(&funp->fe->refc, 2); -#endif *argp = make_fun_rel(tp, dst_base); } break; @@ -500,420 +481,6 @@ Eterm copy_struct(Eterm obj, Uint sz, Eterm** hpp, ErlOffHeap* off_heap) return res; } -#ifdef HYBRID - -#ifdef BM_MESSAGE_SIZES -# define BM_ADD(var,val) (var) += (val); -#else -# define BM_ADD(var,val) -#endif - -#ifdef DEBUG -# define CLEARMEM(PTR,SIZE) memset(PTR,0,SIZE*sizeof(Eterm)) -#else -# define CLEARMEM(PTR,SIZE) -#endif - -#ifdef INCREMENTAL -#define GlobalAlloc(p, need, hp) \ -do { \ - Uint n = (need); \ - BM_ADD(words_copied,n); \ - BM_SWAP_TIMER(copy,system); \ - /* If a new collection cycle is started during copy, the message * \ - * will end up in the old generation and all allocations * \ - * thereafter must go directly into the old generation. */ \ - if (alloc_old) { \ - erts_incremental_gc((p),n,&dest,1); \ - (hp) = erts_inc_alloc(n); \ - } else { \ - (hp) = IncAlloc((p),n,&dest,1); \ - if (ma_gc_flags & GC_CYCLE_START) { \ - alloc_old = 1; \ - global_htop = global_heap; \ - (hp) = erts_inc_alloc(n); \ - } \ - } \ - CLEARMEM((hp),(n)); \ - BM_SWAP_TIMER(system,copy); \ -} while(0) - -#else /* no INCREMELNTAL */ - -#define GlobalAlloc(p, need, hp) \ -do { \ - Uint n = (need); \ - total_need += n; \ - if (total_need >= global_heap_sz) \ - erl_exit(ERTS_ABORT_EXIT, "Copying a message (%d words) larger than the nursery simply won't work...\n", total_need); \ - if (global_hend - n < global_htop) { \ - BM_SWAP_TIMER(copy,system); \ - erts_global_garbage_collect((p),total_need,NULL,0); \ - BM_SWAP_TIMER(system,copy); \ - total_need = 0; \ - ma_src_top = 0; \ - ma_dst_top = 0; \ - ma_offset_top = 0; \ - goto copy_start; \ - } \ - (hp) = global_htop; \ - global_htop += n; \ - BM_ADD(words_copied,n); \ -} while(0) -#endif /* INCREMENTAL */ - -/* Copy a message to the message area. 
*/ -Eterm copy_struct_lazy(Process *from, Eterm orig, Uint offs) -{ - Eterm obj; - Eterm dest; -#ifdef INCREMENTAL - int alloc_old = 0; -#else - int total_need = 0; -#endif - - VERBOSE(DEBUG_MESSAGES, - ("COPY START; %T is sending a message @ 0x%016x\n%T\n", - from->id, orig, orig)); - -#ifndef INCREMENTAL - copy_start: -#endif - MA_STACK_PUSH(src,orig); - MA_STACK_PUSH(dst,&dest); - MA_STACK_PUSH(offset,offs); - - while (ma_src_top > 0) { - obj = MA_STACK_POP(src); - - /* copy_struct_lazy should never be called with something that - * do not need to be copied. Within the loop, nothing that do - * not need copying should be placed in the src-stack. - */ - ASSERT(!NO_COPY(obj)); - - switch (primary_tag(obj)) { - case TAG_PRIMARY_LIST: { - Eterm *hp; - Eterm *objp; - - GlobalAlloc(from,2,hp); - objp = list_val(obj); - - MA_STACK_UPDATE(dst,MA_STACK_POP(offset),make_list(hp)); - MA_STACK_POP(dst); - - /* TODO: Byt ordningen nedan så att CDR pushas först. */ - - if (NO_COPY(*objp)) { - hp[0] = *objp; -#ifdef INCREMENTAL - if (ptr_within(ptr_val(*objp),inc_fromspc,inc_fromend)) - INC_STORE(gray,hp,2); -#endif - } else { - MA_STACK_PUSH(src,*objp); - MA_STACK_PUSH(dst,hp); - MA_STACK_PUSH(offset,0); - } - - objp++; - - if (NO_COPY(*objp)) { - hp[1] = *objp; -#ifdef INCREMENTAL - if (ptr_within(ptr_val(*objp),inc_fromspc,inc_fromend)) - INC_STORE(gray,hp,2); -#endif - } - else { - MA_STACK_PUSH(src,*objp); - MA_STACK_PUSH(dst,hp); - MA_STACK_PUSH(offset,1); - } - continue; - } - - case TAG_PRIMARY_BOXED: { - Eterm *objp = boxed_val(obj); - - switch (*objp & _TAG_HEADER_MASK) { - case ARITYVAL_SUBTAG: { - Uint ari = arityval(*objp); - Uint i; - Eterm *hp; - GlobalAlloc(from,ari + 1,hp); - /* A GC above might invalidate the value of objp */ - objp = boxed_val(obj); - MA_STACK_UPDATE(dst,MA_STACK_POP(offset),make_tuple(hp)); - MA_STACK_POP(dst); - *hp = *objp++; - for (i = 1; i <= ari; i++) { - switch (primary_tag(*objp)) { - case TAG_PRIMARY_LIST: - case TAG_PRIMARY_BOXED: - if (NO_COPY(*objp)) { - hp[i] = *objp; -#ifdef INCREMENTAL - if (ptr_within(ptr_val(*objp), - inc_fromspc,inc_fromend)) - INC_STORE(gray,hp,BOXED_NEED(hp,*hp)); -#endif - objp++; - } else { - MA_STACK_PUSH(src,*objp++); - MA_STACK_PUSH(dst,hp); - MA_STACK_PUSH(offset,i); - } - break; - default: - hp[i] = *objp++; - } - } - continue; - } - - case REFC_BINARY_SUBTAG: { - ProcBin *pb; - Uint i = thing_arityval(*objp) + 1; - Eterm *hp; - GlobalAlloc(from,i,hp); - /* A GC above might invalidate the value of objp */ - objp = boxed_val(obj); - MA_STACK_UPDATE(dst,MA_STACK_POP(offset),make_binary(hp)); - MA_STACK_POP(dst); - pb = (ProcBin*) hp; - while (i--) { - *hp++ = *objp++; - } - erts_refc_inc(&pb->val->refc, 2); - pb->next = erts_global_offheap.first; - erts_global_offheap.first = pb; - OH_OVERHEAD(off_heap, pb->size / sizeof(Eterm)); - continue; - } - - case FUN_SUBTAG: { - ErlFunThing *funp = (ErlFunThing*) objp; - Uint i = thing_arityval(*objp) + 1; - Uint j = i + 1 + funp->num_free; - Uint k = i; - Eterm *hp, *hp_start; - GlobalAlloc(from,j,hp); - /* A GC above might invalidate the value of objp */ - objp = boxed_val(obj); - hp_start = hp; - MA_STACK_UPDATE(dst,MA_STACK_POP(offset),make_fun(hp)); - MA_STACK_POP(dst); - funp = (ErlFunThing*) hp; - while (i--) { - *hp++ = *objp++; - } -#ifndef HYBRID /* FIND ME! 
*/ - funp->next = erts_global_offheap.first; - erts_global_offheap.first = funp; - erts_refc_inc(&funp->fe->refc, 2); -#endif - for (i = k; i < j; i++) { - switch (primary_tag(*objp)) { - case TAG_PRIMARY_LIST: - case TAG_PRIMARY_BOXED: - if (NO_COPY(*objp)) { -#ifdef INCREMENTAL - if (ptr_within(ptr_val(*objp), - inc_fromspc,inc_fromend)) - INC_STORE(gray,hp,BOXED_NEED(hp,*hp)); -#endif - *hp++ = *objp++; - } else { - MA_STACK_PUSH(src,*objp++); - MA_STACK_PUSH(dst,hp_start); - MA_STACK_PUSH(offset,i); - hp++; - } - break; - default: - *hp++ = *objp++; - } - } - continue; - } - - case EXTERNAL_PID_SUBTAG: - case EXTERNAL_PORT_SUBTAG: - case EXTERNAL_REF_SUBTAG: { - ExternalThing *etp; - Uint i = thing_arityval(*objp) + 1; - Eterm *hp; - GlobalAlloc(from,i,hp); - /* A GC above might invalidate the value of objp */ - objp = boxed_val(obj); - MA_STACK_UPDATE(dst,MA_STACK_POP(offset),make_external(hp)); - MA_STACK_POP(dst); - etp = (ExternalThing*) hp; - while (i--) { - *hp++ = *objp++; - } - - etp->next = erts_global_offheap.first; - erts_global_offheap.first = etp; - erts_refc_inc(&etp->node->refc, 2); - continue; - } - - case SUB_BINARY_SUBTAG: { - ErlSubBin *sb = (ErlSubBin *) objp; - Eterm *hp; - Eterm res_binary; - Eterm real_bin = sb->orig; - Uint bit_offset = sb->bitoffs; - Uint bit_size = sb -> bitsize; - Uint sub_offset = sb->offs; - size_t size = sb->size; - Uint extra_bytes; - Uint real_size; - Uint sub_binary_heapneed; - if ((bit_size + bit_offset) > 8) { - extra_bytes = 2; - sub_binary_heapneed = ERL_SUB_BIN_SIZE; - } else if ((bit_size + bit_offset) > 0) { - extra_bytes = 1; - sub_binary_heapneed = ERL_SUB_BIN_SIZE; - } else { - extra_bytes = 0; - sub_binary_heapneed = 0; - } - - real_size = size+extra_bytes; - objp = binary_val(real_bin); - if (thing_subtag(*objp) == HEAP_BINARY_SUBTAG) { - ErlHeapBin *from_bin; - ErlHeapBin *to_bin; - Uint i = heap_bin_size(real_size); - GlobalAlloc(from,i+sub_binary_heapneed,hp); - from_bin = (ErlHeapBin *) objp; - to_bin = (ErlHeapBin *) hp; - to_bin->thing_word = header_heap_bin(real_size); - to_bin->size = real_size; - sys_memcpy(to_bin->data, ((byte *)from_bin->data) + - sub_offset, real_size); - res_binary = make_binary(to_bin); - hp += i; - } else { - ProcBin *from_bin; - ProcBin *to_bin; - - ASSERT(thing_subtag(*objp) == REFC_BINARY_SUBTAG); - from_bin = (ProcBin *) objp; - erts_refc_inc(&from_bin->val->refc, 2); - GlobalAlloc(from,PROC_BIN_SIZE+sub_binary_heapneed,hp); - to_bin = (ProcBin *) hp; - to_bin->thing_word = HEADER_PROC_BIN; - to_bin->size = real_size; - to_bin->val = from_bin->val; - to_bin->bytes = from_bin->bytes + sub_offset; - to_bin->next = erts_global_offheap.first; - erts_global_offheap.first = to_bin; - OH_OVERHEAD(&erts_global_offheap, to_bin->size / sizeof(Eterm)); - res_binary=make_binary(to_bin); - hp += PROC_BIN_SIZE; - } - if (extra_bytes != 0) { - ErlSubBin* res; - res = (ErlSubBin *) hp; - res->thing_word = HEADER_SUB_BIN; - res->size = size; - res->bitsize = bit_size; - res->bitoffs = bit_offset; - res->offs = 0; - res->is_writable = 0; - res->orig = res_binary; - res_binary = make_binary(hp); - } - MA_STACK_UPDATE(dst,MA_STACK_POP(offset),res_binary); - MA_STACK_POP(dst); - continue; - } - - case BIN_MATCHSTATE_SUBTAG: - erl_exit(ERTS_ABORT_EXIT, - "copy_struct_lazy: matchstate term not allowed"); - - default: { - Uint size = thing_arityval(*objp) + 1; - Eterm *hp; - GlobalAlloc(from,size,hp); - /* A GC above might invalidate the value of objp */ - objp = boxed_val(obj); - 
MA_STACK_UPDATE(dst,MA_STACK_POP(offset),make_boxed(hp)); - MA_STACK_POP(dst); - while (size--) { - *hp++ = *objp++; - } - continue; - } - } - continue; - } - - case TAG_PRIMARY_HEADER: - ASSERT((obj & _TAG_HEADER_MASK) == ARITYVAL_SUBTAG); - { - Eterm *objp = &obj; - Uint ari = arityval(obj); - Uint i; - Eterm *hp; - GlobalAlloc(from,ari + 1,hp); - MA_STACK_UPDATE(dst,MA_STACK_POP(offset),make_tuple(hp)); - MA_STACK_POP(dst); - *hp = *objp++; - for (i = 1; i <= ari; i++) { - switch (primary_tag(*objp)) { - case TAG_PRIMARY_LIST: - case TAG_PRIMARY_BOXED: - if (NO_COPY(*objp)) { -#ifdef INCREMENTAL - if (ptr_within(ptr_val(*objp),inc_fromspc,inc_fromend)) - INC_STORE(gray,hp,ari + 1); -#endif - hp[i] = *objp++; - } else { - MA_STACK_PUSH(src,*objp++); - MA_STACK_PUSH(dst,hp); - MA_STACK_PUSH(offset,i); - } - break; - default: - hp[i] = *objp++; - } - } - continue; - } - - default: - erl_exit(ERTS_ABORT_EXIT, - "%s, line %d: Internal error in copy_struct_lazy: 0x%08x\n", - __FILE__, __LINE__,obj); - } - } - - VERBOSE(DEBUG_MESSAGES, - ("Copy allocated @ 0x%08lx:\n%T\n", - (unsigned long)ptr_val(dest),dest)); - - ma_gc_flags &= ~GC_CYCLE_START; - - ASSERT(eq(orig, dest)); - ASSERT(ma_src_top == 0); - ASSERT(ma_dst_top == 0); - ASSERT(ma_offset_top == 0); - return dest; -} - -#undef NO_COPY -#endif /* HYBRID */ - /* * Copy a term that is guaranteed to be contained in a single * heap block. The heap block is copied word by word, and any diff --git a/erts/emulator/beam/dist.c b/erts/emulator/beam/dist.c index 7c75c9fdb7..01fd63d5d9 100644 --- a/erts/emulator/beam/dist.c +++ b/erts/emulator/beam/dist.c @@ -1582,11 +1582,9 @@ int erts_net_message(Port *prt, } erts_cleanup_offheap(&off_heap); -#ifndef HYBRID /* FIND ME! */ if (ctl != ctl_default) { erts_free(ERTS_ALC_T_DCTRL_BUF, (void *) ctl); } -#endif UnUseTmpHeapNoproc(DIST_CTL_DEFAULT_SIZE); ERTS_SMP_CHK_NO_PROC_LOCKS; return 0; @@ -1599,11 +1597,9 @@ int erts_net_message(Port *prt, data_error: PURIFY_MSG("data error"); erts_cleanup_offheap(&off_heap); -#ifndef HYBRID /* FIND ME! 
*/ if (ctl != ctl_default) { erts_free(ERTS_ALC_T_DCTRL_BUF, (void *) ctl); } -#endif UnUseTmpHeapNoproc(DIST_CTL_DEFAULT_SIZE); erts_do_exit_port(prt, dep->cid, am_killed); ERTS_SMP_CHK_NO_PROC_LOCKS; diff --git a/erts/emulator/beam/erl_alloc.c b/erts/emulator/beam/erl_alloc.c index 8130d5c576..6fce032f9d 100644 --- a/erts/emulator/beam/erl_alloc.c +++ b/erts/emulator/beam/erl_alloc.c @@ -2138,9 +2138,6 @@ erts_memory(int *print_to_p, void *print_to_arg, void *proc, Eterm earg) tmp = alcu_size(ERTS_ALC_A_EHEAP, NULL, 0); } tmp += erts_max_processes*sizeof(Process*); -#ifdef HYBRID - tmp += erts_max_processes*sizeof(Process*); -#endif tmp += erts_bif_timer_memory_size(); tmp += erts_tot_link_lh_size(); diff --git a/erts/emulator/beam/erl_alloc.types b/erts/emulator/beam/erl_alloc.types index bba6b83ac6..d4ef9cc553 100644 --- a/erts/emulator/beam/erl_alloc.types +++ b/erts/emulator/beam/erl_alloc.types @@ -317,19 +317,6 @@ type ACTIVE_PROCS STANDARD PROCESSES active_procs +endif -+if hybrid - -type ACTIVE_PROCS STANDARD PROCESSES active_procs - -# Used for all memory involved in incremental gc of the message area -# that is, young (x2) and old generation, forwarding pointers and blackmap -type MESSAGE_AREA LONG_LIVED PROCESSES message_area - -# Used in MA_STACK (global.h) and INC_STORAGE (erl_nmgc.h) -type OBJECT_STACK STANDARD PROCESSES object_stack - -+endif - +if smp type SL_PTIMER SHORT_LIVED SYSTEM ptimer_sl type LL_PTIMER STANDARD SYSTEM ptimer_ll diff --git a/erts/emulator/beam/erl_async.c b/erts/emulator/beam/erl_async.c index f321ed21aa..cb975d64b0 100644 --- a/erts/emulator/beam/erl_async.c +++ b/erts/emulator/beam/erl_async.c @@ -253,7 +253,9 @@ erts_get_async_ready_queue(Uint sched_id) static ERTS_INLINE void async_add(ErtsAsync *a, ErtsAsyncQ* q) { +#ifdef USE_VM_PROBES int len; +#endif if (is_internal_port(a->port)) { #if ERTS_USE_ASYNC_READY_Q @@ -291,7 +293,9 @@ static ERTS_INLINE ErtsAsync *async_get(ErtsThrQ_t *q, int saved_fin_deq = 0; ErtsThrQFinDeQ_t fin_deq; #endif +#ifdef USE_VM_PROBES int len; +#endif while (1) { ErtsAsync *a = (ErtsAsync *) erts_thr_q_dequeue(q); diff --git a/erts/emulator/beam/erl_bif_info.c b/erts/emulator/beam/erl_bif_info.c index 2373dc7af4..f56b00287f 100755 --- a/erts/emulator/beam/erl_bif_info.c +++ b/erts/emulator/beam/erl_bif_info.c @@ -25,7 +25,6 @@ #include "erl_vm.h" #include "global.h" #include "erl_process.h" -#include "erl_nmgc.h" #include "error.h" #include "erl_driver.h" #include "bif.h" @@ -89,12 +88,6 @@ static char erts_system_version[] = ("Erlang " ERLANG_OTP_RELEASE #ifdef ERTS_ENABLE_KERNEL_POLL " [kernel-poll:%s]" #endif -#ifdef HYBRID - " [hybrid heap]" -#endif -#ifdef INCREMENTAL - " [incremental GC]" -#endif #ifdef ET_DEBUG #if ET_DEBUG " [type-assertions]" @@ -576,9 +569,6 @@ static Eterm pi_args[] = { am_min_bin_vheap_size, am_current_location, am_current_stacktrace, -#ifdef HYBRID - am_message_binary -#endif }; #define ERTS_PI_ARGS ((int) (sizeof(pi_args)/sizeof(Eterm))) @@ -626,9 +616,6 @@ pi_arg2ix(Eterm arg) case am_min_bin_vheap_size: return 28; case am_current_location: return 29; case am_current_stacktrace: return 30; -#ifdef HYBRID - case am_message_binary: return 31; -#endif default: return -1; } } @@ -1081,12 +1068,8 @@ process_info_aux(Process *BIF_P, if (rp != BIF_P) { Eterm msg = ERL_MESSAGE_TERM(mq[i].msgp); if (is_value(msg)) { - mq[i].copy_struct_size = (is_immed(msg) -#ifdef HYBRID - || NO_COPY(msg) -#endif - ? 0 - : size_object(msg)); + mq[i].copy_struct_size = (is_immed(msg)? 
0 : + size_object(msg)); } else if (mq[i].msgp->data.attached) { mq[i].copy_struct_size @@ -1526,16 +1509,6 @@ process_info_aux(Process *BIF_P, break; } -#ifdef HYBRID - case am_message_binary: { - Uint sz = 3; - (void) bld_bin_list(NULL, &sz, erts_global_offheap.mso); - hp = HAlloc(BIF_P, sz); - res = bld_bin_list(&hp, NULL, erts_global_offheap.mso); - break; - } -#endif - case am_sequential_trace_token: res = copy_object(rp->seq_trace_token, BIF_P); hp = HAlloc(BIF_P, 3); @@ -2354,36 +2327,8 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1) #endif } else if (BIF_ARG_1 == am_heap_sizes) { return erts_heap_sizes(BIF_P); - } else if (BIF_ARG_1 == am_global_heaps_size) { -#ifdef HYBRID - Uint hsz = 0; - Uint sz = 0; - - sz += global_heap_sz; -#ifdef INCREMENTAL - /* The size of the old generation is a bit hard to define here... - * The amount of live data in the last collection perhaps..? */ - sz = 0; -#else - if (global_old_hend && global_old_heap) - sz += global_old_hend - global_old_heap; -#endif - - sz *= sizeof(Eterm); - - (void) erts_bld_uint(NULL, &hsz, sz); - hp = hsz ? HAlloc(BIF_P, hsz) : NULL; - res = erts_bld_uint(&hp, NULL, sz); -#else - res = make_small(0); -#endif - return res; } else if (BIF_ARG_1 == am_heap_type) { -#if defined(HYBRID) - return am_hybrid; -#else return am_private; -#endif } else if (ERTS_IS_ATOM_STR("cpu_topology", BIF_ARG_1)) { res = erts_get_cpu_topology_term(BIF_P, am_used); BIF_TRAP1(erts_format_cpu_topology_trap, BIF_P, res); diff --git a/erts/emulator/beam/erl_bif_port.c b/erts/emulator/beam/erl_bif_port.c index 5525426824..2a1b01b107 100644 --- a/erts/emulator/beam/erl_bif_port.c +++ b/erts/emulator/beam/erl_bif_port.c @@ -265,7 +265,7 @@ port_call(Process* c_p, Eterm arg1, Eterm arg2, Eterm arg3) Eterm res; Sint result_size; Eterm *hp; - Eterm *hp_end; /* To satisfy hybrid heap architecture */ + Eterm *hp_end; unsigned ret_flags = 0U; int fpe_was_unmasked; diff --git a/erts/emulator/beam/erl_db_hash.c b/erts/emulator/beam/erl_db_hash.c index c726be5fb4..2fea4671e1 100644 --- a/erts/emulator/beam/erl_db_hash.c +++ b/erts/emulator/beam/erl_db_hash.c @@ -105,7 +105,20 @@ #define NSEG_2 256 /* Size of second segment table */ #define NSEG_INC 128 /* Number of segments to grow after that */ -#define SEGTAB(tb) ((struct segment**)erts_smp_atomic_read_ddrb(&(tb)->segtab)) +#ifdef ERTS_SMP +# define DB_USING_FINE_LOCKING(TB) (((TB))->common.type & DB_FINE_LOCKED) +#else +# define DB_USING_FINE_LOCKING(TB) 0 +#endif + +#ifdef ETHR_ORDERED_READ_DEPEND +#define SEGTAB(tb) ((struct segment**) erts_smp_atomic_read_nob(&(tb)->segtab)) +#else +#define SEGTAB(tb) \ + (DB_USING_FINE_LOCKING(tb) \ + ? ((struct segment**) erts_smp_atomic_read_ddrb(&(tb)->segtab)) \ + : ((struct segment**) erts_smp_atomic_read_nob(&(tb)->segtab))) +#endif #define NACTIVE(tb) ((int)erts_smp_atomic_read_nob(&(tb)->nactive)) #define NITEMS(tb) ((int)erts_smp_atomic_read_nob(&(tb)->common.nitems)) @@ -122,7 +135,9 @@ */ static ERTS_INLINE Uint hash_to_ix(DbTableHash* tb, HashValue hval) { - Uint mask = erts_smp_atomic_read_acqb(&tb->szm); + Uint mask = (DB_USING_FINE_LOCKING(tb) + ? 
erts_smp_atomic_read_acqb(&tb->szm) + : erts_smp_atomic_read_nob(&tb->szm)); Uint ix = hval & mask; if (ix >= erts_smp_atomic_read_nob(&tb->nactive)) { ix &= mask>>1; @@ -319,7 +334,10 @@ struct ext_segment { static ERTS_INLINE void SET_SEGTAB(DbTableHash* tb, struct segment** segtab) { - erts_smp_atomic_set_wb(&tb->segtab, (erts_aint_t) segtab); + if (DB_USING_FINE_LOCKING(tb)) + erts_smp_atomic_set_wb(&tb->segtab, (erts_aint_t) segtab); + else + erts_smp_atomic_set_nob(&tb->segtab, (erts_aint_t) segtab); #ifdef VALGRIND tb->top_ptr_to_segment_with_active_segtab = EXTSEG(segtab); #endif @@ -2501,6 +2519,28 @@ static Eterm build_term_list(Process* p, HashDbTerm* ptr1, HashDbTerm* ptr2, return list; } +static ERTS_INLINE int +begin_resizing(DbTableHash* tb) +{ + if (DB_USING_FINE_LOCKING(tb)) + return !erts_smp_atomic_xchg_acqb(&tb->is_resizing, 1); + else { + if (erts_smp_atomic_read_nob(&tb->is_resizing)) + return 0; + erts_smp_atomic_set_nob(&tb->is_resizing, 1); + return 1; + } +} + +static ERTS_INLINE void +done_resizing(DbTableHash* tb) +{ + if (DB_USING_FINE_LOCKING(tb)) + erts_smp_atomic_set_relb(&tb->is_resizing, 0); + else + erts_smp_atomic_set_nob(&tb->is_resizing, 0); +} + /* Grow table with one new bucket. ** Allocate new segment if needed. */ @@ -2513,9 +2553,8 @@ static void grow(DbTableHash* tb, int nactive) int from_ix; int szm; - if (erts_smp_atomic_xchg_acqb(&tb->is_resizing, 1)) { + if (!begin_resizing(tb)) return; /* already in progress */ - } if (NACTIVE(tb) != nactive) { goto abort; /* already done (race) */ } @@ -2547,9 +2586,12 @@ static void grow(DbTableHash* tb, int nactive) } erts_smp_atomic_inc_nob(&tb->nactive); if (from_ix == 0) { - erts_smp_atomic_set_relb(&tb->szm, szm); + if (DB_USING_FINE_LOCKING(tb)) + erts_smp_atomic_set_relb(&tb->szm, szm); + else + erts_smp_atomic_set_nob(&tb->szm, szm); } - erts_smp_atomic_set_relb(&tb->is_resizing, 0); + done_resizing(tb); /* Finally, let's split the bucket. 
We try to do it in a smart way to keep link order and avoid unnecessary updates of next-pointers */ @@ -2581,7 +2623,7 @@ static void grow(DbTableHash* tb, int nactive) return; abort: - erts_smp_atomic_set_relb(&tb->is_resizing, 0); + done_resizing(tb); } @@ -2590,9 +2632,8 @@ abort: */ static void shrink(DbTableHash* tb, int nactive) { - if (erts_smp_atomic_xchg_acqb(&tb->is_resizing, 1)) { + if (!begin_resizing(tb)) return; /* already in progress */ - } if (NACTIVE(tb) == nactive) { erts_smp_rwmtx_t* lck; int src_ix = nactive - 1; @@ -2639,7 +2680,7 @@ static void shrink(DbTableHash* tb, int nactive) } /*else already done */ - erts_smp_atomic_set_relb(&tb->is_resizing, 0); + done_resizing(tb); } diff --git a/erts/emulator/beam/erl_debug.c b/erts/emulator/beam/erl_debug.c index d7d6fcf0a2..2121f72fd2 100644 --- a/erts/emulator/beam/erl_debug.c +++ b/erts/emulator/beam/erl_debug.c @@ -25,7 +25,6 @@ #include "erl_vm.h" #include "global.h" #include "erl_process.h" -#include "erl_nmgc.h" #include "big.h" #include "bif.h" #include "beam_catches.h" @@ -33,34 +32,9 @@ #define WITHIN(ptr, x, y) ((x) <= (ptr) && (ptr) < (y)) -#if defined(HYBRID) -#if defined(INCREMENTAL) -/* Hybrid + Incremental */ -#define IN_HEAP(p, ptr) \ - (WITHIN((ptr), p->heap, p->hend) || \ - (OLD_HEAP(p) && WITHIN((ptr), OLD_HEAP(p), OLD_HEND(p))) || \ - WITHIN((ptr), global_heap, global_hend) || \ - (inc_fromspc && WITHIN((ptr), inc_fromspc, inc_fromend)) || \ - WITHIN((ptr), global_old_heap, global_old_hend)) - -#define IN_MA(ptr) \ - (WITHIN((ptr), global_heap, global_hend) || \ - (inc_fromspc && WITHIN((ptr), inc_fromspc, inc_fromend)) || \ - WITHIN((ptr), global_old_heap, global_old_hend)) -#else -/* Hybrid */ -#define IN_HEAP(p, ptr) \ - (WITHIN((ptr), p->heap, p->hend) || \ - (OLD_HEAP(p) && WITHIN((ptr), OLD_HEAP(p), OLD_HEND(p))) || \ - WITHIN((ptr), global_heap, global_hend) || \ - (global_old_heap && WITHIN((ptr),global_old_heap,global_old_hend))) -#endif -#else -/* Private */ #define IN_HEAP(p, ptr) \ (WITHIN((ptr), p->heap, p->hend) || \ (OLD_HEAP(p) && WITHIN((ptr), OLD_HEAP(p), OLD_HEND(p)))) -#endif #ifdef __GNUC__ @@ -266,13 +240,6 @@ static int verify_eterm(Process *p,Eterm element) } } } -#ifdef INCREMENTAL - else { - if (IN_MA(ptr)) - return 1; - } -#endif - return 0; } @@ -447,51 +414,12 @@ void verify_process(Process *p) VERIFY_ETERM("fvalue",p->fvalue); VERIFY_ETERM("ftrace",p->ftrace); -#ifdef HYBRID - VERIFY_AREA("rrma",p->rrma,p->nrr); -#endif - VERBOSE(DEBUG_MEMORY,("...done\n")); #undef VERIFY_AREA #undef VERIFY_ETERM } -void verify_everything() -{ -#ifdef HYBRID - Uint i; - Uint n = erts_num_active_procs; - -#ifdef INCREMENTAL_FREE_SIZES_NEEDS_TO_BE_TAGGED_AS_HEADERS_WITH_ARITY - INC_Page *page = inc_used_mem; -#endif - - for (i = 0; i < n; i++) { - verify_process(erts_active_procs[i]); - } - - erts_check_memory(NULL,global_heap,global_htop); - -#ifdef INCREMENTAL_FREE_SIZES_NEEDS_TO_BE_TAGGED_AS_HEADERS_WITH_ARITY - while (page) - { - Eterm *end = page + INC_PAGE_SIZE; - Eterm *pos = page->start; - - while( pos < end) { - Eterm val = *pos++; - if(is_header(val)) - pos += thing_arityval(val); - else - verify_eterm(NULL,val); - } - page = page->next; - } -#endif -#endif /* HYBRID */ -} - /* * print_untagged_memory will print the contents of given memory area. 
*/ @@ -582,83 +510,6 @@ void print_tagged_memory(Eterm *pos, Eterm *end) erts_printf("+-%s-+-%s-+\n",dashes,dashes); } -#ifdef HYBRID -void print_ma_info(void) -{ - erts_printf("Message Area (start - top - end): " - "0x%0*lx - 0x%0*lx - 0x%0*lx\n", - PTR_SIZE, (unsigned long)global_heap, - PTR_SIZE, (unsigned long)global_htop, - PTR_SIZE, (unsigned long)global_hend); -#ifndef INCREMENTAL - erts_printf(" High water: 0x%0*lx " - "Old gen: 0x%0*lx - 0x%0*lx - 0x%0*lx\n", - PTR_SIZE, (unsigned long)global_high_water, - PTR_SIZE, (unsigned long)global_old_heap, - PTR_SIZE, (unsigned long)global_old_htop, - PTR_SIZE, (unsigned long)global_old_hend); -#endif -} - -void print_message_area(void) -{ - Eterm *pos = global_heap; - Eterm *end = global_htop; - - erts_printf("From: 0x%0*lx to 0x%0*lx\n", - PTR_SIZE,(unsigned long)pos,PTR_SIZE,(unsigned long)end); - erts_printf("(Old generation: 0x%0*lx to 0x%0*lx\n", - PTR_SIZE, (unsigned long)global_old_heap, - PTR_SIZE, (unsigned long)global_old_hend); - erts_printf("| %-*s | %-*s |\n",PTR_SIZE,"Address",PTR_SIZE,"Contents"); - erts_printf("|-%s-|-%s-|\n",dashes,dashes); - while( pos < end ) { - Eterm val = pos[0]; - erts_printf("| 0x%0*lx | 0x%0*lx | ", - PTR_SIZE,(unsigned long)pos,PTR_SIZE,(unsigned long)val); - ++pos; - if( is_arity_value(val) ) { - erts_printf("Arity(%lu)", arityval(val)); - } else if( is_thing(val) ) { - unsigned int ari = thing_arityval(val); - erts_printf("Thing Arity(%u) Tag(%lu)", ari, thing_subtag(val)); - while( ari ) { - erts_printf("\n| 0x%0*lx | 0x%0*lx | THING", - PTR_SIZE, (unsigned long)pos, - PTR_SIZE, (unsigned long)*pos); - ++pos; - --ari; - } - } else - erts_printf("%.30T", val); - erts_printf("\n"); - } - erts_printf("+-%s-+-%s-+\n",dashes,dashes); -} - -void check_message_area() -{ - Eterm *pos = global_heap; - Eterm *end = global_htop; - - while( pos < end ) { - Eterm val = *pos++; - if(is_header(val)) - pos += thing_arityval(val); - else if(!is_immed(val)) - if ((ptr_val(val) < global_heap || ptr_val(val) >= global_htop) && - (ptr_val(val) < global_old_heap || - ptr_val(val) >= global_old_hend)) - { - erts_printf("check_message_area: Stray pointer found\n"); - print_message_area(); - erts_printf("Crashing to make it look real...\n"); - pos = 0; - } - } -} -#endif /* HYBRID */ - static void print_process_memory(Process *p); static void print_process_memory(Process *p) { @@ -703,19 +554,6 @@ static void print_process_memory(Process *p) erts_printf(" Fvalue: 0x%0*lx\n",PTR_SIZE,p->fvalue); erts_printf(" Ftrace: 0x%0*lx\n",PTR_SIZE,p->ftrace); -#ifdef HYBRID - if (p->nrr > 0) { - int i; - erts_printf(" Remembered Roots:\n"); - for (i = 0; i < p->nrr; i++) - if (p->rrsrc[i] != NULL) - erts_printf("0x%0*lx -> 0x%0*lx\n", - PTR_SIZE, (unsigned long)p->rrsrc[i], - PTR_SIZE, (unsigned long)p->rrma[i]); - erts_printf("\n"); - } -#endif - erts_printf("+- %-*s -+ 0x%0*lx 0x%0*lx %s-%s-+\n", PTR_SIZE, "Stack", PTR_SIZE, (unsigned long)STACK_TOP(p), @@ -757,92 +595,6 @@ void print_memory(Process *p) if (p != NULL) { print_process_memory(p); } -#ifdef HYBRID - else { - Uint i; - Uint n = erts_num_active_procs; - - for (i = 0; i < n; i++) { - Process *p = erts_active_procs[i]; - print_process_memory(p); - } - - erts_printf("==================\n"); - erts_printf("|| Message area ||\n"); - erts_printf("==================\n"); - erts_printf("+-%s-+-%s-%s-%s-%s-+\n", - dashes,dashes,dashes,dashes,dashes); - erts_printf("| %-*s | 0x%0*lx - 0x%0*lx - 0x%0*lx%*s|\n", - PTR_SIZE, "Young", - PTR_SIZE, (unsigned long)global_heap, - 
PTR_SIZE, (unsigned long)global_htop, - PTR_SIZE, (unsigned long)global_hend, - PTR_SIZE, ""); - erts_printf("+-%s-+-%s-%s-%s-%s-+\n", - dashes,dashes,dashes,dashes,dashes); - - print_untagged_memory(global_heap,global_htop); - - - erts_printf("+-%s-+-%s-%s-%s-%s-+\n", - dashes,dashes,dashes,dashes,dashes); - erts_printf("| %-*s | 0x%0*lx - 0x%0*lx %*s |\n", - PTR_SIZE, "Old", - PTR_SIZE, (unsigned long)global_old_heap, - PTR_SIZE, (unsigned long)global_old_hend, - 2 * PTR_SIZE, ""); - erts_printf("+-%s-+-%s-%s-%s-%s-+\n", - dashes,dashes,dashes,dashes,dashes); - -#ifdef INCREMENTAL - { - INC_Page *page = inc_used_mem; - /* Genom att g� igenom fri-listan f�rst kan vi markera de - omr�den som inte �r allokerade och bara skriva ut de som - lever. - char markarea[INC_PAGESIZE]; - */ - - while (page) { - Eterm *ptr = (Eterm*)page->start; - Eterm *end = (Eterm*)page->start + INC_PAGESIZE; - - erts_printf("| %*s | This: 0x%0*lx Next: 0x%0*lx %*s|\n", - PTR_SIZE, "", - PTR_SIZE, (unsigned long)page, - PTR_SIZE, (unsigned long)page->next, - 2 * PTR_SIZE - 8, ""); - print_untagged_memory(ptr,end); - page = page->next; - } - } - - { - INC_MemBlock *this = inc_free_list; - - erts_printf("-- %-*s --%s-%s-%s-%s-\n",PTR_SIZE+2,"Free list", - dashes,dashes,dashes,dashes); - while (this) { - erts_printf("Block @ 0x%0*lx sz: %8d prev: 0x%0*lx next: 0x%0*lx\n", - PTR_SIZE, (unsigned long)this,this->size, - PTR_SIZE, (unsigned long)this->prev, - PTR_SIZE, (unsigned long)this->next); - this = this->next; - } - erts_printf("--%s---%s-%s-%s-%s--\n", - dashes,dashes,dashes,dashes,dashes); - } - - if (inc_fromspc != NULL) { - erts_printf("-- fromspace - 0x%0*lx 0x%0*lx " - "------------------------------\n", - PTR_SIZE, (unsigned long)inc_fromspc, - PTR_SIZE, (unsigned long)inc_fromend); - print_untagged_memory(inc_fromspc,inc_fromend); - } -#endif /* INCREMENTAL */ - } -#endif /* HYBRID */ } void print_memory_info(Process *p) @@ -869,26 +621,6 @@ void print_memory_info(Process *p) erts_printf("|| Memory info ||\n"); erts_printf("=================\n"); } -#ifdef HYBRID - erts_printf("|- message area --%s-%s-%s-%s-|\n", - dashes,dashes,dashes,dashes); - erts_printf("| Young | 0x%0*lx - 0x%0*lx - 0x%0*lx %*s |\n", - PTR_SIZE, (unsigned long)global_heap, - PTR_SIZE, (unsigned long)global_htop, - PTR_SIZE, (unsigned long)global_hend, - PTR_SIZE, ""); - erts_printf("| Old | 0x%0*lx - 0x%0*lx %*s |\n", - PTR_SIZE, (unsigned long)global_old_heap, - PTR_SIZE, (unsigned long)global_old_hend, - 2 * PTR_SIZE, ""); -#endif -#ifdef INCREMENTAL - if (inc_fromspc != NULL) - erts_printf("| Frmsp | 0x%0*lx - 0x%0*lx %*s |\n", - PTR_SIZE, (unsigned long)inc_fromspc, - PTR_SIZE, (unsigned long)inc_fromend, - 2 * PTR_SIZE, ""); -#endif erts_printf("+-----------------%s-%s-%s-%s-+\n",dashes,dashes,dashes,dashes); } #if !HEAP_ON_C_STACK && defined(DEBUG) diff --git a/erts/emulator/beam/erl_debug.h b/erts/emulator/beam/erl_debug.h index c49354a2b3..a028a95fef 100644 --- a/erts/emulator/beam/erl_debug.h +++ b/erts/emulator/beam/erl_debug.h @@ -42,12 +42,11 @@ #define DEBUG_DEFAULT 0x0000 /* No flags are set per default */ #define DEBUG_SYSTEM 0x0001 /* Misc system info at startup and end */ #define DEBUG_PRIVATE_GC 0x0002 /* GC of private heaps */ -#define DEBUG_HYBRID_GC 0x0004 /* GC of the message area */ -#define DEBUG_ALLOCATION 0x0008 /* HAlloc. 
To find holes in the heap */ -#define DEBUG_MESSAGES 0x0010 /* Message passing */ -#define DEBUG_THREADS 0x0020 /* Thread-related stuff */ -#define DEBUG_PROCESSES 0x0040 /* Process creation and removal */ -#define DEBUG_MEMORY 0x0080 /* Display results of memory checks */ +#define DEBUG_ALLOCATION 0x0004 /* HAlloc. To find holes in the heap */ +#define DEBUG_MESSAGES 0x0008 /* Message passing */ +#define DEBUG_THREADS 0x0010 /* Thread-related stuff */ +#define DEBUG_PROCESSES 0x0020 /* Process creation and removal */ +#define DEBUG_MEMORY 0x0040 /* Display results of memory checks */ extern Uint32 verbose; @@ -88,7 +87,6 @@ extern void erts_check_stack(Process *p); extern void erts_check_heap(Process *p); extern void erts_check_memory(Process *p, Eterm *start, Eterm *end); extern void verify_process(Process *p); -extern void verify_everything(void); extern void print_tagged_memory(Eterm *start, Eterm *end); extern void print_untagged_memory(Eterm *start, Eterm *end); extern void print_memory(Process *p); @@ -99,10 +97,4 @@ extern void erts_debug_use_tmp_heap(int, Process *); extern void erts_debug_unuse_tmp_heap(int, Process *); #endif -#ifdef HYBRID -extern void print_ma_info(void); -extern void print_message_area(void); -extern void check_message_area(void); -#endif - #endif /* _ERL_DEBUG_H_ */ diff --git a/erts/emulator/beam/erl_fun.h b/erts/emulator/beam/erl_fun.h index 2f165afa06..54cfd6aa83 100644 --- a/erts/emulator/beam/erl_fun.h +++ b/erts/emulator/beam/erl_fun.h @@ -54,9 +54,7 @@ typedef struct erl_fun_entry { typedef struct erl_fun_thing { Eterm thing_word; /* Subtag FUN_SUBTAG. */ ErlFunEntry* fe; /* Pointer to fun entry. */ -#ifndef HYBRID /* FIND ME! */ struct erl_off_heap_header* next; -#endif #ifdef HIPE UWord* native_address; /* Native code for the fun. */ #endif @@ -83,9 +81,7 @@ ErlFunEntry* erts_get_fun_entry2(Eterm mod, int old_uniq, int old_index, byte* uniq, int index, int arity); void erts_erase_fun_entry(ErlFunEntry* fe); -#ifndef HYBRID /* FIND ME! */ void erts_cleanup_funs(ErlFunThing* funp); -#endif void erts_cleanup_funs_on_purge(BeamInstr* start, BeamInstr* end); void erts_dump_fun_entries(int, void *); diff --git a/erts/emulator/beam/erl_init.c b/erts/emulator/beam/erl_init.c index ca4385dd3a..02164728fe 100644 --- a/erts/emulator/beam/erl_init.c +++ b/erts/emulator/beam/erl_init.c @@ -34,7 +34,6 @@ #include "erl_binary.h" #include "dist.h" #include "erl_mseg.h" -#include "erl_nmgc.h" #include "erl_threads.h" #include "erl_bif_timer.h" #include "erl_instrument.h" @@ -154,28 +153,6 @@ Export *erts_delay_trap = NULL; int erts_use_r9_pids_ports; -#ifdef HYBRID -Eterm *global_heap; -Eterm *global_hend; -Eterm *global_htop; -Eterm *global_saved_htop; -Eterm *global_old_heap; -Eterm *global_old_hend; -ErlOffHeap erts_global_offheap; -Uint global_heap_sz = SH_DEFAULT_SIZE; - -#ifndef INCREMENTAL -Eterm *global_high_water; -Eterm *global_old_htop; -#endif - -Uint16 global_gen_gcs; -Uint16 global_max_gen_gcs; -Uint global_gc_flags; - -Uint global_heap_min_sz = SH_DEFAULT_SIZE; -#endif - int ignore_break; int replace_intr; @@ -281,7 +258,6 @@ erl_init(int ncpu) erl_drv_thr_init(); erts_init_async(); init_io(); - init_copy(); init_load(); erts_init_bif(); erts_init_bif_chksum(); @@ -302,45 +278,6 @@ erl_init(int ncpu) } static void -init_shared_memory(int argc, char **argv) -{ -#ifdef HYBRID - int arg_size = 0; - - global_heap_sz = erts_next_heap_size(global_heap_sz,0); - - /* Make sure arguments will fit on the heap, no one else will check! 
*/ - while (argc--) - arg_size += 2 + strlen(argv[argc]); - if (global_heap_sz < arg_size) - global_heap_sz = erts_next_heap_size(arg_size,1); - -#ifndef INCREMENTAL - global_heap = (Eterm *) ERTS_HEAP_ALLOC(ERTS_ALC_T_HEAP, - sizeof(Eterm) * global_heap_sz); - global_hend = global_heap + global_heap_sz; - global_htop = global_heap; - global_high_water = global_heap; - global_old_hend = global_old_htop = global_old_heap = NULL; -#endif - - global_gen_gcs = 0; - global_max_gen_gcs = (Uint16) erts_smp_atomic32_read_nob(&erts_max_gen_gcs); - global_gc_flags = erts_default_process_flags; - - erts_global_offheap.mso = NULL; -#ifndef HYBRID /* FIND ME! */ - erts_global_offheap.funs = NULL; -#endif - erts_global_offheap.overhead = 0; -#endif - -#ifdef INCREMENTAL - erts_init_incgc(); -#endif -} - -static void erl_first_process_otp(char* modname, void* code, unsigned size, int argc, char** argv) { int i; @@ -511,10 +448,14 @@ void erts_usage(void) erts_fprintf(stderr, "-rg amount set reader groups limit\n"); erts_fprintf(stderr, "-sbt type set scheduler bind type, valid types are:\n"); erts_fprintf(stderr, " u|ns|ts|ps|s|nnts|nnps|tnnps|db\n"); + erts_fprintf(stderr, "-sbwt val set scheduler busy wait threshold, valid values are:\n"); + erts_fprintf(stderr, " none|very_short|short|medium|long|very_long.\n"); erts_fprintf(stderr, "-scl bool enable/disable compaction of scheduler load,\n"); erts_fprintf(stderr, " see the erl(1) documentation for more info.\n"); erts_fprintf(stderr, "-sct cput set cpu topology,\n"); erts_fprintf(stderr, " see the erl(1) documentation for more info.\n"); + erts_fprintf(stderr, "-sws val set scheduler wakeup strategy, valid values are:\n"); + erts_fprintf(stderr, " default|legacy|proposal.\n"); erts_fprintf(stderr, "-swt val set scheduler wakeup threshold, valid values are:\n"); erts_fprintf(stderr, " very_low|low|medium|high|very_high.\n"); erts_fprintf(stderr, "-sss size suggested stack size in kilo words for scheduler threads,\n"); @@ -790,6 +731,10 @@ early_init(int *argc, char **argv) /* } } +#ifndef USE_THREADS + erts_async_max_threads = 0; +#endif + #ifdef ERTS_SMP no_schedulers = schdlrs; no_schedulers_online = schdlrs_onln; @@ -991,7 +936,6 @@ erl_start(int argc, char **argv) switch (*ch) { case 's': verbose |= DEBUG_SYSTEM; break; case 'g': verbose |= DEBUG_PRIVATE_GC; break; - case 'h': verbose |= DEBUG_HYBRID_GC; break; case 'M': verbose |= DEBUG_MEMORY; break; case 'a': verbose |= DEBUG_ALLOCATION; break; case 't': verbose |= DEBUG_THREADS; break; @@ -1004,7 +948,6 @@ erl_start(int argc, char **argv) erts_printf("Verbose level: "); if (verbose & DEBUG_SYSTEM) erts_printf("SYSTEM "); if (verbose & DEBUG_PRIVATE_GC) erts_printf("PRIVATE_GC "); - if (verbose & DEBUG_HYBRID_GC) erts_printf("HYBRID_GC "); if (verbose & DEBUG_MEMORY) erts_printf("PARANOID_MEMORY "); if (verbose & DEBUG_ALLOCATION) erts_printf("ALLOCATION "); if (verbose & DEBUG_THREADS) erts_printf("THREADS "); @@ -1032,12 +975,6 @@ erl_start(int argc, char **argv) #ifdef HIPE strcat(tmp, ",HIPE"); #endif -#ifdef INCREMENTAL - strcat(tmp, ",INCREMENTAL_GC"); -#endif -#ifdef HYBRID - strcat(tmp, ",HYBRID"); -#endif erts_fprintf(stderr, "Erlang "); if (tmp[1]) { erts_fprintf(stderr, "(%s) ", tmp+1); @@ -1198,6 +1135,16 @@ erl_start(int argc, char **argv) erts_usage(); } } + else if (has_prefix("bwt", sub_param)) { + arg = get_arg(sub_param+3, argv[i+1], &i); + if (erts_sched_set_busy_wait_threshold(arg) != 0) { + erts_fprintf(stderr, "bad scheduler busy wait threshold: %s\n", + arg); + 
erts_usage(); + } + VERBOSE(DEBUG_SYSTEM, + ("scheduler wakup threshold: %s\n", arg)); + } else if (has_prefix("cl", sub_param)) { arg = get_arg(sub_param+2, argv[i+1], &i); if (sys_strcmp("true", arg) == 0) @@ -1258,13 +1205,23 @@ erl_start(int argc, char **argv) erts_use_sender_punish = 0; else if (sys_strcmp("wt", sub_param) == 0) { arg = get_arg(sub_param+2, argv[i+1], &i); - if (erts_sched_set_wakeup_limit(arg) != 0) { + if (erts_sched_set_wakeup_other_thresold(arg) != 0) { erts_fprintf(stderr, "scheduler wakeup threshold: %s\n", arg); erts_usage(); } VERBOSE(DEBUG_SYSTEM, - ("scheduler wakup threshold: %s\n", arg)); + ("scheduler wakeup threshold: %s\n", arg)); + } + else if (sys_strcmp("ws", sub_param) == 0) { + arg = get_arg(sub_param+2, argv[i+1], &i); + if (erts_sched_set_wakeup_other_type(arg) != 0) { + erts_fprintf(stderr, "scheduler wakeup strategy: %s\n", + arg); + erts_usage(); + } + VERBOSE(DEBUG_SYSTEM, + ("scheduler wakeup threshold: %s\n", arg)); } else if (has_prefix("ss", sub_param)) { /* suggested stack size (Kilo Words) for scheduler threads */ @@ -1469,7 +1426,6 @@ erl_start(int argc, char **argv) erl_init(ncpu); - init_shared_memory(boot_argc, boot_argv); load_preloaded(); erts_initialized = 1; @@ -1556,32 +1512,6 @@ system_cleanup(int flush_async) #endif #endif -#ifdef HYBRID - if (ma_src_stack) erts_free(ERTS_ALC_T_OBJECT_STACK, - (void *)ma_src_stack); - if (ma_dst_stack) erts_free(ERTS_ALC_T_OBJECT_STACK, - (void *)ma_dst_stack); - if (ma_offset_stack) erts_free(ERTS_ALC_T_OBJECT_STACK, - (void *)ma_offset_stack); - ma_src_stack = NULL; - ma_dst_stack = NULL; - ma_offset_stack = NULL; - erts_cleanup_offheap(&erts_global_offheap); -#endif - -#if defined(HYBRID) && !defined(INCREMENTAL) - if (global_heap) { - ERTS_HEAP_FREE(ERTS_ALC_T_HEAP, - (void*) global_heap, - sizeof(Eterm) * global_heap_sz); - } - global_heap = NULL; -#endif - -#ifdef INCREMENTAL - erts_cleanup_incgc(); -#endif - erts_exit_flush_async(); } diff --git a/erts/emulator/beam/erl_message.c b/erts/emulator/beam/erl_message.c index bd86e3ea9e..499a2aac53 100644 --- a/erts/emulator/beam/erl_message.c +++ b/erts/emulator/beam/erl_message.c @@ -29,7 +29,6 @@ #include "global.h" #include "erl_message.h" #include "erl_process.h" -#include "erl_nmgc.h" #include "erl_binary.h" #include "dtrace-wrapper.h" @@ -303,8 +302,6 @@ notify_new_message(Process *receiver) ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_STATUS & erts_proc_lc_my_proc_locks(receiver)); - ACTIVATE(receiver); - switch (receiver->status) { case P_GARBING: switch (receiver->gcstatus) { @@ -987,56 +984,6 @@ erts_send_message(Process* sender, #endif ); BM_SWAP_TIMER(send,system); -#ifdef HYBRID - } else { - ErlMessage* mp = message_alloc(); - BM_SWAP_TIMER(send,copy); -#ifdef INCREMENTAL - /* TODO: During GC activate processes if the message relies in - * the fromspace and the sender is active. During major - * collections add the message to the gray stack if it relies - * in the old generation and the sender is active and the - * receiver is inactive. 
- - if (!IS_CONST(message) && (ma_gc_flags & GC_CYCLE) && - (ptr_val(message) >= inc_fromspc && - ptr_val(message) < inc_fromend) && INC_IS_ACTIVE(sender)) - INC_ACTIVATE(receiver); - else if (!IS_CONST(message) && (ma_gc_flags & GC_CYCLE) && - (ptr_val(message) >= global_old_heap && - ptr_val(message) < global_old_hend) && - INC_IS_ACTIVE(sender) && !INC_IS_ACTIVE(receiver)) - Mark message in blackmap and add it to the gray stack - */ - - if (!IS_CONST(message)) - INC_ACTIVATE(receiver); -#endif - LAZY_COPY(sender,message); - BM_SWAP_TIMER(copy,send); - DTRACE6(message_send, sender_name, receiver_name, - size_object(message)msize, tok_label, tok_lastcnt, tok_serial); - ERL_MESSAGE_TERM(mp) = message; - ERL_MESSAGE_TOKEN(mp) = NIL; -#ifdef USE_VM_PROBES - ERL_MESSAGE_DT_UTAG(mp) = NIL; -#endif - mp->next = NULL; - LINK_MESSAGE(receiver, mp); - ACTIVATE(receiver); - - if (receiver->status == P_WAITING) { - erts_add_to_runq(receiver); - } else if (receiver->status == P_SUSPENDED) { - receiver->rstatus = P_RUNABLE; - } - if (IS_TRACED_FL(receiver, F_TRACE_RECEIVE)) { - trace_receive(receiver, message); - } - - BM_SWAP_TIMER(send,system); - return; -#else } else if (sender == receiver) { /* Drop message if receiver has a pending exit ... */ #ifdef ERTS_SMP @@ -1145,7 +1092,6 @@ erts_send_message(Process* sender, BM_SWAP_TIMER(send,system); #endif /* #ifndef ERTS_SMP */ return; -#endif /* HYBRID */ } } diff --git a/erts/emulator/beam/erl_nmgc.c b/erts/emulator/beam/erl_nmgc.c deleted file mode 100644 index 2a8c819360..0000000000 --- a/erts/emulator/beam/erl_nmgc.c +++ /dev/null @@ -1,1401 +0,0 @@ -/* - * %CopyrightBegin% - * - * Copyright Ericsson AB 2004-2011. All Rights Reserved. - * - * The contents of this file are subject to the Erlang Public License, - * Version 1.1, (the "License"); you may not use this file except in - * compliance with the License. You should have received a copy of the - * Erlang Public License along with this software. If not, it can be - * retrieved online at http://www.erlang.org/. - * - * Software distributed under the License is distributed on an "AS IS" - * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See - * the License for the specific language governing rights and limitations - * under the License. 
- * - * %CopyrightEnd% - */ - -#ifdef HAVE_CONFIG_H -# include "config.h" -#endif -#include "global.h" -#include "erl_gc.h" -#include "erl_binary.h" -#include "erl_nmgc.h" -#include "erl_debug.h" -#if HIPE -#include "hipe_stack.h" -#endif - - -#ifdef INCREMENTAL -/*************************************************************************** - * * - * Incremental Garbage Collector for the Message Area * - * * - ***************************************************************************/ - -/* - * The heap pointers are declared in erl_init.c - * global_heap is the nursery - * global_old_heap is the old generation - */ -unsigned char *blackmap = NULL; -INC_Page *inc_used_mem = NULL; -INC_MemBlock *inc_free_list = NULL; -Eterm *inc_fromspc; -Eterm *inc_fromend; -Eterm *inc_nursery_scn_ptr; -Eterm **fwdptrs; -Eterm *inc_alloc_limit; -Process *inc_active_proc; -Process *inc_active_last; -int inc_words_to_go; - -static Eterm *inc_last_nursery; -static int inc_pages = INC_NoPAGES; -static INC_Page *inc_bibop = NULL; -static int inc_used_pages; - -/* Used when growing the old generation */ -/* -#define INC_ROOTSAVE 16384 -static Eterm *root_save[INC_ROOTSAVE]; -static int roots_saved = 0; -*/ - -INC_STORAGE_DECLARATION(,gray); - -static void inc_minor_gc(Process *p, int need, Eterm* objv, int nobj); -static void inc_major_gc(Process *p, int need, Eterm* objv, int nobj); - -#ifdef INC_TIME_BASED -#if USE_PERFCTR - -/* - * This uses the Linux perfctr extension to virtualise the - * time-stamp counter. - */ -#include "libperfctr.h" -static struct vperfctr *vperfctr; -static double cpu_khz; -static double tsc_to_cpu_mult; - -static void inc_start_hrvtime(void) -{ - struct perfctr_info info; - struct vperfctr_control control; - - if( vperfctr != NULL ) - return; - vperfctr = vperfctr_open(); - if( vperfctr == NULL ) - return; - if( vperfctr_info(vperfctr, &info) >= 0 ) { - cpu_khz = (double)info.cpu_khz; - tsc_to_cpu_mult = (double)(info.tsc_to_cpu_mult ? 
: 1); - if( info.cpu_features & PERFCTR_FEATURE_RDTSC ) { - memset(&control, 0, sizeof control); - control.cpu_control.tsc_on = 1; - if( vperfctr_control(vperfctr, &control) >= 0 ) - return; - } - } - vperfctr_close(vperfctr); - vperfctr = NULL; -} - -#define inc_get_hrvtime() (((double)vperfctr_read_tsc(vperfctr) * tsc_to_cpu_mult) / cpu_khz) - -#endif /* USE_PERFCTR */ -#endif /* INC_TIME_BASED */ - -#ifdef INC_TIME_BASED -# define timeslice 1 /* milli seconds */ -# define WORK_MORE (inc_get_hrvtime() < start_time + timeslice) -#else -//# define inc_min_work 100 /* words */ -# define inc_min_work global_heap_sz + inc_pages * INC_FULLPAGE /* words */ -# define WORK_MORE (inc_words_to_go > 0) -#endif - -void erts_init_incgc(void) -{ - int i; - int size = inc_pages * INC_FULLPAGE; - - /* Young generation */ - global_heap = (Eterm *)erts_alloc(ERTS_ALC_T_MESSAGE_AREA, - sizeof(Eterm) * global_heap_sz); - global_hend = global_heap + global_heap_sz; - global_htop = global_heap; - inc_alloc_limit = global_hend; - - /* Fromspace */ - inc_last_nursery = (Eterm *) erts_alloc(ERTS_ALC_T_MESSAGE_AREA, - global_heap_sz * sizeof(Eterm)); - inc_fromspc = inc_fromend = NULL; - - /* Forward-pointers */ - fwdptrs = erts_alloc(ERTS_ALC_T_MESSAGE_AREA, - global_heap_sz * sizeof(Eterm*)); - /* Old generation */ - global_old_heap = (Eterm *)erts_alloc(ERTS_ALC_T_MESSAGE_AREA, - size * sizeof(Eterm)); - global_old_hend = global_old_heap + size; - - /* Pages i BiBOP */ - for (i = 0; i < inc_pages; i++) - { - INC_Page *this = (INC_Page*)(global_old_heap + i * INC_FULLPAGE); - this->next = (INC_Page*)((Eterm*)this + INC_FULLPAGE); - } - - inc_bibop = (INC_Page*)global_old_heap; - ((INC_Page*)(global_old_heap + (inc_pages - 1) * INC_FULLPAGE))->next = - NULL; - - inc_used_mem = inc_bibop; - inc_bibop = inc_bibop->next; - inc_used_mem->next = NULL; - inc_used_pages = 1; - - /* Free-list */ - inc_free_list = (INC_MemBlock*)inc_used_mem->start; - inc_free_list->size = INC_PAGESIZE; - inc_free_list->prev = NULL; - inc_free_list->next = NULL; - - /* Blackmap */ - blackmap = (unsigned char*)erts_alloc(ERTS_ALC_T_MESSAGE_AREA, - INC_FULLPAGE * inc_pages); - /* Gray stack */ - INC_STORAGE_INIT(gray); - - inc_active_proc = NULL; - inc_active_last = NULL; - -#ifdef INC_TIME_BASED - inc_start_hrvtime(); -#endif -} - -void erts_cleanup_incgc(void) -{ - INC_STORAGE_ERASE(gray); - - if (inc_fromspc) - inc_last_nursery = inc_fromspc; - - erts_free(ERTS_ALC_T_MESSAGE_AREA,(void*)global_heap); - erts_free(ERTS_ALC_T_MESSAGE_AREA,(void*)inc_last_nursery); - erts_free(ERTS_ALC_T_MESSAGE_AREA,(void*)global_old_heap); - erts_free(ERTS_ALC_T_MESSAGE_AREA,(void*)blackmap); - erts_free(ERTS_ALC_T_MESSAGE_AREA,(void*)fwdptrs); -} - -void erts_incremental_gc(Process* p, int need, Eterm* objv, int nobj) -{ - int repeat_minor; -#ifdef INC_TIME_BASED - double start_time = inc_get_hrvtime(); - int work_left_before = inc_words_to_go; -#endif - /* Used when growing the fromspace */ - static char inc_growing_nurs = 0; - - BM_STOP_TIMER(system); - //BM_MMU_READ(); - BM_RESET_TIMER(gc); - BM_START_TIMER(gc); - - VERBOSE(DEBUG_HYBRID_GC, - ("INCGC: Incremental GC START Caused by: %T Need: %d\n", - p->id,need)); - - ma_gc_flags |= GC_GLOBAL; - ma_gc_flags &= ~GC_CYCLE_START; - -#ifndef INC_TIME_BASED - /* Decide how much work to do this GC stage. The work is meassured - * in number of words copied from the young generation to the old - * plus number of work marked in the old generation. 
- */ - if (ma_gc_flags & GC_MAJOR) { - int wm = (need > inc_min_work) ? need : inc_min_work; - inc_words_to_go = (int)((wm * (((inc_used_pages * INC_PAGESIZE) / - (double)global_heap_sz) + 1)) + 0.5); - } - else - inc_words_to_go = (need > inc_min_work) ? need : inc_min_work; -#endif - - do { - if (ma_gc_flags & GC_MAJOR) { - /* This is a major collection cycle. */ - inc_major_gc(p,need,objv,nobj); - } else if (ma_gc_flags & GC_CYCLE) { - /* This is a minor collection cycle. */ - inc_minor_gc(p,need,objv,nobj); - } else { - VERBOSE(DEBUG_HYBRID_GC,("INCGC: Collection cycle START\n")); - ma_gc_flags |= (GC_CYCLE | GC_CYCLE_START); - inc_fromspc = global_heap; - inc_fromend = global_htop; - global_heap = global_htop = inc_last_nursery; - global_hend = global_heap + global_heap_sz; - inc_nursery_scn_ptr = global_heap; -#ifdef INC_TIME_BASED - work_left_before = inc_words_to_go = global_heap_sz; -#endif -#ifdef DEBUG - inc_last_nursery = NULL; -#endif - memset(fwdptrs,0,global_heap_sz * sizeof(Eterm)); - - { - /* TODO: Alla processer ska v�l egentligen inte aktiveras h�r... */ - int i; - for (i = 0; i < erts_num_active_procs; i++) { - Process *cp = erts_active_procs[i]; - INC_ACTIVATE(cp); - cp->scan_top = cp->high_water; - } - } - - if (ma_gc_flags & GC_NEED_MAJOR) { - /* The previous collection cycle caused the old generation to - * overflow. This collection cycle will therefore be a major - * one. - */ - BM_COUNT(major_gc_cycles); - VERBOSE(DEBUG_HYBRID_GC,("INCGC: MAJOR cycle\n")); - inc_major_gc(p,need,objv,nobj); - } else { - BM_COUNT(minor_gc_cycles); - VERBOSE(DEBUG_HYBRID_GC,("INCGC: MINOR cycle\n")); - inc_minor_gc(p,need,objv,nobj); - } - } - - repeat_minor = 0; - if (!(ma_gc_flags & GC_CYCLE)) { - inc_alloc_limit = global_hend; - inc_last_nursery = inc_fromspc; - inc_fromspc = inc_fromend = NULL; - ASSERT(INC_STORAGE_EMPTY(gray)); - - if (inc_growing_nurs) { - /* - * The previous collection cycle caused the nursery to - * grow, now we have to grow the from-space as well. - */ - inc_last_nursery = - (Eterm*) erts_realloc(ERTS_ALC_T_MESSAGE_AREA, - (void*)inc_last_nursery, - sizeof(Eterm) * global_heap_sz); - inc_growing_nurs = 0; - } - - if (global_hend - global_htop <= need) { - /* - * Initiate a new GC cycle immediately and, if necessary, - * enlarge the nursery. - */ - if (global_heap_sz <= need) { - VERBOSE(DEBUG_HYBRID_GC, - ("INCGC: Allocating a larger nursery\n")); - global_heap_sz = erts_next_heap_size(need * 1.5,0); - inc_last_nursery = - (Eterm*) erts_realloc(ERTS_ALC_T_MESSAGE_AREA, - (void*)inc_last_nursery, - sizeof(Eterm) * global_heap_sz); - fwdptrs = erts_realloc(ERTS_ALC_T_MESSAGE_AREA,fwdptrs, - global_heap_sz * sizeof(Eterm*)); - inc_growing_nurs = 1; - } - repeat_minor = 1; - } - -#ifdef DEBUG - /* Fill the from-space with bad things */ - memset(inc_last_nursery,DEBUG_BAD_BYTE, - global_heap_sz * sizeof(Eterm)); -#endif - } - } while (repeat_minor); - - - /* Clean up after garbage collection ********************************/ - - if (inc_alloc_limit != global_hend) { - -#ifdef INC_TIME_BASED - if ((work_left_before - inc_words_to_go) == 0) { - inc_alloc_limit = global_htop + need; - } else { - inc_alloc_limit = (global_hend - global_htop) / - (inc_words_to_go / (work_left_before - inc_words_to_go)) + - global_htop; - if (inc_alloc_limit > global_hend) - inc_alloc_limit = global_hend; - } -#else - inc_alloc_limit = (Eterm*)(global_htop + (need > inc_min_work) ? 
- need : inc_min_work); - if (inc_alloc_limit > global_hend) - inc_alloc_limit = global_hend; -#endif - } - - ma_gc_flags &= ~GC_GLOBAL; - - /* INC_TIME_BASED: If this fails we have to increase the timeslice! */ - ASSERT(inc_alloc_limit - global_htop > need); - - BM_STOP_TIMER(gc); -#ifdef BM_TIMERS - minor_global_gc_time += gc_time; - if (gc_time > max_global_minor_time) - max_global_minor_time = gc_time; - - pause_times[(((gc_time * 1000) < MAX_PAUSE_TIME) ? - (int)(gc_time * 1000) : - MAX_PAUSE_TIME - 1)]++; -#endif - //BM_MMU_INIT(); - { static long long verif = 0; - //erts_printf("innan verify: %d\n",++verif); - if (verif==168) print_memory(NULL); - verify_everything(); - //erts_printf("efter verify: %d\n",verif); - } - BM_START_TIMER(system); - VERBOSE(DEBUG_HYBRID_GC,("INCGC: Incremental GC END\n")); -} - - -/*************************************************************************** - * * - * Minor collection - Copy live data from young generation to old * - * * - ***************************************************************************/ - -#define MINOR_SCAN(PTR,END) do { \ - ASSERT(PTR <= END); \ - while (WORK_MORE && PTR < END) { \ - Eterm val = *PTR; \ - Eterm *obj_ptr = ptr_val(val); \ - switch (primary_tag(val)) { \ - case TAG_PRIMARY_LIST: \ - if (ptr_within(obj_ptr,inc_fromspc,inc_fromend)) { \ - if (INC_IS_FORWARDED(obj_ptr)) { \ - *PTR = make_list(INC_FORWARD_VALUE(obj_ptr)); \ - } \ - else { \ - Eterm *hp = erts_inc_alloc(2); \ - INC_STORE(gray,hp,2); \ - INC_COPY_CONS(obj_ptr,hp,PTR); \ - } \ - } \ - break; \ - case TAG_PRIMARY_BOXED: \ - if (ptr_within(obj_ptr,inc_fromspc,inc_fromend)) { \ - if (INC_IS_FORWARDED(obj_ptr)) { \ - *PTR = make_boxed(INC_FORWARD_VALUE(obj_ptr)); \ - } \ - else { \ - Eterm *hp = erts_inc_alloc(BOXED_NEED(obj_ptr,*obj_ptr)); \ - INC_STORE(gray,hp,BOXED_NEED(obj_ptr,*obj_ptr)); \ - INC_COPY_BOXED(obj_ptr,hp,PTR); \ - } \ - } \ - break; \ - case TAG_PRIMARY_HEADER: \ - switch (val & _TAG_HEADER_MASK) { \ - case ARITYVAL_SUBTAG: break; \ - default: PTR += thing_arityval(val); break; \ - } \ - break; \ - } \ - PTR++; \ - } \ -} while(0) - - -/* Returns: TRUE (1) if the need is greater than the available space - * and the garbage collector needs to be restarted immediately. FALSE - * (0) otherwise. - */ -static void inc_minor_gc(Process* p, int need, Eterm* objv, int nobj) -{ - BM_COUNT(minor_gc_stages); - - /* Start with looking at gray objects found in earlier collection - * stages. - */ - VERBOSE(DEBUG_HYBRID_GC,("INCGC: Rescue gray found from nursery\n")); - { - INC_Object *obj = NULL; - Eterm *ptr; - - while (WORK_MORE && !INC_STORAGE_EMPTY(gray)) { - obj = INC_STORAGE_GET(gray); - if ((*obj->this & _TAG_HEADER_MASK) == FUN_SUBTAG) { - ptr = obj->this + thing_arityval(*obj->this) + 1; - } else { - ptr = obj->this; - } - MINOR_SCAN(ptr,obj->this + obj->size); - } - /* TODO: Se f�reg�ende uppdatering av gr� objekt */ - if (!WORK_MORE && obj != NULL) - INC_STORE(gray,obj->this,obj->size); - } - - VERBOSE(DEBUG_HYBRID_GC,("INCGC: Scan root-set\n")); - while (WORK_MORE && inc_active_proc) { - Rootset rootset; - Process *cp = inc_active_proc; - - ASSERT(INC_IS_ACTIVE(cp)); - - /* TODO: Hur dyrt �r det att bygga nytt rootset varje g�ng? */ - - /* TODO: Fundera p� ordningen! Rootset, Heap, Old heap... */ - - /* TODO: Scanna stacken fr�n p->send till p->stop! [Brooks84] */ - /* Notera: Vi GC:ar inte de yngsta objekten - de som allokeras - under GC-cykeln. Detta ger ynglingarna en chans att d� innan - GC:n b�rjar kopiera dem. 
[StefanovicMcKinleyMoss@OOPSLA99] */ - - /* TODO: N�r rootset �r scannat borde processen inte vara - aktiv mer. Den b�r aktiveras i schedule, endast om en - process har k�rt beh�ver vi scanna rootset igen. */ - - /* MT: In a multithreaded system the process cp needs to be - * locked here. - */ - - if (cp == p) - rootset.n = setup_rootset(cp, objv, nobj, &rootset); - else - rootset.n = setup_rootset(cp, cp->arg_reg, cp->arity, &rootset); - - //MA_GENSWEEP_NSTACK(cp, old_htop, n_htop, objv, nobj); - - while (WORK_MORE && rootset.n--) { - Eterm *g_ptr = rootset.v[rootset.n]; - Uint g_sz = rootset.sz[rootset.n]; - - while (WORK_MORE && g_sz--) { - Eterm gval = *g_ptr; - switch (primary_tag(gval)) { - case TAG_PRIMARY_LIST: { - Eterm *ptr = list_val(gval); - if (ptr_within(ptr,inc_fromspc,inc_fromend)) { - if (INC_IS_FORWARDED(ptr)) { - *g_ptr++ = make_list(INC_FORWARD_VALUE(ptr)); - } - else { - Eterm *hp = erts_inc_alloc(2); - INC_STORE(gray,hp,2); - INC_COPY_CONS(ptr,hp,g_ptr++); - } - } - else - ++g_ptr; - continue; - } - - case TAG_PRIMARY_BOXED: { - Eterm *ptr = boxed_val(gval); - if (ptr_within(ptr,inc_fromspc,inc_fromend)) { - if (INC_IS_FORWARDED(ptr)) { - *g_ptr++ = make_boxed(INC_FORWARD_VALUE(ptr)); - } - else { - Eterm *hp = erts_inc_alloc(BOXED_NEED(ptr,*ptr)); - INC_STORE(gray,hp,BOXED_NEED(ptr,*ptr)); - INC_COPY_BOXED(ptr,hp,g_ptr++); - } - } - else - ++g_ptr; - continue; - } - - default: - g_ptr++; - continue; - } - } - } - - restore_one_rootset(cp, &rootset); - - /* MT: cp can be unlocked now. */ - - /* VERBOSE(DEBUG_HYBRID_GC,("INCGC: Scan private nursery\n")); */ - if (cp->scan_top != HEAP_TOP(cp)) { - Eterm *ptr = cp->scan_top; - MINOR_SCAN(ptr,HEAP_TOP(cp)); - /* TODO: F�r att spara scan_top h�r m�ste alla ma-pekare - * som hittas l�ggas till i cp->rrma. - */ - //cp->scan_top = ptr; - } - - /* VERBOSE(DEBUG_HYBRID_GC,("INCGC: Scan heap fragments\n")); */ - { - ErlHeapFragment* bp = MBUF(cp); - - while (WORK_MORE && bp) { - Eterm *ptr = bp->mem; - if ((ARITH_HEAP(cp) >= bp->mem) && - (ARITH_HEAP(cp) < bp->mem + bp->size)) { - MINOR_SCAN(ptr,ARITH_HEAP(cp)); - } else { - MINOR_SCAN(ptr,bp->mem + bp->size); - } - bp = bp->next; - } - } - - /* VERBOSE(DEBUG_HYBRID_GC,("INCGC: Scan gray\n")); */ - { - INC_Object *obj = NULL; - Eterm *ptr; - while (WORK_MORE && !INC_STORAGE_EMPTY(gray)) { - obj = INC_STORAGE_GET(gray); - if ((*obj->this & _TAG_HEADER_MASK) == FUN_SUBTAG) { - ptr = obj->this + thing_arityval(*obj->this) + 1; - } else { - ptr = obj->this; - } - MINOR_SCAN(ptr,obj->this + obj->size); - } - /* TODO: INC_STORE(gray,ptr,obj->size-(ptr-obj->this)); Typ.. */ - if (!WORK_MORE && obj != NULL) - INC_STORE(gray,obj->this,obj->size); - } - - if (WORK_MORE) { - //printf("Rootset after:\r\n"); - //print_one_rootset(&rootset); - INC_DEACTIVATE(cp); - } - } - - /* Update new pointers in the nursery to new copies in old generation. 
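The scans above all consult INC_IS_FORWARDED and INC_FORWARD_VALUE before copying anything out of the from-space. A minimal sketch of that side-array forwarding scheme, with hypothetical helper names (the real macros are defined in erl_nmgc.h further down):

static Eterm *fwd_lookup(Eterm **fwd, Eterm *fromspc, Eterm *obj)
{
    /* fwd[] runs parallel to the from-space; NULL means "not copied yet" */
    return fwd[obj - fromspc];
}

static void fwd_record(Eterm **fwd, Eterm *fromspc, Eterm *obj, Eterm *copy)
{
    /* remember where obj was evacuated to without overwriting obj itself */
    fwd[obj - fromspc] = copy;
}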
*/ - VERBOSE(DEBUG_HYBRID_GC,("INCGC: Update nursery\n")); - { - Eterm *ptr = inc_nursery_scn_ptr; - MINOR_SCAN(ptr,global_htop); - inc_nursery_scn_ptr = ptr; - } - - VERBOSE(DEBUG_HYBRID_GC,("INCGC: Rescue gray found from nursery\n")); - { - INC_Object *obj = NULL; - Eterm *ptr; - - while (WORK_MORE && !INC_STORAGE_EMPTY(gray)) { - obj = INC_STORAGE_GET(gray); - if ((*obj->this & _TAG_HEADER_MASK) == FUN_SUBTAG) { - ptr = obj->this + thing_arityval(*obj->this) + 1; - } else { - ptr = obj->this; - } - MINOR_SCAN(ptr,obj->this + obj->size); - } - /* TODO: Se f�reg�ende uppdatering av gr� objekt */ - if (!WORK_MORE && obj != NULL) - INC_STORE(gray,obj->this,obj->size); - } - - /* Atomic phase */ - VERBOSE(DEBUG_HYBRID_GC,("INCGC: Update copy stack\n")); - { - Uint i; - for (i = 0; i < ma_dst_top; i++) { - if (ptr_within(ma_dst_stack[i],inc_fromspc,inc_fromend)) { - if (INC_IS_FORWARDED(ma_dst_stack[i])) - ma_dst_stack[i] = INC_FORWARD_VALUE(ma_dst_stack[i]); - } - } - } - - if (WORK_MORE) { - VERBOSE(DEBUG_HYBRID_GC,("INCGC: Update offheap-lists\n")); - { - ExternalThing **prev = &erts_global_offheap.externals; - ExternalThing *ptr = erts_global_offheap.externals; - - /* Atomic phase */ - VERBOSE(DEBUG_HYBRID_GC,("INCGC: Sweep proc externals\n")); - while (ptr) { - Eterm *ppt = (Eterm*) ptr; - - if (ptr_within(ppt,global_old_heap,global_old_hend)) { - prev = &ptr->next; - ptr = ptr->next; - } else if (ptr_within(ppt, inc_fromspc, inc_fromend) && - INC_IS_FORWARDED(ppt)) { - ExternalThing *ro = (ExternalThing*)INC_FORWARD_VALUE(ppt); - *prev = ro; /* Patch to moved pos */ - prev = &ro->next; - ptr = ro->next; - } else { - erts_deref_node_entry(ptr->node); - *prev = ptr = ptr->next; - } - } - ASSERT(*prev == NULL); - } - - { - ProcBin **prev = &erts_global_offheap.mso; - ProcBin *ptr = erts_global_offheap.mso; - - /* Atomic phase */ - VERBOSE(DEBUG_HYBRID_GC,("INCGC: Sweep proc bins\n")); - while (ptr) { - Eterm *ppt = (Eterm*)ptr; - - if (ptr_within(ppt,global_old_heap,global_old_hend)) { - prev = &ptr->next; - ptr = ptr->next; - } else if (ptr_within(ppt, inc_fromspc, inc_fromend) && - INC_IS_FORWARDED(ppt)) { - ProcBin *ro = (ProcBin*)INC_FORWARD_VALUE(ppt); - *prev = ro; /* Patch to moved pos */ - prev = &ro->next; - ptr = ro->next; - } else { - Binary *bptr; - *prev = ptr->next; - bptr = ptr->val; - if (erts_refc_dectest(&bptr->refc, 0) == 0) - erts_bin_free(bptr); - ptr = *prev; - } - } - ASSERT(*prev == NULL); - } - - VERBOSE(DEBUG_HYBRID_GC,("INCGC: Minor collection cycle END\n")); - ma_gc_flags &= ~GC_CYCLE; - } -} - - - - -/*************************************************************************** - * * - * Major collection - CopyMark - Copy young to old, Mark-Sweep old * - * * - ***************************************************************************/ - -#define COPYMARK(PTR,END) do { \ - ASSERT(PTR <= END); \ - while (WORK_MORE && PTR < END) { \ - Eterm val = *PTR; \ - Eterm *obj_ptr = ptr_val(val); \ - switch (primary_tag(val)) { \ - case TAG_PRIMARY_LIST: \ - COPYMARK_CONS(obj_ptr,aging_htop,PTR,aging_end); break; \ - case TAG_PRIMARY_BOXED: \ - COPYMARK_BOXED(obj_ptr,aging_htop,PTR,aging_end); break; \ - case TAG_PRIMARY_HEADER: \ - switch (val & _TAG_HEADER_MASK) { \ - case ARITYVAL_SUBTAG: break; \ - default: \ - PTR += thing_arityval(val); \ - break; \ - } \ - break; \ - default: break; \ - } \ - PTR++; \ - } \ -} while(0); -/* TODO: - if (aging_htop + 10 > aging + INC_FULLPAGE) { - aging->next = inc_used_mem; - inc_used_mem = aging; - } -*/ - -static void 
inc_major_gc(Process *p, int need, Eterm* objv, int nobj) -{ - Eterm *free_start = NULL; - Uint live = 0; - Uint old_gen_sz = 0; - static INC_Page *aging; - static Eterm *aging_htop; - static Eterm *aging_end; - BM_NEW_TIMER(old_gc); - - BM_SWAP_TIMER(gc,old_gc); - BM_COUNT(major_gc_stages); - - VERBOSE(DEBUG_HYBRID_GC,("INCGC: Major collection START\n")); - - ma_gc_flags |= GC_INCLUDE_ALL; - - if (ma_gc_flags & GC_NEED_MAJOR) - { - INC_Page *page = inc_used_mem; - - ma_gc_flags |= GC_MAJOR; - ma_gc_flags &= ~GC_NEED_MAJOR; - - while (page) - { - memset(blackmap + - ((void*)page - (void*)global_old_heap) / sizeof(void*), - 0, INC_FULLPAGE); - page = page->next; - } - - if (inc_bibop) { - aging = inc_bibop; - inc_bibop = inc_bibop->next; - aging->next = NULL; - memset(blackmap + - ((void*)aging - (void*)global_old_heap) / sizeof(void*), - 1, INC_FULLPAGE); - aging_htop = aging->start; - aging_end = aging->start + INC_PAGESIZE; - } - else { - /* There are no free pages.. Either fragmentation is a - * problem or we are simply out of memory. Allocation in - * the old generation will be done through the free-list - * this GC cycle. - */ - aging = NULL; - aging_htop = aging_end = NULL; - } - } - - /* Start with looking at gray objects found in earlier collection - * stages. - */ - VERBOSE(DEBUG_HYBRID_GC,("INCGC: Copy-Mark gray\n")); - { - INC_Object *obj = NULL; - - while (WORK_MORE && !INC_STORAGE_EMPTY(gray)) { - Eterm *ptr; - - obj = INC_STORAGE_GET(gray); - if ((*obj->this & _TAG_HEADER_MASK) == FUN_SUBTAG) { - ptr = obj->this + thing_arityval(*obj->this) + 1; - } else { - ptr = obj->this; - } - COPYMARK(ptr,obj->this + obj->size); - } - /* TODO: Titta p� motsvarande i minor. */ - if (!WORK_MORE && obj != NULL) - INC_STORE(gray,obj->this,obj->size); - } - - VERBOSE(DEBUG_HYBRID_GC,("INCGC: Copy-Mark roots\n")); - while (WORK_MORE && inc_active_proc) - { - /* For each process: Scan all areas containing pointers to the - * message area. When a process is done here, all it's - * message-pointers should be to the old generation. - */ - Rootset rootset; - Process *cp = inc_active_proc; - - ASSERT(INC_IS_ACTIVE(cp)); - - /* MT: In a multithreaded system the process cp needs to be - * locked here. - */ - if (cp == p) - rootset.n = setup_rootset(cp, objv, nobj, &rootset); - else - rootset.n = setup_rootset(cp, cp->arg_reg, cp->arity, &rootset); - - while (WORK_MORE && rootset.n--) - { - Eterm *ptr = rootset.v[rootset.n]; - Eterm *end = ptr + rootset.sz[rootset.n]; - - while (WORK_MORE && ptr < end) { - Eterm val = *ptr; - Eterm *obj_ptr = ptr_val(val); - - switch (primary_tag(val)) { - case TAG_PRIMARY_LIST: - { - COPYMARK_CONS(obj_ptr,aging_htop,ptr,aging_end); - break; - } - - case TAG_PRIMARY_BOXED: - { - COPYMARK_BOXED(obj_ptr,aging_htop,ptr,aging_end); - break; - } - } - ptr++; - } - } - -#ifdef HIPE - /* Atomic phase */ - VERBOSE(DEBUG_HYBRID_GC,("INCGC: Native stack scan: %T\n",cp->id)); - aging_htop = ma_fullsweep_nstack(cp,aging_htop,aging_end); -#endif - restore_one_rootset(cp, &rootset); - - /* MT: cp can be unlocked now. But beware!! The message queue - * might be updated with new pointers to the fromspace while - * we work below. The send operation can not assume that all - * active processes will look through their message queue - * before deactivating as is the case in non-MT incremental - * collection. 
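During a major cycle the copy-mark macros bump-allocate survivors into the current aging page and fall back to the old-generation free list only when that page cannot hold the object. A sketch of just that allocation decision (copymark_alloc is a hypothetical helper; the real work happens inside COPYMARK_CONS and COPYMARK_BOXED):

static Eterm *copymark_alloc(Eterm **aging_htop, Eterm *aging_end, int size)
{
    if (*aging_htop + size <= aging_end) {
        Eterm *hp = *aging_htop;       /* bump-allocate inside the aging page */
        *aging_htop += size;
        return hp;
    }
    return erts_inc_alloc(size);       /* page full: allocate from the free list */
}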
- */ - - VERBOSE(DEBUG_HYBRID_GC,("INCGC: Copy-Mark process heap\n")); - { - Eterm *ptr = cp->scan_top; - COPYMARK(ptr,cp->htop); - //cp->scan_top = ptr; - } - - VERBOSE(DEBUG_HYBRID_GC,("INCGC: Copy-Mark heap fragments\n")); - { - ErlHeapFragment* bp = MBUF(cp); - - while (WORK_MORE && bp) { - Eterm *ptr = bp->mem; - Eterm *end; - - if ((ARITH_HEAP(cp) >= bp->mem) && - (ARITH_HEAP(cp) < bp->mem + bp->size)) { - end = ARITH_HEAP(cp); - } else { - end = bp->mem + bp->size; - } - - COPYMARK(ptr,end); - bp = bp->next; - } - } - - VERBOSE(DEBUG_HYBRID_GC,("INCGC: Copy-Mark gray stack\n")); - { - INC_Object *obj = NULL; - - while (WORK_MORE && !INC_STORAGE_EMPTY(gray)) { - Eterm *ptr; - - obj = INC_STORAGE_GET(gray); - if ((*obj->this & _TAG_HEADER_MASK) == FUN_SUBTAG) { - ptr = obj->this + thing_arityval(*obj->this) + 1; - } else { - ptr = obj->this; - } - COPYMARK(ptr,obj->this + obj->size); - } - /* TODO: Titta p� motsvarande i minor. */ - if (!WORK_MORE && obj != NULL) - INC_STORE(gray,obj->this,obj->size); - } - - if (WORK_MORE) { - INC_DEACTIVATE(cp); - } - } - - VERBOSE(DEBUG_HYBRID_GC,("INCGC: Copy-Mark nursery\n")); - { - Eterm *ptr = inc_nursery_scn_ptr; - COPYMARK(ptr,global_htop); - inc_nursery_scn_ptr = ptr; - } - - VERBOSE(DEBUG_HYBRID_GC,("INCGC: Copy-Mark gray found in nursery\n")); - { - INC_Object *obj = NULL; - - while (WORK_MORE && !INC_STORAGE_EMPTY(gray)) { - Eterm *ptr; - - obj = INC_STORAGE_GET(gray); - if ((*obj->this & _TAG_HEADER_MASK) == FUN_SUBTAG) { - ptr = obj->this + thing_arityval(*obj->this) + 1; - } else { - ptr = obj->this; - } - COPYMARK(ptr,obj->this + obj->size); - } - /* TODO: Titta p� motsvarande i minor. */ - if (!WORK_MORE && obj != NULL) - INC_STORE(gray,obj->this,obj->size); - } - - - /**********************************************************************/ - if (WORK_MORE) { - VERBOSE(DEBUG_HYBRID_GC,("INCGC: Sweep phase\n")); - - /* Atomic phase */ - VERBOSE(DEBUG_HYBRID_GC,("INCGC: Sweep externals in old generation\n")); - { - ExternalThing** prev = &erts_global_offheap.externals; - ExternalThing* ptr = erts_global_offheap.externals; - - while (ptr) { - Eterm* ppt = (Eterm *) ptr; - - if ((ptr_within(ppt, global_old_heap, global_old_hend) && - blackmap[ppt - global_old_heap] == 0) || - (ptr_within(ppt, inc_fromspc, inc_fromend) && - !INC_IS_FORWARDED(ppt))) - { - erts_deref_node_entry(ptr->node); - *prev = ptr = ptr->next; - } else if (ptr_within(ppt, inc_fromspc, inc_fromend)) { - ExternalThing* ro = (ExternalThing*)INC_FORWARD_VALUE(ppt); - *prev = ro; /* Patch to moved pos */ - prev = &ro->next; - ptr = ro->next; - } else { - prev = &ptr->next; - ptr = ptr->next; - } - } - ASSERT(*prev == NULL); - } - - /* Atomic phase */ - VERBOSE(DEBUG_HYBRID_GC,("INCGC: Sweep refc bins in old generation\n")); - { - ProcBin** prev = &erts_global_offheap.mso; - ProcBin* ptr = erts_global_offheap.mso; - - while (ptr) { - Eterm *ppt = (Eterm*)ptr; - - if ((ptr_within(ppt, global_old_heap, global_old_hend) && - blackmap[ppt - global_old_heap] == 0) || - (ptr_within(ppt, inc_fromspc, inc_fromend) && - !INC_IS_FORWARDED(ppt))) - { - Binary* bptr; - *prev = ptr->next; - bptr = ptr->val; - if (erts_refc_dectest(&bptr->refc, 0) == 0) - erts_bin_free(bptr); - ptr = *prev; - } else if (ptr_within(ppt, inc_fromspc, inc_fromend)) { - ProcBin* ro = (ProcBin*)INC_FORWARD_VALUE(ppt); - *prev = ro; /* Patch to moved pos */ - prev = &ro->next; - ptr = ro->next; - } else { - prev = &ptr->next; - ptr = ptr->next; - } - } - ASSERT(*prev == NULL); - } - - /* TODO: Currently atomic 
phase - Can not be later of course. */ - VERBOSE(DEBUG_HYBRID_GC,("INCGC: Sweep old generation\n")); - { - INC_Page *page = inc_used_mem; - INC_Page *prev = NULL; - inc_free_list = NULL; - - while (page) { - int scavenging = 0; - int n = page->start - global_old_heap; - int stop = n + INC_PAGESIZE; - - old_gen_sz += INC_PAGESIZE; - while (n < stop) { - if (blackmap[n] != 0) { - if (scavenging) { - Eterm *ptr = global_old_heap + n; - scavenging = 0; - if ((ptr - free_start) * sizeof(Eterm) >= - sizeof(INC_MemBlock)) - { - INC_MemBlock *new = (INC_MemBlock*)free_start; - new->size = ptr - free_start; - new->prev = NULL; - new->next = inc_free_list; - if (inc_free_list) - inc_free_list->prev = new; - inc_free_list = new; - } - } - if (blackmap[n] == 255) { - unsigned int size = - *(unsigned int*)(((long)&blackmap[n]+4) & ~3); - live += size; - n += size; - } - else { - live += blackmap[n]; - n += blackmap[n]; - } - } - else if (!scavenging) { - free_start = global_old_heap + n; - scavenging = 1; - n++; - } - else { - n++; - } - } - - if (scavenging) { - if ((global_old_heap + n - free_start) * sizeof(Eterm) > - sizeof(INC_MemBlock)) - { - INC_MemBlock *new = (INC_MemBlock*)free_start; - new->size = global_old_heap + n - free_start; - new->prev = NULL; - new->next = inc_free_list; - if (inc_free_list) - inc_free_list->prev = new; - inc_free_list = new; - } - else if (free_start == page->start) { - INC_Page *next = page->next; - - if (prev) - prev->next = page->next; - else - inc_used_mem = page->next; - - page->next = inc_bibop; - inc_bibop = page; - inc_used_pages--; - page = next; - continue; - } - } - prev = page; - page = page->next; - } - } - } - - ASSERT(inc_bibop); - /* - This code is not expected to work right now. - if (!inc_bibop) { - int i; - int new_pages = inc_pages * 2; - int size = sizeof(Eterm) * new_pages * INC_FULLPAGE; - Eterm *new_heap = erts_alloc(ERTS_ALC_T_MESSAGE_AREA,size); - Eterm *new_hend = new_heap + size; - Eterm *new_htop; - Eterm *last_page_end; - INC_Page *new_used_mem; - INC_Page *page; - - erts_printf("The last page has been allocated..\n"); - erts_printf("We need to copy things!\n"); - - / * Create new, bigger bag of pages * / - for (i = 0; i < new_pages; i++) - { - INC_Page *this = - (INC_Page*)(new_heap + i * INC_FULLPAGE); - this->next = (INC_Page*)((Eterm*)this + INC_FULLPAGE); - } - inc_bibop = (INC_Page*)new_heap; - ((INC_Page*)(new_heap + (new_pages - 1) * - INC_FULLPAGE))->next = NULL; - - new_used_mem = inc_bibop; - inc_bibop = inc_bibop->next; - new_used_mem->next = NULL; - - / * Move stuff from old bag to new * / - inc_free_list = NULL; - new_htop = new_used_mem->start; - last_page_end = new_htop + INC_PAGESIZE; - page = inc_used_mem; - while (page) - { - Eterm *ptr = page->start; - Eterm *page_end = ptr + INC_PAGESIZE; - int n = offsetof(INC_Page,start) / sizeof(void*) + - ((Eterm*)page - global_old_heap); - while (ptr < page_end) - { - if (blackmap[n] > 0) - { - if (last_page_end - new_htop < blackmap[n]) - { - INC_Page *new_page = inc_bibop; - inc_bibop = inc_bibop->next; - new_page->next = new_used_mem; - new_used_mem = new_page; - new_htop = new_page->start; - last_page_end = new_htop + INC_PAGESIZE; - } - - memcpy(new_htop,ptr,blackmap[n] * sizeof(Eterm)); - for (i = 0; i < blackmap[n]; i++) - { - *ptr++ = (Eterm)new_htop++; - } - //new_htop += blackmap[n]; - //ptr += blackmap[n]; - / * - if (blackmap[n] == 255) Do the right thing... 
- * / - n += blackmap[n]; - } - else - { - n++; ptr++; - } - } - page = page->next; - } - - page = inc_used_mem; - while (page) - { - Eterm *ptr = page->start; - Eterm *page_end = ptr + INC_PAGESIZE; - - / * TODO: If inc_used_mem is sorted in address order, this - * pass can be done at the same time as copying. * / - while (ptr < page_end) - { - if (ptr_within(ptr_val(*ptr),global_old_heap,global_old_hend)) - { - *ptr = *((Eterm*)ptr_val(*ptr)); - } - ptr++; - } - page = page->next; - } - - printf("Restore rootset after heap move. Roots: %d\r\n",roots_saved); - while (roots_saved--) - { - Eterm *ptr = root_save[roots_saved]; - *ptr = *((Eterm*)ptr_val(*ptr)); - } - - erts_free(ERTS_ALC_T_MESSAGE_AREA,(void*)global_old_heap); - - global_old_heap = new_heap; - global_old_hend = new_hend; - inc_used_mem = new_used_mem; - inc_pages = new_pages; - - if ((last_page_end - new_htop) * sizeof(Eterm) >= - sizeof(INC_MemBlock)) - { - inc_free_list = (INC_MemBlock*)(new_htop); - inc_free_list->size = last_page_end - new_htop; - inc_free_list->prev = NULL; - inc_free_list->next = NULL; - } - } - */ - - /* I vilka l�gen kan vi vilja sl�nga p� en extra sida.. ( < 25% kvar?) - if () - { - INC_Page *new_page = inc_bibop; - INC_MemBlock *new_free = - (INC_MemBlock*)new_page->start; - - VERBOSE(DEBUG_HYBRID_GC,("INCGC: Fetching new page\n")); - inc_bibop = inc_bibop->next; - - new_page->next = inc_used_mem; - if (inc_used_mem) - inc_used_mem->prev = new_page; - inc_used_mem = new_page; - - // kolla detta med normal sidstorlek! old_gen_sz += INC_PAGESIZE; - //BM_SWAP_TIMER(gc,misc1); - memset(blackmap + - ((void*)new_page - (void*)global_old_heap) / sizeof(void*), - 0, INC_FULLPAGE); - //BM_SWAP_TIMER(misc1,gc); - - new_free->prev = NULL; - new_free->next = inc_free_list; - new_free->size = INC_PAGESIZE; - if (inc_free_list) - inc_free_list->prev = new_free; - inc_free_list = new_free; - //printf("Snatched a new page @ 0x%08x\r\n",(int)new_page); - //print_free_list(); - found = new_free; - } - */ - - VERBOSE(DEBUG_HYBRID_GC,("INCGC: Update copy stack\n")); - { - Uint i; - for (i = 0; i < ma_dst_top; i++) { - if (ptr_within(ma_dst_stack[i],inc_fromspc,inc_fromend)) { - if (INC_IS_FORWARDED(ma_dst_stack[i])) - ma_dst_stack[i] = INC_FORWARD_VALUE(ma_dst_stack[i]); - } - } - } - - if (WORK_MORE) - { - int size_left = INC_PAGESIZE - (aging_htop - aging->start); - - if (size_left > sizeof(INC_MemBlock)) - { - ((INC_MemBlock*)aging_htop)->size = size_left; - ((INC_MemBlock*)aging_htop)->prev = NULL; - ((INC_MemBlock*)aging_htop)->next = inc_free_list; - if (inc_free_list) - inc_free_list->prev = (INC_MemBlock*)aging_htop; - inc_free_list = (INC_MemBlock*)aging_htop; - } - aging->next = inc_used_mem; - inc_used_mem = aging; - inc_used_pages++; - - ma_gc_flags &= ~GC_MAJOR; - ma_gc_flags &= ~GC_CYCLE; - - VERBOSE(DEBUG_HYBRID_GC,("INCGC: Major collection cycle END\n")); - } - - ma_gc_flags &= ~GC_INCLUDE_ALL; - - BM_STOP_TIMER(old_gc); -#ifdef BM_TIMER - major_global_gc_time += old_gc_time; - if (old_gc_time > max_global_major_time) - max_global_major_time = old_gc_time; - - if ((old_gc_time * 1000) < MAX_PAUSE_TIME) - pause_times_old[(int)(old_gc_time * 1000)]++; - else - pause_times_old[MAX_PAUSE_TIME - 1]++; -#endif - BM_START_TIMER(gc); -} - - - -/*************************************************************************** - * * - * Allocation in the old generation. Used in minor colection and when * - * copying the rest of a message after a GC. 
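The sweep above reads, and erts_inc_alloc below writes, a compact size encoding in the blackmap: block sizes up to 254 words are stored directly in the byte, while 255 is an escape meaning the real size sits in the next 4-byte-aligned int. A decoding sketch assuming that same layout (not the emulator's code):

static unsigned int blackmap_block_size(unsigned char *map, int n)
{
    if (map[n] == 255) {
        /* large block: word count stored as an int at the next aligned address */
        return *(unsigned int *)(((UWord)&map[n] + 4) & ~(UWord)3);
    }
    return map[n];                     /* small block: size in words stored directly */
}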
* - * * - ***************************************************************************/ - - -Eterm *erts_inc_alloc(int need) -{ - INC_MemBlock *this = inc_free_list; - - ASSERT(need < INC_PAGESIZE); - while (this && (this->size) < need) - { - this = this->next; - } - - if (!this) - { - /* If a free block large enough is not found, a new page is - * allocated. GC_NEED_MAJOR is set so that the next garbage - * collection cycle will be a major one, that is, both - * generations will be garbage collected. - */ - INC_Page *new_page = inc_bibop; - INC_MemBlock *new_free = (INC_MemBlock*)new_page->start; - - if (new_page) - { - VERBOSE(DEBUG_HYBRID_GC, - ("INCGC: Allocation grabs a new page\n")); - inc_bibop = inc_bibop->next; - new_page->next = inc_used_mem; - inc_used_mem = new_page; - inc_used_pages++; - - new_free->prev = NULL; - new_free->next = inc_free_list; - new_free->size = INC_PAGESIZE; - if (inc_free_list) - inc_free_list->prev = new_free; - inc_free_list = new_free; - - this = new_free; - if (!(ma_gc_flags & GC_MAJOR)) - ma_gc_flags |= GC_NEED_MAJOR; - } - else - { - erl_exit(-1, "inc_alloc ran out of pages!\n"); - } - } - - if (((this->size) - need) * sizeof(Eterm) >= sizeof(INC_MemBlock)) - { - INC_MemBlock *rest = (INC_MemBlock*)((Eterm*)this + need); - - /* The order here IS important! */ - rest->next = this->next; - - if (rest->next) - rest->next->prev = rest; - - rest->prev = this->prev; - - if (rest->prev) - rest->prev->next = rest; - else - inc_free_list = rest; - - rest->size = this->size - need; - } - else - { - if (this->prev) - this->prev->next = this->next; - else - inc_free_list = this->next; - - if (this->next) - this->next->prev = this->prev; - } - - if (ma_gc_flags & GC_MAJOR) { - if (need > 254) { - blackmap[(Eterm*)this - global_old_heap] = 255; - *(int*)((UWord)(&blackmap[(Eterm*)this - global_old_heap]+4) & ~3) = - need; - } else - blackmap[(Eterm*)this - global_old_heap] = need; - } - return (Eterm*)this; -} -#endif /* INCREMENTAL */ diff --git a/erts/emulator/beam/erl_nmgc.h b/erts/emulator/beam/erl_nmgc.h deleted file mode 100644 index b207dd37fa..0000000000 --- a/erts/emulator/beam/erl_nmgc.h +++ /dev/null @@ -1,364 +0,0 @@ -/* - * %CopyrightBegin% - * - * Copyright Ericsson AB 2004-2009. All Rights Reserved. - * - * The contents of this file are subject to the Erlang Public License, - * Version 1.1, (the "License"); you may not use this file except in - * compliance with the License. You should have received a copy of the - * Erlang Public License along with this software. If not, it can be - * retrieved online at http://www.erlang.org/. - * - * Software distributed under the License is distributed on an "AS IS" - * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See - * the License for the specific language governing rights and limitations - * under the License. - * - * %CopyrightEnd% - */ - -#ifndef __ERL_NMGC_H__ -#define __ERL_NMGC_H__ - -#ifdef INCREMENTAL -#include <stddef.h> /* offsetof() */ -#include "erl_process.h" - -#define INC_FULLPAGE (INC_PAGESIZE + offsetof(INC_Page,start) / sizeof(void*)) - -#define BOXED_NEED(PTR,HDR) \ - (((HDR) & _HEADER_SUBTAG_MASK) == SUB_BINARY_SUBTAG ? \ - header_arity(HDR) + 2 : \ - ((HDR) & _HEADER_SUBTAG_MASK) == FUN_SUBTAG ? 
\ - header_arity(HDR) + ((ErlFunThing*)(PTR))->num_free + 2 : \ - header_arity(HDR) + 1) - - -#define INC_DECREASE_WORK(n) inc_words_to_go -= (n); - -#define INC_COPY_CONS(FROM,TO,PTR) \ -do { \ - TO[0] = FROM[0]; \ - TO[1] = FROM[1]; \ - INC_MARK_FORWARD(FROM,TO); \ - *(PTR) = make_list(TO); \ - INC_DECREASE_WORK(2); \ - (TO) += 2; \ -} while(0) - -#define INC_COPY_BOXED(FROM,TO,PTR) \ -do { \ - Sint nelts; \ - Eterm hdr = *(FROM); \ - \ - ASSERT(is_header(hdr)); \ - INC_MARK_FORWARD(FROM,TO); \ - *(PTR) = make_boxed(TO); \ - *(TO)++ = *(FROM)++; \ - nelts = header_arity(hdr); \ - switch ((hdr) & _HEADER_SUBTAG_MASK) { \ - case SUB_BINARY_SUBTAG: nelts++; break; \ - case FUN_SUBTAG: nelts+=((ErlFunThing*)(FROM-1))->num_free+1; break;\ - } \ - INC_DECREASE_WORK(nelts + 1); \ - while (nelts--) \ - *(TO)++ = *(FROM)++; \ -} while(0) - - -/* Things copied to the old generation are not marked in the blackmap. - * This is ok since the page they are copied to (aging) is not part of - * the sweep. - */ -#define COPYMARK_CONS(FROM,TO,PTR,LIMIT) \ -do { \ - if (ptr_within(FROM,inc_fromspc,inc_fromend)) { \ - if (INC_IS_FORWARDED(FROM)) { \ - *PTR = make_list(INC_FORWARD_VALUE(FROM)); \ - } else if (TO + 2 <= LIMIT) { \ - INC_STORE(gray,TO,2); \ - INC_COPY_CONS(FROM,TO,PTR); \ - } else { \ - Eterm *hp = erts_inc_alloc(2); \ - INC_STORE(gray,hp,2); \ - INC_COPY_CONS(FROM,hp,PTR); \ - } \ - } else if (ptr_within(FROM,global_old_heap,global_old_hend) && \ - (blackmap[FROM - global_old_heap] == 0)) { \ - blackmap[FROM - global_old_heap] = 2; \ - INC_DECREASE_WORK(2); \ - INC_STORE(gray,FROM,2); \ - } \ -} while(0) - -#define COPYMARK_BOXED(FROM,TO,PTR,LIMIT) \ -do { \ - if (ptr_within(FROM,inc_fromspc,inc_fromend)) { \ - int size = BOXED_NEED(FROM,*FROM); \ - if (INC_IS_FORWARDED(FROM)) { \ - *PTR = make_boxed(INC_FORWARD_VALUE(FROM)); \ - } else if (TO + size <= LIMIT) { \ - INC_STORE(gray,TO,size); \ - INC_COPY_BOXED(FROM,TO,PTR); \ - } else { \ - Eterm *hp = erts_inc_alloc(size); \ - INC_STORE(gray,hp,size); \ - INC_COPY_BOXED(FROM,hp,PTR); \ - } \ - } else if (ptr_within(FROM,global_old_heap,global_old_hend) && \ - (blackmap[FROM - global_old_heap] == 0)) { \ - int size = BOXED_NEED(FROM,*FROM); \ - if (size > 254) { \ - blackmap[FROM - global_old_heap] = 255; \ - *(int*)((long)(&blackmap[FROM - \ - global_old_heap] + 4) & ~3) = size; \ - } else \ - blackmap[FROM - global_old_heap] = size; \ - INC_DECREASE_WORK(size); \ - INC_STORE(gray,FROM,size); \ - } \ -} while(0) - -#define INC_MARK_FORWARD(ptr,dst) fwdptrs[(ptr) - inc_fromspc] = (dst); -#define INC_IS_FORWARDED(ptr) (fwdptrs[(ptr) - inc_fromspc] != 0) -#define INC_FORWARD_VALUE(ptr) fwdptrs[(ptr) - inc_fromspc] - -/* Note for BM_TIMER: Active timer should always be 'system' when IncAlloc - * is called! - */ -#define IncAlloc(p, sz, objv, nobj) \ - (ASSERT_EXPR((sz) >= 0), \ - (((inc_alloc_limit - global_htop) <= (sz)) ? \ - erts_incremental_gc((p),(sz),(objv),(nobj)) : 0), \ - ASSERT_EXPR(global_hend - global_htop > (sz)), \ - global_htop += (sz), global_htop - (sz)) - - -/************************************************************************ - * INC_STORAGE, a dynamic circular storage for objects (INC_Object). * - * Use INC_STORE to add objects to the storage. The storage can then * - * be used either as a queue, using INC_STORAGE_GET to retreive * - * values, or as a stack, using INC_STORAGE_POP. It is OK to mix calls * - * to GET and POP if that is desired. 
* - * An iterator can be declared to traverse the storage without removing * - * any elements, and INC_STORAGE_STEP will then return each element in * - * turn, oldest first. * - ***********************************************************************/ - -/* Declare a new storage; must be in the beginning of a block. Give - * the storage a name that is used in all later calls to the storage. - * If this is an external declaration of the storage, pass the keyword - * external as the first argument, otherwise leave it empty. - */ -#define INC_STORAGE_DECLARATION(ext,name) \ - ext INC_Storage *name##head; \ - ext INC_Storage *name##tail; \ - ext INC_Object *name##free; \ - ext INC_Object *name##last_free; \ - ext int name##size; - - -/* Initialize the storage. Note that memory allocation is involved - - * don't forget to erase the storage when you are done. - */ -#define INC_STORAGE_INIT(name) do { \ - name##head = (INC_Storage*)erts_alloc(ERTS_ALC_T_OBJECT_STACK, \ - sizeof(INC_Storage)); \ - name##head->next = name##head; \ - name##head->prev = name##head; \ - name##tail = name##head; \ - name##free = name##head->data; \ - name##last_free = name##free + INC_STORAGE_SIZE - 1; \ - name##size = 0; \ -} while(0) - - -/* -#define INC_STORAGE_SWAP(s1,s2) do { \ - INC_Storage *tmphead = s1##head; \ - INC_Storage *tmptail = s1##tail; \ - INC_Object *tmpfree = s1##free; \ - INC_Object *tmplast = s1##last_free; \ - int tmpsize = s1##size; \ - s1##head = s2##head; \ - s1##tail = s2##tail; \ - s1##free = s2##free; \ - s1##last_free = s2##last_free; \ - s1##size = s2##size; \ - s2##head = tmphead; \ - s2##tail = tmptail; \ - s2##free = tmpfree; \ - s2##last_free = tmplast; \ - s2##size = tmpsize; \ -} while(0) -*/ - - -/* Return and remove the youngest element - treat the storage as a - * stack. Always check that there are elements in the queue before - * using INC_STORAGE_POP! - */ -#define INC_STORAGE_POP(name) (ASSERT_EXPR(name##size != 0), \ - name##size--, \ - (--name##free != name##head->data - 1) ? \ - name##free : (name##head = name##head->prev, \ - name##free = name##head->data + INC_STORAGE_SIZE - 1)) - - -/* Return and remove the oldest element - treat the storage as a - * queue. Always check that there are elements in the queue before - * using INC_STORAGE_GET! - */ -#define INC_STORAGE_GET(name) (ASSERT_EXPR(name##size != 0), \ - name##size--, \ - (++name##last_free != name##tail->data + INC_STORAGE_SIZE) ? \ - name##last_free : (name##tail = name##tail->next, \ - name##last_free = name##tail->data)) - - -/* Advance the head to the next free location. If the storage is full, - * a new storage is allocated and linked into the list. - */ -#define INC_STORAGE_NEXT(name) do { \ - if (name##free == name##last_free) { \ - name##tail = (INC_Storage*)erts_alloc(ERTS_ALC_T_OBJECT_STACK, \ - sizeof(INC_Storage)); \ - memcpy(name##tail->data,name##head->data, \ - INC_STORAGE_SIZE * sizeof(INC_Object)); \ - name##tail->next = name##head->next; \ - name##head->next = name##tail; \ - name##tail->prev = name##tail->next->prev; \ - name##tail->next->prev = name##tail; \ - name##last_free = ((void*)name##tail + \ - ((void*)name##last_free - (void*)name##head)); \ - } \ - name##free++; \ - name##size++; \ - if (name##free == name##head->data + INC_STORAGE_SIZE) { \ - name##head = name##head->next; \ - name##free = name##head->data; \ - } \ -} while(0) - - -/* The head of this storage is the next free location. This is where - * the next element will be stored. 
- */ -#define INC_STORAGE_HEAD(name) (name##free) - - -/* Return the top - the youngest element in the storage. */ -/* #define INC_STORAGE_TOP(name) (name##free - 1 with some magic..) */ - - -/* True if the storage is empty, false otherwise */ -#define INC_STORAGE_EMPTY(name) (name##size == 0) - - -/* Store a new element in the head of the storage and advance the head - * to the next free location. - */ -#define INC_STORE(name,ptr,sz) do { \ - INC_STORAGE_HEAD(name)->this = ptr; \ - INC_STORAGE_HEAD(name)->size = sz; \ - INC_STORAGE_NEXT(name); \ -} while(0) - - -/* An iterator. Use it together with INC_STORAGE_STEP to browse throuh - * the storage. Please note that it is not possible to remove an entry - * in the middle of the storage, use GET or POP to remove enties. - */ -#define INC_STORAGE_ITERATOR(name) \ - INC_Storage *name##iterator_head = name##tail; \ - INC_Object *name##iterator_current = name##last_free; \ - int name##iterator_left = name##size; - - -/* Return the next element in the storage (sorted by age, oldest - * first) or NULL if the storage is empty or the last element has been - * returned already. - */ -#define INC_STORAGE_STEP(name) (name##iterator_left == 0 ? NULL : \ - (name##iterator_left--, \ - (++name##iterator_current != name##iterator_head->data + \ - INC_STORAGE_SIZE) ? name##iterator_current : \ - (name##iterator_head = name##iterator_head->next, \ - name##iterator_current = name##iterator_head->data))) - - -/* Erase the storage. */ -#define INC_STORAGE_ERASE(name)do { \ - name##head->prev->next = NULL; \ - while (name##head != NULL) { \ - name##tail = name##head; \ - name##head = name##head->next; \ - erts_free(ERTS_ALC_T_OBJECT_STACK,(void*)name##tail); \ - } \ - name##tail = NULL; \ - name##free = NULL; \ - name##last_free = NULL; \ - name##size = 0; \ -} while(0) - -/* - * Structures used by the non-moving memory manager - */ - -typedef struct -{ - Eterm *this; - unsigned long size; -} INC_Object; - -typedef struct inc_storage { - struct inc_storage *next; - struct inc_storage *prev; - INC_Object data[INC_STORAGE_SIZE]; -} INC_Storage; - -typedef struct inc_mem_block -{ - unsigned long size; - struct inc_mem_block *prev; - struct inc_mem_block *next; -} INC_MemBlock; - -typedef struct inc_page -{ - struct inc_page *next; - Eterm start[1]; /* Has to be last in struct, this is where the data start */ -} INC_Page; - - -/* - * Heap pointers for the non-moving memory area. 
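Taken together, the storage macros compose as in this usage sketch with a hypothetical storage called work (illustrative only, not code from the header):

INC_STORAGE_DECLARATION(,work);

static void drain_storage(Eterm *obj, unsigned long size)
{
    INC_STORAGE_INIT(work);                     /* allocates the first chunk */
    INC_STORE(work, obj, size);                 /* enqueue one object */
    while (!INC_STORAGE_EMPTY(work)) {
        INC_Object *o = INC_STORAGE_GET(work);  /* oldest first; POP would give youngest */
        (void)o;                                /* scan o->this .. o->this + o->size here */
    }
    INC_STORAGE_ERASE(work);                    /* frees every chunk again */
}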
- */ -extern INC_Page *inc_used_mem; -extern INC_MemBlock *inc_free_list; -extern unsigned char *blackmap; - -extern Eterm **fwdptrs; -extern Eterm *inc_fromspc; -extern Eterm *inc_fromend; -extern Process *inc_active_proc; -extern Process *inc_active_last; -extern Eterm *inc_alloc_limit; -extern int inc_words_to_go; - -INC_STORAGE_DECLARATION(extern,gray); -INC_STORAGE_DECLARATION(extern,root); - -void erts_init_incgc(void); -void erts_cleanup_incgc(void); -void erts_incremental_gc(Process *p, int sz, Eterm* objv, int nobj); -Eterm *erts_inc_alloc(int need); - -#else -# define INC_STORE(lst,ptr,sz) -# define INC_MARK_FORWARD(ptr) -# define INC_IS_FORWARDED(ptr) -# define INC_FORWARD_VALUE(ptr) -#endif /* INCREMENTAL */ - -#endif /* _ERL_NMGC_H_ */ diff --git a/erts/emulator/beam/erl_node_tables.c b/erts/emulator/beam/erl_node_tables.c index 1481f66b55..c7fd379367 100644 --- a/erts/emulator/beam/erl_node_tables.c +++ b/erts/emulator/beam/erl_node_tables.c @@ -849,9 +849,6 @@ static Eterm AM_dist_references; static Eterm AM_node_references; static Eterm AM_system; static Eterm AM_timer; -#ifdef HYBRID -static Eterm AM_processes; -#endif static void setup_reference_table(void); static Eterm reference_table_term(Uint **hpp, Uint *szp); @@ -936,9 +933,6 @@ erts_get_node_and_dist_references(struct process *proc) INIT_AM(node_references); INIT_AM(timer); INIT_AM(system); -#ifdef HYBRID - INIT_AM(processes); -#endif references_atoms_need_init = 0; } @@ -1301,12 +1295,6 @@ setup_reference_table(void) SYSTEM_REF, TUPLE2(&heap[0], AM_system, am_undefined)); -#ifdef HYBRID - /* Insert Heap */ - insert_offheap(&erts_global_offheap, - HEAP_REF, - TUPLE2(&heap[0], AM_processes, am_undefined)); -#endif UnUseTmpHeapNoproc(3); /* Insert all processes */ diff --git a/erts/emulator/beam/erl_process.c b/erts/emulator/beam/erl_process.c index 95d408f79d..bd4c56eaa4 100644 --- a/erts/emulator/beam/erl_process.c +++ b/erts/emulator/beam/erl_process.c @@ -28,7 +28,6 @@ #include "erl_vm.h" #include "global.h" #include "erl_process.h" -#include "erl_nmgc.h" #include "error.h" #include "bif.h" #include "erl_db.h" @@ -52,21 +51,22 @@ #define ERTS_SCHED_SPIN_UNTIL_YIELD 100 -#define ERTS_SCHED_SYS_SLEEP_SPINCOUNT 10 +#define ERTS_SCHED_SYS_SLEEP_SPINCOUNT_VERY_LONG 40 +#define ERTS_SCHED_AUX_WORK_SLEEP_SPINCOUNT_FACT_VERY_LONG 1000 +#define ERTS_SCHED_SYS_SLEEP_SPINCOUNT_LONG 20 +#define ERTS_SCHED_AUX_WORK_SLEEP_SPINCOUNT_FACT_LONG 1000 +#define ERTS_SCHED_SYS_SLEEP_SPINCOUNT_MEDIUM 10 +#define ERTS_SCHED_AUX_WORK_SLEEP_SPINCOUNT_FACT_MEDIUM 1000 +#define ERTS_SCHED_SYS_SLEEP_SPINCOUNT_SHORT 10 +#define ERTS_SCHED_AUX_WORK_SLEEP_SPINCOUNT_FACT_SHORT 0 +#define ERTS_SCHED_SYS_SLEEP_SPINCOUNT_VERY_SHORT 5 +#define ERTS_SCHED_AUX_WORK_SLEEP_SPINCOUNT_FACT_VERY_SHORT 0 +#define ERTS_SCHED_SYS_SLEEP_SPINCOUNT_NONE 0 +#define ERTS_SCHED_AUX_WORK_SLEEP_SPINCOUNT_FACT_NONE 0 + #define ERTS_SCHED_TSE_SLEEP_SPINCOUNT_FACT 1000 -#define ERTS_SCHED_TSE_SLEEP_SPINCOUNT \ - (ERTS_SCHED_SYS_SLEEP_SPINCOUNT*ERTS_SCHED_TSE_SLEEP_SPINCOUNT_FACT) #define ERTS_SCHED_SUSPEND_SLEEP_SPINCOUNT 0 -#define ERTS_WAKEUP_OTHER_LIMIT_VERY_HIGH (200*CONTEXT_REDS) -#define ERTS_WAKEUP_OTHER_LIMIT_HIGH (50*CONTEXT_REDS) -#define ERTS_WAKEUP_OTHER_LIMIT_MEDIUM (10*CONTEXT_REDS) -#define ERTS_WAKEUP_OTHER_LIMIT_LOW (CONTEXT_REDS) -#define ERTS_WAKEUP_OTHER_LIMIT_VERY_LOW (CONTEXT_REDS/10) - -#define ERTS_WAKEUP_OTHER_DEC 10 -#define ERTS_WAKEUP_OTHER_FIXED_INC (CONTEXT_REDS/10) - #if 0 || defined(DEBUG) #define 
ERTS_FAKE_SCHED_BIND_PRINT_SORTED_CPU_DATA #endif @@ -123,14 +123,18 @@ Uint erts_no_schedulers; Uint erts_max_processes = ERTS_DEFAULT_MAX_PROCESSES; Uint erts_process_tab_index_mask; -static int wakeup_other_limit; - int erts_sched_thread_suggested_stack_size = -1; #ifdef ERTS_ENABLE_LOCK_CHECK ErtsLcPSDLocks erts_psd_required_locks[ERTS_PSD_SIZE]; #endif +static struct { + int aux_work; + int tse; + int sys_schedule; +} sched_busy_wait; + #ifdef ERTS_SMP int erts_disable_proc_not_running_opt; @@ -240,11 +244,6 @@ struct erts_system_monitor_flags_t erts_system_monitor_flags; Eterm erts_system_profile; struct erts_system_profile_flags_t erts_system_profile_flags; -#ifdef HYBRID -Uint erts_num_active_procs; -Process** erts_active_procs; -#endif - #if ERTS_MAX_PROCESSES > 0x7fffffff #error "Need to store process_count in another type" #endif @@ -467,12 +466,6 @@ erts_init_process(int ncpu) process_tab = (Process**) erts_alloc(ERTS_ALC_T_PROC_TABLE, erts_max_processes*sizeof(Process*)); sys_memzero(process_tab, erts_max_processes * sizeof(Process*)); -#ifdef HYBRID - erts_active_procs = (Process**) - erts_alloc(ERTS_ALC_T_ACTIVE_PROCS, - erts_max_processes * sizeof(Process*)); - erts_num_active_procs = 0; -#endif erts_smp_mtx_init(&proc_tab_mtx, "proc_tab"); p_last = -1; @@ -2046,7 +2039,7 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq) erts_smp_runq_unlock(rq); - spincount = ERTS_SCHED_TSE_SLEEP_SPINCOUNT; + spincount = sched_busy_wait.tse; tse_wait: @@ -2097,7 +2090,7 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq) } flgs = sched_prep_cont_spin_wait(ssi); - spincount = ERTS_SCHED_TSE_SLEEP_SPINCOUNT; + spincount = sched_busy_wait.aux_work; if (!(flgs & ERTS_SSI_FLG_WAITING)) { ASSERT(!(flgs & ERTS_SSI_FLG_SLEEPING)); @@ -2134,7 +2127,9 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq) ASSERT(working); sched_wall_time_change(esdp, working = 0); - spincount = ERTS_SCHED_SYS_SLEEP_SPINCOUNT; + spincount = sched_busy_wait.sys_schedule; + if (spincount == 0) + goto sys_aux_work; while (spincount-- > 0) { @@ -3560,31 +3555,280 @@ erts_debug_nbalance(void) #endif } +/* Wakeup other schedulers */ + +typedef enum { + ERTS_SCHED_WAKEUP_OTHER_THRESHOLD_VERY_HIGH, + ERTS_SCHED_WAKEUP_OTHER_THRESHOLD_HIGH, + ERTS_SCHED_WAKEUP_OTHER_THRESHOLD_MEDIUM, + ERTS_SCHED_WAKEUP_OTHER_THRESHOLD_LOW, + ERTS_SCHED_WAKEUP_OTHER_THRESHOLD_VERY_LOW +} ErtsSchedWakeupOtherThreshold; + +typedef enum { + ERTS_SCHED_WAKEUP_OTHER_TYPE_PROPOSAL, + ERTS_SCHED_WAKEUP_OTHER_TYPE_LEGACY +} ErtsSchedWakeupOtherType; + +/* First proposal */ + +#define ERTS_WAKEUP_OTHER_LIMIT_VERY_HIGH (200*CONTEXT_REDS) +#define ERTS_WAKEUP_OTHER_LIMIT_HIGH (50*CONTEXT_REDS) +#define ERTS_WAKEUP_OTHER_LIMIT_MEDIUM (10*CONTEXT_REDS) +#define ERTS_WAKEUP_OTHER_LIMIT_LOW (CONTEXT_REDS) +#define ERTS_WAKEUP_OTHER_LIMIT_VERY_LOW (CONTEXT_REDS/10) + +#define ERTS_WAKEUP_OTHER_DEC_SHIFT_VERY_HIGH 3 +#define ERTS_WAKEUP_OTHER_DEC_SHIFT_HIGH 1 +#define ERTS_WAKEUP_OTHER_DEC_SHIFT_MEDIUM 0 +#define ERTS_WAKEUP_OTHER_DEC_SHIFT_LOW -2 +#define ERTS_WAKEUP_OTHER_DEC_SHIFT_VERY_LOW -5 + +#define ERTS_WAKEUP_OTHER_DEC_SHIFT 2 +#define ERTS_WAKEUP_OTHER_FIXED_INC (CONTEXT_REDS/10) + +/* To be legacy */ + +#define ERTS_WAKEUP_OTHER_LIMIT_VERY_HIGH_LEGACY (200*CONTEXT_REDS) +#define ERTS_WAKEUP_OTHER_LIMIT_HIGH_LEGACY (50*CONTEXT_REDS) +#define ERTS_WAKEUP_OTHER_LIMIT_MEDIUM_LEGACY (10*CONTEXT_REDS) +#define ERTS_WAKEUP_OTHER_LIMIT_LOW_LEGACY (CONTEXT_REDS) +#define 
ERTS_WAKEUP_OTHER_LIMIT_VERY_LOW_LEGACY (CONTEXT_REDS/10) + +#define ERTS_WAKEUP_OTHER_DEC_LEGACY 10 +#define ERTS_WAKEUP_OTHER_FIXED_INC_LEGACY (CONTEXT_REDS/10) + +#ifdef ERTS_SMP + +static struct { + ErtsSchedWakeupOtherThreshold threshold; + ErtsSchedWakeupOtherType type; + int limit; + int dec_shift; + int dec_mask; + void (*check)(ErtsRunQueue *rq); +} wakeup_other; + +static void +wakeup_other_check(ErtsRunQueue *rq) +{ + int wo_reds = rq->wakeup_other_reds; + if (wo_reds) { + int left_len = rq->len - 1; + if (left_len < 1) { + int wo_reduce = wo_reds << wakeup_other.dec_shift; + wo_reduce &= wakeup_other.dec_mask; + rq->wakeup_other -= wo_reduce; + if (rq->wakeup_other < 0) + rq->wakeup_other = 0; + } + else { + rq->wakeup_other += (left_len*wo_reds + + ERTS_WAKEUP_OTHER_FIXED_INC); + if (rq->wakeup_other > wakeup_other.limit) { + int empty_rqs = + erts_smp_atomic32_read_acqb(&no_empty_run_queues); + if (empty_rqs != 0) + wake_scheduler_on_empty_runq(rq); + rq->wakeup_other = 0; + } + } + rq->wakeup_other_reds = 0; + } +} + +static void +wakeup_other_set_limit(void) +{ + switch (wakeup_other.threshold) { + case ERTS_SCHED_WAKEUP_OTHER_THRESHOLD_VERY_HIGH: + wakeup_other.limit = ERTS_WAKEUP_OTHER_LIMIT_VERY_HIGH; + wakeup_other.dec_shift = ERTS_WAKEUP_OTHER_DEC_SHIFT_VERY_HIGH; + break; + case ERTS_SCHED_WAKEUP_OTHER_THRESHOLD_HIGH: + wakeup_other.limit = ERTS_WAKEUP_OTHER_LIMIT_HIGH; + wakeup_other.dec_shift = ERTS_WAKEUP_OTHER_DEC_SHIFT_HIGH; + break; + case ERTS_SCHED_WAKEUP_OTHER_THRESHOLD_MEDIUM: + wakeup_other.limit = ERTS_WAKEUP_OTHER_LIMIT_MEDIUM; + wakeup_other.dec_shift = ERTS_WAKEUP_OTHER_DEC_SHIFT_MEDIUM; + break; + case ERTS_SCHED_WAKEUP_OTHER_THRESHOLD_LOW: + wakeup_other.limit = ERTS_WAKEUP_OTHER_LIMIT_LOW; + wakeup_other.dec_shift = ERTS_WAKEUP_OTHER_DEC_SHIFT_LOW; + break; + case ERTS_SCHED_WAKEUP_OTHER_THRESHOLD_VERY_LOW: + wakeup_other.limit = ERTS_WAKEUP_OTHER_LIMIT_VERY_LOW; + wakeup_other.dec_shift = ERTS_WAKEUP_OTHER_DEC_SHIFT_VERY_LOW; + break; + } + if (wakeup_other.dec_shift < 0) + wakeup_other.dec_mask = (1 << (sizeof(wakeup_other.dec_mask)*8 + + wakeup_other.dec_shift)) - 1; + else { + wakeup_other.dec_mask = 0; + wakeup_other.dec_mask = ~wakeup_other.dec_mask; + } +} + +static void +wakeup_other_check_legacy(ErtsRunQueue *rq) +{ + int wo_reds = rq->wakeup_other_reds; + if (wo_reds) { + if (rq->len < 2) { + rq->wakeup_other -= ERTS_WAKEUP_OTHER_DEC_LEGACY*wo_reds; + if (rq->wakeup_other < 0) + rq->wakeup_other = 0; + } + else if (rq->wakeup_other < wakeup_other.limit) + rq->wakeup_other += rq->len*wo_reds + ERTS_WAKEUP_OTHER_FIXED_INC_LEGACY; + else { + if (erts_smp_atomic32_read_acqb(&no_empty_run_queues) != 0) { + wake_scheduler_on_empty_runq(rq); + rq->wakeup_other = 0; + } + rq->wakeup_other = 0; + } + } + rq->wakeup_other_reds = 0; +} + +static void +wakeup_other_set_limit_legacy(void) +{ + switch (wakeup_other.threshold) { + case ERTS_SCHED_WAKEUP_OTHER_THRESHOLD_VERY_HIGH: + wakeup_other.limit = ERTS_WAKEUP_OTHER_LIMIT_VERY_HIGH_LEGACY; + break; + case ERTS_SCHED_WAKEUP_OTHER_THRESHOLD_HIGH: + wakeup_other.limit = ERTS_WAKEUP_OTHER_LIMIT_HIGH_LEGACY; + break; + case ERTS_SCHED_WAKEUP_OTHER_THRESHOLD_MEDIUM: + wakeup_other.limit = ERTS_WAKEUP_OTHER_LIMIT_MEDIUM_LEGACY; + break; + case ERTS_SCHED_WAKEUP_OTHER_THRESHOLD_LOW: + wakeup_other.limit = ERTS_WAKEUP_OTHER_LIMIT_LOW_LEGACY; + break; + case ERTS_SCHED_WAKEUP_OTHER_THRESHOLD_VERY_LOW: + wakeup_other.limit = ERTS_WAKEUP_OTHER_LIMIT_VERY_LOW_LEGACY; + break; + } +} + +static void 
+set_wakeup_other_data(void) +{ + switch (wakeup_other.type) { + case ERTS_SCHED_WAKEUP_OTHER_TYPE_PROPOSAL: + wakeup_other.check = wakeup_other_check; + wakeup_other_set_limit(); + break; + case ERTS_SCHED_WAKEUP_OTHER_TYPE_LEGACY: + wakeup_other.check = wakeup_other_check_legacy; + wakeup_other_set_limit_legacy(); + break; + } +} + +#endif + void erts_early_init_scheduling(int no_schedulers) { aux_work_timeout_early_init(no_schedulers); - wakeup_other_limit = ERTS_WAKEUP_OTHER_LIMIT_MEDIUM; +#ifdef ERTS_SMP + wakeup_other.threshold = ERTS_SCHED_WAKEUP_OTHER_THRESHOLD_MEDIUM; + wakeup_other.type = ERTS_SCHED_WAKEUP_OTHER_TYPE_LEGACY; +#endif + sched_busy_wait.sys_schedule = ERTS_SCHED_SYS_SLEEP_SPINCOUNT_MEDIUM; + sched_busy_wait.tse = (ERTS_SCHED_SYS_SLEEP_SPINCOUNT_MEDIUM + * ERTS_SCHED_TSE_SLEEP_SPINCOUNT_FACT); + sched_busy_wait.aux_work = (ERTS_SCHED_SYS_SLEEP_SPINCOUNT_MEDIUM + * ERTS_SCHED_AUX_WORK_SLEEP_SPINCOUNT_FACT_MEDIUM); } int -erts_sched_set_wakeup_limit(char *str) +erts_sched_set_wakeup_other_thresold(char *str) { + ErtsSchedWakeupOtherThreshold threshold; if (sys_strcmp(str, "very_high") == 0) - wakeup_other_limit = ERTS_WAKEUP_OTHER_LIMIT_VERY_HIGH; + threshold = ERTS_SCHED_WAKEUP_OTHER_THRESHOLD_VERY_HIGH; else if (sys_strcmp(str, "high") == 0) - wakeup_other_limit = ERTS_WAKEUP_OTHER_LIMIT_HIGH; + threshold = ERTS_SCHED_WAKEUP_OTHER_THRESHOLD_HIGH; else if (sys_strcmp(str, "medium") == 0) - wakeup_other_limit = ERTS_WAKEUP_OTHER_LIMIT_MEDIUM; + threshold = ERTS_SCHED_WAKEUP_OTHER_THRESHOLD_MEDIUM; else if (sys_strcmp(str, "low") == 0) - wakeup_other_limit = ERTS_WAKEUP_OTHER_LIMIT_LOW; + threshold = ERTS_SCHED_WAKEUP_OTHER_THRESHOLD_LOW; else if (sys_strcmp(str, "very_low") == 0) - wakeup_other_limit = ERTS_WAKEUP_OTHER_LIMIT_VERY_LOW; + threshold = ERTS_SCHED_WAKEUP_OTHER_THRESHOLD_VERY_LOW; + else + return EINVAL; +#ifdef ERTS_SMP + wakeup_other.threshold = threshold; + set_wakeup_other_data(); +#endif + return 0; +} + +int +erts_sched_set_wakeup_other_type(char *str) +{ + ErtsSchedWakeupOtherType type; + if (sys_strcmp(str, "proposal") == 0) + type = ERTS_SCHED_WAKEUP_OTHER_TYPE_PROPOSAL; + else if (sys_strcmp(str, "default") == 0) + type = ERTS_SCHED_WAKEUP_OTHER_TYPE_LEGACY; + else if (sys_strcmp(str, "legacy") == 0) + type = ERTS_SCHED_WAKEUP_OTHER_TYPE_LEGACY; else return EINVAL; +#ifdef ERTS_SMP + wakeup_other.type = type; +#endif return 0; } +int +erts_sched_set_busy_wait_threshold(char *str) +{ + int sys_sched; + int aux_work_fact; + + if (sys_strcmp(str, "very_long") == 0) { + sys_sched = ERTS_SCHED_SYS_SLEEP_SPINCOUNT_VERY_LONG; + aux_work_fact = ERTS_SCHED_AUX_WORK_SLEEP_SPINCOUNT_FACT_VERY_LONG; + } + else if (sys_strcmp(str, "long") == 0) { + sys_sched = ERTS_SCHED_SYS_SLEEP_SPINCOUNT_LONG; + aux_work_fact = ERTS_SCHED_AUX_WORK_SLEEP_SPINCOUNT_FACT_LONG; + } + else if (sys_strcmp(str, "medium") == 0) { + sys_sched = ERTS_SCHED_SYS_SLEEP_SPINCOUNT_MEDIUM; + aux_work_fact = ERTS_SCHED_AUX_WORK_SLEEP_SPINCOUNT_FACT_MEDIUM; + } + else if (sys_strcmp(str, "short") == 0) { + sys_sched = ERTS_SCHED_SYS_SLEEP_SPINCOUNT_SHORT; + aux_work_fact = ERTS_SCHED_AUX_WORK_SLEEP_SPINCOUNT_FACT_SHORT; + } + else if (sys_strcmp(str, "very_short") == 0) { + sys_sched = ERTS_SCHED_SYS_SLEEP_SPINCOUNT_VERY_SHORT; + aux_work_fact = ERTS_SCHED_AUX_WORK_SLEEP_SPINCOUNT_FACT_VERY_SHORT; + } + else if (sys_strcmp(str, "none") == 0) { + sys_sched = ERTS_SCHED_SYS_SLEEP_SPINCOUNT_NONE; + aux_work_fact = ERTS_SCHED_AUX_WORK_SLEEP_SPINCOUNT_FACT_NONE; + } + else { + return EINVAL; + } 
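/* Worked numbers for orientation (illustrative arithmetic only): "medium"
 * selects sys_sched = 10, so below tse becomes 10 * 1000 = 10000 spins
 * before a scheduler sleeps on its thread event and aux_work becomes
 * 10 * 1000 = 10000 spins while auxiliary work is pending, whereas "none"
 * yields zero for all three budgets and the busy-wait phases are skipped
 * entirely. */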
+ + sched_busy_wait.sys_schedule = sys_sched; + sched_busy_wait.tse = sys_sched*ERTS_SCHED_TSE_SLEEP_SPINCOUNT_FACT; + sched_busy_wait.aux_work = sys_sched*aux_work_fact; + + return 0; +} static void init_aux_work_data(ErtsAuxWorkData *awdp, ErtsSchedulerData *esdp) { @@ -3613,6 +3857,10 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online) init_misc_op_list_alloc(); +#ifdef ERTS_SMP + set_wakeup_other_data(); +#endif + ASSERT(no_schedulers_online <= no_schedulers); ASSERT(no_schedulers_online >= 1); ASSERT(no_schedulers >= 1); @@ -6502,26 +6750,7 @@ Process *schedule(Process *p, int calls) exec_misc_ops(rq); #ifdef ERTS_SMP - { - int wo_reds = rq->wakeup_other_reds; - if (wo_reds) { - if (rq->len < 2) { - rq->wakeup_other -= ERTS_WAKEUP_OTHER_DEC*wo_reds; - if (rq->wakeup_other < 0) - rq->wakeup_other = 0; - } - else if (rq->wakeup_other < wakeup_other_limit) - rq->wakeup_other += rq->len*wo_reds + ERTS_WAKEUP_OTHER_FIXED_INC; - else { - if (erts_smp_atomic32_read_acqb(&no_empty_run_queues) != 0) { - wake_scheduler_on_empty_runq(rq); - rq->wakeup_other = 0; - } - rq->wakeup_other = 0; - } - } - rq->wakeup_other_reds = 0; - } + wakeup_other.check(rq); #endif /* @@ -6685,7 +6914,6 @@ Process *schedule(Process *p, int calls) #endif ASSERT(p->status != P_SUSPENDED); /* Never run a suspended process */ - ACTIVATE(p); reds = context_reds; if (IS_TRACED(p)) { @@ -6726,7 +6954,6 @@ Process *schedule(Process *p, int calls) } p->fcalls = reds; - ASSERT(IS_ACTIVE(p)); ERTS_SMP_CHK_HAVE_ONLY_MAIN_PROC_LOCK(p); return p; } @@ -7074,9 +7301,7 @@ erl_create_process(Process* parent, /* Parent of process (default group leader). ErtsRunQueue *rq, *notify_runq; Process *p; Sint arity; /* Number of arguments. */ -#ifndef HYBRID Uint arg_size; /* Size of arguments. */ -#endif Uint sz; /* Needed words on heap. */ Uint heap_need; /* Size needed on heap. */ Eterm res = THE_NON_VALUE; @@ -7085,17 +7310,6 @@ erl_create_process(Process* parent, /* Parent of process (default group leader). erts_smp_proc_lock(parent, ERTS_PROC_LOCKS_ALL_MINOR); #endif -#ifdef HYBRID - /* - * Copy the arguments to the global heap - * Since global GC might occur we want to do this before adding the - * new process to the process_tab. - */ - BM_SWAP_TIMER(system,copy); - LAZY_COPY(parent,args); - BM_SWAP_TIMER(copy,system); - heap_need = 0; -#endif /* HYBRID */ /* * Check for errors. */ @@ -7118,12 +7332,10 @@ erl_create_process(Process* parent, /* Parent of process (default group leader). #endif BM_COUNT(processes_spawned); -#ifndef HYBRID BM_SWAP_TIMER(system,size); arg_size = size_object(args); BM_SWAP_TIMER(size,system); heap_need = arg_size; -#endif p->flags = erts_default_process_flags; @@ -7174,9 +7386,6 @@ erl_create_process(Process* parent, /* Parent of process (default group leader). p->heap = (Eterm *) ERTS_HEAP_ALLOC(ERTS_ALC_T_HEAP, sizeof(Eterm)*sz); p->old_hend = p->old_htop = p->old_heap = NULL; p->high_water = p->heap; -#ifdef INCREMENTAL - p->scan_top = p->high_water; -#endif p->gen_gcs = 0; p->stop = p->hend = p->heap + sz; p->htop = p->heap; @@ -7202,19 +7411,10 @@ erl_create_process(Process* parent, /* Parent of process (default group leader). 
BM_STOP_TIMER(system); BM_MESSAGE(args,p,parent); BM_START_TIMER(system); -#ifdef HYBRID - p->arg_reg[2] = args; -#ifdef INCREMENTAL - p->active = 0; - if (ptr_val(args) >= inc_fromspc && ptr_val(args) < inc_fromend) - INC_ACTIVATE(p); -#endif -#else BM_SWAP_TIMER(system,copy); p->arg_reg[2] = copy_struct(args, arg_size, &p->htop, &p->off_heap); BM_MESSAGE_COPIED(arg_size); BM_SWAP_TIMER(copy,system); -#endif p->arity = 3; p->fvalue = NIL; @@ -7272,13 +7472,6 @@ erl_create_process(Process* parent, /* Parent of process (default group leader). #endif p->parent = parent->id == ERTS_INVALID_PID ? NIL : parent->id; -#ifdef HYBRID - p->rrma = NULL; - p->rrsrc = NULL; - p->nrr = 0; - p->rrsz = 0; -#endif - INIT_HOLE_CHECK(p); #ifdef DEBUG p->last_old_htop = NULL; @@ -7347,15 +7540,6 @@ erl_create_process(Process* parent, /* Parent of process (default group leader). so->mref = mref; } -#ifdef HYBRID - /* - * Add process to the array of active processes. - */ - ACTIVATE(p); - p->active_index = erts_num_active_procs++; - erts_active_procs[p->active_index] = p; -#endif - #ifdef ERTS_SMP p->scheduler_data = NULL; p->is_exiting = 0; @@ -7466,9 +7650,6 @@ void erts_init_empty_process(Process *p) p->reg = NULL; p->heap_sz = 0; p->high_water = NULL; -#ifdef INCREMENTAL - p->scan_top = NULL; -#endif p->old_hend = NULL; p->old_htop = NULL; p->old_heap = NULL; @@ -7520,14 +7701,6 @@ void erts_init_empty_process(Process *p) #endif #endif - ACTIVATE(p); - -#ifdef HYBRID - p->rrma = NULL; - p->rrsrc = NULL; - p->nrr = 0; - p->rrsz = 0; -#endif INIT_HOLE_CHECK(p); #ifdef DEBUG p->last_old_htop = NULL; @@ -7575,9 +7748,6 @@ erts_debug_verify_clean_empty_process(Process* p) ASSERT(p->reg == NULL); ASSERT(p->heap_sz == 0); ASSERT(p->high_water == NULL); -#ifdef INCREMENTAL - ASSERT(p->scan_top == NULL); -#endif ASSERT(p->old_hend == NULL); ASSERT(p->old_htop == NULL); ASSERT(p->old_heap == NULL); @@ -7725,22 +7895,6 @@ delete_process(Process* p) ASSERT(!p->suspend_monitors); p->fvalue = NIL; - -#ifdef HYBRID - erts_active_procs[p->active_index] = - erts_active_procs[--erts_num_active_procs]; - erts_active_procs[p->active_index]->active_index = p->active_index; -#ifdef INCREMENTAL - if (INC_IS_ACTIVE(p)) - INC_DEACTIVATE(p); -#endif - - if (p->rrma != NULL) { - erts_free(ERTS_ALC_T_ROOTSET,p->rrma); - erts_free(ERTS_ALC_T_ROOTSET,p->rrsrc); - } -#endif - } static ERTS_INLINE void @@ -8112,7 +8266,6 @@ send_exit_signal(Process *c_p, /* current process if and only set_proc_exiting(rp, is_immed(rsn) ? rsn : copy_object(rsn, rp), NULL); - ACTIVATE(rp); if (old_status != P_RUNABLE && old_status != P_RUNNING) erts_add_to_runq(rp); } diff --git a/erts/emulator/beam/erl_process.h b/erts/emulator/beam/erl_process.h index ca194ab90c..5b79c40d93 100644 --- a/erts/emulator/beam/erl_process.h +++ b/erts/emulator/beam/erl_process.h @@ -749,24 +749,6 @@ struct process { #endif #endif -#ifdef HYBRID - Eterm *rrma; /* Remembered roots to Message Area */ - Eterm **rrsrc; /* The source of the root */ - Uint nrr; /* Number of remembered roots */ - Uint rrsz; /* Size of root array */ -#endif - -#ifdef HYBRID - Uint active; /* Active since last major collection? */ - Uint active_index; /* Index in the active process array */ -#endif - -#ifdef INCREMENTAL - Process *active_next; /* Active processes to scan for roots */ - Process *active_prev; /* in collection of the message area */ - Eterm *scan_top; -#endif - #ifdef CHECK_FOR_HOLES Eterm* last_htop; /* No need to scan the heap below this point. 
*/ ErlHeapFragment* last_mbuf; /* No need to scan beyond this mbuf. */ @@ -888,10 +870,6 @@ Eterm* erts_set_hole_marker(Eterm* ptr, Uint sz); #endif extern Process** process_tab; -#ifdef HYBRID -extern Uint erts_num_active_procs; -extern Process** erts_active_procs; -#endif extern Uint erts_max_processes; extern Uint erts_process_tab_index_mask; extern Uint erts_default_process_flags; @@ -1096,7 +1074,9 @@ ErtsProcList *erts_proclist_create(Process *); void erts_proclist_destroy(ErtsProcList *); int erts_proclist_same(ErtsProcList *, Process *); -int erts_sched_set_wakeup_limit(char *str); +int erts_sched_set_wakeup_other_thresold(char *str); +int erts_sched_set_wakeup_other_type(char *str); +int erts_sched_set_busy_wait_threshold(char *str); #if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK) int erts_dbg_check_halloc_lock(Process *p); diff --git a/erts/emulator/beam/erl_vm.h b/erts/emulator/beam/erl_vm.h index 5dc307e383..a0e12e57f2 100644 --- a/erts/emulator/beam/erl_vm.h +++ b/erts/emulator/beam/erl_vm.h @@ -29,19 +29,10 @@ /* #define FORCE_HEAP_FRAGS */ -#if defined(HYBRID) -/* # define CHECK_FOR_HOLES */ -#endif - #if defined(DEBUG) && !defined(CHECK_FOR_HOLES) && !defined(__WIN32__) # define CHECK_FOR_HOLES #endif -#if defined(HYBRID) -/* # define INCREMENTAL 1 */ /* Incremental garbage collection */ -/* # define INC_TIME_BASED 1 */ /* Time-based incremental GC (vs Work-based) */ -#endif - #define BEAM 1 #define EMULATOR "BEAM" #define SEQ_TRACE 1 @@ -70,16 +61,6 @@ #define H_DEFAULT_SIZE 233 /* default (heap + stack) min size */ #define VH_DEFAULT_SIZE 32768 /* default virtual (bin) heap min size (words) */ -#ifdef HYBRID -# define SH_DEFAULT_SIZE 2629425 /* default message area min size */ -#endif - -#ifdef INCREMENTAL -# define INC_NoPAGES 256 /* Number of pages in the old generation */ -# define INC_PAGESIZE 32768 /* The size of each page */ -# define INC_STORAGE_SIZE 1024 /* The size of gray stack and similar */ -#endif - #define CP_SIZE 1 #define ErtsHAllocLockCheck(P) \ diff --git a/erts/emulator/beam/external.c b/erts/emulator/beam/external.c index 44abc83d6d..4348578694 100644 --- a/erts/emulator/beam/external.c +++ b/erts/emulator/beam/external.c @@ -2626,14 +2626,12 @@ dec_term_atom_common: } old_uniq = unsigned_val(temp); -#ifndef HYBRID /* FIND ME! */ /* * It is safe to link the fun into the fun list only when * no more validity tests can fail. */ funp->next = off_heap->first; off_heap->first = (struct erl_off_heap_header*)funp; -#endif funp->fe = erts_put_fun_entry2(module, old_uniq, old_index, uniq, index, arity); @@ -2704,14 +2702,12 @@ dec_term_atom_common: goto error; } -#ifndef HYBRID /* FIND ME! */ /* * It is safe to link the fun into the fun list only when * no more validity tests can fail. 
*/ funp->next = off_heap->first; off_heap->first = (struct erl_off_heap_header*)funp; -#endif old_uniq = unsigned_val(temp); funp->fe = erts_put_fun_entry(module, old_uniq, old_index); diff --git a/erts/emulator/beam/global.h b/erts/emulator/beam/global.h index 894872dbc0..1b15c4ac3b 100755 --- a/erts/emulator/beam/global.h +++ b/erts/emulator/beam/global.h @@ -565,92 +565,6 @@ extern erts_smp_atomic32_t erts_max_gen_gcs; extern int erts_disable_tolerant_timeofday; -#ifdef HYBRID - -/* Message Area heap pointers */ -extern Eterm *global_heap; /* Heap start */ -extern Eterm *global_hend; /* Heap end */ -extern Eterm *global_htop; /* Heap top (heap pointer) */ -extern Eterm *global_saved_htop; /* Saved heap top (heap pointer) */ -extern Uint global_heap_sz; /* Heap size, in words */ -extern Eterm *global_old_heap; /* Old generation */ -extern Eterm *global_old_hend; -extern ErlOffHeap erts_global_offheap; /* Global MSO (OffHeap) list */ - -extern Uint16 global_gen_gcs; -extern Uint16 global_max_gen_gcs; -extern Uint global_gc_flags; - -#ifdef INCREMENTAL -#define ACTIVATE(p) -#define DEACTIVATE(p) -#define IS_ACTIVE(p) 1 - -#define INC_ACTIVATE(p) do { \ - if ((p)->active) { \ - if ((p)->active_next != NULL) { \ - (p)->active_next->active_prev = (p)->active_prev; \ - if ((p)->active_prev) { \ - (p)->active_prev->active_next = (p)->active_next; \ - } else { \ - inc_active_proc = (p)->active_next; \ - } \ - inc_active_last->active_next = (p); \ - (p)->active_next = NULL; \ - (p)->active_prev = inc_active_last; \ - inc_active_last = (p); \ - } \ - } else { \ - (p)->active_next = NULL; \ - (p)->active_prev = inc_active_last; \ - if (inc_active_last) { \ - inc_active_last->active_next = (p); \ - } else { \ - inc_active_proc = (p); \ - } \ - inc_active_last = (p); \ - (p)->active = 1; \ - } \ -} while(0); - -#define INC_DEACTIVATE(p) do { \ - ASSERT((p)->active == 1); \ - if ((p)->active_next == NULL) { \ - inc_active_last = (p)->active_prev; \ - } else { \ - (p)->active_next->active_prev = (p)->active_prev; \ - } \ - if ((p)->active_prev == NULL) { \ - inc_active_proc = (p)->active_next; \ - } else { \ - (p)->active_prev->active_next = (p)->active_next; \ - } \ - (p)->active = 0; \ -} while(0); - -#define INC_IS_ACTIVE(p) ((p)->active != 0) - -#else -extern Eterm *global_old_htop; -extern Eterm *global_high_water; -#define ACTIVATE(p) (p)->active = 1; -#define DEACTIVATE(p) (p)->active = 0; -#define IS_ACTIVE(p) ((p)->active != 0) -#define INC_ACTIVATE(p) -#define INC_IS_ACTIVE(p) 1 -#endif /* INCREMENTAL */ - -#else -# define ACTIVATE(p) -# define DEACTIVATE(p) -# define IS_ACTIVE(p) 1 -# define INC_ACTIVATE(p) -#endif /* HYBRID */ - -#ifdef HYBRID -extern Uint global_heap_min_sz; -#endif - extern int bif_reductions; /* reductions + fcalls (when doing call_bif) */ extern int stackdump_on_exit; @@ -906,7 +820,6 @@ __decl_noreturn void __noreturn erl_exit_flush_async(int n, char*, ...); void erl_error(char*, va_list); /* copy.c */ -void init_copy(void); Eterm copy_object(Eterm, Process*); #if HALFWORD_HEAP @@ -936,116 +849,6 @@ Eterm copy_shallow(Eterm*, Uint, Eterm**, ErlOffHeap*); void move_multi_frags(Eterm** hpp, ErlOffHeap*, ErlHeapFragment* first, Eterm* refs, unsigned nrefs); -#ifdef HYBRID -#define RRMA_DEFAULT_SIZE 256 -#define RRMA_STORE(p,ptr,src) do { \ - ASSERT((p)->rrma != NULL); \ - ASSERT((p)->rrsrc != NULL); \ - (p)->rrma[(p)->nrr] = (ptr); \ - (p)->rrsrc[(p)->nrr++] = (src); \ - if ((p)->nrr == (p)->rrsz) \ - { \ - (p)->rrsz *= 2; \ - (p)->rrma = (Eterm *) 
erts_realloc(ERTS_ALC_T_ROOTSET, \ - (void*)(p)->rrma, \ - sizeof(Eterm) * (p)->rrsz); \ - (p)->rrsrc = (Eterm **) erts_realloc(ERTS_ALC_T_ROOTSET, \ - (void*)(p)->rrsrc, \ - sizeof(Eterm) * (p)->rrsz); \ - } \ -} while(0) - -/* Note that RRMA_REMOVE decreases the given index after deletion. - * This is done so that a loop with an increasing index can call - * remove without having to decrease the index to see the element - * placed in the hole after the deleted element. - */ -#define RRMA_REMOVE(p,index) do { \ - p->rrsrc[index] = p->rrsrc[--p->nrr]; \ - p->rrma[index--] = p->rrma[p->nrr]; \ - } while(0); - - -/* The MessageArea STACKs are used while copying messages to the - * message area. - */ -#define MA_STACK_EXTERNAL_DECLARE(type,_s_) \ - typedef type ma_##_s_##_type; \ - extern ma_##_s_##_type *ma_##_s_##_stack; \ - extern Uint ma_##_s_##_top; \ - extern Uint ma_##_s_##_size; - -#define MA_STACK_DECLARE(_s_) \ - ma_##_s_##_type *ma_##_s_##_stack; Uint ma_##_s_##_top; Uint ma_##_s_##_size; - -#define MA_STACK_ALLOC(_s_) do { \ - ma_##_s_##_top = 0; \ - ma_##_s_##_size = 512; \ - ma_##_s_##_stack = (ma_##_s_##_type*)erts_alloc(ERTS_ALC_T_OBJECT_STACK, \ - sizeof(ma_##_s_##_type) * ma_##_s_##_size); \ -} while(0) - - -#define MA_STACK_PUSH(_s_,val) do { \ - ma_##_s_##_stack[ma_##_s_##_top++] = (val); \ - if (ma_##_s_##_top == ma_##_s_##_size) \ - { \ - ma_##_s_##_size *= 2; \ - ma_##_s_##_stack = \ - (ma_##_s_##_type*) erts_realloc(ERTS_ALC_T_OBJECT_STACK, \ - (void*)ma_##_s_##_stack, \ - sizeof(ma_##_s_##_type) * ma_##_s_##_size); \ - } \ -} while(0) - -#define MA_STACK_POP(_s_) (ma_##_s_##_top != 0 ? ma_##_s_##_stack[--ma_##_s_##_top] : 0) -#define MA_STACK_TOP(_s_) (ma_##_s_##_stack[ma_##_s_##_top - 1]) -#define MA_STACK_UPDATE(_s_,offset,value) \ - *(ma_##_s_##_stack[ma_##_s_##_top - 1] + (offset)) = (value) -#define MA_STACK_SIZE(_s_) (ma_##_s_##_top) -#define MA_STACK_ELM(_s_,i) ma_##_s_##_stack[i] - -MA_STACK_EXTERNAL_DECLARE(Eterm,src); -MA_STACK_EXTERNAL_DECLARE(Eterm*,dst); -MA_STACK_EXTERNAL_DECLARE(Uint,offset); - - -#ifdef INCREMENTAL -extern Eterm *ma_pending_stack; -extern Uint ma_pending_top; -extern Uint ma_pending_size; - -#define NO_COPY(obj) (IS_CONST(obj) || \ - (((ptr_val(obj) >= global_heap) && \ - (ptr_val(obj) < global_htop)) || \ - ((ptr_val(obj) >= inc_fromspc) && \ - (ptr_val(obj) < inc_fromend)) || \ - ((ptr_val(obj) >= global_old_heap) && \ - (ptr_val(obj) < global_old_hend)))) - -#else - -#define NO_COPY(obj) (IS_CONST(obj) || \ - (((ptr_val(obj) >= global_heap) && \ - (ptr_val(obj) < global_htop)) || \ - ((ptr_val(obj) >= global_old_heap) && \ - (ptr_val(obj) < global_old_hend)))) - -#endif /* INCREMENTAL */ - -#define LAZY_COPY(from,obj) do { \ - if (!NO_COPY(obj)) { \ - BM_LAZY_COPY_START; \ - BM_COUNT(messages_copied); \ - obj = copy_struct_lazy(from,obj,0); \ - BM_LAZY_COPY_STOP; \ - } \ -} while(0) - -Eterm copy_struct_lazy(Process*, Eterm, Uint); - -#endif /* HYBRID */ - /* Utilities */ extern void erts_delete_nodes_monitors(Process *, ErtsProcLocks); extern Eterm erts_monitor_nodes(Process *, Eterm, Eterm); @@ -1139,10 +942,6 @@ void erts_offset_heap_ptr(Eterm*, Uint, Sint, Eterm*, Eterm*); void erts_offset_heap(Eterm*, Uint, Sint, Eterm*, Eterm*); void erts_free_heap_frags(Process* p); -#ifdef HYBRID -int erts_global_garbage_collect(Process*, int, Eterm*, int); -#endif - /* io.c */ struct erl_drv_port_data_lock { diff --git a/erts/emulator/beam/io.c b/erts/emulator/beam/io.c index a685f41c4d..204bff299e 100644 --- a/erts/emulator/beam/io.c +++ 
b/erts/emulator/beam/io.c @@ -1364,7 +1364,7 @@ void init_io(void) #else "port_state", #endif - 0); + make_small(0)); #endif erts_port[i].tracer_proc = NIL; erts_port[i].trace_flags = 0; @@ -3260,6 +3260,8 @@ driver_deliver_term(ErlDrvPort port, Uint size = ptr[1]; Uint offset = ptr[2]; + erts_smp_atomic_add_nob(&erts_bytes_in, (erts_aint_t) size); + if (size <= ERL_ONHEAP_BIN_LIMIT) { ErlHeapBin* hbp = (ErlHeapBin *) hp; hp += heap_bin_size(size); @@ -3291,6 +3293,9 @@ driver_deliver_term(ErlDrvPort port, case ERL_DRV_BUF2BINARY: { /* char*, size */ byte *bufp = (byte *) ptr[0]; Uint size = (Uint) ptr[1]; + + erts_smp_atomic_add_nob(&erts_bytes_in, (erts_aint_t) size); + if (size <= ERL_ONHEAP_BIN_LIMIT) { ErlHeapBin* hbp = (ErlHeapBin *) hp; hp += heap_bin_size(size); @@ -3327,6 +3332,7 @@ driver_deliver_term(ErlDrvPort port, } case ERL_DRV_STRING: /* char*, length */ + erts_smp_atomic_add_nob(&erts_bytes_in, (erts_aint_t) ptr[1]); mess = buf_to_intlist(&hp, (char*)ptr[0], ptr[1], NIL); ptr += 2; break; diff --git a/erts/emulator/drivers/common/inet_drv.c b/erts/emulator/drivers/common/inet_drv.c index bf376f0494..76a9b55179 100644 --- a/erts/emulator/drivers/common/inet_drv.c +++ b/erts/emulator/drivers/common/inet_drv.c @@ -3124,6 +3124,7 @@ static int tcp_message(inet_descriptor* desc, const char* buf, int len) int i = 0; DEBUGF(("tcp_message(%ld): len = %d\r\n", (long)desc->port, len)); + /* XXX fprintf(stderr,"tcp_message send.\r\n"); */ i = LOAD_ATOM(spec, i, am_tcp); i = LOAD_PORT(spec, i, desc->dport); @@ -5426,6 +5427,7 @@ static int inet_set_opts(inet_descriptor* desc, char* ptr, int len) if (IS_SCTP(desc)) return sctp_set_opts(desc, ptr, len); #endif + /* XXX { int i; for(i=0;i<len;++i) fprintf(stderr,"0x%02X, ", (unsigned) ptr[i]); fprintf(stderr,"\r\n");} */ while(len >= 5) { opt = *ptr++; @@ -5755,10 +5757,16 @@ skip_os_setopt: if (desc->active != old_active) sock_select(desc, (FD_READ|FD_CLOSE), (desc->active>0)); + /* XXX: UDP sockets could also trigger immediate read here NIY */ if ((desc->stype==SOCK_STREAM) && desc->active) { if (!old_active || (desc->htype != old_htype)) { /* passive => active change OR header type change in active mode */ - return 1; + /* Return > 1 if only active changed to INET_ONCE -> direct read if + header type is unchanged. */ + /* XXX fprintf(stderr,"desc->htype == %d, old_htype == %d, + desc->active == %d, old_active == %d\r\n",(int)desc->htype, + (int) old_htype, (int) desc->active, (int) old_active );*/ + return 1+(desc->htype == old_htype && desc->active == INET_ONCE); } return 0; } @@ -7592,17 +7600,27 @@ static ErlDrvSSizeT inet_ctl(inet_descriptor* desc, int cmd, char* buf, case INET_REQ_SETOPTS: { /* set options */ DEBUGF(("inet_ctl(%ld): SETOPTS\r\n", (long)desc->port)); + /* XXX fprintf(stderr,"inet_ctl(%ld): SETOPTS (len = %d)\r\n", (long)desc->port,(int) len); */ switch(inet_set_opts(desc, buf, len)) { case -1: return ctl_error(EINVAL, rbuf, rsize); case 0: return ctl_reply(INET_REP_OK, NULL, 0, rbuf, rsize); - default: /* active/passive change!! */ + case 1: /* * Let's hope that the descriptor really is a tcp_descriptor here. */ + /* fprintf(stderr,"Triggered tcp_deliver by setopt.\r\n"); */ tcp_deliver((tcp_descriptor *) desc, 0); return ctl_reply(INET_REP_OK, NULL, 0, rbuf, rsize); + default: + /* fprintf(stderr,"Triggered tcp_recv by setopt.\r\n"); */ + /* + * Same as above, but active changed to once w/o header type + * change, so try a read instead of just deliver. 
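The inet_set_opts()/inet_ctl() change above widens the old return value into a small convention: 0 means nothing more to do, 1 means an active/passive or header-type change that should flush already-buffered data via tcp_deliver(), and anything greater means the socket switched to {active,once} with an unchanged header type, so the driver attempts an immediate tcp_recv(). A hypothetical, self-contained model of that dispatch (the names and types below are illustrative, not the driver's):

/* Sketch only: models the SETOPTS return-code convention described above. */
#include <stdio.h>

enum setopts_result {
    SETOPTS_ERROR   = -1,  /* bad option buffer                                  */
    SETOPTS_OK      =  0,  /* nothing more to do                                 */
    SETOPTS_DELIVER =  1,  /* active/passive or header-type change: flush queue  */
    SETOPTS_RECV    =  2   /* {active,once}, same header type: try direct read   */
};

static void deliver_queued(void)  { puts("tcp_deliver: push buffered data"); }
static void try_direct_read(void) { puts("tcp_recv: attempt an immediate read"); }

static void handle_setopts(enum setopts_result r)
{
    switch (r) {
    case SETOPTS_ERROR:   puts("reply {error,einval}");        break;
    case SETOPTS_OK:      puts("reply ok");                    break;
    case SETOPTS_DELIVER: deliver_queued();  puts("reply ok"); break;
    default:              try_direct_read(); puts("reply ok"); break;
    }
}

int main(void)
{
    handle_setopts(SETOPTS_DELIVER);
    handle_setopts(SETOPTS_RECV);
    return 0;
}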
+ */ + tcp_recv((tcp_descriptor *) desc, 0); + return ctl_reply(INET_REP_OK, NULL, 0, rbuf, rsize); } } @@ -9196,6 +9214,7 @@ static int tcp_inet_input(tcp_descriptor* desc, HANDLE event) #endif ASSERT(!INETP(desc)->is_ignored); DEBUGF(("tcp_inet_input(%ld) {s=%d\r\n", port, desc->inet.s)); + /* XXX fprintf(stderr,"tcp_inet_input(%ld) {s=%d}\r\n",(long) desc->inet.port, desc->inet.s); */ if (desc->inet.state == INET_STATE_ACCEPTING) { SOCKET s; unsigned int len; diff --git a/erts/emulator/hipe/hipe_bif0.c b/erts/emulator/hipe/hipe_bif0.c index 26f183dc25..af593229c0 100644 --- a/erts/emulator/hipe/hipe_bif0.c +++ b/erts/emulator/hipe/hipe_bif0.c @@ -1093,10 +1093,8 @@ BIF_RETTYPE hipe_bifs_make_fun_3(BIF_ALIST_3) if (is_not_nil(free_vars)) BIF_ERROR(BIF_P, BADARG); -#ifndef HYBRID /* FIND ME! */ funp->next = MSO(BIF_P).funs; MSO(BIF_P).funs = funp; -#endif BIF_RET(make_fun(funp)); } diff --git a/erts/emulator/hipe/hipe_bif1.c b/erts/emulator/hipe/hipe_bif1.c index 87cdfb8c7a..64de754e18 100644 --- a/erts/emulator/hipe/hipe_bif1.c +++ b/erts/emulator/hipe/hipe_bif1.c @@ -449,7 +449,7 @@ BIF_RETTYPE hipe_bifs_gc_info_0(BIF_ALIST_0) BIF_RETTYPE hipe_bifs_shared_gc_info_0(BIF_ALIST_0) { #ifdef __BENCHMARK__ -#if !(defined(BM_COUNTERS) && defined(HYBRID)) +#if !(defined(BM_COUNTERS)) Uint minor_global_gc = 0; Uint major_global_gc = 0; #endif @@ -459,17 +459,9 @@ BIF_RETTYPE hipe_bifs_shared_gc_info_0(BIF_ALIST_0) #endif Eterm *hp; -#if defined(HYBRID) - Uint tmp_used_heap = (Uint)((BIF_P->htop - BIF_P->heap) + - (OLD_HTOP(BIF_P) - OLD_HEAP(BIF_P)) + - MBUF_SIZE(BIF_P)); - Uint tmp_allocated_heap = (Uint)((BIF_P->hend - BIF_P->heap) + - (OLD_HEND(BIF_P) - OLD_HEAP(BIF_P)) + - MBUF_SIZE(BIF_P)); -#else Uint tmp_used_heap = 0; Uint tmp_allocated_heap = 0; -#endif + hp = HAlloc(BIF_P, 7); BIF_RET(TUPLE6(hp, make_small((uint)minor_global_gc), @@ -486,7 +478,7 @@ BIF_RETTYPE hipe_bifs_shared_gc_info_0(BIF_ALIST_0) BIF_RETTYPE hipe_bifs_incremental_gc_info_0(BIF_ALIST_0) { #ifdef __BENCHMARK__ -#if !(defined(BM_COUNTERS) && defined(INCREMENTAL)) +#if !defined(BM_COUNTERS) Uint minor_gc_cycles = 0; Uint major_gc_cycles = 0; Uint minor_gc_stages = 0; @@ -512,17 +504,6 @@ BIF_RETTYPE hipe_bifs_gc_info_clear_0(BIF_ALIST_0) #ifdef BM_COUNTERS minor_gc = 0; major_gc = 0; -#ifdef HYBRID - minor_global_gc = 0; - major_global_gc = 0; - gc_in_copy = 0; -#ifdef INCREMENTAL - minor_gc_cycles = 0; - major_gc_cycles = 0; - minor_gc_stages = 0; - major_gc_stages = 0; -#endif -#endif #endif #ifdef BM_HEAP_SIZES diff --git a/erts/emulator/hipe/hipe_bif2.c b/erts/emulator/hipe/hipe_bif2.c index ee97541e15..37a1cc193b 100644 --- a/erts/emulator/hipe/hipe_bif2.c +++ b/erts/emulator/hipe/hipe_bif2.c @@ -151,22 +151,6 @@ BIF_RETTYPE hipe_bifs_modeswitch_debug_off_0(BIF_ALIST_0) BIF_RET(am_true); } -/* BIFs for handling the message area */ - -BIF_RETTYPE hipe_bifs_show_message_area_0(BIF_ALIST_0) -{ -#ifdef HYBRID -#ifdef DEBUG - print_message_area(); -#else - printf("Only available in debug compiled emulator\r\n"); -#endif - BIF_RET(am_true); -#else - BIF_RET(am_false); -#endif -} - #if defined(ERTS_ENABLE_LOCK_CHECK) && defined(ERTS_SMP) BIF_RETTYPE hipe_debug_bif_wrapper(BIF_ALIST_1); diff --git a/erts/emulator/hipe/hipe_bif2.tab b/erts/emulator/hipe/hipe_bif2.tab index 51323ce7af..aac27e8bed 100644 --- a/erts/emulator/hipe/hipe_bif2.tab +++ b/erts/emulator/hipe/hipe_bif2.tab @@ -29,4 +29,3 @@ bif hipe_bifs:show_term/1 bif hipe_bifs:in_native/0 bif hipe_bifs:modeswitch_debug_on/0 bif hipe_bifs:modeswitch_debug_off/0 -bif 
hipe_bifs:show_message_area/0 diff --git a/erts/emulator/hipe/hipe_gc.c b/erts/emulator/hipe/hipe_gc.c index e0575c35ff..07e4b8a4d6 100644 --- a/erts/emulator/hipe/hipe_gc.c +++ b/erts/emulator/hipe/hipe_gc.c @@ -237,329 +237,3 @@ void gensweep_nstack(Process *p, Eterm **ptr_old_htop, Eterm **ptr_n_htop) } abort(); } - -#ifdef HYBRID - -#ifdef INCREMENTAL -Eterm *ma_fullsweep_nstack(Process *p, Eterm *n_htop, Eterm *n_hend) -{ - /* known nstack walk state */ - Eterm *nsp; - Eterm *nsp_end; - const struct sdesc *sdesc; - unsigned int sdesc_size; - unsigned long ra; - unsigned int i; - unsigned int mask; - /* arch-specific nstack walk state */ - struct nstack_walk_state walk_state; - - if (!nstack_walk_init_check(p)) - return n_htop; - - nsp = nstack_walk_nsp_begin(p); - nsp_end = nstack_walk_nsp_end(p); - - sdesc = nstack_walk_init_sdesc(p, &walk_state); - - for (;;) { - if (nstack_walk_nsp_reached_end(nsp, nsp_end)) { - if (nsp == nsp_end) - return n_htop; - fprintf(stderr, "%s: passed end of stack\r\n", __FUNCTION__); - break; - } - sdesc_size = nstack_walk_frame_size(sdesc); - i = 0; - mask = sdesc->livebits[0]; - for (;;) { - if (mask & 1) { - Eterm *nsp_i = nstack_walk_frame_index(nsp, i); - Eterm val = *nsp_i; - Eterm *obj_ptr = ptr_val(val); - switch (primary_tag(val)) { - case TAG_PRIMARY_LIST: - COPYMARK_CONS(obj_ptr, n_htop, nsp_i, n_hend); - break; - case TAG_PRIMARY_BOXED: - COPYMARK_BOXED(obj_ptr, n_htop, nsp_i, n_hend); - break; - default: - break; - } - } - if (++i >= sdesc_size) - break; - if (i & 31) - mask >>= 1; - else - mask = sdesc->livebits[i >> 5]; - } - ra = nstack_walk_frame_ra(nsp, sdesc); - if (ra == (unsigned long)nbif_stack_trap_ra) - ra = (unsigned long)p->hipe.ngra; - sdesc = hipe_find_sdesc(ra); - nsp = nstack_walk_next_frame(nsp, sdesc_size); - } - abort(); -} - -void ma_gensweep_nstack(Process *p, Eterm **ptr_old_htop, Eterm **ptr_n_htop) -{ - /* known nstack walk state */ - Eterm *nsp; - Eterm *nsp_end; - const struct sdesc *sdesc; - unsigned int sdesc_size; - unsigned long ra; - unsigned int i; - unsigned int mask; - /* arch-specific nstack walk state */ - struct nstack_walk_state walk_state; - - /* ma_gensweep-specific state */ - Eterm *low_water, *high_water, *surface; - Eterm *n_htop; - Eterm *old_htop; - - if (!nstack_walk_init_check(p)) - return; - - nsp = nstack_walk_nsp_begin(p); - nsp_end = nstack_walk_nsp_end(p); - - low_water = global_heap; - //high_water = global_high_water; - surface = global_htop; - - old_htop = *ptr_old_htop; - n_htop = *ptr_n_htop; - - sdesc = nstack_walk_init_sdesc(p, &walk_state); - - for (;;) { - if (nstack_walk_nsp_reached_end(nsp, nsp_end)) { - if (nsp == nsp_end) { - *ptr_old_htop = old_htop; - *ptr_n_htop = n_htop; - return; - } - fprintf(stderr, "%s: passed end of stack\r\n", __FUNCTION__); - break; - } - sdesc_size = nstack_walk_frame_size(sdesc); - i = 0; - mask = sdesc->livebits[0]; - for (;;) { - if (mask & 1) { - Eterm *nsp_i = nstack_walk_frame_index(nsp, i); - Eterm gval = *nsp_i; - if (is_boxed(gval)) { - Eterm *ptr = boxed_val(gval); - Eterm val = *ptr; - if (MY_IS_MOVED(val)) { - *nsp_i = val; - } else if (ptr_within(ptr, low_water, high_water)) { - MOVE_BOXED(ptr, val, old_htop, nsp_i); - } else if (ptr_within(ptr, high_water, surface)) { - MOVE_BOXED(ptr, val, n_htop, nsp_i); - } - } else if (is_list(gval)) { - Eterm *ptr = list_val(gval); - Eterm val = *ptr; - if (is_non_value(val)) { - *nsp_i = ptr[1]; - } else if (ptr_within(ptr, low_water, high_water)) { - MOVE_CONS(ptr, val, old_htop, nsp_i); - } 
else if (ptr_within(ptr, high_water, surface)) { - MOVE_CONS(ptr, val, n_htop, nsp_i); - } - } - } - if (++i >= sdesc_size) - break; - if (i & 31) - mask >>= 1; - else - mask = sdesc->livebits[i >> 5]; - } - ra = nstack_walk_frame_ra(nsp, sdesc); - if (ra == (unsigned long)nbif_stack_trap_ra) - ra = (unsigned long)p->hipe.ngra; - sdesc = hipe_find_sdesc(ra); - nsp = nstack_walk_next_frame(nsp, sdesc_size); - } - abort(); -} - -#else /* not INCREMENTAL */ - -Eterm *ma_fullsweep_nstack(Process *p, Eterm *n_htop) -{ - /* known nstack walk state */ - Eterm *nsp; - Eterm *nsp_end; - const struct sdesc *sdesc; - unsigned int sdesc_size; - unsigned long ra; - unsigned int i; - unsigned int mask; - /* arch-specific nstack walk state */ - struct nstack_walk_state walk_state; - - /* ma_fullsweep-specific state */ - Eterm *gheap = global_heap; - Eterm *ghtop = global_htop; - Eterm *goheap = global_old_heap; - Eterm *gohtop = global_old_htop; - - if (!nstack_walk_init_check(p)) - return n_htop; - - nsp = nstack_walk_nsp_begin(p); - nsp_end = nstack_walk_nsp_end(p); - - sdesc = nstack_walk_init_sdesc(p, &walk_state); - - for (;;) { - if (nstack_walk_nsp_reached_end(nsp, nsp_end)) { - if (nsp == nsp_end) - return n_htop; - fprintf(stderr, "%s: passed end of stack\r\n", __FUNCTION__); - break; - } - sdesc_size = nstack_walk_frame_size(sdesc); - i = 0; - mask = sdesc->livebits[0]; - for (;;) { - if (mask & 1) { - Eterm *nsp_i = nstack_walk_frame_index(nsp, i); - Eterm gval = *nsp_i; - if (is_boxed(gval)) { - Eterm *ptr = boxed_val(gval); - Eterm val = *ptr; - if (MY_IS_MOVED(val)) { - *nsp_i = val; - } else if (ptr_within(ptr, gheap, ghtop)) { - MOVE_BOXED(ptr, val, n_htop, nsp_i); - } else if (ptr_within(ptr, goheap, gohtop)) { - MOVE_BOXED(ptr, val, n_htop, nsp_i); - } - } else if (is_list(gval)) { - Eterm *ptr = list_val(gval); - Eterm val = *ptr; - if (is_non_value(val)) { - *nsp_i = ptr[1]; - } else if (ptr_within(ptr, gheap, ghtop)) { - MOVE_CONS(ptr, val, n_htop, nsp_i); - } else if (ptr_within(ptr, gheap, ghtop)) { - MOVE_CONS(ptr, val, n_htop, nsp_i); - } - } - } - if (++i >= sdesc_size) - break; - if (i & 31) - mask >>= 1; - else - mask = sdesc->livebits[i >> 5]; - } - ra = nstack_walk_frame_ra(nsp, sdesc); - if (ra == (unsigned long)nbif_stack_trap_ra) - ra = (unsigned long)p->hipe.ngra; - sdesc = hipe_find_sdesc(ra); - nsp = nstack_walk_next_frame(nsp, sdesc_size); - } - abort(); -} - -void ma_gensweep_nstack(Process *p, Eterm **ptr_old_htop, Eterm **ptr_n_htop) -{ - /* known nstack walk state */ - Eterm *nsp; - Eterm *nsp_end; - const struct sdesc *sdesc; - unsigned int sdesc_size; - unsigned long ra; - unsigned int i; - unsigned int mask; - /* arch-specific nstack walk state */ - struct nstack_walk_state walk_state; - - /* ma_gensweep-specific state */ - Eterm *low_water, *high_water, *surface; - Eterm *n_htop; - Eterm *old_htop; - - if (!nstack_walk_init_check(p)) - return; - - nsp = nstack_walk_nsp_begin(p); - nsp_end = nstack_walk_nsp_end(p); - - low_water = global_heap; - high_water = global_high_water; - surface = global_htop; - - old_htop = *ptr_old_htop; - n_htop = *ptr_n_htop; - - sdesc = nstack_walk_init_sdesc(p, &walk_state); - - for (;;) { - if (nstack_walk_nsp_reached_end(nsp, nsp_end)) { - if (nsp == nsp_end) { - *ptr_old_htop = old_htop; - *ptr_n_htop = n_htop; - return; - } - fprintf(stderr, "%s: passed end of stack\r\n", __FUNCTION__); - break; - } - sdesc_size = nstack_walk_frame_size(sdesc); - i = 0; - mask = sdesc->livebits[0]; - for (;;) { - if (mask & 1) { - Eterm *nsp_i 
= nstack_walk_frame_index(nsp, i); - Eterm gval = *nsp_i; - if (is_boxed(gval)) { - Eterm *ptr = boxed_val(gval); - Eterm val = *ptr; - if (MY_IS_MOVED(val)) { - *nsp_i = val; - } else if (ptr_within(ptr, low_water, high_water)) { - MOVE_BOXED(ptr, val, old_htop, nsp_i); - } else if (ptr_within(ptr, high_water, surface)) { - MOVE_BOXED(ptr, val, n_htop, nsp_i); - } - } else if (is_list(gval)) { - Eterm *ptr = list_val(gval); - Eterm val = *ptr; - if (is_non_value(val)) { - *nsp_i = ptr[1]; - } else if (ptr_within(ptr, low_water, high_water)) { - MOVE_CONS(ptr, val, old_htop, nsp_i); - } else if (ptr_within(ptr, high_water, surface)) { - MOVE_CONS(ptr, val, n_htop, nsp_i); - } - } - } - if (++i >= sdesc_size) - break; - if (i & 31) - mask >>= 1; - else - mask = sdesc->livebits[i >> 5]; - } - ra = nstack_walk_frame_ra(nsp, sdesc); - if (ra == (unsigned long)nbif_stack_trap_ra) - ra = (unsigned long)p->hipe.ngra; - sdesc = hipe_find_sdesc(ra); - nsp = nstack_walk_next_frame(nsp, sdesc_size); - } - abort(); -} -#endif /* INCREMENTAL */ - -#endif /* HYBRID */ diff --git a/erts/emulator/hipe/hipe_mkliterals.c b/erts/emulator/hipe/hipe_mkliterals.c index d07d14028c..6e9041c84a 100644 --- a/erts/emulator/hipe/hipe_mkliterals.c +++ b/erts/emulator/hipe/hipe_mkliterals.c @@ -467,15 +467,11 @@ static const struct rts_param { int value; } rts_params[] = { { 1, "P_OFF_HEAP_FUNS", -#if !defined(HYBRID) 1, offsetof(struct process, off_heap.first) -#endif }, { 4, "EFT_NEXT", -#if !defined(HYBRID) 1, offsetof(struct erl_fun_thing, next) -#endif }, /* These are always defined, but their values depend on the diff --git a/erts/emulator/hipe/hipe_stack.h b/erts/emulator/hipe/hipe_stack.h index 4c14b4a519..4e3076caf5 100644 --- a/erts/emulator/hipe/hipe_stack.h +++ b/erts/emulator/hipe/hipe_stack.h @@ -116,13 +116,4 @@ extern int hipe_fill_stacktrace(Process*, int, Eterm**); extern Eterm *fullsweep_nstack(Process *p, Eterm *n_htop); extern void gensweep_nstack(Process *p, Eterm **ptr_old_htop, Eterm **ptr_n_htop); -#ifdef HYBRID -#ifdef INCREMENTAL -extern Eterm *ma_fullsweep_nstack(Process *p, Eterm *n_htop, Eterm *n_hend); -#else -extern Eterm *ma_fullsweep_nstack(Process *p, Eterm *n_htop); -#endif -extern void ma_gensweep_nstack(Process *p, Eterm **ptr_old_htop, Eterm **ptr_n_htop); -#endif /* HYBRID */ - #endif /* HIPE_STACK_H */ diff --git a/erts/emulator/test/binary_SUITE.erl b/erts/emulator/test/binary_SUITE.erl index d9fc876482..c9f43bb00b 100644 --- a/erts/emulator/test/binary_SUITE.erl +++ b/erts/emulator/test/binary_SUITE.erl @@ -1135,15 +1135,8 @@ sleeper() -> ?line receive after infinity -> ok end. -gc_test(doc) -> "Test that binaries are garbage collected properly."; -gc_test(suite) -> []; +%% Test that binaries are garbage collected properly. gc_test(Config) when is_list(Config) -> - case erlang:system_info(heap_type) of - private -> gc_test_1(); - hybrid -> {skip,"Hybrid heap"} - end. - -gc_test_1() -> %% Note: This test is only relevant for REFC binaries. %% Therefore, we take care that all binaries are REFC binaries. B = list_to_binary(lists:seq(0, ?heap_binary_size)), diff --git a/erts/emulator/test/bs_construct_SUITE.erl b/erts/emulator/test/bs_construct_SUITE.erl index 7fdf36711b..dea02b1f4b 100644 --- a/erts/emulator/test/bs_construct_SUITE.erl +++ b/erts/emulator/test/bs_construct_SUITE.erl @@ -460,7 +460,6 @@ mem_leak(0, _) -> ok; mem_leak(N, B) -> ?line big_bin(B, <<23>>), ?line {'EXIT',{badarg,_}} = (catch big_bin(B, bad)), - maybe_gc(), mem_leak(N-1, B). 
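In the hipe_mkliterals.c hunk above, the P_OFF_HEAP_FUNS and EFT_NEXT rts_params entries are now unconditionally defined, since the HYBRID build that left them undefined is gone; each entry exports a struct field offset that is emitted at build time so native code can address the field without the C headers. A standalone sketch of that offsetof-table idea, using a hypothetical stand-in struct rather than the real struct process:

/* Sketch only: a tiny build-time generator that prints field offsets,
 * in the spirit of hipe_mkliterals' rts_params table. */
#include <stddef.h>
#include <stdio.h>

struct fake_process {              /* stand-in, not the real struct process */
    void *htop;
    void *stop;
    struct { void *first; } off_heap;
};

static const struct { const char *name; size_t value; } params[] = {
    { "P_HTOP",          offsetof(struct fake_process, htop) },
    { "P_OFF_HEAP_FUNS", offsetof(struct fake_process, off_heap.first) },
};

int main(void)
{
    size_t i;
    for (i = 0; i < sizeof(params)/sizeof(params[0]); i++)
        printf("#define %s %zu\n", params[i].name, params[i].value);
    return 0;
}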
big_bin(B1, B2) -> @@ -473,13 +472,6 @@ big_bin(B1, B2) -> make_bin(0, Acc) -> Acc; make_bin(N, Acc) -> make_bin(N-1, <<Acc/binary,Acc/binary>>). -maybe_gc() -> - case erlang:system_info(heap_type) of - shared -> erlang:garbage_collect(); - hybrid -> erlang:garbage_collect(); - private -> ok - end. - -define(COF(Int0), ?line (fun(Int) -> true = <<Int:32/float>> =:= <<(float(Int)):32/float>>, diff --git a/erts/emulator/test/code_SUITE.erl b/erts/emulator/test/code_SUITE.erl index 25ce94096f..74ce5e397a 100644 --- a/erts/emulator/test/code_SUITE.erl +++ b/erts/emulator/test/code_SUITE.erl @@ -135,14 +135,7 @@ new_binary_types(Config) when is_list(Config) -> bit_sized_binary(Bin))), ok. -t_check_process_code(doc) -> "Test check_process_code/2."; t_check_process_code(Config) when is_list(Config) -> - case erlang:system_info(heap_type) of - private -> t_check_process_code_1(Config); - hybrid -> {skip,"Hybrid heap"} - end. - -t_check_process_code_1(Config) -> ?line Priv = ?config(priv_dir, Config), ?line Data = ?config(data_dir, Config), ?line File = filename:join(Data, "my_code_test"), @@ -247,12 +240,10 @@ gc1() -> ok. t_check_process_code_ets(doc) -> "Test check_process_code/2 in combination with a fun obtained from an ets table."; t_check_process_code_ets(Config) when is_list(Config) -> - case {test_server:is_native(?MODULE),erlang:system_info(heap_type)} of - {true,_} -> - {skipped,"Native code"}; - {_,hybrid} -> - {skipped,"Hybrid heap"}; - {false,private} -> + case test_server:is_native(?MODULE) of + true -> + {skip,"Native code"}; + false -> do_check_process_code_ets(Config) end. @@ -397,9 +388,7 @@ module_md5_ok(Code) -> make_stub(Config) when is_list(Config) -> - %% No old code to purge if hybrid heap because of skipped test cases, - %% so we'll need a catch here. - ?line (catch erlang:purge_module(my_code_test)), + catch erlang:purge_module(my_code_test), ?line Data = ?config(data_dir, Config), ?line File = filename:join(Data, "my_code_test"), @@ -433,9 +422,7 @@ make_stub(Config) when is_list(Config) -> ok. make_stub_many_funs(Config) when is_list(Config) -> - %% No old code to purge if hybrid heap because of skipped test cases, - %% so we'll need a catch here. - ?line (catch erlang:purge_module(many_funs)), + catch erlang:purge_module(many_funs), ?line Data = ?config(data_dir, Config), ?line File = filename:join(Data, "many_funs"), diff --git a/erts/emulator/test/fun_SUITE.erl b/erts/emulator/test/fun_SUITE.erl index 559e540016..839ad6a4f4 100644 --- a/erts/emulator/test/fun_SUITE.erl +++ b/erts/emulator/test/fun_SUITE.erl @@ -539,12 +539,6 @@ bad_md5(Bad) -> {'EXIT',{badarg,_}} = (catch erlang:md5(Bad)). refc(Config) when is_list(Config) -> - case erlang:system_info(heap_type) of - private -> refc_1(); - hybrid -> {skip,"Hybrid heap"} - end. - -refc_1() -> ?line F1 = fun_factory(2), ?line {refc,2} = erlang:fun_info(F1, refc), ?line F2 = fun_factory(42), @@ -570,12 +564,6 @@ fun_factory(Const) -> fun(X) -> X + Const end. refc_ets(Config) when is_list(Config) -> - case erlang:system_info(heap_type) of - private -> refc_ets_1(); - hybrid -> {skip,"Hybrid heap"} - end. - -refc_ets_1() -> ?line F = fun(X) -> X + 33 end, ?line {refc,2} = erlang:fun_info(F, refc), @@ -622,12 +610,6 @@ refc_ets_bag(F1, Options) -> ok. refc_dist(Config) when is_list(Config) -> - case erlang:system_info(heap_type) of - private -> refc_dist_1(); - hybrid -> {skip,"Hybrid heap"} - end. 
- -refc_dist_1() -> ?line {ok,Node} = start_node(fun_SUITE_refc_dist), ?line process_flag(trap_exit, true), ?line Pid = spawn_link(Node, diff --git a/erts/emulator/test/hibernate_SUITE.erl b/erts/emulator/test/hibernate_SUITE.erl index 82a0aad189..68bc3434d4 100644 --- a/erts/emulator/test/hibernate_SUITE.erl +++ b/erts/emulator/test/hibernate_SUITE.erl @@ -67,10 +67,7 @@ end_per_testcase(_Func, Config) -> basic(Config) when is_list(Config) -> Ref = make_ref(), Info = {self(),Ref}, - ExpectedHeapSz = case erlang:system_info(heap_type) of - private -> erts_debug:size([Info]); - hybrid -> erts_debug:size([a|b]) - end, + ExpectedHeapSz = erts_debug:size([Info]), ?line Child = spawn_link(fun() -> basic_hibernator(Info) end), ?line hibernate_wake_up(100, ExpectedHeapSz, Child), ?line Child ! please_quit_now, @@ -166,10 +163,7 @@ whats_up_calc(A1, A2, A3, A4, A5, A6, A7, A8, A9, Acc) -> dynamic_call(Config) when is_list(Config) -> Ref = make_ref(), Info = {self(),Ref}, - ExpectedHeapSz = case erlang:system_info(heap_type) of - private -> erts_debug:size([Info]); - hybrid -> erts_debug:size([a|b]) - end, + ExpectedHeapSz = erts_debug:size([Info]), ?line Child = spawn_link(fun() -> ?MODULE:dynamic_call_hibernator(Info, hibernate) end), ?line hibernate_wake_up(100, ExpectedHeapSz, Child), ?line Child ! please_quit_now, diff --git a/erts/emulator/test/node_container_SUITE.erl b/erts/emulator/test/node_container_SUITE.erl index aa83459ef8..0bf2c03233 100644 --- a/erts/emulator/test/node_container_SUITE.erl +++ b/erts/emulator/test/node_container_SUITE.erl @@ -459,10 +459,6 @@ make_node_garbage(_, _, _, Ps) -> end, lists:foreach(fun (P) -> wait_until(fun () -> ProcIsCleanedUp(P) end) end, Ps), - ?line case erlang:system_info(heap_type) of - shared -> ?line garbage_collect(); - _ -> ?line ok - end, ?line ok. @@ -605,17 +601,7 @@ node_controller_refc(Config) when is_list(Config) -> % Get rid of all references to Node ?line exec(P, fun () -> exit(normal) end), ?line wait_until(fun () -> not is_process_alive(P) end), - ?line case erlang:system_info(heap_type) of - shared -> - ?line garbage_collect(); - hybrid -> - ?line lists:foreach(fun (Proc) -> garbage_collect(Proc) end, - processes()), - ?line erlang:garbage_collect_message_area(); - _ -> - ?line lists:foreach(fun (Proc) -> garbage_collect(Proc) end, - processes()) - end, + lists:foreach(fun (Proc) -> garbage_collect(Proc) end, processes()), ?line false = get_node_references({Node,Creation}), ?line false = get_dist_references(Node), ?line false = lists:member(Node, nodes(known)), diff --git a/erts/emulator/test/port_SUITE.erl b/erts/emulator/test/port_SUITE.erl index 0a1ef5a78f..0b99b3438a 100644 --- a/erts/emulator/test/port_SUITE.erl +++ b/erts/emulator/test/port_SUITE.erl @@ -1244,8 +1244,8 @@ otp_3906_forker(N, Parent, Ref, Sup, Prog) -> otp_4389(suite) -> []; otp_4389(doc) -> []; otp_4389(Config) when is_list(Config) -> - case {os:type(),erlang:system_info(heap_type)} of - {{unix, _},private} -> + case os:type() of + {unix, _} -> ?line Dog = test_server:timetrap(test_server:seconds(240)), ?line TCR = self(), case get_true_cmd() of @@ -1293,7 +1293,7 @@ otp_4389(Config) when is_list(Config) -> ?line {skipped, "\"true\" command not found"} end; _ -> - {skip,"Only run on Unix and private heaps"} + {skip,"Only run on Unix"} end. 
get_true_cmd() -> diff --git a/erts/emulator/valgrind/suppress.patched.3.6.0 b/erts/emulator/valgrind/suppress.patched.3.6.0 index 8cf4cba2c8..62ba032520 100644 --- a/erts/emulator/valgrind/suppress.patched.3.6.0 +++ b/erts/emulator/valgrind/suppress.patched.3.6.0 @@ -57,18 +57,16 @@ fun:putenv fun:erts_sys_putenv fun:os_putenv_2 - fun:process_main + ... } { -Leak in libc putenv -Memcheck:Leak -fun:malloc -fun:erts_sys_alloc -... -fun:erts_alloc -fun:erts_sys_putenv -fun:os_putenv_2 -fun:process_main + Leak in libc putenv + Memcheck:Leak + ... + fun:erts_alloc + fun:erts_sys_putenv + fun:os_putenv_2 + ... } { erronous warning @@ -348,3 +346,14 @@ fun:erl_start fun:main } +{ +Harmless leak of ErtsThrPrgrData from async threads in exiting emulator +Memcheck:Leak +... +fun:erts_alloc +fun:erts_thr_progress_register_unmanaged_thread +fun:async_thread_init +fun:async_main +... +} + diff --git a/erts/emulator/valgrind/suppress.standard b/erts/emulator/valgrind/suppress.standard index 26e34e3757..5a129bfd10 100644 --- a/erts/emulator/valgrind/suppress.standard +++ b/erts/emulator/valgrind/suppress.standard @@ -54,18 +54,16 @@ fun:putenv fun:erts_sys_putenv fun:os_putenv_2 - fun:process_main + ... } { -Leak in libc putenv -Memcheck:Leak -fun:malloc -fun:erts_sys_alloc -... -fun:erts_alloc -fun:erts_sys_putenv -fun:os_putenv_2 -fun:process_main + Leak in libc putenv + Memcheck:Leak + ... + fun:erts_alloc + fun:erts_sys_putenv + fun:os_putenv_2 + ... } { erronous warning @@ -306,3 +304,14 @@ fun:erl_start fun:main } +{ +Harmless leak of ErtsThrPrgrData from async threads in exiting emulator +Memcheck:Leak +... +fun:erts_alloc +fun:erts_thr_progress_register_unmanaged_thread +fun:async_thread_init +fun:async_main +... +} + |