author:    Sverker Eriksson <[email protected]>  2015-06-02 19:23:49 +0200
committer: Sverker Eriksson <[email protected]>  2015-06-15 14:48:22 +0200
commit:    02d0ce034598297565f2b35ecc3d1af121787f33
tree:      a415e4b7d21c240352ae8737806e516caa2f2b43 /erts/emulator/beam/erl_gc.c
parent:    512c321bdaf25b685124405e287a3839be467149
erts: Remove hashmap probabilistic heap overestimation
by adding a dynamic heap factory.
"binary_to_term" is now a hybrid solution: it still calls
decoded_size() to calculate the needed heap space, but may
also allocate more heap space dynamically if big maps
need it.
The heap size returned from decoded_size() is guaranteed
to be sufficient for all term heap data except hashmap
nodes. All hashmap nodes are created at the end of dec_term()
through the heap factory interface, which may allocate more
heap space on the process heap or in heap fragments.
With this commit, it is no longer guaranteed that a message
is confined to a single heap fragment.
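
A minimal, self-contained C sketch of the idea described above (not the
ERTS implementation; the toy_factory/toy_factory_alloc names are
hypothetical stand-ins for the ErtsHeapFactory interface): the decoder
reserves the estimate from decoded_size() up front and only falls back to
extra overflow fragments when a term, such as a big map, needs more words
than estimated.

    #include <stdio.h>
    #include <stdlib.h>

    typedef unsigned long Word;

    typedef struct toy_frag {
        struct toy_frag *next;
        Word *mem;
    } toy_frag;

    typedef struct {
        Word *hp;          /* next free word                   */
        Word *hp_end;      /* end of the current block         */
        toy_frag *frags;   /* overflow fragments, newest first */
    } toy_factory;

    /* Reserve the up-front estimate (the role decoded_size() plays in ERTS). */
    static void toy_factory_init(toy_factory *f, size_t estimated_words)
    {
        Word *block = malloc(estimated_words * sizeof(Word));
        f->hp = block;
        f->hp_end = block + estimated_words;
        f->frags = NULL;
    }

    /* Hand out 'need' words; when the estimate is exhausted, switch to a
     * fresh overflow fragment instead of failing.  Blocks are never freed
     * here; cleanup is omitted to keep the toy short. */
    static Word *toy_factory_alloc(toy_factory *f, size_t need)
    {
        if (f->hp + need > f->hp_end) {
            size_t frag_words = need > 256 ? need : 256;
            toy_frag *frag = malloc(sizeof(toy_frag));
            frag->mem = malloc(frag_words * sizeof(Word));
            frag->next = f->frags;
            f->frags = frag;
            f->hp = frag->mem;
            f->hp_end = frag->mem + frag_words;
        }
        Word *ret = f->hp;
        f->hp += need;
        return ret;
    }

    int main(void)
    {
        toy_factory f;
        Word *a, *b;

        toy_factory_init(&f, 8);        /* the "decoded_size()" estimate      */
        a = toy_factory_alloc(&f, 4);   /* fits inside the estimate           */
        b = toy_factory_alloc(&f, 16);  /* "big map node": spills to fragment */
        printf("a=%p, b in fragment: %s\n", (void *)a, f.frags ? "yes" : "no");
        (void)b;
        return 0;
    }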
Diffstat (limited to 'erts/emulator/beam/erl_gc.c')
-rw-r--r--  erts/emulator/beam/erl_gc.c | 93
1 file changed, 48 insertions, 45 deletions
diff --git a/erts/emulator/beam/erl_gc.c b/erts/emulator/beam/erl_gc.c
index 14e7dde778..a456689b5c 100644
--- a/erts/emulator/beam/erl_gc.c
+++ b/erts/emulator/beam/erl_gc.c
@@ -108,8 +108,7 @@ static Eterm* sweep_one_heap(Eterm* heap_ptr, Eterm* heap_end, Eterm* htop,
                                  char* src, Uint src_size);
 static Eterm* collect_heap_frags(Process* p, Eterm* heap,
                                  Eterm* htop, Eterm* objv, int nobj);
-static Uint adjust_after_fullsweep(Process *p, Uint size_before,
-                                   int need, Eterm *objv, int nobj);
+static void adjust_after_fullsweep(Process *p, int need, Eterm *objv, int nobj);
 static void shrink_new_heap(Process *p, Uint new_sz, Eterm *objv, int nobj);
 static void grow_new_heap(Process *p, Uint new_sz, Eterm* objv, int nobj);
 static void sweep_off_heap(Process *p, int fullsweep);
@@ -868,29 +867,37 @@ minor_collection(Process* p, int need, Eterm* objv, int nobj, Uint *recl)
     ErlMessage *msgp;
     Uint size_after;
     Uint need_after;
-    Uint stack_size = STACK_SZ_ON_HEAP(p);
-    Uint fragments = MBUF_SIZE(p) + combined_message_size(p);
-    Uint size_before = fragments + (HEAP_TOP(p) - HEAP_START(p));
-    Uint new_sz = next_heap_size(p, HEAP_SIZE(p) + fragments, 0);
+    const Uint stack_size = STACK_SZ_ON_HEAP(p);
+    const Uint size_before = MBUF_SIZE(p) + (HEAP_TOP(p) - HEAP_START(p));
+    Uint new_sz = HEAP_SIZE(p) + MBUF_SIZE(p) + combined_message_size(p);
+    new_sz = next_heap_size(p, new_sz, 0);
 
     do_minor(p, new_sz, objv, nobj);
 
-    /*
+    size_after = HEAP_TOP(p) - HEAP_START(p);
+    *recl += (size_before - size_after);
+
+    /*
      * Copy newly received message onto the end of the new heap.
      */
-    ErtsGcQuickSanityCheck(p);
-    for (msgp = p->msg.first; msgp; msgp = msgp->next) {
-        if (msgp->data.attached) {
-            erts_move_msg_attached_data_to_heap(&p->htop, &p->off_heap, msgp);
-            ErtsGcQuickSanityCheck(p);
-        }
-    }
+    ErtsGcQuickSanityCheck(p);
+    for (msgp = p->msg.first; msgp; msgp = msgp->next) {
+        if (msgp->data.attached) {
+            ErtsHeapFactory factory;
+            erts_factory_proc_prealloc_init(&factory, p,
+                                            erts_msg_attached_data_size(msgp));
+            erts_move_msg_attached_data_to_heap(&factory, msgp);
+            erts_factory_close(&factory);
+            ErtsGcQuickSanityCheck(p);
+        }
+    }
     ErtsGcQuickSanityCheck(p);
 
     GEN_GCS(p)++;
-    size_after = HEAP_TOP(p) - HEAP_START(p);
-    need_after = size_after + need + stack_size;
-    *recl += (size_before - size_after);
+    need_after = ((HEAP_TOP(p) - HEAP_START(p))
+                  + erts_used_frag_sz(MBUF(p))
+                  + need
+                  + stack_size);
 
     /*
      * Excessively large heaps should be shrunk, but
@@ -925,6 +932,7 @@ minor_collection(Process* p, int need, Eterm* objv, int nobj, Uint *recl)
         }
 
         ASSERT(HEAP_SIZE(p) == next_heap_size(p, HEAP_SIZE(p), 0));
+        ASSERT(MBUF(p) == NULL);
 
         return 1;               /* We are done. */
     }
@@ -933,6 +941,7 @@ minor_collection(Process* p, int need, Eterm* objv, int nobj, Uint *recl)
          * The heap size turned out to be just right. We are done.
          */
         ASSERT(HEAP_SIZE(p) == next_heap_size(p, HEAP_SIZE(p), 0));
+        ASSERT(MBUF(p) == NULL);
         return 1;
     }
 }
@@ -1212,7 +1221,9 @@ major_collection(Process* p, int need, Eterm* objv, int nobj, Uint *recl)
 {
     Rootset rootset;
     Roots* roots;
-    Uint size_before;
+    const Uint size_before = ((HEAP_TOP(p) - HEAP_START(p))
+                              + (OLD_HTOP(p) - OLD_HEAP(p))
+                              + MBUF_SIZE(p));
     Eterm* n_heap;
     Eterm* n_htop;
     char* src = (char *) HEAP_START(p);
@@ -1221,25 +1232,15 @@ major_collection(Process* p, int need, Eterm* objv, int nobj, Uint *recl)
     Uint oh_size = (char *) OLD_HTOP(p) - oh;
     Uint n;
     Uint new_sz;
-    Uint fragments = MBUF_SIZE(p) + combined_message_size(p);
-
-    size_before = fragments + (HEAP_TOP(p) - HEAP_START(p))
-        + (OLD_HTOP(p) - OLD_HEAP(p));
 
     /*
      * Do a fullsweep GC. First figure out the size of the heap
      * to receive all live data.
      */
 
-    new_sz = HEAP_SIZE(p) + fragments + (OLD_HTOP(p) - OLD_HEAP(p));
-    /*
-     * We used to do
-     *
-     *     new_sz += STACK_SZ_ON_HEAP(p);
-     *
-     * here for no obvious reason. (The stack size is already counted once
-     * in HEAP_SIZE(p).)
-     */
+    new_sz = (HEAP_SIZE(p) + MBUF_SIZE(p)
+              + combined_message_size(p)
+              + (OLD_HTOP(p) - OLD_HEAP(p)));
     new_sz = next_heap_size(p, new_sz, 0);
 
     /*
@@ -1432,20 +1433,27 @@ major_collection(Process* p, int need, Eterm* objv, int nobj, Uint *recl)
 
     ErtsGcQuickSanityCheck(p);
 
+    *recl += size_before - (HEAP_TOP(p) - HEAP_START(p));
+
     {
         ErlMessage *msgp;
+
         /*
          * Copy newly received message onto the end of the new heap.
         */
-        for (msgp = p->msg.first; msgp; msgp = msgp->next) {
+        for (msgp = p->msg.first; msgp; msgp = msgp->next) {
             if (msgp->data.attached) {
-                erts_move_msg_attached_data_to_heap(&p->htop, &p->off_heap, msgp);
+                ErtsHeapFactory factory;
+                erts_factory_proc_prealloc_init(&factory, p,
+                                                erts_msg_attached_data_size(msgp));
+                erts_move_msg_attached_data_to_heap(&factory, msgp);
+                erts_factory_close(&factory);
                 ErtsGcQuickSanityCheck(p);
             }
         }
     }
 
-    *recl += adjust_after_fullsweep(p, size_before, need, objv, nobj);
+    adjust_after_fullsweep(p, need, objv, nobj);
 
 #ifdef HARDDEBUG
     disallow_heap_frag_ref_in_heap(p);
@@ -1456,21 +1464,17 @@ major_collection(Process* p, int need, Eterm* objv, int nobj, Uint *recl)
     return 1;                   /* We are done. */
 }
 
-static Uint
-adjust_after_fullsweep(Process *p, Uint size_before, int need, Eterm *objv, int nobj)
+static void
+adjust_after_fullsweep(Process *p, int need, Eterm *objv, int nobj)
 {
-    Uint wanted, sz, size_after, need_after;
+    Uint wanted, sz, need_after;
     Uint stack_size = STACK_SZ_ON_HEAP(p);
-    Uint reclaimed_now;
-
-    size_after = (HEAP_TOP(p) - HEAP_START(p));
-    reclaimed_now = (size_before - size_after);
 
     /*
      * Resize the heap if needed.
     */
 
-    need_after = size_after + need + stack_size;
+    need_after = (HEAP_TOP(p) - HEAP_START(p)) + need + stack_size;
     if (HEAP_SIZE(p) < need_after) {
         /* Too small - grow to match requested need */
         sz = next_heap_size(p, need_after, 0);
@@ -1493,8 +1497,6 @@ adjust_after_fullsweep(Process *p, Uint size_before, int need, Eterm *objv, int
             shrink_new_heap(p, sz, objv, nobj);
         }
     }
-
-    return reclaimed_now;
 }
 
 /*
@@ -1942,7 +1944,8 @@ collect_heap_frags(Process* p, Eterm* n_hstart, Eterm* n_htop,
      * until next GC.
     */
     qb = MBUF(p);
-    while (qb != NULL) {
+    while (qb != NULL) {
+        ASSERT(!qb->off_heap.first); /* process fragments use the MSO(p) list */
         frag_size = qb->used_size * sizeof(Eterm);
         if (frag_size != 0) {
             frag_begin = (char *) qb->mem;
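
The loops rewritten in the minor_collection and major_collection hunks
above follow an open/move/close lifecycle around each message with
attached data. The self-contained sketch below only mirrors that shape;
the msg_t/arena_* names are hypothetical stand-ins, while the real calls
are erts_factory_proc_prealloc_init(), erts_move_msg_attached_data_to_heap()
and erts_factory_close() exactly as they appear in the diff.

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    typedef struct msg {
        struct msg *next;
        const char *attached;   /* payload still outside the "heap", or NULL */
        char *on_heap;          /* where the payload ends up after the move  */
    } msg_t;

    typedef struct {
        char *start, *top, *end;
    } arena_t;

    /* ~ erts_factory_proc_prealloc_init(): reserve room for one message. */
    static void arena_open(arena_t *a, size_t bytes)
    {
        a->start = a->top = malloc(bytes);
        a->end = a->start + bytes;
    }

    /* ~ erts_move_msg_attached_data_to_heap(): copy the payload in. */
    static char *arena_move(arena_t *a, const char *src)
    {
        size_t n = strlen(src) + 1;
        char *dst = a->top;
        memcpy(dst, src, n);
        a->top += n;
        return dst;
    }

    /* ~ erts_factory_close(): in ERTS this hands back unused heap words;
     * the toy only checks that the reservation was not overrun. */
    static void arena_close(arena_t *a)
    {
        if (a->top > a->end)
            abort();
    }

    int main(void)
    {
        msg_t m2 = { NULL, "second payload", NULL };
        msg_t m1 = { &m2, NULL, NULL };          /* nothing attached */
        msg_t *m;

        for (m = &m1; m; m = m->next) {          /* same shape as the GC loops */
            if (m->attached) {
                arena_t a;
                arena_open(&a, strlen(m->attached) + 1); /* ~ erts_msg_attached_data_size() */
                m->on_heap = arena_move(&a, m->attached);
                arena_close(&a);
            }
        }
        printf("moved: %s\n", m2.on_heap);
        return 0;
    }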