author    Lukas Larsson <[email protected]>    2017-11-20 10:06:29 +0100
committer Lukas Larsson <[email protected]>    2017-11-20 10:06:29 +0100
commit    7de66cbf895db8650e2b3253d910869c67989b35 (patch)
tree      75dce536cf3f69215fcf7311af831c0efef57bc2 /erts/emulator
parent    d99803e7625e474dee40f0dfebf2b8092add0336 (diff)
parent    10912210641fe7d784c4ba6398c24504200ed339 (diff)
Merge branch 'lukas/stdlib/maps_iterators/OTP-14012'
* lukas/stdlib/maps_iterators/OTP-14012:
erts: Limit size of first iterator for hashmaps
Update primary bootstrap
Update preloaded modules
erts: Remove erts_internal:maps_to_list/2
stdlib: Make io_lib and io_lib_pretty use maps iterator
erts: Implement batching maps:iterator
erts: Implement maps path iterator
erts: Implement map iterator using a stack
stdlib: Introduce maps iterator API
Conflicts:
bootstrap/lib/stdlib/ebin/io_lib.beam
bootstrap/lib/stdlib/ebin/io_lib_pretty.beam
erts/emulator/beam/bif.tab
erts/preloaded/ebin/erlang.beam
erts/preloaded/ebin/erts_internal.beam
erts/preloaded/ebin/zlib.beam
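
This merge adds maps:iterator/1 and maps:next/1 to stdlib, backed by the new
erts_internal:map_next/3 BIF. A minimal sketch of how a caller folds over a map
with the new API, mirroring the t_fold/3 helper in map_SUITE.erl below (the
module and helper names here are illustrative, not part of the patch):

    -module(maps_iter_example).
    -export([sum_values/1]).

    %% Fold over a map one association at a time using the new iterator API.
    sum_values(Map) when is_map(Map) ->
        iter_fold(fun(_K, V, Acc) -> Acc + V end, 0, maps:iterator(Map)).

    %% Hypothetical fold helper; maps:next/1 returns either {K, V, NextIter}
    %% or the atom none when all associations have been visited.
    iter_fold(Fun, Acc, Iter) ->
        case maps:next(Iter) of
            {K, V, NextIter} ->
                iter_fold(Fun, Fun(K, V, Acc), NextIter);
            none ->
                Acc
        end.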
Diffstat (limited to 'erts/emulator')
 erts/emulator/beam/atom.names    |   1
 erts/emulator/beam/bif.tab       |   5
 erts/emulator/beam/erl_map.c     | 500
 erts/emulator/beam/erl_map.h     |   1
 erts/emulator/test/map_SUITE.erl |  98
 5 files changed, 437 insertions, 168 deletions
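
Most of the change is the erts_internal:map_next/3 BIF in erl_map.c. As its
comments in the diff below explain, the iteration Path is a bit pattern that
records which slot to descend into at each hashmap (HAMT) node, four bits per
level, so a path of 0x110 means slot 0 in the head node, then slot 1, then
slot 1 in the node beneath. A small illustrative sketch of that decoding (not
part of the patch):

    -module(map_path_example).
    -export([decode_path/2]).

    %% Mirrors PATH_ELEM_SIZE / PATH_ELEM_MASK in erl_map.c: each tree level
    %% consumes the four least significant bits of the path.
    -define(PATH_ELEM_SIZE, 4).
    -define(PATH_ELEM_MASK, 16#f).

    decode_path(_Path, 0) ->
        [];
    decode_path(Path, Depth) when Depth > 0 ->
        [Path band ?PATH_ELEM_MASK |
         decode_path(Path bsr ?PATH_ELEM_SIZE, Depth - 1)].

    %% map_path_example:decode_path(16#110, 3) -> [0,1,1]
    %% i.e. slot 0 in the head node, then slot 1, then slot 1 below it.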
diff --git a/erts/emulator/beam/atom.names b/erts/emulator/beam/atom.names index 75a70c3716..cef633bd93 100644 --- a/erts/emulator/beam/atom.names +++ b/erts/emulator/beam/atom.names @@ -348,6 +348,7 @@ atom instruction_counts atom invalid atom is_constant atom is_seq_trace +atom iterator atom io atom keypos atom kill diff --git a/erts/emulator/beam/bif.tab b/erts/emulator/beam/bif.tab index f739f414de..1bd4acbf95 100644 --- a/erts/emulator/beam/bif.tab +++ b/erts/emulator/beam/bif.tab @@ -627,7 +627,6 @@ bif re:inspect/2 ubif erlang:is_map/1 gcbif erlang:map_size/1 -bif maps:to_list/1 bif maps:find/2 bif maps:get/2 bif maps:from_list/1 @@ -682,12 +681,10 @@ bif math:floor/1 bif math:ceil/1 bif math:fmod/2 bif os:set_signal/2 -bif erts_internal:maps_to_list/2 # # New in 20.1 # - bif erlang:iolist_to_iovec/1 # @@ -696,4 +693,4 @@ bif erlang:iolist_to_iovec/1 bif erts_internal:new_connection/1 bif erts_internal:abort_connection/2 - +bif erts_internal:map_next/3 diff --git a/erts/emulator/beam/erl_map.c b/erts/emulator/beam/erl_map.c index f0c54e05f7..8047a9567f 100644 --- a/erts/emulator/beam/erl_map.c +++ b/erts/emulator/beam/erl_map.c @@ -91,7 +91,6 @@ static BIF_RETTYPE hashmap_merge(Process *p, Eterm nodeA, Eterm nodeB, int swap_ static Export hashmap_merge_trap_export; static BIF_RETTYPE maps_merge_trap_1(BIF_ALIST_1); static Uint hashmap_subtree_size(Eterm node); -static Eterm hashmap_to_list(Process *p, Eterm map, Sint n); static Eterm hashmap_keys(Process *p, Eterm map); static Eterm hashmap_values(Process *p, Eterm map); static Eterm hashmap_delete(Process *p, Uint32 hx, Eterm key, Eterm node, Eterm *value); @@ -139,80 +138,6 @@ BIF_RETTYPE map_size_1(BIF_ALIST_1) { BIF_ERROR(BIF_P, BADMAP); } -/* maps:to_list/1 */ - -BIF_RETTYPE maps_to_list_1(BIF_ALIST_1) { - if (is_flatmap(BIF_ARG_1)) { - Uint n; - Eterm* hp; - Eterm *ks,*vs, res, tup; - flatmap_t *mp = (flatmap_t*)flatmap_val(BIF_ARG_1); - - ks = flatmap_get_keys(mp); - vs = flatmap_get_values(mp); - n = flatmap_get_size(mp); - hp = HAlloc(BIF_P, (2 + 3) * n); - res = NIL; - - while(n--) { - tup = TUPLE2(hp, ks[n], vs[n]); hp += 3; - res = CONS(hp, tup, res); hp += 2; - } - - BIF_RET(res); - } else if (is_hashmap(BIF_ARG_1)) { - return hashmap_to_list(BIF_P, BIF_ARG_1, -1); - } - - BIF_P->fvalue = BIF_ARG_1; - BIF_ERROR(BIF_P, BADMAP); -} - -/* erts_internal:maps_to_list/2 - * - * This function should be removed once iterators are in place. - * Never document it. - * Never encourage its usage. - * - * A negative value in ARG 2 means the entire map. - */ - -BIF_RETTYPE erts_internal_maps_to_list_2(BIF_ALIST_2) { - Sint m; - if (term_to_Sint(BIF_ARG_2, &m)) { - if (is_flatmap(BIF_ARG_1)) { - Uint n; - Eterm* hp; - Eterm *ks,*vs, res, tup; - flatmap_t *mp = (flatmap_t*)flatmap_val(BIF_ARG_1); - - ks = flatmap_get_keys(mp); - vs = flatmap_get_values(mp); - n = flatmap_get_size(mp); - - if (m >= 0) { - n = m < n ? 
m : n; - } - - hp = HAlloc(BIF_P, (2 + 3) * n); - res = NIL; - - while(n--) { - tup = TUPLE2(hp, ks[n], vs[n]); hp += 3; - res = CONS(hp, tup, res); hp += 2; - } - - BIF_RET(res); - } else if (is_hashmap(BIF_ARG_1)) { - return hashmap_to_list(BIF_P, BIF_ARG_1, m); - } - BIF_P->fvalue = BIF_ARG_1; - BIF_ERROR(BIF_P, BADMAP); - } - BIF_ERROR(BIF_P, BADARG); -} - - /* maps:find/2 * return value if key *matches* a key in the map */ @@ -1962,45 +1887,31 @@ BIF_RETTYPE maps_values_1(BIF_ALIST_1) { BIF_ERROR(BIF_P, BADMAP); } -static Eterm hashmap_to_list(Process *p, Eterm node, Sint m) { - DECLARE_WSTACK(stack); - Eterm *hp, *kv; - Eterm tup, res = NIL; - Uint n = hashmap_size(node); - - if (m >= 0) { - n = m < n ? m : n; - } - - hp = HAlloc(p, n * (2 + 3)); - hashmap_iterator_init(&stack, node, 0); - while (n--) { - kv = hashmap_iterator_next(&stack); - ASSERT(kv != NULL); - tup = TUPLE2(hp, CAR(kv), CDR(kv)); - hp += 3; - res = CONS(hp, tup, res); - hp += 2; - } - DESTROY_WSTACK(stack); - return res; -} - -void hashmap_iterator_init(ErtsWStack* s, Eterm node, int reverse) { - Eterm hdr = *hashmap_val(node); +static ERTS_INLINE +Uint hashmap_node_size(Eterm hdr, Eterm **nodep) +{ Uint sz; switch(hdr & _HEADER_MAP_SUBTAG_MASK) { case HAMT_SUBTAG_HEAD_ARRAY: sz = 16; + if (nodep) ++*nodep; break; case HAMT_SUBTAG_HEAD_BITMAP: + if (nodep) ++*nodep; case HAMT_SUBTAG_NODE_BITMAP: sz = hashmap_bitcount(MAP_HEADER_VAL(hdr)); + ASSERT(sz < 17); break; default: erts_exit(ERTS_ABORT_EXIT, "bad header"); } + return sz; +} + +void hashmap_iterator_init(ErtsWStack* s, Eterm node, int reverse) { + Eterm hdr = *hashmap_val(node); + Uint sz = hashmap_node_size(hdr, NULL); WSTACK_PUSH3((*s), (UWord)THE_NON_VALUE, /* end marker */ (UWord)(!reverse ? 0 : sz+1), @@ -2024,20 +1935,7 @@ Eterm* hashmap_iterator_next(ErtsWStack* s) { ptr = boxed_val(node); hdr = *ptr; ASSERT(is_header(hdr)); - switch(hdr & _HEADER_MAP_SUBTAG_MASK) { - case HAMT_SUBTAG_HEAD_ARRAY: - ptr++; - sz = 16; - break; - case HAMT_SUBTAG_HEAD_BITMAP: - ptr++; - case HAMT_SUBTAG_NODE_BITMAP: - sz = hashmap_bitcount(MAP_HEADER_VAL(hdr)); - ASSERT(sz < 17); - break; - default: - erts_exit(ERTS_ABORT_EXIT, "bad header"); - } + sz = hashmap_node_size(hdr, &ptr); idx++; @@ -2074,20 +1972,7 @@ Eterm* hashmap_iterator_prev(ErtsWStack* s) { ptr = boxed_val(node); hdr = *ptr; ASSERT(is_header(hdr)); - switch(hdr & _HEADER_MAP_SUBTAG_MASK) { - case HAMT_SUBTAG_HEAD_ARRAY: - ptr++; - sz = 16; - break; - case HAMT_SUBTAG_HEAD_BITMAP: - ptr++; - case HAMT_SUBTAG_NODE_BITMAP: - sz = hashmap_bitcount(MAP_HEADER_VAL(hdr)); - ASSERT(sz < 17); - break; - default: - erts_exit(ERTS_ERROR_EXIT, "bad header"); - } + sz = hashmap_node_size(hdr, &ptr); if (idx > sz) idx = sz; @@ -3061,6 +2946,363 @@ static Eterm hashmap_bld_tuple_uint(Uint **hpp, Uint *szp, Uint n, Uint nums[]) } +/** + * In hashmap the Path is a bit pattern that describes + * which slot we should traverse in each hashmap node. + * Since each hashmap node can only be up to 16 elements + * large we use 4 bits per level in the path. + * + * So a Path with value 0x110 will first get the 0:th + * slot in the head node, and then the 1:st slot in the + * resulting node and then finally the 1:st slot in the + * node beneath. If that slot is not a leaf, then the path + * continues down the 0:th slot until it finds a leaf. + * + * Once the leaf has been found, the return value is created + * by traversing the tree using the the stack that was built + * when searching for the first leaf to return. 
+ * + * The index can become a bignum, which complicates the code + * a bit. However it should be very rare that this happens + * even on a 32bit system as you would need a tree of depth + * 7 or more. + * + * If the number of elements remaining in the map is greater + * than how many we want to return, we build a new Path, using + * the stack, that points to the next leaf. + * + * The third argument to this function controls how the data + * is returned. + * + * iterator: The key-value associations are to be used by + * maps:iterator. The return has this format: + * {K1,V1,{K2,V2,none | [Path | Map]}} + * this makes the maps:next function very simple + * and performant. + * + * list(): The key-value associations are to be used by + * maps:to_list. The return has this format: + * [Path, Map | [{K1,V1},{K2,V2} | BIF_ARG_3]] + * or if no more associations remain + * [{K1,V1},{K2,V2} | BIF_ARG_3] + */ + +#define PATH_ELEM_SIZE 4 +#define PATH_ELEM_MASK 0xf +#define PATH_ELEM(PATH) ((PATH) & PATH_ELEM_MASK) +#define PATH_ELEMS_PER_DIGIT (sizeof(ErtsDigit) * 8 / PATH_ELEM_SIZE) + +BIF_RETTYPE erts_internal_map_next_3(BIF_ALIST_3) { + + Eterm path, map; + enum { iterator, list } type; + + path = BIF_ARG_1; + map = BIF_ARG_2; + + if (!is_map(map)) + BIF_ERROR(BIF_P, BADARG); + + if (BIF_ARG_3 == am_iterator) { + type = iterator; + } else if (is_nil(BIF_ARG_3) || is_list(BIF_ARG_3)) { + type = list; + } else { + BIF_ERROR(BIF_P, BADARG); + } + + if (is_flatmap(map)) { + Uint n; + Eterm *ks,*vs, res, *hp; + flatmap_t *mp = (flatmap_t*)flatmap_val(map); + + ks = flatmap_get_keys(mp); + vs = flatmap_get_values(mp); + n = flatmap_get_size(mp); + + if (!is_small(BIF_ARG_1) || n < unsigned_val(BIF_ARG_1)) + BIF_ERROR(BIF_P, BADARG); + + if (type == iterator) { + hp = HAlloc(BIF_P, 4 * n); + res = am_none; + + while(n--) { + res = TUPLE3(hp, ks[n], vs[n], res); hp += 4; + } + } else { + hp = HAlloc(BIF_P, (2 + 3) * n); + res = BIF_ARG_3; + + while(n--) { + Eterm tup = TUPLE2(hp, ks[n], vs[n]); hp += 3; + res = CONS(hp, tup, res); hp += 2; + } + } + + BIF_RET(res); + } else { + Uint curr_path; + Uint path_length = 0; + Uint *path_rest = NULL; + int i, elems, orig_elems; + Eterm node = map, res, *path_ptr = NULL, *hp; + + /* A stack WSTACK is used when traversing the hashmap. + * It contains: node, idx, sz, ptr + * + * `node` is not really needed, but it is very nice to + * have when debugging. + * + * `idx` always points to the next un-explored entry in + * a node. If there are no more un-explored entries, + * `idx` is equal to `sz`. + * + * `sz` is the number of elements in the node. + * + * `ptr` is a pointer to where the elements of the node begins. + */ + DECLARE_WSTACK(stack); + + ASSERT(is_hashmap(node)); + +/* How many elements we return in one call depends on the number of reductions + * that the process has left to run. In debug we return fewer elements to test + * the Path implementation better. + * + * Also, when the path is 0 (i.e. for the first call) we limit the number of + * elements to MAP_SMALL_MAP_LIMIT in order to not use a huge amount of heap + * when only the first X associations in the hashmap was needed. 
+ */ +#if defined(DEBUG) +#define FCALLS_ELEMS(BIF_P) ((BIF_P->fcalls / 4) & 0xF) +#else +#define FCALLS_ELEMS(BIF_P) (BIF_P->fcalls / 4) +#endif + + if (MAX(FCALLS_ELEMS(BIF_P), 1) < hashmap_size(map)) + elems = MAX(FCALLS_ELEMS(BIF_P), 1); + else + elems = hashmap_size(map); + +#undef FCALLS_ELEMS + + if (is_small(path)) { + curr_path = unsigned_val(path); + + if (curr_path == 0 && elems > MAP_SMALL_MAP_LIMIT) { + elems = MAP_SMALL_MAP_LIMIT; + } + } else if (is_big(path)) { + Eterm *big = big_val(path); + if (bignum_header_is_neg(*big)) + BIF_ERROR(BIF_P, BADARG); + path_length = BIG_ARITY(big) - 1; + curr_path = BIG_DIGIT(big, 0); + path_rest = BIG_V(big) + 1; + } else { + BIF_ERROR(BIF_P, BADARG); + } + + if (type == iterator) { + /* iterator uses the format {K, V, {K, V, {K, V, [Path | Map]}}}, + * so each element is 4 words large */ + hp = HAlloc(BIF_P, 4 * elems); + res = am_none; + } else { + /* list used the format [Path, Map, {K,V}, {K,V} | BIF_ARG_3], + * so each element is 2+3 words large */ + hp = HAlloc(BIF_P, (2 + 3) * elems); + res = BIF_ARG_3; + } + + orig_elems = elems; + + /* First we look for the leaf to start at using the + path given. While doing so, we push each map node + and the index onto the stack to use later. */ + for (i = 1; ; i++) { + Eterm *ptr = hashmap_val(node), + hdr = *ptr++; + Uint sz; + + sz = hashmap_node_size(hdr, &ptr); + + if (PATH_ELEM(curr_path) >= sz) + goto badarg; + + WSTACK_PUSH4(stack, node, PATH_ELEM(curr_path)+1, sz, (UWord)ptr); + + /* We have found a leaf, return it and the next X elements */ + if (is_list(ptr[PATH_ELEM(curr_path)])) { + Eterm *lst = list_val(ptr[PATH_ELEM(curr_path)]); + if (type == iterator) { + res = TUPLE3(hp, CAR(lst), CDR(lst), res); hp += 4; + /* Note where we should patch the Iterator is needed */ + path_ptr = hp-1; + } else { + Eterm tup = TUPLE2(hp, CAR(lst), CDR(lst)); hp += 3; + res = CONS(hp, tup, res); hp += 2; + } + elems--; + break; + } + + node = ptr[PATH_ELEM(curr_path)]; + + curr_path >>= PATH_ELEM_SIZE; + + if (i == PATH_ELEMS_PER_DIGIT) { + /* Switch to next bignum word if available, + otherwise just follow 0 path */ + i = 0; + if (path_length) { + curr_path = *path_rest; + path_length--; + path_rest++; + } else { + curr_path = 0; + } + } + } + + /* We traverse the hashmap and return at most `elems` elements */ + while(1) { + Eterm *ptr = (Eterm*)WSTACK_POP(stack); + Uint sz = (Uint)WSTACK_POP(stack); + Uint idx = (Uint)WSTACK_POP(stack); + Eterm node = (Eterm)WSTACK_POP(stack); + + while (idx < sz && elems != 0 && is_list(ptr[idx])) { + Eterm *lst = list_val(ptr[idx]); + if (type == iterator) { + res = TUPLE3(hp, CAR(lst), CDR(lst), res); hp += 4; + } else { + Eterm tup = TUPLE2(hp, CAR(lst), CDR(lst)); hp += 3; + res = CONS(hp, tup, res); hp += 2; + } + elems--; + idx++; + } + + if (elems == 0) { + if (idx < sz) { + /* There are more elements in this node to explore */ + WSTACK_PUSH4(stack, node, idx+1, sz, (UWord)ptr); + } else { + /* pop stack to find the next value */ + while (!WSTACK_ISEMPTY(stack)) { + Eterm *ptr = (Eterm*)WSTACK_POP(stack); + Uint sz = (Uint)WSTACK_POP(stack); + Uint idx = (Uint)WSTACK_POP(stack); + Eterm node = (Eterm)WSTACK_POP(stack); + if (idx < sz) { + WSTACK_PUSH4(stack, node, idx+1, sz, (UWord)ptr); + break; + } + } + } + break; + } else { + if (idx < sz) { + Eterm hdr; + /* Push next idx in current node */ + WSTACK_PUSH4(stack, node, idx+1, sz, (UWord)ptr); + + /* Push first idx in child node */ + node = ptr[idx]; + ptr = hashmap_val(ptr[idx]); + hdr = *ptr++; + sz 
= hashmap_node_size(hdr, &ptr); + WSTACK_PUSH4(stack, node, 0, sz, (UWord)ptr); + } + } + + /* There are no more element in the hashmap */ + if (WSTACK_ISEMPTY(stack)) { + break; + } + + } + + if (!WSTACK_ISEMPTY(stack)) { + Uint depth = WSTACK_COUNT(stack) / 4 + 1; + /* +1 because we already have the first element in curr_path */ + Eterm *path_digits = NULL; + Uint curr_path = 0; + + /* If the path cannot fit in a small, we allocate a bignum */ + if (depth >= PATH_ELEMS_PER_DIGIT) { + /* We need multiple ErtsDigit's to represent the path */ + int big_size = BIG_NEED_FOR_BITS(depth * PATH_ELEM_SIZE); + hp = HAlloc(BIF_P, big_size); + hp[0] = make_pos_bignum_header(big_size - BIG_NEED_SIZE(0)); + path_digits = hp + big_size - 1; + } + + + /* Pop the stack to create the complete path to the next leaf */ + while(!WSTACK_ISEMPTY(stack)) { + Uint idx; + + (void)WSTACK_POP(stack); + (void)WSTACK_POP(stack); + idx = (Uint)WSTACK_POP(stack)-1; + /* idx - 1 because idx in the stack is pointing to + the next element to fetch. */ + (void)WSTACK_POP(stack); + + depth--; + if (depth % PATH_ELEMS_PER_DIGIT == 0) { + /* Switch to next bignum element */ + path_digits[0] = curr_path; + path_digits--; + curr_path = 0; + } + + curr_path <<= PATH_ELEM_SIZE; + curr_path |= idx; + } + + if (path_digits) { + path_digits[0] = curr_path; + path = make_big(hp); + } else { + /* The Uint could be too large for a small */ + path = erts_make_integer(curr_path, BIF_P); + } + + if (type == iterator) { + hp = HAlloc(BIF_P, 2); + *path_ptr = CONS(hp, path, map); hp += 2; + } else { + hp = HAlloc(BIF_P, 4); + res = CONS(hp, map, res); hp += 2; + res = CONS(hp, path, res); hp += 2; + } + } else { + if (type == iterator) { + HRelease(BIF_P, hp + 4 * elems, hp); + } else { + HRelease(BIF_P, hp + (2+3) * elems, hp); + } + } + BIF_P->fcalls -= 4 * (orig_elems - elems); + DESTROY_WSTACK(stack); + BIF_RET(res); + + badarg: + if (type == iterator) { + HRelease(BIF_P, hp + 4 * elems, hp); + } else { + HRelease(BIF_P, hp + (2+3) * elems, hp); + } + BIF_P->fcalls -= 4 * (orig_elems - elems); + DESTROY_WSTACK(stack); + BIF_ERROR(BIF_P, BADARG); + } +} + /* implementation of builtin emulations */ #if !ERTS_AT_LEAST_GCC_VSN__(3, 4, 0) diff --git a/erts/emulator/beam/erl_map.h b/erts/emulator/beam/erl_map.h index c3ccf80b85..718d400e22 100644 --- a/erts/emulator/beam/erl_map.h +++ b/erts/emulator/beam/erl_map.h @@ -64,7 +64,6 @@ typedef struct flatmap_s { #define hashmap_shift_hash(Heap,Hx,Lvl,Key) \ (((++(Lvl)) & 7) ? (Hx) >> 4 : hashmap_make_hash(CONS(Heap, make_small((Lvl)>>3), Key))) - /* erl_term.h stuff */ #define flatmap_get_values(x) (((Eterm *)(x)) + sizeof(flatmap_t)/sizeof(Eterm)) #define flatmap_get_keys(x) (((Eterm *)tuple_val(((flatmap_t *)(x))->keys)) + 1) diff --git a/erts/emulator/test/map_SUITE.erl b/erts/emulator/test/map_SUITE.erl index 02f3c89318..c9e971af8a 100644 --- a/erts/emulator/test/map_SUITE.erl +++ b/erts/emulator/test/map_SUITE.erl @@ -52,7 +52,7 @@ t_bif_map_values/1, t_bif_map_to_list/1, t_bif_map_from_list/1, - t_bif_erts_internal_maps_to_list/1, + t_bif_map_next/1, %% erlang t_erlang_hash/1, @@ -119,7 +119,7 @@ all() -> [t_build_and_match_literals, t_build_and_match_literals_large, t_bif_map_update, t_bif_map_values, t_bif_map_to_list, t_bif_map_from_list, - t_bif_erts_internal_maps_to_list, + t_bif_map_next, %% erlang t_erlang_hash, t_map_encode_decode, @@ -2364,41 +2364,71 @@ t_bif_map_from_list(Config) when is_list(Config) -> {'EXIT', {badarg,_}} = (catch maps:from_list(id(42))), ok. 
-t_bif_erts_internal_maps_to_list(Config) when is_list(Config) -> - %% small maps - [] = erts_internal:maps_to_list(#{},-1), - [] = erts_internal:maps_to_list(#{},-2), - [] = erts_internal:maps_to_list(#{},10), - [{a,1},{b,2}] = lists:sort(erts_internal:maps_to_list(#{a=>1,b=>2}, 2)), - [{a,1},{b,2}] = lists:sort(erts_internal:maps_to_list(#{a=>1,b=>2}, -1)), - [{_,_}] = erts_internal:maps_to_list(#{a=>1,b=>2}, 1), - [{a,1},{b,2},{c,3}] = lists:sort(erts_internal:maps_to_list(#{c=>3,a=>1,b=>2},-2)), - [{a,1},{b,2},{c,3}] = lists:sort(erts_internal:maps_to_list(#{c=>3,a=>1,b=>2},3)), - [{a,1},{b,2},{c,3}] = lists:sort(erts_internal:maps_to_list(#{c=>3,a=>1,b=>2},5)), - [{_,_},{_,_}] = erts_internal:maps_to_list(#{c=>3,a=>1,b=>2},2), - [{_,_}] = erts_internal:maps_to_list(#{c=>3,a=>1,b=>2},1), - [] = erts_internal:maps_to_list(#{c=>3,a=>1,b=>2},0), - - %% big maps - M = maps:from_list([{I,ok}||I <- lists:seq(1,500)]), - [] = erts_internal:maps_to_list(M,0), - [{_,_}] = erts_internal:maps_to_list(M,1), - [{_,_},{_,_}] = erts_internal:maps_to_list(M,2), - Ls1 = erts_internal:maps_to_list(M,10), - 10 = length(Ls1), - Ls2 = erts_internal:maps_to_list(M,20), - 20 = length(Ls2), - Ls3 = erts_internal:maps_to_list(M,120), - 120 = length(Ls3), - Ls4 = erts_internal:maps_to_list(M,-1), - 500 = length(Ls4), +t_bif_map_next(Config) when is_list(Config) -> - %% error cases - {'EXIT', {{badmap,[{a,b},b]},_}} = (catch erts_internal:maps_to_list(id([{a,b},b]),id(1))), - {'EXIT', {badarg,_}} = (catch erts_internal:maps_to_list(id(#{}),id(a))), - {'EXIT', {badarg,_}} = (catch erts_internal:maps_to_list(id(#{1=>2}),id(<<>>))), + erts_debug:set_internal_state(available_internal_state, true), + + try + + none = maps:next(maps:iterator(id(#{}))), + + verify_iterator(#{}), + verify_iterator(#{a => 1, b => 2, c => 3}), + + %% Use fatmap in order to test iterating in very deep maps + FM = fatmap(43), + verify_iterator(FM), + + {'EXIT', {{badmap,[{a,b},b]},_}} = (catch maps:iterator(id([{a,b},b]))), + {'EXIT', {badarg,_}} = (catch maps:next(id(a))), + {'EXIT', {badarg,_}} = (catch maps:next(id([a|FM]))), + {'EXIT', {badarg,_}} = (catch maps:next(id([1|#{}]))), + {'EXIT', {badarg,_}} = (catch maps:next(id([-1|#{}]))), + {'EXIT', {badarg,_}} = (catch maps:next(id([-1|FM]))), + {'EXIT', {badarg,_}} = (catch maps:next(id([16#FFFFFFFFFFFFFFFF|FM]))), + {'EXIT', {badarg,_}} = (catch maps:next(id([-16#FFFFFFFFFFFFFFFF|FM]))), + + %% This us a whitebox test that the error code works correctly. + %% It uses a path for a tree of depth 4 and tries to do next on + %% each of those paths. + (fun F(0) -> ok; + F(N) -> + try maps:next([N|FM]) of + none -> + F(N-1); + {_K,_V,_I} -> + F(N-1) + catch error:badarg -> + F(N-1) + end + end)(16#FFFF), + + ok + after + erts_debug:set_internal_state(available_internal_state, false) + end. + +verify_iterator(Map) -> + KVs = t_fold(fun(K, V, A) -> [{K, V} | A] end, [], Map), + + %% Verify that KVs created by iterating Map is of + %% correct size and contains all elements + true = length(KVs) == maps:size(Map), + [maps:get(K, Map) || {K, _} <- KVs], ok. + +t_fold(Fun, Init, Map) -> + t_fold_1(Fun, Init, maps:iterator(Map)). + +t_fold_1(Fun, Acc, Iter) -> + case maps:next(Iter) of + {K, V, NextIter} -> + t_fold_1(Fun, Fun(K,V,Acc), NextIter); + none -> + Acc + end. + t_bif_build_and_check(Config) when is_list(Config) -> ok = check_build_and_remove(750,[fun(K) -> [K,K] end, fun(K) -> [float(K),K] end, |
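
The stdlib side of the iterator API is outside this erts-only diffstat. Going
by the return formats documented in erl_map.c above (none, {K, V, NextIter}
tuples, or a [Path | Map] continuation passed to erts_internal:map_next/3),
the maps:iterator/1 and maps:next/1 wrappers plausibly look like the following
sketch; it illustrates the contract rather than the exact stdlib source:

    -module(maps_iter_sketch).
    -export([iterator/1, next/1]).

    %% Path 0 means "start at the first leaf of the map".
    iterator(Map) when is_map(Map) ->
        [0 | Map].

    %% A continuation asks the BIF for the next batch of associations;
    %% a tuple is a batch already materialised by a previous BIF call.
    next([Path | Map]) when is_integer(Path), is_map(Map) ->
        erts_internal:map_next(Path, Map, iterator);
    next({Key, Value, NextIter}) ->
        {Key, Value, NextIter};
    next(none) ->
        none.

Returning ready-made {K, V, NextIter} tuples from the BIF is what keeps next/1
"very simple and performant", as the erl_map.c comment puts it: a whole batch
of associations can be consumed without re-entering the emulator.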