author    | Lukas Larsson <[email protected]> | 2015-12-15 10:10:10 +0100
committer | Lukas Larsson <[email protected]> | 2015-12-15 10:21:40 +0100
commit    | 9083bc7e2172937296b7e8fbe09ed595c6893f08 (patch)
tree      | b3b4255af12bf6cc023ee7a47bcf5ae5ae115077 /erts/emulator/beam/hash.c
parent    | d7cd5ee8a34d289fbc91627a3f096a870bf70393 (diff)
parent    | 4b1b3bf6c62f8208b2eea506c9dac1504df6e916 (diff)
Merge branch 'lukas/erts/forker'
* lukas/erts/forker: (28 commits)
erts: Never abort in the forked child
erts: Mend ASSERT macro for erl_child_setup
erts: Allow enomem failures in port_SUITE
erts: iter_port sleep longer on freebsd
erts: Allow one dangling fd if there is a gethost port
erts: Only use forker StackAck on freebsd
erts: It is not possible to exit the forker driver
erts: Add forker StartAck for port start flowcontrol
erts: Fix large open_port arg segfault for win32
erts: Fix memory leak at async open port
kernel: Remove cmd server for unix os:cmd
erts: Add testcase for huge port environment
erts: Move os_pid to port hash to child setup
erts: Handle all EINTR and EAGAIN cases in child setup
erts: Make child_setup work with large environments
erts: Fix forker driver ifdefs for win32
erts: Fix uds socket handling for os x
erts: Fix dereferencing of unaligned integer for sparc
erts: Flatten too long io vectors in uds write
erts: Add fd count test for spawn_driver
...
Conflicts:
erts/emulator/beam/erl_node_tables.c
erts/preloaded/src/erts_internal.erl
Diffstat (limited to 'erts/emulator/beam/hash.c')
-rw-r--r-- | erts/emulator/beam/hash.c | 30 |
1 file changed, 15 insertions, 15 deletions
diff --git a/erts/emulator/beam/hash.c b/erts/emulator/beam/hash.c
index 75d091d11c..5a0b93f693 100644
--- a/erts/emulator/beam/hash.c
+++ b/erts/emulator/beam/hash.c
@@ -101,11 +101,11 @@ void hash_info(int to, void *arg, Hash* h)

     hash_get_info(&hi, h);

-    erts_print(to, arg, "=hash_table:%s\n", hi.name);
-    erts_print(to, arg, "size: %d\n", hi.size);
-    erts_print(to, arg, "used: %d\n", hi.used);
-    erts_print(to, arg, "objs: %d\n", hi.objs);
-    erts_print(to, arg, "depth: %d\n", hi.depth);
+    h->fun.meta_print(to, arg, "=hash_table:%s\n", hi.name);
+    h->fun.meta_print(to, arg, "size: %d\n", hi.size);
+    h->fun.meta_print(to, arg, "used: %d\n", hi.used);
+    h->fun.meta_print(to, arg, "objs: %d\n", hi.objs);
+    h->fun.meta_print(to, arg, "depth: %d\n", hi.depth);
 }

@@ -135,22 +135,22 @@ static ERTS_INLINE void set_thresholds(Hash* h)
 ** init a pre allocated or static hash structure
 ** and allocate buckets.
 */
-Hash* hash_init(ErtsAlcType_t type, Hash* h, char* name, int size, HashFunctions fun)
+Hash* hash_init(int type, Hash* h, char* name, int size, HashFunctions fun)
 {
     int sz;
     int ix = 0;

-    h->type = type;
+    h->meta_alloc_type = type;

     while (h_size_table[ix] != -1 && h_size_table[ix] < size)
         ix++;
     if (h_size_table[ix] == -1)
-        erl_exit(1, "panic: too large hash table size (%d)\n", size);
+        return NULL;

     size = h_size_table[ix];
     sz = size*sizeof(HashBucket*);

-    h->bucket = (HashBucket**) erts_alloc(h->type, sz);
+    h->bucket = (HashBucket**) fun.meta_alloc(h->meta_alloc_type, sz);

     sys_memzero(h->bucket, sz);
     h->is_allocated = 0;
@@ -167,11 +167,11 @@ Hash* hash_init(ErtsAlcType_t type, Hash* h, char* name, int size, HashFunctions
 /*
 ** Create a new hash table
 */
-Hash* hash_new(ErtsAlcType_t type, char* name, int size, HashFunctions fun)
+Hash* hash_new(int type, char* name, int size, HashFunctions fun)
 {
     Hash* h;

-    h = erts_alloc(type, sizeof(Hash));
+    h = fun.meta_alloc(type, sizeof(Hash));

     h = hash_init(type, h, name, size, fun);
     h->is_allocated = 1;
@@ -195,9 +195,9 @@ void hash_delete(Hash* h)
             b = b_next;
         }
     }
-    erts_free(h->type, h->bucket);
+    h->fun.meta_free(h->meta_alloc_type, h->bucket);
     if (h->is_allocated)
-        erts_free(h->type, (void*) h);
+        h->fun.meta_free(h->meta_alloc_type, (void*) h);
 }

 /*
@@ -223,7 +223,7 @@ static void rehash(Hash* h, int grow)
     h->size = h_size_table[h->size_ix];
     sz = h->size*sizeof(HashBucket*);

-    new_bucket = (HashBucket **) erts_alloc(h->type, sz);
+    new_bucket = (HashBucket **) h->fun.meta_alloc(h->meta_alloc_type, sz);
     sys_memzero(new_bucket, sz);

     for (i = 0; i < old_size; i++) {
@@ -236,7 +236,7 @@ static void rehash(Hash* h, int grow)
             b = b_next;
         }
     }
-    erts_free(h->type, (void *) h->bucket);
+    h->fun.meta_free(h->meta_alloc_type, (void *) h->bucket);
     h->bucket = new_bucket;
     set_thresholds(h);
 }
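The change to hash.c replaces the table's direct calls to erts_alloc/erts_free/erts_print with the meta_alloc/meta_free/meta_print callbacks carried in the HashFunctions struct, and hash_init() now reports an oversized table by returning NULL instead of calling erl_exit(). Below is a minimal, self-contained sketch of that callback pattern; every name with a Demo/demo_ prefix is hypothetical and only illustrates the shape of the change, it is not the actual ERTS code from hash.h.

```c
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Meta-memory callbacks analogous to meta_alloc/meta_free/meta_print. */
typedef struct {
    void *(*meta_alloc)(int type, size_t size);
    void  (*meta_free)(int type, void *ptr);
    void  (*meta_print)(int to, void *arg, const char *fmt, ...);
} DemoHashFunctions;

typedef struct {
    int meta_alloc_type;    /* stored allocation tag, as in h->meta_alloc_type */
    DemoHashFunctions fun;  /* callbacks kept with the table, as in h->fun */
    void **bucket;
    int size;
} DemoHash;

/* Plain malloc/free/printf backends standing in for the ERTS allocator. */
static void *demo_alloc(int type, size_t size) { (void)type; return malloc(size); }
static void demo_free(int type, void *ptr) { (void)type; free(ptr); }
static void demo_print(int to, void *arg, const char *fmt, ...) {
    va_list ap; (void)to; (void)arg;
    va_start(ap, fmt);
    vprintf(fmt, ap);
    va_end(ap);
}

/* Mirrors the reworked hash_init(): buckets come from the caller-supplied
 * meta_alloc, and failure is reported by returning NULL rather than
 * aborting the emulator. */
static DemoHash *demo_hash_init(int type, DemoHash *h, int size, DemoHashFunctions fun) {
    h->meta_alloc_type = type;
    h->fun = fun;
    h->size = size;
    h->bucket = fun.meta_alloc(type, (size_t)size * sizeof(void *));
    if (!h->bucket)
        return NULL;
    memset(h->bucket, 0, (size_t)size * sizeof(void *));
    return h;
}

/* Mirrors hash_delete(): memory is released through the same callbacks. */
static void demo_hash_delete(DemoHash *h) {
    h->fun.meta_free(h->meta_alloc_type, h->bucket);
}

int main(void) {
    DemoHashFunctions fun = { demo_alloc, demo_free, demo_print };
    DemoHash h;
    if (!demo_hash_init(0, &h, 16, fun)) {
        fprintf(stderr, "hash init failed\n");
        return 1;
    }
    /* Same reporting path as the reworked hash_info(). */
    h.fun.meta_print(0, NULL, "=hash_table:%s\n", "demo");
    h.fun.meta_print(0, NULL, "size: %d\n", h.size);
    demo_hash_delete(&h);
    return 0;
}
```

The practical effect visible in the diff is that the generic hash table no longer hard-codes the ERTS allocator or printer: callers choose them through HashFunctions, presumably so code such as the erl_child_setup program introduced elsewhere in this merge can reuse hash.c with plain malloc-style callbacks. Callers of hash_init() must now also handle a NULL return where the old code would have aborted.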