Diffstat (limited to 'erts/emulator/beam/export.c')
-rw-r--r-- | erts/emulator/beam/export.c | 352
1 file changed, 232 insertions, 120 deletions
diff --git a/erts/emulator/beam/export.c b/erts/emulator/beam/export.c
index fb0ee99119..229641cb32 100644
--- a/erts/emulator/beam/export.c
+++ b/erts/emulator/beam/export.c
@@ -32,98 +32,163 @@
 #define EXPORT_HASH(m,f,a) ((m)*(f)+(a))
 
-static IndexTable export_table;	/* Not locked. */
-static Hash secondary_export_table; /* Locked. */
+#ifdef DEBUG
+#  define IF_DEBUG(x) x
+#else
+#  define IF_DEBUG(x)
+#endif
 
-#include "erl_smp.h"
+static IndexTable export_tables[ERTS_NUM_CODE_IX]; /* Active not locked */
 
-static erts_smp_rwmtx_t export_table_lock; /* Locks the secondary export table. */
+static erts_smp_atomic_t total_entries_bytes;
 
-#define export_read_lock()	erts_smp_rwmtx_rlock(&export_table_lock)
-#define export_read_unlock()	erts_smp_rwmtx_runlock(&export_table_lock)
-#define export_write_lock()	erts_smp_rwmtx_rwlock(&export_table_lock)
-#define export_write_unlock()	erts_smp_rwmtx_rwunlock(&export_table_lock)
+#include "erl_smp.h"
+
+/* This lock protects the staging export table from concurrent access
+ * AND it protects the staging table from becoming active.
+ */
+erts_smp_mtx_t export_staging_lock;
 
 extern BeamInstr* em_call_error_handler;
 extern BeamInstr* em_call_traced_function;
 
+struct export_entry
+{
+    IndexSlot slot; /* MUST BE LOCATED AT TOP OF STRUCT!!! */
+    Export* ep;
+};
+
+/* Helper struct that brings things together in one allocation
+*/
+struct export_blob
+{
+    Export exp;
+    struct export_entry entryv[ERTS_NUM_CODE_IX];
+    /* Note that entryv is not indexed by "code_ix".
+     */
+};
+
+/* Helper struct only used as template
+*/
+struct export_templ
+{
+    struct export_entry entry;
+    Export exp;
+};
+
+static struct export_blob* entry_to_blob(struct export_entry* ee)
+{
+    return (struct export_blob*)
+	((char*)ee->ep - offsetof(struct export_blob,exp));
+}
+
 void
 export_info(int to, void *to_arg)
 {
 #ifdef ERTS_SMP
     int lock = !ERTS_IS_CRASH_DUMPING;
     if (lock)
-	export_read_lock();
+	export_staging_lock();
 #endif
-    index_info(to, to_arg, &export_table);
-    hash_info(to, to_arg, &secondary_export_table);
+    index_info(to, to_arg, &export_tables[erts_active_code_ix()]);
+    hash_info(to, to_arg, &export_tables[erts_staging_code_ix()].htable);
 #ifdef ERTS_SMP
     if (lock)
-	export_read_unlock();
+	export_staging_unlock();
 #endif
 }
 
 static HashValue
-export_hash(Export* x)
+export_hash(struct export_entry* ee)
 {
+    Export* x = ee->ep;
     return EXPORT_HASH(x->code[0], x->code[1], x->code[2]);
 }
 
 static int
-export_cmp(Export* tmpl, Export* obj)
+export_cmp(struct export_entry* tmpl_e, struct export_entry* obj_e)
 {
+    Export* tmpl = tmpl_e->ep;
+    Export* obj = obj_e->ep;
     return !(tmpl->code[0] == obj->code[0] &&
 	     tmpl->code[1] == obj->code[1] &&
 	     tmpl->code[2] == obj->code[2]);
 }
 
-static Export*
-export_alloc(Export* tmpl)
+static struct export_entry*
+export_alloc(struct export_entry* tmpl_e)
 {
-    Export* obj = (Export*) erts_alloc(ERTS_ALC_T_EXPORT, sizeof(Export));
-
-    obj->fake_op_func_info_for_hipe[0] = 0;
-    obj->fake_op_func_info_for_hipe[1] = 0;
-    obj->code[0] = tmpl->code[0];
-    obj->code[1] = tmpl->code[1];
-    obj->code[2] = tmpl->code[2];
-    obj->slot.index = -1;
-    obj->address = obj->code+3;
-    obj->code[3] = (BeamInstr) em_call_error_handler;
-    obj->code[4] = 0;
-    obj->match_prog_set = NULL;
-    return obj;
+    struct export_blob* blob;
+    unsigned ix;
+
+    if (tmpl_e->slot.index == -1) { /* Template, allocate blob */
+	Export* tmpl = tmpl_e->ep;
+	Export* obj;
+
+	blob = (struct export_blob*) erts_alloc(ERTS_ALC_T_EXPORT, sizeof(*blob));
+	erts_smp_atomic_add_nob(&total_entries_bytes,
+				sizeof(*blob));
+	obj = &blob->exp;
+	obj->fake_op_func_info_for_hipe[0] = 0;
+	obj->fake_op_func_info_for_hipe[1] = 0;
+	obj->code[0] = tmpl->code[0];
+	obj->code[1] = tmpl->code[1];
+	obj->code[2] = tmpl->code[2];
+	obj->code[3] = (BeamInstr) em_call_error_handler;
+	obj->code[4] = 0;
+	obj->match_prog_set = NULL;
+
+	for (ix=0; ix<ERTS_NUM_CODE_IX; ix++) {
+	    obj->addressv[ix] = obj->code+3;
+
+	    blob->entryv[ix].slot.index = -1;
+	    blob->entryv[ix].ep = &blob->exp;
+	}
+	ix = 0;
+    }
+    else { /* Existing entry in another table, use free entry in blob */
+	blob = entry_to_blob(tmpl_e);
+	for (ix = 0; blob->entryv[ix].slot.index >= 0; ix++) {
+	    ASSERT(ix < ERTS_NUM_CODE_IX);
+	}
+    }
+    return &blob->entryv[ix];
 }
 
-
-static void
-export_free(Export* obj)
+static void
+export_free(struct export_entry* obj)
 {
-    erts_free(ERTS_ALC_T_EXPORT, (void*) obj);
+    struct export_blob* blob = entry_to_blob(obj);
+    int i;
+    obj->slot.index = -1;
+    for (i=0; i < ERTS_NUM_CODE_IX; i++) {
+	if (blob->entryv[i].slot.index >= 0) {
+	    return;
+	}
+    }
+    erts_free(ERTS_ALC_T_EXPORT, blob);
+    erts_smp_atomic_add_nob(&total_entries_bytes, -sizeof(*blob));
 }
 
-
 void
 init_export_table(void)
 {
     HashFunctions f;
-    erts_smp_rwmtx_opt_t rwmtx_opt = ERTS_SMP_RWMTX_OPT_DEFAULT_INITER;
-    rwmtx_opt.type = ERTS_SMP_RWMTX_TYPE_FREQUENT_READ;
-    rwmtx_opt.lived = ERTS_SMP_RWMTX_LONG_LIVED;
+    int i;
 
-    erts_smp_rwmtx_init_opt(&export_table_lock, &rwmtx_opt, "export_tab");
+    erts_smp_mtx_init(&export_staging_lock, "export_tab");
+    erts_smp_atomic_init_nob(&total_entries_bytes, 0);
 
     f.hash = (H_FUN) export_hash;
     f.cmp = (HCMP_FUN) export_cmp;
     f.alloc = (HALLOC_FUN) export_alloc;
     f.free = (HFREE_FUN) export_free;
 
-    erts_index_init(ERTS_ALC_T_EXPORT_TABLE, &export_table, "export_list",
-		    EXPORT_INITIAL_SIZE, EXPORT_LIMIT, f);
-    hash_init(ERTS_ALC_T_EXPORT_TABLE, &secondary_export_table,
-	      "secondary_export_table", 50, f);
+    for (i=0; i<ERTS_NUM_CODE_IX; i++) {
+	erts_index_init(ERTS_ALC_T_EXPORT_TABLE, &export_tables[i], "export_list",
+			EXPORT_INITIAL_SIZE, EXPORT_LIMIT, f);
+    }
 }
 
 /*
@@ -138,29 +203,42 @@ init_export_table(void)
  *    called through such an export entry.
  * 3) This function is suitable for the implementation of erlang:apply/3.
  */
+extern Export* /* inline-helper */
+erts_find_export_entry(Eterm m, Eterm f, unsigned int a,ErtsCodeIndex code_ix);
 
 Export*
-erts_find_export_entry(Eterm m, Eterm f, unsigned int a)
+erts_find_export_entry(Eterm m, Eterm f, unsigned int a, ErtsCodeIndex code_ix)
 {
     HashValue hval = EXPORT_HASH((BeamInstr) m, (BeamInstr) f, (BeamInstr) a);
     int ix;
     HashBucket* b;
 
-    ix = hval % export_table.htable.size;
-    b = export_table.htable.bucket[ix];
+    ix = hval % export_tables[code_ix].htable.size;
+    b = export_tables[code_ix].htable.bucket[ix];
 
     /*
      * Note: We have inlined the code from hash.c for speed.
      */
 
     while (b != (HashBucket*) 0) {
-	Export* ep = (Export *) b;
+	Export* ep = ((struct export_entry*) b)->ep;
 	if (ep->code[0] == m && ep->code[1] == f && ep->code[2] == a) {
-	    break;
+	    return ep;
 	}
 	b = b->next;
     }
-    return (Export*)b;
+    return NULL;
+}
+
+static struct export_entry* init_template(struct export_templ* templ,
+					  Eterm m, Eterm f, unsigned a)
+{
+    templ->entry.ep = &templ->exp;
+    templ->entry.slot.index = -1;
+    templ->exp.code[0] = m;
+    templ->exp.code[1] = f;
+    templ->exp.code[2] = a;
+    return &templ->entry;
 }
@@ -176,47 +254,41 @@ erts_find_export_entry(Eterm m, Eterm f, unsigned int a)
  */
 
 Export*
-erts_find_function(Eterm m, Eterm f, unsigned int a)
+erts_find_function(Eterm m, Eterm f, unsigned int a, ErtsCodeIndex code_ix)
 {
-    Export e;
-    Export* ep;
-
-    e.code[0] = m;
-    e.code[1] = f;
-    e.code[2] = a;
+    struct export_templ templ;
+    struct export_entry* ee;
 
-    ep = hash_get(&export_table.htable, (void*) &e);
-    if (ep != NULL && ep->address == ep->code+3 &&
-	ep->code[3] != (BeamInstr) em_call_traced_function) {
-	ep = NULL;
+    ee = hash_get(&export_tables[code_ix].htable, init_template(&templ, m, f, a));
+    if (ee == NULL || (ee->ep->addressv[code_ix] == ee->ep->code+3 &&
+		       ee->ep->code[3] != (BeamInstr) em_call_traced_function)) {
+	return NULL;
     }
-    return ep;
+    return ee->ep;
 }
 
 /*
  * Returns a pointer to an existing export entry for a MFA,
  * or creates a new one and returns the pointer.
  *
- * This function provides unlocked write access to the main export
- * table. It should only be used during start up or when
- * all other threads are blocked.
+ * This function acts on the staging export table. It should only be used
+ * to load new code.
  */
 
 Export*
 erts_export_put(Eterm mod, Eterm func, unsigned int arity)
 {
-    Export e;
-    int ix;
+    ErtsCodeIndex code_ix = erts_staging_code_ix();
+    struct export_templ templ;
+    struct export_entry* ee;
 
-    ERTS_SMP_LC_ASSERT(erts_initialized == 0
-		       || erts_smp_thr_progress_is_blocking());
     ASSERT(is_atom(mod));
    ASSERT(is_atom(func));
-    e.code[0] = mod;
-    e.code[1] = func;
-    e.code[2] = arity;
-    ix = index_put(&export_table, (void*) &e);
-    return (Export*) erts_index_lookup(&export_table, ix);
+    export_staging_lock();
+    ee = (struct export_entry*) index_put_entry(&export_tables[code_ix],
+						init_template(&templ, mod, func, arity));
+    export_staging_unlock();
+    return ee->ep;
 }
 
 /*
@@ -224,77 +296,117 @@ erts_export_put(Eterm mod, Eterm func, unsigned int arity)
  * export entry (making a call through it will cause the error_handler to
  * be called).
  *
- * Stub export entries will be placed in the secondary export table.
- * erts_export_consolidate() will move all stub export entries into the
- * main export table (will be done the next time code is loaded).
+ * Stub export entries will be placed in the loader export table.
  */
 
 Export*
 erts_export_get_or_make_stub(Eterm mod, Eterm func, unsigned int arity)
 {
-    Export e;
+    ErtsCodeIndex code_ix;
     Export* ep;
+    IF_DEBUG(int retrying = 0;)
 
     ASSERT(is_atom(mod));
     ASSERT(is_atom(func));
-
-    e.code[0] = mod;
-    e.code[1] = func;
-    e.code[2] = arity;
-    ep = erts_find_export_entry(mod, func, arity);
-    if (ep == 0) {
-	/*
-	 * The code is not loaded (yet). Put the export in the secondary
-	 * export table, to avoid having to lock the main export table.
-	 */
-	export_write_lock();
-	ep = (Export *) hash_put(&secondary_export_table, (void*) &e);
-	export_write_unlock();
-    }
+
+    do {
+	code_ix = erts_active_code_ix();
+	ep = erts_find_export_entry(mod, func, arity, code_ix);
+	if (ep == 0) {
+	    /*
+	     * The code is not loaded (yet). Put the export in the staging
+	     * export table, to avoid having to lock the active export table.
+	     */
+	    export_staging_lock();
+	    if (erts_active_code_ix() == code_ix) {
+		struct export_templ templ;
+		struct export_entry* entry;
+
+		IndexTable* tab = &export_tables[erts_staging_code_ix()];
+		init_template(&templ, mod, func, arity);
+		entry = (struct export_entry *) index_put_entry(tab, &templ.entry);
+		ep = entry->ep;
+		ASSERT(ep);
+	    }
+	    else { /* race */
+		ASSERT(!retrying);
+		IF_DEBUG(retrying = 1);
+	    }
+	    export_staging_unlock();
+	}
+    } while (!ep);
     return ep;
 }
 
-/*
- * To be called before loading code (with other threads blocked).
- * This function will move all export entries from the secondary
- * export table into the primary.
- */
-void
-erts_export_consolidate(void)
+Export *export_list(int i, ErtsCodeIndex code_ix)
 {
-#ifdef DEBUG
-    HashInfo hi;
-#endif
-
-    ERTS_SMP_LC_ASSERT(erts_initialized == 0
-		       || erts_smp_thr_progress_is_blocking());
-
-    export_write_lock();
-    erts_index_merge(&secondary_export_table, &export_table);
-    erts_hash_merge(&secondary_export_table, &export_table.htable);
-    export_write_unlock();
-#ifdef DEBUG
-    hash_get_info(&hi, &export_table.htable);
-    ASSERT(export_table.entries == hi.objs);
-#endif
+    return ((struct export_entry*) erts_index_lookup(&export_tables[code_ix], i))->ep;
 }
 
-Export *export_list(int i)
+int export_list_size(ErtsCodeIndex code_ix)
 {
-    return (Export*) erts_index_lookup(&export_table, i);
+    return export_tables[code_ix].entries;
 }
 
-int export_list_size(void)
+int export_table_sz(void)
 {
-    return export_table.entries;
+    int i, bytes = 0;
+
+    export_staging_lock();
+    for (i=0; i<ERTS_NUM_CODE_IX; i++) {
+	bytes += index_table_sz(&export_tables[i]);
+    }
+    export_staging_unlock();
+    return bytes;
+}
+int export_entries_sz(void)
+{
+    return erts_smp_atomic_read_nob(&total_entries_bytes);
+}
+Export *export_get(Export *e)
+{
+    struct export_entry ee;
+    struct export_entry* entry;
+
+    ee.ep = e;
+    entry = (struct export_entry*)hash_get(&export_tables[erts_active_code_ix()].htable, &ee);
+    return entry ? entry->ep : NULL;
 }
 
-int export_table_sz(void)
+IF_DEBUG(static ErtsCodeIndex debug_start_load_ix = 0;)
+
+
+void export_start_staging(void)
 {
-    return index_table_sz(&export_table);
+    ErtsCodeIndex dst_ix = erts_staging_code_ix();
+    ErtsCodeIndex src_ix = erts_active_code_ix();
+    IndexTable* dst = &export_tables[dst_ix];
+    IndexTable* src = &export_tables[src_ix];
+    struct export_entry* src_entry;
+    struct export_entry* dst_entry;
+    int i;
+
+    ASSERT(dst_ix != src_ix);
+    ASSERT(debug_start_load_ix == -1);
+
+    export_staging_lock();
+    /*
+     * Insert all entries in src into dst table
+     */
+    for (i = 0; i < src->entries; i++) {
+	src_entry = (struct export_entry*) erts_index_lookup(src, i);
+	src_entry->ep->addressv[dst_ix] = src_entry->ep->addressv[src_ix];
+	dst_entry = (struct export_entry*) index_put_entry(dst, src_entry);
+	ASSERT(entry_to_blob(src_entry) == entry_to_blob(dst_entry));
+    }
+    export_staging_unlock();
+
+    IF_DEBUG(debug_start_load_ix = dst_ix);
 }
 
-Export *export_get(Export *e)
+void export_end_staging(int commit)
 {
-    return hash_get(&export_table.htable, e);
+    ASSERT(debug_start_load_ix == erts_staging_code_ix());
+    IF_DEBUG(debug_start_load_ix = -1);
 }
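
The core trick in this change is the export_blob layout: one heap allocation holds the Export payload plus one export_entry per code index, and entry_to_blob() recovers the containing blob from any of its entries through the entry's ep pointer with offsetof, a container_of-style idiom. Below is a minimal standalone sketch of just that idiom; every name in it (NUM_IX, struct blob, and so on) is a stand-in invented for illustration, not an ERTS type:

    #include <stddef.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define NUM_IX 3                    /* stand-in for ERTS_NUM_CODE_IX */

    struct exp { int mfa[3]; };         /* stand-in for Export */

    struct entry {                      /* stand-in for struct export_entry */
        int index;                      /* -1 while this slot is unused */
        struct exp *ep;                 /* always points at the blob's payload */
    };

    struct blob {                       /* one allocation: payload + entries */
        struct exp exp;
        struct entry entryv[NUM_IX];
    };

    /* Same offsetof trick as entry_to_blob() in the diff: back up from
     * the payload pointer to the start of the enclosing blob. */
    static struct blob *entry_to_blob(struct entry *e)
    {
        return (struct blob *)((char *)e->ep - offsetof(struct blob, exp));
    }

    int main(void)
    {
        struct blob *b = malloc(sizeof *b);
        int ix;

        for (ix = 0; ix < NUM_IX; ix++) {  /* mirror export_alloc(): all free */
            b->entryv[ix].index = -1;
            b->entryv[ix].ep = &b->exp;
        }
        b->entryv[1].index = 1;            /* pretend one table claimed a slot */

        printf("recovered blob: %s\n",
               entry_to_blob(&b->entryv[1]) == b ? "yes" : "no");
        free(b);
        return 0;
    }

Because every entry carries an ep to the shared payload, each per-index hash table can hold its own export_entry while all of them resolve to the same Export, and export_free() in the diff releases the blob only when the last slot has gone unused.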
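The staging/active split itself is a double-buffering scheme: readers index export_tables[erts_active_code_ix()] with no lock, loaders fill the staging table under export_staging_lock, and a code-index flip publishes the result (in ERTS the flip is performed by the code-index machinery, not by export_end_staging() itself). A hypothetical miniature of that lifecycle, with invented names (tab, start_staging, commit_staging) and a plain int standing in for a whole table:

    #include <stdatomic.h>
    #include <stdio.h>

    #define NUM_IX 2

    static int tab[NUM_IX];         /* stand-in for the per-index tables */
    static atomic_int active_ix;    /* readers load this without locking */

    static int staging_ix(void)
    {
        return (atomic_load(&active_ix) + 1) % NUM_IX;
    }

    /* Like export_start_staging(): seed staging with the active contents. */
    static void start_staging(void)
    {
        tab[staging_ix()] = tab[atomic_load(&active_ix)];
    }

    /* Publish: new readers see the staged table; readers that already
     * picked the old index keep using it, and it is never mutated. */
    static void commit_staging(void)
    {
        atomic_store(&active_ix, staging_ix());
    }

    int main(void)
    {
        tab[atomic_load(&active_ix)] = 1;   /* "loaded code", generation 1 */
        start_staging();
        tab[staging_ix()] = 2;              /* load generation 2 into staging */
        commit_staging();
        printf("active generation: %d\n", tab[atomic_load(&active_ix)]);
        return 0;
    }

This is also why erts_export_get_or_make_stub() in the diff re-checks erts_active_code_ix() after taking the staging lock: if the indexes flipped between the lock-free lookup and the lock acquisition, it loops and retries against the new active table.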