Diffstat (limited to 'erts/emulator/beam/erl_nif.c')
-rw-r--r--  erts/emulator/beam/erl_nif.c | 650
1 file changed, 555 insertions(+), 95 deletions(-)
diff --git a/erts/emulator/beam/erl_nif.c b/erts/emulator/beam/erl_nif.c
index 1097916c97..941f44b9ec 100644
--- a/erts/emulator/beam/erl_nif.c
+++ b/erts/emulator/beam/erl_nif.c
@@ -1,7 +1,7 @@
 /*
  * %CopyrightBegin%
  *
- * Copyright Ericsson AB 2009-2014. All Rights Reserved.
+ * Copyright Ericsson AB 2009-2016. All Rights Reserved.
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -92,11 +92,12 @@ static ERTS_INLINE Eterm* alloc_heap(ErlNifEnv* env, unsigned need)
 }
 
 static Eterm* alloc_heap_heavy(ErlNifEnv* env, unsigned need, Eterm* hp)
-{    
+{
     env->hp = hp;
-    if (env->heap_frag == NULL) {       
+    if (env->heap_frag == NULL) {
 	ASSERT(HEAP_LIMIT(env->proc) == env->hp_end);
-	HEAP_TOP(env->proc) = env->hp;	
+	ASSERT(env->hp + need > env->hp_end);
+	HEAP_TOP(env->proc) = env->hp;
     }
     else {
 	env->heap_frag->used_size = hp - env->heap_frag->mem;
@@ -120,7 +121,8 @@ static ERTS_INLINE void ensure_heap(ErlNifEnv* env, unsigned may_need)
 }
 #endif
 
-void erts_pre_nif(ErlNifEnv* env, Process* p, struct erl_module_nif* mod_nif)
+void erts_pre_nif(ErlNifEnv* env, Process* p, struct erl_module_nif* mod_nif,
+		  Process* tracee)
 {
     env->mod_nif = mod_nif;
     env->proc = p;
@@ -130,17 +132,7 @@ void erts_pre_nif(ErlNifEnv* env, Process* p, struct erl_module_nif* mod_nif)
     env->fpe_was_unmasked = erts_block_fpe();
     env->tmp_obj_list = NULL;
     env->exception_thrown = 0;
-}
-
-static void pre_nif_noproc(ErlNifEnv* env, struct erl_module_nif* mod_nif)
-{
-    env->mod_nif = mod_nif;
-    env->proc = NULL;
-    env->hp = NULL;
-    env->hp_end = NULL;
-    env->heap_frag = NULL;
-    env->fpe_was_unmasked = erts_block_fpe();
-    env->tmp_obj_list = NULL;
+    env->tracee = tracee;
 }
 
 /* Temporary object header, auto-deallocated when NIF returns
@@ -180,13 +172,6 @@ void erts_post_nif(ErlNifEnv* env)
     free_tmp_objs(env);
 }
 
-static void post_nif_noproc(ErlNifEnv* env)
-{
-    erts_unblock_fpe(env->fpe_was_unmasked);
-    free_tmp_objs(env);
-}
-
-
 /* Flush out our cached heap pointers to allow an ordinary HAlloc
 */
 static void flush_env(ErlNifEnv* env)
@@ -209,6 +194,7 @@ static void flush_env(ErlNifEnv* env)
 */
 static void cache_env(ErlNifEnv* env)
 {
+    env->heap_frag = MBUF(env->proc);
     if (env->heap_frag == NULL) {
 	ASSERT(env->hp_end == HEAP_LIMIT(env->proc));
 	ASSERT(env->hp <= HEAP_TOP(env->proc));
@@ -216,10 +202,6 @@ static void cache_env(ErlNifEnv* env)
 	env->hp = HEAP_TOP(env->proc);
     }
     else {
-	ASSERT(env->hp_end != HEAP_LIMIT(env->proc));
-	ASSERT(env->hp_end - env->hp <= env->heap_frag->alloc_size);
-	env->heap_frag = MBUF(env->proc);
-	ASSERT(env->heap_frag != NULL);
 	env->hp = env->heap_frag->mem + env->heap_frag->used_size;
 	env->hp_end = env->heap_frag->mem + env->heap_frag->alloc_size;
     }
@@ -250,18 +232,20 @@ struct enif_msg_environment_t
     Process phony_proc;
 };
 
-ErlNifEnv* enif_alloc_env(void)
+static ERTS_INLINE void
+setup_nif_env(struct enif_msg_environment_t* msg_env,
+	      struct erl_module_nif* mod,
+	      Process* tracee)
 {
-    struct enif_msg_environment_t* msg_env =
-	erts_alloc_fnf(ERTS_ALC_T_NIF, sizeof(struct enif_msg_environment_t));
     Eterm* phony_heap = (Eterm*) msg_env; /* dummy non-NULL ptr */
-	
-    msg_env->env.hp = phony_heap; 
+
+    msg_env->env.hp = phony_heap;
     msg_env->env.hp_end = phony_heap;
     msg_env->env.heap_frag = NULL;
-    msg_env->env.mod_nif = NULL;
+    msg_env->env.mod_nif = mod;
     msg_env->env.tmp_obj_list = NULL;
     msg_env->env.proc = &msg_env->phony_proc;
+    msg_env->env.exception_thrown = 0;
     memset(&msg_env->phony_proc, 0, sizeof(Process));
     HEAP_START(&msg_env->phony_proc) = phony_heap;
     HEAP_TOP(&msg_env->phony_proc) = phony_heap;
@@ -273,6 +257,14 @@ ErlNifEnv* enif_alloc_env(void)
     msg_env->phony_proc.space_verified = 0;
     msg_env->phony_proc.space_verified_from = NULL;
 #endif
+    msg_env->env.tracee = tracee;
+}
+
+ErlNifEnv* enif_alloc_env(void)
+{
+    struct enif_msg_environment_t* msg_env =
+	erts_alloc_fnf(ERTS_ALC_T_NIF, sizeof(struct enif_msg_environment_t));
+    setup_nif_env(msg_env, NULL, NULL);
     return &msg_env->env;
 }
 void enif_free_env(ErlNifEnv* env)
@@ -281,6 +273,20 @@ void enif_free_env(ErlNifEnv* env)
     erts_free(ERTS_ALC_T_NIF, env);
 }
 
+static ERTS_INLINE void pre_nif_noproc(struct enif_msg_environment_t* msg_env,
+				       struct erl_module_nif* mod,
+				       Process* tracee)
+{
+    setup_nif_env(msg_env, mod, tracee);
+    msg_env->env.fpe_was_unmasked = erts_block_fpe();
+}
+
+static ERTS_INLINE void post_nif_noproc(struct enif_msg_environment_t* msg_env)
+{
+    erts_unblock_fpe(msg_env->env.fpe_was_unmasked);
+    enif_clear_env(&msg_env->env);
+}
+
 static ERTS_INLINE void clear_offheap(ErlOffHeap* oh)
 {
     oh->first = NULL;
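Note: setup_nif_env and the phony process above are VM internals; from the NIF API side the allocate/clear/free lifecycle is unchanged. A minimal sketch of that lifecycle (illustrative only, not part of this commit; send_counters is a hypothetical helper running on a non-scheduler thread):

    #include "erl_nif.h"

    /* Hypothetical helper: send three integers to "to", reusing one
     * process-independent environment (now built by setup_nif_env()). */
    static void send_counters(ErlNifPid *to)
    {
        ErlNifEnv *env = enif_alloc_env();
        int i;

        for (i = 0; i < 3; i++) {
            ERL_NIF_TERM msg = enif_make_int(env, i);
            enif_send(NULL, to, env, msg); /* send consumes env's contents */
            enif_clear_env(env);           /* ...and this makes env reusable */
        }
        enif_free_env(env);
    }

Reusing the environment via enif_clear_env avoids reallocating one per message; this is the documented pattern for long-lived sender threads.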
@@ -307,22 +313,111 @@ void enif_clear_env(ErlNifEnv* env)
     ASSERT(!is_offheap(&MSO(p)));
     free_tmp_objs(env);
 }
-int enif_send(ErlNifEnv* env, const ErlNifPid* to_pid,
-	      ErlNifEnv* msg_env, ERL_NIF_TERM msg)
+
+#ifdef ERTS_SMP
+#ifdef DEBUG
+static int enif_send_delay = 0;
+#define ERTS_FORCE_ENIF_SEND_DELAY() (enif_send_delay++ % 2 == 0)
+#else
+#ifdef ERTS_PROC_LOCK_OWN_IMPL
+#define ERTS_FORCE_ENIF_SEND_DELAY() 0
+#else
+/*
+ * We always schedule messages if we do not use our own
+ * process lock implementation, as if we try to do a trylock on
+ * a lock that might already be locked by the same thread.
+ * And what happens then with different mutex implementations
+ * is not always guaranteed.
+ */
+#define ERTS_FORCE_ENIF_SEND_DELAY() 1
+#endif
+#endif
+
+int erts_flush_trace_messages(Process *c_p, ErtsProcLocks c_p_locks)
+{
+    ErlTraceMessageQueue *msgq, **last_msgq;
+    int reds = 0;
+
+    erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_TRACE);
+
+    msgq = c_p->trace_msg_q;
+
+    if (!msgq)
+	goto error;
+
+    do {
+	Process* rp;
+	ErtsProcLocks rp_locks;
+	ErtsMessage *first, **last;
+	Uint len;
+
+	first = msgq->first;
+	last = msgq->last;
+	len = msgq->len;
+	msgq->first = NULL;
+	msgq->last = &msgq->first;
+	msgq->len = 0;
+	erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_TRACE);
+
+	ASSERT(len != 0);
+
+	rp = erts_proc_lookup(msgq->receiver);
+	if (rp) {
+	    rp_locks = 0;
+	    if (rp->common.id == c_p->common.id)
+		rp_locks = c_p_locks;
+	    erts_queue_messages(rp, &rp_locks, first, last, len);
+	    if (rp->common.id == c_p->common.id)
+		rp_locks &= ~c_p_locks;
+	    if (rp_locks)
+		erts_smp_proc_unlock(rp, rp_locks);
+	    reds += len;
+	} else {
+	    erts_cleanup_messages(first);
+	}
+	reds += 1;
+	erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_TRACE);
+	msgq = msgq->next;
+    } while (msgq);
+
+    last_msgq = &c_p->trace_msg_q;
+
+    while (*last_msgq) {
+	msgq = *last_msgq;
+	if (msgq->len == 0) {
+	    *last_msgq = msgq->next;
+	    erts_free(ERTS_ALC_T_TRACE_MSG_QUEUE, msgq);
+	} else {
+	    last_msgq = &msgq->next;
+	}
+    }
+
+error:
+    erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_TRACE);
+
+    return reds;
+}
+
+#endif
+
+int
+enif_send(ErlNifEnv* env, const ErlNifPid* to_pid,
+	  ErlNifEnv* msg_env, ERL_NIF_TERM msg)
 {
     struct enif_msg_environment_t* menv = (struct enif_msg_environment_t*)msg_env;
-    ErtsProcLocks rp_locks = 0;
+    ErtsProcLocks rp_locks = 0, lc_locks = 0, c_p_locks = ERTS_PROC_LOCK_MAIN;
     Process* rp;
     Process* c_p;
-    ErlHeapFragment* frags;
+    ErtsMessage *mp;
     Eterm receiver = to_pid->pid;
     int flush_me = 0;
-    int scheduler = erts_get_scheduler_id() != 0;
+    ErtsSchedulerData *esdp = erts_get_scheduler_data();
+    int scheduler = esdp ? esdp->no : 0;
 
     if (env != NULL) {
 	c_p = env->proc;
 	if (receiver == c_p->common.id) {
-	    rp_locks = ERTS_PROC_LOCK_MAIN;
+	    rp_locks = c_p_locks;
 	    flush_me = 1;
 	}
     }
@@ -332,38 +427,124 @@ int enif_send(ErlNifEnv* env, const ErlNifPid* to_pid,
 #else
 	erts_exit(ERTS_ABORT_EXIT,"enif_send: env==NULL on non-SMP VM");
 #endif
-    }    
+    }
 
     rp = (scheduler
 	  ? erts_proc_lookup(receiver)
 	  : erts_pid2proc_opt(c_p, ERTS_PROC_LOCK_MAIN,
			      receiver, rp_locks, ERTS_P2P_FLG_INC_REFC));
+
     if (rp == NULL) {
	ASSERT(env == NULL || receiver != c_p->common.id);
	return 0;
     }
-    flush_env(msg_env);
-    frags = menv->env.heap_frag;
-    ASSERT(frags == MBUF(&menv->phony_proc));
-    if (frags != NULL) {
-	/* Move all offheap's from phony proc to the first fragment.
-	   Quick and dirty, but erts_move_msg_mbuf_to_heap doesn't care. */
-	ASSERT(!is_offheap(&frags->off_heap));
-	frags->off_heap = MSO(&menv->phony_proc);
-	clear_offheap(&MSO(&menv->phony_proc));
-	menv->env.heap_frag = NULL;
-	MBUF(&menv->phony_proc) = NULL;
+
+    if (menv) {
+	flush_env(msg_env);
+	mp = erts_alloc_message(0, NULL);
+	mp->data.heap_frag = menv->env.heap_frag;
+	ASSERT(mp->data.heap_frag == MBUF(&menv->phony_proc));
+	if (mp->data.heap_frag != NULL) {
+	    /* Move all offheap's from phony proc to the first fragment.
+	       Quick and dirty... */
+	    ASSERT(!is_offheap(&mp->data.heap_frag->off_heap));
+	    mp->data.heap_frag->off_heap = MSO(&menv->phony_proc);
+	    clear_offheap(&MSO(&menv->phony_proc));
+	    menv->env.heap_frag = NULL;
+	    MBUF(&menv->phony_proc) = NULL;
+	}
+    } else {
+	Uint sz = size_object(msg);
+	Eterm *hp;
+	mp = erts_alloc_message(sz, &hp);
+	msg = copy_struct(msg, sz, &hp, &mp->hfrag.off_heap);
+	ASSERT(hp == mp->hfrag.mem+mp->hfrag.used_size);
     }
-    ASSERT(!is_offheap(&MSO(&menv->phony_proc)));
 
-    if (flush_me) {	
-	flush_env(env); /* Needed for ERTS_HOLE_CHECK */ 
+    ERL_MESSAGE_TERM(mp) = msg;
+
+    if (flush_me) {
+	flush_env(env); /* Needed for ERTS_HOLE_CHECK */
+    }
+
+    if (!env || !env->tracee) {
+
+	if (c_p && IS_TRACED_FL(c_p, F_TRACE_SEND))
+	    trace_send(c_p, receiver, msg);
+
+#ifndef ERTS_SMP
+    }
+#endif
+
+	erts_queue_message(rp, &rp_locks, mp, msg);
+#ifdef ERTS_SMP
+    }
+    else {
+	/* This clause is taken when the nif is called in the context
+	   of a traced process. We do not know which locks we have
+	   so we have to do a try lock and if that fails we enqueue
+	   the message in a special trace message output queue of the
+	   tracee */
+	ErlTraceMessageQueue *msgq;
+	Process *t_p = env->tracee;
+
+
+	erts_smp_proc_lock(t_p, ERTS_PROC_LOCK_TRACE);
+
+	msgq = t_p->trace_msg_q;
+
+	while (msgq != NULL) {
+	    if (msgq->receiver == receiver) {
+		break;
+	    }
+	    msgq = msgq->next;
+	}
+
+#ifdef ERTS_ENABLE_LOCK_CHECK
+	lc_locks = erts_proc_lc_my_proc_locks(rp);
+	rp_locks |= lc_locks;
+	if (receiver == c_p->common.id)
+	    c_p_locks |= lc_locks;
+#endif
+	if (ERTS_FORCE_ENIF_SEND_DELAY() || msgq ||
+	    rp_locks & ERTS_PROC_LOCK_MSGQ ||
+	    erts_smp_proc_trylock(rp, ERTS_PROC_LOCK_MSGQ) == EBUSY) {
+
+	    if (!msgq) {
+
+		msgq = erts_alloc(ERTS_ALC_T_TRACE_MSG_QUEUE,
+				  sizeof(ErlTraceMessageQueue));
+		msgq->receiver = receiver;
+		msgq->first = mp;
+		msgq->last = &mp->next;
+		msgq->len = 1;
+
+		/* Insert in linked list */
+		msgq->next = t_p->trace_msg_q;
+		t_p->trace_msg_q = msgq;
+
+		erts_smp_proc_unlock(t_p, ERTS_PROC_LOCK_TRACE);
+
+		erts_schedule_flush_trace_messages(t_p->common.id);
+
+	    } else {
+		msgq->len++;
+		*msgq->last = mp;
+		msgq->last = &mp->next;
+		erts_smp_proc_unlock(t_p, ERTS_PROC_LOCK_TRACE);
+	    }
+	} else {
+	    erts_smp_proc_unlock(t_p, ERTS_PROC_LOCK_TRACE);
+	    rp_locks &= ~ERTS_PROC_LOCK_TRACE;
+	    rp_locks |= ERTS_PROC_LOCK_MSGQ;
+	    erts_queue_message(rp, &rp_locks, mp, msg);
+	}
     }
-    erts_queue_message(rp, &rp_locks, frags, msg, am_undefined);
+#endif
+
     if (c_p == rp)
	rp_locks &= ~ERTS_PROC_LOCK_MAIN;
-    if (rp_locks)
-	erts_smp_proc_unlock(rp, rp_locks);
+    if (rp_locks & ~lc_locks)
+	erts_smp_proc_unlock(rp, rp_locks & ~lc_locks);
     if (!scheduler)
	erts_proc_dec_refc(rp);
     if (flush_me) {
@@ -372,13 +553,49 @@ int enif_send(ErlNifEnv* env, const ErlNifPid* to_pid,
     return 1;
 }
 
+int
+enif_port_command(ErlNifEnv *env, const ErlNifPort* to_port,
+		  ErlNifEnv *msg_env, ERL_NIF_TERM msg)
+{
+
+    ErtsSchedulerData *esdp = erts_get_scheduler_data();
+    int scheduler = esdp ? esdp->no : 0;
+    Port *prt;
+
+    if (scheduler == 0 || !env)
+	return 0;
+
+    prt = erts_port_lookup(to_port->port_id,
+			   (erts_port_synchronous_ops
+			    ? ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP
+			    : ERTS_PORT_SFLGS_INVALID_LOOKUP));
+
+    if (!prt)
+	return 0;
+
+    if (IS_TRACED_FL(prt, F_TRACE_RECEIVE))
+	trace_port_receive(prt, env->proc->common.id, am_command, msg);
+
+    return erts_port_output_async(prt, env->proc->common.id, msg);
+}
+
 ERL_NIF_TERM enif_make_copy(ErlNifEnv* dst_env, ERL_NIF_TERM src_term)
 {
     Uint sz;
     Eterm* hp;
+#ifdef SHCOPY
+    erts_shcopy_t info;
+    INITIALIZE_SHCOPY(info);
+    sz = copy_shared_calculate(src_term, &info);
+    hp = alloc_heap(dst_env, sz);
+    src_term = copy_shared_perform(src_term, sz, &info, &hp, &MSO(dst_env->proc));
+    DESTROY_SHCOPY(info);
+    return src_term;
+#else
     sz = size_object(src_term);
     hp = alloc_heap(dst_env, sz);
     return copy_struct(src_term, sz, &hp, &MSO(dst_env->proc));
+#endif
 }
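The enif_send rework above only changes VM internals (ErtsMessage allocation and the new tracee branch); callers use it as before. A hedged sketch of a typical caller (notify_self is a hypothetical NIF, not from this commit):

    #include "erl_nif.h"

    /* Hypothetical NIF: send the atom 'ping' to the calling process. */
    static ERL_NIF_TERM
    notify_self(ErlNifEnv *caller_env, int argc, const ERL_NIF_TERM argv[])
    {
        ErlNifPid self;
        ErlNifEnv *msg_env;
        ERL_NIF_TERM msg;

        if (!enif_self(caller_env, &self))  /* NULL after this commit means:
                                               no process context */
            return enif_make_badarg(caller_env);

        msg_env = enif_alloc_env();
        msg = enif_make_atom(msg_env, "ping");
        if (!enif_send(caller_env, &self, msg_env, msg)) {
            enif_free_env(msg_env);
            return enif_make_badarg(caller_env);
        }
        enif_free_env(msg_env);
        return enif_make_atom(caller_env, "ok");
    }

Note also the new "menv == NULL" branch above: passing msg_env = NULL with a msg term from the caller's own environment is now supported, in which case the term is copied into a freshly allocated message.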
@@ -391,12 +608,28 @@ static int is_offheap(const ErlOffHeap* oh)
 
 ErlNifPid* enif_self(ErlNifEnv* caller_env, ErlNifPid* pid)
 {
+    if (caller_env->proc->common.id == ERTS_INVALID_PID)
+	return NULL;
     pid->pid = caller_env->proc->common.id;
     return pid;
 }
+
 int enif_get_local_pid(ErlNifEnv* env, ERL_NIF_TERM term, ErlNifPid* pid)
 {
-    return is_internal_pid(term) ? (pid->pid=term, 1) : 0;
+    if (is_internal_pid(term)) {
+	pid->pid=term;
+	return 1;
+    }
+    return 0;
+}
+
+int enif_get_local_port(ErlNifEnv* env, ERL_NIF_TERM term, ErlNifPort* port)
+{
+    if (is_internal_port(term)) {
+	port->port_id=term;
+	return 1;
+    }
+    return 0;
 }
 
 int enif_is_atom(ErlNifEnv* env, ERL_NIF_TERM term)
@@ -609,6 +842,68 @@ unsigned char* enif_make_new_binary(ErlNifEnv* env, size_t size,
     return binary_bytes(*termp);
 }
 
+int enif_term_to_binary(ErlNifEnv *dst_env, ERL_NIF_TERM term,
+			ErlNifBinary *bin)
+{
+    Sint size;
+    byte *bp;
+    Binary* refbin;
+
+    size = erts_encode_ext_size(term);
+    if (!enif_alloc_binary(size, bin))
+	return 0;
+
+    refbin = bin->ref_bin;
+
+    bp = bin->data;
+
+    erts_encode_ext(term, &bp);
+
+    bin->size = bp - bin->data;
+    refbin->orig_size = bin->size;
+
+    ASSERT(bin->data + bin->size == bp);
+
+    return 1;
+}
+
+size_t enif_binary_to_term(ErlNifEnv *dst_env,
+			   const unsigned char* data,
+			   size_t data_sz,
+			   ERL_NIF_TERM *term,
+			   ErlNifBinaryToTerm opts)
+{
+    Sint size;
+    ErtsHeapFactory factory;
+    byte *bp = (byte*) data;
+
+    ERTS_CT_ASSERT(ERL_NIF_BIN2TERM_SAFE == ERTS_DIST_EXT_BTT_SAFE);
+
+    if (opts & ~ERL_NIF_BIN2TERM_SAFE) {
+	return 0;
+    }
+    if ((size = erts_decode_ext_size(bp, data_sz)) < 0)
+	return 0;
+
+    if (size > 0) {
+	flush_env(dst_env);
+	erts_factory_proc_prealloc_init(&factory, dst_env->proc, size);
+    } else {
+	erts_factory_dummy_init(&factory);
+    }
+
+    *term = erts_decode_ext(&factory, &bp, (Uint32)opts);
+
+    if (is_non_value(*term)) {
+	return 0;
+    }
+    erts_factory_close(&factory);
+    cache_env(dst_env);
+
+    ASSERT(bp > data);
+    return bp - data;
+}
+
 int enif_is_identical(Eterm lhs, Eterm rhs)
 {
     return EQ(lhs,rhs);
@@ -898,8 +1193,13 @@ int enif_get_list_cell(ErlNifEnv* env, Eterm term, Eterm* head, Eterm* tail)
 
 int enif_get_list_length(ErlNifEnv* env, Eterm term, unsigned* len)
 {
-    if (is_not_list(term) && is_not_nil(term)) return 0;
-    *len = erts_list_length(term);
+    Sint i;
+    Uint u;
+
+    if ((i = erts_list_length(term)) < 0) return 0;
+    u = (Uint)i;
+    if ((unsigned)u != u) return 0;
+    *len = u;
     return 1;
 }
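The enif_term_to_binary/enif_binary_to_term pair added above mirrors term_to_binary/1 and binary_to_term/2. A hedged round-trip sketch (roundtrip is a hypothetical NIF):

    #include "erl_nif.h"

    /* Hypothetical NIF: encode argv[0] to the external term format and
     * decode it straight back. */
    static ERL_NIF_TERM
    roundtrip(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[])
    {
        ErlNifBinary bin;
        ERL_NIF_TERM copy;
        size_t consumed;

        if (argc != 1 || !enif_term_to_binary(env, argv[0], &bin))
            return enif_make_badarg(env);

        /* ERL_NIF_BIN2TERM_SAFE corresponds to binary_to_term/2 with
           [safe]; the return value is the number of bytes consumed,
           0 on decode failure. */
        consumed = enif_binary_to_term(env, bin.data, bin.size, &copy,
                                       ERL_NIF_BIN2TERM_SAFE);
        enif_release_binary(&bin);
        return consumed != 0 ? copy : enif_make_badarg(env);
    }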
@@ -1140,6 +1440,103 @@ int enif_make_reverse_list(ErlNifEnv* env, ERL_NIF_TERM term, ERL_NIF_TERM *list
     return 1;
 }
 
+int enif_is_process_alive(ErlNifEnv* env, ErlNifPid *proc)
+{
+    ErtsProcLocks rp_locks = 0; /* We don't need any locks,
+				   just to check if it is alive */
+    Eterm target = proc->pid;
+    Process* rp;
+    Process* c_p;
+    int scheduler = erts_get_scheduler_id() != 0;
+
+    if (env != NULL) {
+	c_p = env->proc;
+	if (target == c_p->common.id) {
+	    /* We are alive! */
+	    return 1;
+	}
+    }
+    else {
+#ifdef ERTS_SMP
+	c_p = NULL;
+#else
+	erts_exit(ERTS_ABORT_EXIT,"enif_is_process_alive: "
+		  "env==NULL on non-SMP VM");
+#endif
+    }
+
+    rp = (scheduler
+	  ? erts_proc_lookup(target)
+	  : erts_pid2proc_opt(c_p, ERTS_PROC_LOCK_MAIN,
+			      target, rp_locks, ERTS_P2P_FLG_INC_REFC));
+    if (rp == NULL) {
+	ASSERT(env == NULL || target != c_p->common.id);
+	return 0;
+    } else {
+	if (!scheduler)
+	    erts_proc_dec_refc(rp);
+	return 1;
+    }
+}
+
+int enif_is_port_alive(ErlNifEnv *env, ErlNifPort *port)
+{
+    /* only allowed if called from scheduler */
+    if (erts_get_scheduler_id() == 0)
+	erts_exit(ERTS_ABORT_EXIT,"enif_is_port_alive: called from non-scheduler");
+
+    return erts_port_lookup(
+	port->port_id,
+	(erts_port_synchronous_ops
+	 ? ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP
+	 : ERTS_PORT_SFLGS_INVALID_LOOKUP)) != NULL;
+}
+
+ERL_NIF_TERM
+enif_now_time(ErlNifEnv *env)
+{
+    Uint mega, sec, micro;
+    Eterm *hp;
+    get_now(&mega, &sec, &micro);
+    hp = alloc_heap(env, 4);
+    return TUPLE3(hp, make_small(mega), make_small(sec), make_small(micro));
+}
+
+ERL_NIF_TERM
+enif_cpu_time(ErlNifEnv *env)
+{
+#ifdef HAVE_ERTS_NOW_CPU
+    Uint mega, sec, micro;
+    Eterm *hp;
+    erts_get_now_cpu(&mega, &sec, &micro);
+    hp = alloc_heap(env, 4);
+    return TUPLE3(hp, make_small(mega), make_small(sec), make_small(micro));
+#else
+    return enif_make_badarg(env);
+#endif
+}
+
+ERL_NIF_TERM
+enif_make_unique_integer(ErlNifEnv *env, ErlNifUniqueInteger properties)
+{
+    int monotonic = properties & ERL_NIF_UNIQUE_MONOTONIC;
+    int positive = properties & ERL_NIF_UNIQUE_POSITIVE;
+    Eterm *hp;
+    Uint hsz;
+
+    if (monotonic) {
+	Sint64 raw_unique = erts_raw_get_unique_monotonic_integer();
+	hsz = erts_raw_unique_monotonic_integer_heap_size(raw_unique, positive);
+	hp = alloc_heap(env, hsz);
+	return erts_raw_make_unique_monotonic_integer_value(&hp, raw_unique, positive);
+    } else {
+	Uint64 raw_unique[ERTS_UNIQUE_INT_RAW_VALUES];
+	erts_raw_get_unique_integer(raw_unique);
+	hsz = erts_raw_unique_integer_heap_size(raw_unique, positive);
+	hp = alloc_heap(env, hsz);
+	return erts_raw_make_unique_integer(&hp, raw_unique, positive);
+    }
+}
+
 ErlNifMutex* enif_mutex_create(char *name) { return erl_drv_mutex_create(name); }
 void enif_mutex_destroy(ErlNifMutex *mtx) {  erl_drv_mutex_destroy(mtx); }
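A hedged sketch using the new time and unique-integer helpers added above (make_ids is a hypothetical NIF; enif_now_time returns a {MegaSecs, Secs, MicroSecs} tuple like erlang:now/0, and enif_make_unique_integer mirrors erlang:unique_integer/1):

    #include "erl_nif.h"

    /* Hypothetical NIF returning {UniqueInteger, NowTuple}. */
    static ERL_NIF_TERM
    make_ids(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[])
    {
        ERL_NIF_TERM id = enif_make_unique_integer(
            env, ERL_NIF_UNIQUE_POSITIVE | ERL_NIF_UNIQUE_MONOTONIC);
        ERL_NIF_TERM now = enif_now_time(env); /* {MegaSecs,Secs,MicroSecs} */

        return enif_make_tuple2(env, id, now);
    }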
@@ -1266,10 +1663,10 @@ static void close_lib(struct erl_module_nif* lib)
     ASSERT(erts_refc_read(&lib->rt_dtor_cnt,0) == 0);
 
     if (lib->entry != NULL && lib->entry->unload != NULL) {
-	ErlNifEnv env;
-	pre_nif_noproc(&env, lib);
-	lib->entry->unload(&env, lib->priv_data);
-	post_nif_noproc(&env);
+	struct enif_msg_environment_t msg_env;
+	pre_nif_noproc(&msg_env, lib, NULL);
+	lib->entry->unload(&msg_env.env, lib->priv_data);
+	post_nif_noproc(&msg_env);
     }
     if (!erts_is_static_nif(lib->handle))
       erts_sys_ddll_close(lib->handle);
@@ -1415,10 +1812,10 @@ static void nif_resource_dtor(Binary* bin)
     ASSERT(ERTS_MAGIC_BIN_DESTRUCTOR(bin) == &nif_resource_dtor);
 
     if (type->dtor != NULL) {
-	ErlNifEnv env;
-	pre_nif_noproc(&env, type->owner);
-	type->dtor(&env,resource->data);
-	post_nif_noproc(&env);
+	struct enif_msg_environment_t msg_env;
+	pre_nif_noproc(&msg_env, type->owner, NULL);
+	type->dtor(&msg_env.env, resource->data);
+	post_nif_noproc(&msg_env);
     }
     if (erts_refc_dectest(&type->refc, 0) == 0) {
	ASSERT(type->next == NULL);
@@ -1635,7 +2032,7 @@ allocate_nif_sched_data(Process* proc, int argc)
	ep->exp.addressv[i] = &ep->exp.code[3];
     }
     ep->exp.code[3] = (BeamInstr) em_call_nif;
-    (void) ERTS_PROC_SET_NIF_TRAP_EXPORT(proc, ERTS_PROC_LOCK_MAIN, ep);
+    (void) ERTS_PROC_SET_NIF_TRAP_EXPORT(proc, ep);
     return ep;
 }
 
@@ -1792,12 +2189,10 @@ execute_dirty_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
     ep = (NifExport*) ERTS_PROC_GET_NIF_TRAP_EXPORT(proc);
     ASSERT(ep);
     ep->fp = NULL;
+    erts_smp_atomic32_read_band_mb(&proc->state, ~(ERTS_PSFLG_DIRTY_CPU_PROC
+						   | ERTS_PSFLG_DIRTY_IO_PROC));
     result = (*fp)(env, argc, argv);
-    erts_smp_atomic32_read_band_mb(&proc->state,
-				   ~(ERTS_PSFLG_DIRTY_CPU_PROC
-				     |ERTS_PSFLG_DIRTY_IO_PROC
-				     |ERTS_PSFLG_DIRTY_CPU_PROC_IN_Q
-				     |ERTS_PSFLG_DIRTY_IO_PROC_IN_Q));
+
     if (erts_refc_dectest(&env->mod_nif->rt_dtor_cnt, 0) == 0 && env->mod_nif->mod == NULL)
	close_lib(env->mod_nif);
     /*
@@ -1845,13 +2240,6 @@ schedule_dirty_nif(ErlNifEnv* env, int flags, int argc, const ERL_NIF_TERM argv[
     a = erts_smp_atomic32_read_acqb(&proc->state);
     while (1) {
	n = state = a;
-	/*
-	 * clear any current dirty flags and dirty queue indicators,
-	 * in case the application is shifting a job from one type
-	 * of dirty scheduler to the other
-	 */
-	n &= ~(ERTS_PSFLG_DIRTY_CPU_PROC|ERTS_PSFLG_DIRTY_IO_PROC
-	       |ERTS_PSFLG_DIRTY_CPU_PROC_IN_Q|ERTS_PSFLG_DIRTY_IO_PROC_IN_Q);
	if (flags == ERL_NIF_DIRTY_JOB_CPU_BOUND)
	    n |= ERTS_PSFLG_DIRTY_CPU_PROC;
	else
@@ -2055,14 +2443,13 @@ int enif_make_map_remove(ErlNifEnv* env,
			 Eterm key,
			 Eterm *map_out)
 {
-    int res;
     if (!is_map(map_in)) {
	return 0;
     }
     flush_env(env);
-    res = erts_maps_remove(env->proc, key, map_in, map_out);
+    (void) erts_maps_take(env->proc, key, map_in, map_out, NULL);
     cache_env(env);
-    return res;
+    return 1;
 }
 
 int enif_map_iterator_create(ErlNifEnv *env,
@@ -2251,17 +2638,17 @@ int enif_map_iterator_get_pair(ErlNifEnv *env,
 ***************************************************************************/
 
 
-static BeamInstr** get_func_pp(BeamInstr* mod_code, Eterm f_atom, unsigned arity)
+static BeamInstr** get_func_pp(BeamCodeHeader* mod_code, Eterm f_atom, unsigned arity)
 {
-    int n = (int) mod_code[MI_NUM_FUNCTIONS];
+    int n = (int) mod_code->num_functions;
     int j;
     for (j = 0; j < n; ++j) {
-	BeamInstr* code_ptr = (BeamInstr*) mod_code[MI_FUNCTIONS+j];
+	BeamInstr* code_ptr = (BeamInstr*) mod_code->functions[j];
	ASSERT(code_ptr[0] == (BeamInstr) BeamOp(op_i_func_info_IaaI));
	if (f_atom == ((Eterm) code_ptr[3])
	    && arity == ((unsigned) code_ptr[4])) {
-	    return (BeamInstr**) &mod_code[MI_FUNCTIONS+j];
+	    return (BeamInstr**) &mod_code->functions[j];
	}
     }
     return NULL;
@@ -2444,8 +2831,8 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2)
     if (init_func != NULL)
       handle = init_func;
 
-    if (!in_area(caller, mod->curr.code, mod->curr.code_length)) {
-	ASSERT(in_area(caller, mod->old.code, mod->old.code_length));
+    if (!in_area(caller, mod->curr.code_hdr, mod->curr.code_length)) {
+	ASSERT(in_area(caller, mod->old.code_hdr, mod->old.code_length));
 
	ret = load_nif_error(BIF_P, "old_code", "Calling load_nif from old "
			     "module '%T' not allowed", mod_atom);
@@ -2499,7 +2886,7 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2)
	for (i=0; i < entry->num_of_funcs && ret==am_ok; i++) {
	    BeamInstr** code_pp;
	    if (!erts_atom_get(f->name, sys_strlen(f->name), &f_atom, ERTS_ATOM_ENC_LATIN1)
-		|| (code_pp = get_func_pp(mod->curr.code, f_atom, f->arity))==NULL) {
+		|| (code_pp = get_func_pp(mod->curr.code_hdr, f_atom, f->arity))==NULL) {
		ret = load_nif_error(BIF_P,bad_lib,"Function not found %T:%s/%u",
				     mod_atom, f->name, f->arity);
	    }
@@ -2587,7 +2974,7 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2)
	    }
	    old_func = next_func(mod->curr.nif->entry, &old_incr, old_func);
	}
-	erts_pre_nif(&env, BIF_P, lib);
+	erts_pre_nif(&env, BIF_P, lib, NULL);
	veto = entry->reload(&env, &lib->priv_data, BIF_ARG_2);
	erts_post_nif(&env);
	if (veto) {
@@ -2608,7 +2995,7 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2)
		ret = load_nif_error(BIF_P, upgrade, "Upgrade not supported by this NIF library.");
		goto error;
	    }
-	    erts_pre_nif(&env, BIF_P, lib);
+	    erts_pre_nif(&env, BIF_P, lib, NULL);
	    veto = entry->upgrade(&env, &lib->priv_data, &mod->old.nif->priv_data, BIF_ARG_2);
	    erts_post_nif(&env);
	    if (veto) {
@@ -2619,7 +3006,7 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2)
	    commit_opened_resource_types(lib);
	}
	else if (entry->load != NULL) { /********* Initial load ***********/
-	    erts_pre_nif(&env, BIF_P, lib);
+	    erts_pre_nif(&env, BIF_P, lib, NULL);
	    veto = entry->load(&env, &lib->priv_data, BIF_ARG_2);
	    erts_post_nif(&env);
	    if (veto) {
@@ -2642,7 +3029,7 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2)
	{
	    BeamInstr* code_ptr;
	    erts_atom_get(f->name, sys_strlen(f->name), &f_atom, ERTS_ATOM_ENC_LATIN1);
-	    code_ptr = *get_func_pp(mod->curr.code, f_atom, f->arity);
+	    code_ptr = *get_func_pp(mod->curr.code_hdr, f_atom, f->arity);
 
	    if (code_ptr[1] == 0) {
		code_ptr[5+0] = (BeamInstr) BeamOp(op_call_nif);
@@ -2706,6 +3093,9 @@ erts_unload_nif(struct erl_module_nif* lib)
     ASSERT(erts_smp_thr_progress_is_blocking());
     ASSERT(lib != NULL);
     ASSERT(lib->mod != NULL);
+
+    erts_tracer_nif_clear();
+
     for (rt = resource_type_list.next;
	 rt != &resource_type_list;
	 rt = next) {
@@ -2748,6 +3138,76 @@ void erl_nif_init()
     resource_type_list.owner = NULL;
     resource_type_list.module = THE_NON_VALUE;
     resource_type_list.name = THE_NON_VALUE;
+
+}
+
+int erts_nif_get_funcs(struct erl_module_nif* mod,
+		       ErlNifFunc **funcs)
+{
+    *funcs = mod->entry->funcs;
+    return mod->entry->num_of_funcs;
+}
+
+Eterm erts_nif_call_function(Process *p, Process *tracee,
+			     struct erl_module_nif* mod,
+			     ErlNifFunc *fun, int argc, Eterm *argv)
+{
+    Eterm nif_result;
+#ifdef DEBUG
+    /* Verify that function is part of this module */
+    int i;
+    for (i = 0; i < mod->entry->num_of_funcs; i++)
+	if (fun == &(mod->entry->funcs[i]))
+	    break;
+    ASSERT(i < mod->entry->num_of_funcs);
+    if (p)
+	ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks(p) & ERTS_PROC_LOCK_MAIN
+			   || erts_smp_thr_progress_is_blocking());
+#endif
+    if (p) {
+	/* This is almost a normal nif call like in beam_emu,
+	   except that any heap fragment created in the nif will be
+	   discarded without checking if anything in it is live.
+	   This is because we cannot do a GC here as we don't know
+	   the number of live registers that have to be preserved.
+	   This means that any heap part of the returned term may
+	   not be used outside this function. */
+	struct enif_environment_t env;
+	ErlHeapFragment *orig_hf = MBUF(p);
+	ErlOffHeap orig_oh = MSO(p);
+	ASSERT(is_internal_pid(p->common.id));
+	MBUF(p) = NULL;
+	clear_offheap(&MSO(p));
+
+	erts_pre_nif(&env, p, mod, tracee);
+	nif_result = (*fun->fptr)(&env, argc, argv);
+	if (env.exception_thrown)
+	    nif_result = THE_NON_VALUE;
+	erts_post_nif(&env);
+
+	/* Free any offheap and heap fragments created in nif */
+	if (MSO(p).first) {
+	    erts_cleanup_offheap(&MSO(p));
+	    clear_offheap(&MSO(p));
+	}
+	if (MBUF(p))
+	    free_message_buffer(MBUF(p));
+
+	/* restore original heap fragment list */
+	MBUF(p) = orig_hf;
+	MSO(p) = orig_oh;
+    } else {
+	/* Nif call was done without a process context,
+	   so we create a phony one. */
+	struct enif_msg_environment_t msg_env;
+	pre_nif_noproc(&msg_env, mod, tracee);
+	nif_result = (*fun->fptr)(&msg_env.env, argc, argv);
+	if (msg_env.env.exception_thrown)
+	    nif_result = THE_NON_VALUE;
+	post_nif_noproc(&msg_env);
+    }
+
+    return nif_result;
 }
 
 #ifdef USE_VM_PROBES
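erts_nif_call_function above is VM-internal (the tracer framework uses it to call back into tracer NIFs); the pre_nif_noproc/post_nif_noproc rework is what unload callbacks and resource destructors now run inside. From the NIF author's side nothing changes. A hedged sketch (my_res_t and my_res_dtor are hypothetical):

    #include "erl_nif.h"

    /* Hypothetical resource type; its destructor now runs inside the
     * enif_msg_environment_t set up by pre_nif_noproc() above. */
    typedef struct { void *buf; } my_res_t;

    static void my_res_dtor(ErlNifEnv *env, void *obj)
    {
        my_res_t *res = (my_res_t *)obj;
        enif_free(res->buf); /* release memory owned by the resource */
    }

    /* Registered as usual during load, e.g.:
       enif_open_resource_type(env, NULL, "my_res", my_res_dtor,
                               ERL_NIF_RT_CREATE, NULL);             */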
