path: root/erts/emulator/beam
author    Erlang/OTP <[email protected]>  2009-11-20 14:54:40 +0000
committer Erlang/OTP <[email protected]>  2009-11-20 14:54:40 +0000
commit    84adefa331c4159d432d22840663c38f155cd4c1 (patch)
tree      bff9a9c66adda4df2106dfd0e5c053ab182a12bd /erts/emulator/beam
The R13B03 release. (tag: OTP_R13B03)
Diffstat (limited to 'erts/emulator/beam')
-rw-r--r--  erts/emulator/beam/atom.c  354
-rw-r--r--  erts/emulator/beam/atom.h  104
-rw-r--r--  erts/emulator/beam/atom.names  540
-rw-r--r--  erts/emulator/beam/beam_bif_load.c  795
-rw-r--r--  erts/emulator/beam/beam_bp.c  785
-rw-r--r--  erts/emulator/beam/beam_bp.h  165
-rw-r--r--  erts/emulator/beam/beam_catches.c  102
-rw-r--r--  erts/emulator/beam/beam_catches.h  32
-rw-r--r--  erts/emulator/beam/beam_debug.c  548
-rw-r--r--  erts/emulator/beam/beam_emu.c  6198
-rw-r--r--  erts/emulator/beam/beam_load.c  5234
-rw-r--r--  erts/emulator/beam/beam_load.h  120
-rw-r--r--  erts/emulator/beam/benchmark.c  395
-rw-r--r--  erts/emulator/beam/benchmark.h  340
-rw-r--r--  erts/emulator/beam/bif.c  4201
-rw-r--r--  erts/emulator/beam/bif.h  386
-rw-r--r--  erts/emulator/beam/bif.tab  761
-rw-r--r--  erts/emulator/beam/big.c  2241
-rw-r--r--  erts/emulator/beam/big.h  155
-rw-r--r--  erts/emulator/beam/binary.c  677
-rw-r--r--  erts/emulator/beam/break.c  747
-rw-r--r--  erts/emulator/beam/copy.c  981
-rw-r--r--  erts/emulator/beam/decl.h  55
-rw-r--r--  erts/emulator/beam/dist.c  3256
-rw-r--r--  erts/emulator/beam/dist.h  290
-rw-r--r--  erts/emulator/beam/elib_malloc.c  2334
-rw-r--r--  erts/emulator/beam/elib_memmove.c  113
-rw-r--r--  erts/emulator/beam/elib_stat.h  45
-rw-r--r--  erts/emulator/beam/erl_afit_alloc.c  256
-rw-r--r--  erts/emulator/beam/erl_afit_alloc.h  67
-rw-r--r--  erts/emulator/beam/erl_alloc.c  3157
-rw-r--r--  erts/emulator/beam/erl_alloc.h  564
-rw-r--r--  erts/emulator/beam/erl_alloc.types  383
-rw-r--r--  erts/emulator/beam/erl_alloc_util.c  3467
-rw-r--r--  erts/emulator/beam/erl_alloc_util.h  342
-rw-r--r--  erts/emulator/beam/erl_arith.c  2040
-rw-r--r--  erts/emulator/beam/erl_async.c  469
-rw-r--r--  erts/emulator/beam/erl_bestfit_alloc.c  1161
-rw-r--r--  erts/emulator/beam/erl_bestfit_alloc.h  64
-rw-r--r--  erts/emulator/beam/erl_bif_chksum.c  612
-rw-r--r--  erts/emulator/beam/erl_bif_ddll.c  1964
-rw-r--r--  erts/emulator/beam/erl_bif_guard.c  628
-rw-r--r--  erts/emulator/beam/erl_bif_info.c  3803
-rw-r--r--  erts/emulator/beam/erl_bif_lists.c  392
-rw-r--r--  erts/emulator/beam/erl_bif_op.c  327
-rw-r--r--  erts/emulator/beam/erl_bif_os.c  190
-rw-r--r--  erts/emulator/beam/erl_bif_port.c  1476
-rw-r--r--  erts/emulator/beam/erl_bif_re.c  1142
-rw-r--r--  erts/emulator/beam/erl_bif_timer.c  701
-rw-r--r--  erts/emulator/beam/erl_bif_timer.h  36
-rw-r--r--  erts/emulator/beam/erl_bif_trace.c  2106
-rw-r--r--  erts/emulator/beam/erl_binary.h  282
-rw-r--r--  erts/emulator/beam/erl_bits.c  1975
-rw-r--r--  erts/emulator/beam/erl_bits.h  212
-rw-r--r--  erts/emulator/beam/erl_db.c  3631
-rw-r--r--  erts/emulator/beam/erl_db.h  247
-rw-r--r--  erts/emulator/beam/erl_db_hash.c  2868
-rw-r--r--  erts/emulator/beam/erl_db_hash.h  103
-rw-r--r--  erts/emulator/beam/erl_db_tree.c  3289
-rw-r--r--  erts/emulator/beam/erl_db_tree.h  55
-rw-r--r--  erts/emulator/beam/erl_db_util.c  4651
-rw-r--r--  erts/emulator/beam/erl_db_util.h  405
-rw-r--r--  erts/emulator/beam/erl_debug.c  899
-rw-r--r--  erts/emulator/beam/erl_debug.h  102
-rw-r--r--  erts/emulator/beam/erl_driver.h  626
-rw-r--r--  erts/emulator/beam/erl_drv_thread.c  706
-rw-r--r--  erts/emulator/beam/erl_fun.c  315
-rw-r--r--  erts/emulator/beam/erl_fun.h  92
-rw-r--r--  erts/emulator/beam/erl_gc.c  2690
-rw-r--r--  erts/emulator/beam/erl_gc.h  72
-rw-r--r--  erts/emulator/beam/erl_goodfit_alloc.c  662
-rw-r--r--  erts/emulator/beam/erl_goodfit_alloc.h  88
-rw-r--r--  erts/emulator/beam/erl_init.c  1461
-rw-r--r--  erts/emulator/beam/erl_instrument.c  1221
-rw-r--r--  erts/emulator/beam/erl_instrument.h  41
-rw-r--r--  erts/emulator/beam/erl_lock_check.c  1307
-rw-r--r--  erts/emulator/beam/erl_lock_check.h  117
-rw-r--r--  erts/emulator/beam/erl_lock_count.c  675
-rw-r--r--  erts/emulator/beam/erl_lock_count.h  195
-rw-r--r--  erts/emulator/beam/erl_math.c  233
-rw-r--r--  erts/emulator/beam/erl_md5.c  340
-rw-r--r--  erts/emulator/beam/erl_message.c  1070
-rw-r--r--  erts/emulator/beam/erl_message.h  251
-rw-r--r--  erts/emulator/beam/erl_monitors.c  1019
-rw-r--r--  erts/emulator/beam/erl_monitors.h  180
-rw-r--r--  erts/emulator/beam/erl_mtrace.c  1240
-rw-r--r--  erts/emulator/beam/erl_mtrace.h  51
-rw-r--r--  erts/emulator/beam/erl_nif.c  641
-rw-r--r--  erts/emulator/beam/erl_nif.h  122
-rw-r--r--  erts/emulator/beam/erl_nif_api_funcs.h  68
-rw-r--r--  erts/emulator/beam/erl_nmgc.c  1402
-rw-r--r--  erts/emulator/beam/erl_nmgc.h  364
-rw-r--r--  erts/emulator/beam/erl_node_container_utils.h  318
-rw-r--r--  erts/emulator/beam/erl_node_tables.c  1660
-rw-r--r--  erts/emulator/beam/erl_node_tables.h  261
-rw-r--r--  erts/emulator/beam/erl_obsolete.c  186
-rw-r--r--  erts/emulator/beam/erl_port_task.c  1100
-rw-r--r--  erts/emulator/beam/erl_port_task.h  135
-rw-r--r--  erts/emulator/beam/erl_posix_str.c  641
-rw-r--r--  erts/emulator/beam/erl_printf_term.c  458
-rw-r--r--  erts/emulator/beam/erl_printf_term.h  26
-rw-r--r--  erts/emulator/beam/erl_process.c  9469
-rw-r--r--  erts/emulator/beam/erl_process.h  1495
-rw-r--r--  erts/emulator/beam/erl_process_dict.c  1001
-rw-r--r--  erts/emulator/beam/erl_process_dict.h  42
-rw-r--r--  erts/emulator/beam/erl_process_dump.c  454
-rw-r--r--  erts/emulator/beam/erl_process_lock.c  1431
-rw-r--r--  erts/emulator/beam/erl_process_lock.h  990
-rw-r--r--  erts/emulator/beam/erl_resolv_dns.c  23
-rw-r--r--  erts/emulator/beam/erl_resolv_nodns.c  23
-rw-r--r--  erts/emulator/beam/erl_smp.h  993
-rw-r--r--  erts/emulator/beam/erl_sock.h  44
-rw-r--r--  erts/emulator/beam/erl_sys_driver.h  44
-rw-r--r--  erts/emulator/beam/erl_term.c  174
-rw-r--r--  erts/emulator/beam/erl_term.h  1056
-rw-r--r--  erts/emulator/beam/erl_threads.h  1524
-rw-r--r--  erts/emulator/beam/erl_time.h  67
-rw-r--r--  erts/emulator/beam/erl_time_sup.c  899
-rw-r--r--  erts/emulator/beam/erl_trace.c  3260
-rw-r--r--  erts/emulator/beam/erl_unicode.c  1815
-rw-r--r--  erts/emulator/beam/erl_unicode.h  23
-rw-r--r--  erts/emulator/beam/erl_vm.h  204
-rw-r--r--  erts/emulator/beam/erl_zlib.c  113
-rw-r--r--  erts/emulator/beam/erl_zlib.h  52
-rw-r--r--  erts/emulator/beam/error.h  196
-rw-r--r--  erts/emulator/beam/export.c  296
-rw-r--r--  erts/emulator/beam/export.h  79
-rw-r--r--  erts/emulator/beam/external.c  2839
-rw-r--r--  erts/emulator/beam/external.h  211
-rw-r--r--  erts/emulator/beam/fix_alloc.c  287
-rw-r--r--  erts/emulator/beam/global.h  1800
-rw-r--r--  erts/emulator/beam/hash.c  407
-rw-r--r--  erts/emulator/beam/hash.h  97
-rw-r--r--  erts/emulator/beam/index.c  137
-rw-r--r--  erts/emulator/beam/index.h  71
-rw-r--r--  erts/emulator/beam/io.c  4732
-rw-r--r--  erts/emulator/beam/module.c  134
-rw-r--r--  erts/emulator/beam/module.h  56
-rw-r--r--  erts/emulator/beam/ops.tab  1430
-rw-r--r--  erts/emulator/beam/packet_parser.c  847
-rw-r--r--  erts/emulator/beam/packet_parser.h  181
-rw-r--r--  erts/emulator/beam/register.c  655
-rw-r--r--  erts/emulator/beam/register.h  66
-rw-r--r--  erts/emulator/beam/safe_hash.c  276
-rw-r--r--  erts/emulator/beam/safe_hash.h  104
-rw-r--r--  erts/emulator/beam/sys.h  1257
-rw-r--r--  erts/emulator/beam/time.c  571
-rw-r--r--  erts/emulator/beam/utils.c  4053
-rw-r--r--  erts/emulator/beam/version.h  19
149 files changed, 144051 insertions, 0 deletions
diff --git a/erts/emulator/beam/atom.c b/erts/emulator/beam/atom.c
new file mode 100644
index 0000000000..dfc3cde6a7
--- /dev/null
+++ b/erts/emulator/beam/atom.c
@@ -0,0 +1,354 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 1996-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+
+#include "sys.h"
+#include "erl_sys_driver.h"
+#include "erl_vm.h"
+#include "global.h"
+#include "hash.h"
+#include "atom.h"
+
+
+#define ATOM_SIZE 3000
+
+IndexTable erts_atom_table; /* The index table */
+
+#include "erl_smp.h"
+
+static erts_smp_rwmtx_t atom_table_lock;
+
+#define atom_read_lock() erts_smp_rwmtx_rlock(&atom_table_lock)
+#define atom_read_unlock() erts_smp_rwmtx_runlock(&atom_table_lock)
+#define atom_write_lock() erts_smp_rwmtx_rwlock(&atom_table_lock)
+#define atom_write_unlock() erts_smp_rwmtx_rwunlock(&atom_table_lock)
+#define atom_init_lock() erts_smp_rwmtx_init(&atom_table_lock, \
+ "atom_tab")
+#if 0
+#define ERTS_ATOM_PUT_OPS_STAT
+#endif
+#ifdef ERTS_ATOM_PUT_OPS_STAT
+static erts_smp_atomic_t atom_put_ops;
+#endif
+
+/* Functions for allocating space for the text of atoms. We do not
+ * use malloc for each atom, to prevent excessive memory fragmentation.
+ */
+
+typedef struct _atom_text {
+ struct _atom_text* next;
+ unsigned char text[ATOM_TEXT_SIZE];
+} AtomText;
+
+static AtomText* text_list; /* List of text buffers */
+static byte *atom_text_pos;
+static byte *atom_text_end;
+static Uint reserved_atom_space; /* Total amount of atom text space */
+static Uint atom_space; /* Amount of atom text space used */
+
+/*
+ * Print info about atom tables
+ */
+void atom_info(int to, void *to_arg)
+{
+ int lock = !ERTS_IS_CRASH_DUMPING;
+ if (lock)
+ atom_read_lock();
+ index_info(to, to_arg, &erts_atom_table);
+#ifdef ERTS_ATOM_PUT_OPS_STAT
+ erts_print(to, to_arg, "atom_put_ops: %ld\n",
+ erts_smp_atomic_read(&atom_put_ops));
+#endif
+
+ if (lock)
+ atom_read_unlock();
+}
+
+/*
+ * Allocate an atom text segment.
+ */
+static void
+more_atom_space(void)
+{
+ AtomText* ptr;
+
+ ptr = (AtomText*) erts_alloc(ERTS_ALC_T_ATOM_TXT, sizeof(AtomText));
+
+ ptr->next = text_list;
+ text_list = ptr;
+
+ atom_text_pos = ptr->text;
+ atom_text_end = atom_text_pos + ATOM_TEXT_SIZE;
+ reserved_atom_space += sizeof(AtomText);
+
+ VERBOSE(DEBUG_SYSTEM,("Allocated %d atom space\n",ATOM_TEXT_SIZE));
+}
+
+/*
+ * Allocate string space within an atom text segment.
+ */
+
+static byte*
+atom_text_alloc(int bytes)
+{
+ byte *res;
+
+ ASSERT(bytes <= MAX_ATOM_LENGTH);
+ if (atom_text_pos + bytes >= atom_text_end) {
+ more_atom_space();
+ }
+ res = atom_text_pos;
+ atom_text_pos += bytes;
+ atom_space += bytes;
+ return res;
+}
+
+/*
+ * Calculate atom hash value (using the hash algorithm
+ * hashpjw from the Dragon Book).
+ */
+
+static HashValue
+atom_hash(Atom* obj)
+{
+ byte* p = obj->name;
+ int len = obj->len;
+ HashValue h = 0, g;
+
+ while(len--) {
+ h = (h << 4) + *p++;
+ if ((g = h & 0xf0000000)) {
+ h ^= (g >> 24);
+ h ^= g;
+ }
+ }
+ return h;
+}
+
+
+static int
+atom_cmp(Atom* tmpl, Atom* obj)
+{
+ if (tmpl->len == obj->len &&
+ sys_memcmp(tmpl->name, obj->name, tmpl->len) == 0)
+ return 0;
+ return 1;
+}
+
+
+static Atom*
+atom_alloc(Atom* tmpl)
+{
+ Atom* obj = (Atom*) erts_alloc(ERTS_ALC_T_ATOM, sizeof(Atom));
+
+ obj->name = atom_text_alloc(tmpl->len);
+ sys_memcpy(obj->name, tmpl->name, tmpl->len);
+ obj->len = tmpl->len;
+ obj->slot.index = -1;
+
+ /*
+ * Precompute ordinal value of first 3 bytes + 7 bits.
+ * This is used by utils.c:cmp_atoms().
+ * We cannot use the full 32 bits of the first 4 bytes,
+ * since we use the sign of the difference between two
+ * ordinal values to represent their relative order.
+ */
+ {
+ unsigned char c[4];
+ int i;
+ int j;
+
+ j = (tmpl->len < 4) ? tmpl->len : 4;
+ for(i = 0; i < j; ++i)
+ c[i] = tmpl->name[i];
+ for(; i < 4; ++i)
+ c[i] = '\0';
+ obj->ord0 = (c[0] << 23) + (c[1] << 15) + (c[2] << 7) + (c[3] >> 1);
+ }
+ return obj;
+}
+
+static void
+atom_free(Atom* obj)
+{
+ erts_free(ERTS_ALC_T_ATOM, (void*) obj);
+}
+
+Eterm
+am_atom_put(const char* name, int len)
+{
+ Atom a;
+ Eterm ret;
+ int aix;
+
+ /*
+ * Silently truncate the atom if it is too long. Overlong atoms
+ * could occur in situations where we have no good way to return
+ * an error, such as in the I/O system. (Unfortunately, many
+ * drivers don't check for errors.)
+ *
+ * If an error should be produced for overlong atoms (such in
+ * list_to_atom/1), the caller should check the length before
+ * calling this function.
+ */
+ if (len > MAX_ATOM_LENGTH) {
+ len = MAX_ATOM_LENGTH;
+ }
+#ifdef ERTS_ATOM_PUT_OPS_STAT
+ erts_smp_atomic_inc(&atom_put_ops);
+#endif
+ a.len = len;
+ a.name = (byte*)name;
+ atom_read_lock();
+ aix = index_get(&erts_atom_table, (void*) &a);
+ atom_read_unlock();
+ if (aix >= 0)
+ ret = make_atom(aix);
+ else {
+ atom_write_lock();
+ ret = make_atom(index_put(&erts_atom_table, (void*) &a));
+ atom_write_unlock();
+ }
+ return ret;
+}
+
+
+int atom_table_size(void)
+{
+ int ret;
+#ifdef ERTS_SMP
+ int lock = !ERTS_IS_CRASH_DUMPING;
+ if (lock)
+ atom_read_lock();
+#endif
+ ret = erts_atom_table.entries;
+#ifdef ERTS_SMP
+ if (lock)
+ atom_read_unlock();
+#endif
+ return ret;
+}
+
+int atom_table_sz(void)
+{
+ int ret;
+#ifdef ERTS_SMP
+ int lock = !ERTS_IS_CRASH_DUMPING;
+ if (lock)
+ atom_read_lock();
+#endif
+ ret = index_table_sz(&erts_atom_table);
+#ifdef ERTS_SMP
+ if (lock)
+ atom_read_unlock();
+#endif
+ return ret;
+}
+
+int
+erts_atom_get(const char *name, int len, Eterm* ap)
+{
+ Atom a;
+ int i;
+ int res;
+
+ a.len = len;
+ a.name = (byte *)name;
+ atom_read_lock();
+ i = index_get(&erts_atom_table, (void*) &a);
+ res = i < 0 ? 0 : (*ap = make_atom(i), 1);
+ atom_read_unlock();
+ return res;
+}
+
+void
+erts_atom_get_text_space_sizes(Uint *reserved, Uint *used)
+{
+#ifdef ERTS_SMP
+ int lock = !ERTS_IS_CRASH_DUMPING;
+ if (lock)
+ atom_read_lock();
+#endif
+ if (reserved)
+ *reserved = reserved_atom_space;
+ if (used)
+ *used = atom_space;
+#ifdef ERTS_SMP
+ if (lock)
+ atom_read_unlock();
+#endif
+}
+
+void
+init_atom_table(void)
+{
+ HashFunctions f;
+ int i;
+ Atom a;
+
+#ifdef ERTS_ATOM_PUT_OPS_STAT
+ erts_smp_atomic_init(&atom_put_ops, 0);
+#endif
+
+ atom_init_lock();
+ f.hash = (H_FUN) atom_hash;
+ f.cmp = (HCMP_FUN) atom_cmp;
+ f.alloc = (HALLOC_FUN) atom_alloc;
+ f.free = (HFREE_FUN) atom_free;
+
+ atom_text_pos = NULL;
+ atom_text_end = NULL;
+ reserved_atom_space = 0;
+ atom_space = 0;
+ text_list = NULL;
+
+ erts_index_init(ERTS_ALC_T_ATOM_TABLE, &erts_atom_table,
+ "atom_tab", ATOM_SIZE, ATOM_LIMIT, f);
+ more_atom_space();
+
+ /* Ordinary atoms */
+ for (i = 0; erl_atom_names[i] != 0; i++) {
+ int ix;
+ a.len = strlen(erl_atom_names[i]);
+ a.name = (byte*)erl_atom_names[i];
+ a.slot.index = i;
+ ix = index_put(&erts_atom_table, (void*) &a);
+ atom_text_pos -= a.len;
+ atom_space -= a.len;
+ atom_tab(ix)->name = (byte*)erl_atom_names[i];
+ }
+}
+
+void
+dump_atoms(int to, void *to_arg)
+{
+ int i = erts_atom_table.entries;
+
+ /*
+ * Print out the atom table starting from the end.
+ */
+ while (--i >= 0) {
+ if (erts_index_lookup(&erts_atom_table, i)) {
+ erts_print(to, to_arg, "%T\n", make_atom(i));
+ }
+ }
+}
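For orientation, a minimal usage sketch of the interning API above (the caller atom_roundtrip_check is invented for illustration, not part of this commit). am_atom_put() interns a name, creating the atom on first use, while erts_atom_get() is a pure lookup that never creates an entry:

    #include "sys.h"
    #include "atom.h"

    /* Hypothetical illustration: intern an atom, then look it up again. */
    static void atom_roundtrip_check(void)
    {
        Eterm created, found;

        /* Creates the atom on first use; repeated calls return the same term. */
        created = am_atom_put("example", 7);

        /* Pure lookup: returns 0 if the name was never interned and
         * never allocates a new table entry. */
        if (erts_atom_get("example", 7, &found)) {
            ASSERT(created == found);   /* same index in erts_atom_table */
        }
    }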
diff --git a/erts/emulator/beam/atom.h b/erts/emulator/beam/atom.h
new file mode 100644
index 0000000000..e7e0dc440d
--- /dev/null
+++ b/erts/emulator/beam/atom.h
@@ -0,0 +1,104 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 1996-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+#ifndef __ATOM_H__
+#define __ATOM_H__
+
+#ifndef __INDEX_H__
+#include "index.h"
+#endif
+
+#include "erl_atom_table.h"
+
+#define MAX_ATOM_LENGTH 255
+#define ATOM_LIMIT (1024*1024)
+
+/*
+ * Atom entry.
+ */
+typedef struct atom {
+ IndexSlot slot; /* MUST BE LOCATED AT TOP OF STRUCT!!! */
+ int len; /* length of atom name */
+ int ord0; /* ordinal value of first 3 bytes + 7 bits */
+ byte* name; /* name of atom */
+} Atom;
+
+extern IndexTable erts_atom_table;
+
+ERTS_GLB_INLINE Atom* atom_tab(Uint i);
+ERTS_GLB_INLINE int erts_is_atom_bytes(byte *text, size_t len, Eterm term);
+ERTS_GLB_INLINE int erts_is_atom_str(char *str, Eterm term);
+
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+ERTS_GLB_INLINE Atom*
+atom_tab(Uint i)
+{
+ return (Atom *) erts_index_lookup(&erts_atom_table, i);
+}
+
+ERTS_GLB_INLINE int erts_is_atom_bytes(byte *text, size_t len, Eterm term)
+{
+ Atom *a;
+ if (!is_atom(term))
+ return 0;
+ a = atom_tab(atom_val(term));
+ return (len == (size_t) a->len
+ && sys_memcmp((void *) a->name, (void *) text, len) == 0);
+}
+
+ERTS_GLB_INLINE int erts_is_atom_str(char *str, Eterm term)
+{
+ Atom *a;
+ int i, len;
+ char *aname;
+ if (!is_atom(term))
+ return 0;
+ a = atom_tab(atom_val(term));
+ len = a->len;
+ aname = (char *) a->name;
+ for (i = 0; i < len; i++)
+ if (aname[i] != str[i] || str[i] == '\0')
+ return 0;
+ return str[len] == '\0';
+}
+
+#endif
+
+/*
+ * Note, ERTS_IS_ATOM_STR() expects the first argument to be a
+ * string literal.
+ */
+#define ERTS_IS_ATOM_STR(LSTR, TERM) \
+ (erts_is_atom_bytes((byte *) LSTR, sizeof(LSTR) - 1, (TERM)))
+#define ERTS_DECL_AM(S) Eterm AM_ ## S = am_atom_put(#S, sizeof(#S) - 1)
+#define ERTS_INIT_AM(S) AM_ ## S = am_atom_put(#S, sizeof(#S) - 1)
+
+int atom_table_size(void); /* number of elements */
+int atom_table_sz(void); /* table size in bytes, excluding stored objects */
+
+Eterm am_atom_put(const char*, int); /* most callers pass plain char*'s */
+int atom_erase(byte*, int);
+int atom_static_put(byte*, int);
+void init_atom_table(void);
+void atom_info(int, void *);
+void dump_atoms(int, void *);
+int erts_atom_get(const char* name, int len, Eterm* ap);
+void erts_atom_get_text_space_sizes(Uint *reserved, Uint *used);
+#endif
+
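The ERTS_DECL_AM and ERTS_IS_ATOM_STR macros defined above are easiest to read from a concrete use site; a hedged sketch follows (the helper and the atom name are invented, not from this commit):

    #include "atom.h"

    /* Hypothetical helper: recognize the atom 'ready' and return the
     * interned term for it, or am_false otherwise. */
    static Eterm normalize_ready(Eterm term)
    {
        /* Expands to: Eterm AM_ready = am_atom_put("ready", sizeof("ready") - 1); */
        ERTS_DECL_AM(ready);

        /* Compares against the string literal without interning anything;
         * the literal's length is computed at compile time. */
        if (ERTS_IS_ATOM_STR("ready", term))
            return AM_ready;
        return am_false;
    }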
diff --git a/erts/emulator/beam/atom.names b/erts/emulator/beam/atom.names
new file mode 100644
index 0000000000..04eac2d807
--- /dev/null
+++ b/erts/emulator/beam/atom.names
@@ -0,0 +1,540 @@
+#
+# %CopyrightBegin%
+#
+# Copyright Ericsson AB 1996-2009. All Rights Reserved.
+#
+# The contents of this file are subject to the Erlang Public License,
+# Version 1.1, (the "License"); you may not use this file except in
+# compliance with the License. You should have received a copy of the
+# Erlang Public License along with this software. If not, it can be
+# retrieved online at http://www.erlang.org/.
+#
+# Software distributed under the License is distributed on an "AS IS"
+# basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+# the License for the specific language governing rights and limitations
+# under the License.
+#
+# %CopyrightEnd%
+#
+
+#
+# File format:
+#
+# Lines starting with '#' are ignored.
+#
+# <atom-decl> ::= "atom" <atom>+
+# <atom> ::= <atom-name> |
+# "'" <printname> "'" |
+# <C-name> "=" "'" <printname> "'"
+# <atom-name> ::= [a-z][a-zA-Z_0-9]*
+# <C-name> ::= [A-Z][a-zA-Z_0-9]*
+# <printname> ::= .*
+#
+# (although some characters may poison the Perl parser)
+#
+
+#
+# Frequently used atoms.
+#
+atom false true
+atom Underscore='_'
+atom Noname='nonode@nohost'
+atom EOT='$end_of_table'
+atom Cookie=''
+
+#
+# Used in the Beam emulator loop. (Smaller literals usually mean tighter code.)
+#
+atom fun infinity timeout normal call return
+atom throw error exit
+atom undefined
+
+#
+# Used in beam_emu.c.
+#
+atom nocatch
+atom undefined_function
+atom undefined_lambda
+
+#
+# All other atoms. Try to keep the order alphabetic.
+#
+atom DOWN='DOWN'
+atom UP='UP'
+atom EXIT='EXIT'
+atom aborted
+atom abs_path
+atom absoluteURI
+atom active
+atom all
+atom all_but_first
+atom allocated
+atom allocated_areas
+atom allocator
+atom allocator_sizes
+atom alloc_util_allocators
+atom allow_passive_connect
+atom already_loaded
+atom anchored
+atom and
+atom andalso
+atom andthen
+atom any
+atom anycrlf
+atom apply
+atom args
+atom arg0
+atom arity
+atom asn1
+atom asynchronous
+atom atom
+atom atom_used
+atom attributes
+atom await_proc_exit
+atom awaiting_load
+atom awaiting_unload
+atom backtrace backtrace_depth
+atom badarg badarith badarity badfile badmatch badsig badfun
+atom bag
+atom band
+atom big
+atom bif_return_trap
+atom binary
+atom block
+atom blocked
+atom bnot
+atom bor
+atom bxor
+atom break_ignored
+atom breakpoint
+atom bsl
+atom bsr
+atom bsr_anycrlf
+atom bsr_unicode
+atom busy_dist_port
+atom busy_port
+atom call
+atom call_count
+atom caller
+atom capture
+atom case_clause
+atom caseless
+atom catchlevel
+atom cd
+atom cdr
+atom characters_to_binary_int
+atom characters_to_list_int
+atom clear
+atom close
+atom closed
+atom code
+atom command
+atom compat_rel
+atom compile
+atom compressed
+atom connect
+atom connected
+atom connection_closed
+atom cons
+atom const
+atom context_switches
+atom copy
+atom cpu
+atom cpu_timestamp
+atom cr
+atom crlf
+atom creation
+atom current_function
+atom data
+atom debug_flags
+atom delay_trap
+atom dexit
+atom depth
+atom dgroup_leader
+atom dictionary
+atom disable_trace
+atom disabled
+atom display_items
+atom dist
+atom Div='/'
+atom div
+atom dlink
+atom dmonitor_node
+atom dmonitor_p
+atom DollarDollar='$$'
+atom DollarUnderscore='$_'
+atom dollar_endonly
+atom dotall
+atom driver
+atom driver_options
+atom dsend
+atom dunlink
+atom duplicate_bag
+atom dupnames
+atom elib_malloc
+atom emulator
+atom enable_trace
+atom enabled
+atom endian
+atom env
+atom eof
+atom eol
+atom exception_from
+atom exception_trace
+atom extended
+atom Eq='=:='
+atom Eqeq='=='
+atom erlang
+atom ERROR='ERROR'
+atom error_handler
+atom error_logger
+atom ets
+atom ETS_TRANSFER='ETS-TRANSFER'
+atom event
+atom exact_reductions
+atom exclusive
+atom exit_status
+atom existing
+atom exiting
+atom exports
+atom external
+atom false
+atom fcgi
+atom fd
+atom first
+atom firstline
+atom flags
+atom flush
+atom flush_monitor_message
+atom force
+atom format_cpu_topology
+atom free
+atom fullsweep_after
+atom fullsweep_if_old_binaries
+atom fun
+atom function
+atom functions
+atom function_clause
+atom garbage_collecting
+atom garbage_collection
+atom gc_end
+atom gc_start
+atom Ge='>='
+atom generational
+atom get_seq_token
+atom get_tcw
+atom getenv
+atom getting_linked
+atom getting_unlinked
+atom global
+atom global_heaps_size
+atom Gt='>'
+atom grun
+atom group_leader
+atom heap_block_size
+atom heap_size
+atom heap_sizes
+atom heap_type
+atom heir
+atom hidden
+atom hide
+atom high
+atom hipe_architecture
+atom http httph https http_response http_request http_header http_eoh http_error http_bin httph_bin
+atom hybrid
+atom id
+atom if_clause
+atom imports
+atom in
+atom in_exiting
+atom inactive
+atom incomplete
+atom inconsistent
+atom index
+atom infinity
+atom info
+atom info_msg
+atom initial_call
+atom input
+atom internal_error
+atom internal_status
+atom instruction_counts
+atom invalid
+atom is_constant
+atom is_seq_trace
+atom io
+atom keypos
+atom kill
+atom killed
+atom kill_ports
+atom known
+atom label
+atom large_heap
+atom last_calls
+atom latin1
+atom Le='=<'
+atom lf
+atom line
+atom line_length
+atom linked_in_driver
+atom links
+atom list
+atom little
+atom loaded
+atom load_cancelled
+atom load_failure
+atom local
+atom long_gc
+atom low
+atom Lt='<'
+atom machine
+atom match
+atom match_spec
+atom max
+atom maximum
+atom max_tables max_processes
+atom mbuf_size
+atom memory
+atom memory_types
+atom message
+atom message_binary
+atom message_queue_len
+atom messages
+atom meta
+atom meta_match_spec
+atom min_heap_size
+atom minor_version
+atom Minus='-'
+atom module
+atom module_info
+atom monitored_by
+atom monitor
+atom monitor_nodes
+atom monitors
+atom more
+atom multi_scheduling
+atom multiline
+atom name
+atom named_table
+atom native_addresses
+atom Neq='=/='
+atom Neqeq='/='
+atom net_kernel
+atom net_kernel_terminated
+atom new
+atom new_index
+atom new_uniq
+atom newline
+atom next
+atom no
+atom nomatch
+atom none
+atom no_auto_capture
+atom noconnect
+atom noconnection
+atom nocookie
+atom node
+atom node_type
+atom nodedown
+atom nodedown_reason
+atom nodeup
+atom noeol
+atom nofile
+atom noproc
+atom normal
+atom nosuspend
+atom no_float
+atom no_integer
+atom no_network
+atom not
+atom not_a_list
+atom not_loaded
+atom not_loaded_by_this_process
+atom not_pending
+atom not_purged
+atom notalive
+atom notbol
+atom noteol
+atom notempty
+atom notify
+atom notsup
+atom nouse_stdio
+atom objects
+atom offset
+atom ok
+atom old_heap_block_size
+atom old_heap_size
+atom on_load
+atom open
+atom open_error
+atom or
+atom ordered_set
+atom orelse
+atom os_type
+atom os_version
+atom ose_bg_proc
+atom ose_int_proc
+atom ose_phantom
+atom ose_pri_proc
+atom ose_process_prio
+atom ose_process_type
+atom ose_ti_proc
+atom out
+atom out_exited
+atom out_exiting
+atom output
+atom overlapped_io
+atom owner
+atom packet
+atom packet_size
+atom Plus='+'
+atom pause
+atom pending
+atom pending_driver
+atom pending_process
+atom pending_reload
+atom permanent
+atom pid
+atom port
+atom ports
+atom port_count
+atom print
+atom priority
+atom private
+atom process
+atom processes
+atom processes_trap
+atom processes_used
+atom process_count
+atom process_display
+atom process_limit
+atom process_dump
+atom procs
+atom profile
+atom protected
+atom protection
+atom public
+atom purify
+atom quantify
+atom queue_size
+atom raw
+atom re
+atom re_pattern
+atom re_run_trap
+atom ready_input
+atom ready_output
+atom ready_async
+atom reason
+atom receive
+atom recent_size
+atom reductions
+atom refc
+atom register
+atom registered_name
+atom reload
+atom rem
+atom reset
+atom restart
+atom return_from
+atom return_to
+atom return_trace
+atom run_queue
+atom runnable
+atom runnable_ports
+atom runnable_procs
+atom running
+atom running_ports
+atom running_procs
+atom runtime
+atom save_calls
+atom scheduler
+atom scheduler_id
+atom schedulers_online
+atom scheme
+atom sensitive
+atom sequential_tracer
+atom sequential_trace_token
+atom serial
+atom set
+atom set_cpu_topology
+atom set_on_first_link
+atom set_on_first_spawn
+atom set_on_link
+atom set_on_spawn
+atom set_seq_token
+atom set_tcw
+atom set_tcw_fake
+atom separate
+atom shared
+atom silent
+atom size
+atom sl_alloc
+atom spawn_executable
+atom spawn_driver
+atom ssl_tls
+atom stack_size
+atom start
+atom status
+atom static
+atom stderr_to_stdout
+atom stop
+atom stream
+atom sunrm
+atom suspend
+atom suspended
+atom suspending
+atom sys_misc
+atom system
+atom system_error
+atom system_limit
+atom system_version
+atom system_architecture
+atom SYSTEM='SYSTEM'
+atom table
+atom this
+atom thread_pool_size
+atom threads
+atom timeout
+atom timeout_value
+atom Times='*'
+atom timestamp
+atom total
+atom total_heap_size
+atom tpkt
+atom trace trace_ts traced
+atom trace_control_word
+atom tracer
+atom trap_exit
+atom try_clause
+atom true
+atom tuple
+atom type
+atom ucompile
+atom undef
+atom ungreedy
+atom unicode
+atom unregister
+atom urun
+atom use_stdio
+atom used
+atom utf8
+atom unblock
+atom uniq
+atom unless_suspending
+atom unloaded
+atom unloading
+atom unloaded_only
+atom unload_cancelled
+atom value
+atom values
+atom version
+atom visible
+atom waiting
+atom wall_clock
+atom warning
+atom warning_msg
+atom wordsize
+atom write_concurrency
+atom xor
+atom yes
+atom yield
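The grammar described at the top of atom.names admits three declaration forms. As a hedged illustration (these atoms are invented, not part of the file), a declaration block could look like:

    # <atom-name> form: becomes am_sample_name in the generated C table
    atom sample_name
    # quoted <printname> form: the print name may contain arbitrary characters
    atom 'Odd text!'
    # <C-name>='<printname>' form: referenced as am_SampleCName from C
    atom SampleCName='sample text'

The C-name form is what lines such as atom Underscore='_' above use: C code refers to am_Underscore while the runtime print name is '_'.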
diff --git a/erts/emulator/beam/beam_bif_load.c b/erts/emulator/beam/beam_bif_load.c
new file mode 100644
index 0000000000..d3a1ed4e7d
--- /dev/null
+++ b/erts/emulator/beam/beam_bif_load.c
@@ -0,0 +1,795 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 1999-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+
+#include "sys.h"
+#include "erl_vm.h"
+#include "global.h"
+#include "erl_process.h"
+#include "error.h"
+#include "bif.h"
+#include "beam_load.h"
+#include "big.h"
+#include "beam_bp.h"
+#include "beam_catches.h"
+#include "erl_binary.h"
+#include "erl_nif.h"
+
+static void set_default_trace_pattern(Eterm module);
+static Eterm check_process_code(Process* rp, Module* modp);
+static void delete_code(Process *c_p, ErtsProcLocks c_p_locks, Module* modp);
+static void delete_export_references(Eterm module);
+static int purge_module(int module);
+static int is_native(Eterm* code);
+static int any_heap_ref_ptrs(Eterm* start, Eterm* end, char* mod_start, Uint mod_size);
+static int any_heap_refs(Eterm* start, Eterm* end, char* mod_start, Uint mod_size);
+static void remove_from_address_table(Eterm* code);
+
+Eterm
+load_module_2(BIF_ALIST_2)
+{
+ Eterm reason;
+ Eterm* hp;
+ int i;
+ int sz;
+ byte* code;
+ Eterm res;
+ byte* temp_alloc = NULL;
+
+ if (is_not_atom(BIF_ARG_1)) {
+ error:
+ erts_free_aligned_binary_bytes(temp_alloc);
+ BIF_ERROR(BIF_P, BADARG);
+ }
+ if ((code = erts_get_aligned_binary_bytes(BIF_ARG_2, &temp_alloc)) == NULL) {
+ goto error;
+ }
+ erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ erts_smp_block_system(0);
+
+ erts_export_consolidate();
+
+ hp = HAlloc(BIF_P, 3);
+ sz = binary_size(BIF_ARG_2);
+ if ((i = erts_load_module(BIF_P, 0,
+ BIF_P->group_leader, &BIF_ARG_1, code, sz)) < 0) {
+ switch (i) {
+ case -1: reason = am_badfile; break;
+ case -2: reason = am_nofile; break;
+ case -3: reason = am_not_purged; break;
+ case -4:
+ reason = am_atom_put("native_code", sizeof("native_code")-1);
+ break;
+ case -5:
+ {
+ /*
+ * The module contains an on_load function. The loader
+ * has loaded the module as usual, except that the
+	     * export entries do not point into the module, so it
+ * is not possible to call any code in the module.
+ */
+
+ ERTS_DECL_AM(on_load);
+ reason = AM_on_load;
+ break;
+ }
+ default: reason = am_badfile; break;
+ }
+ res = TUPLE2(hp, am_error, reason);
+ goto done;
+ }
+
+ set_default_trace_pattern(BIF_ARG_1);
+ res = TUPLE2(hp, am_module, BIF_ARG_1);
+
+ done:
+ erts_free_aligned_binary_bytes(temp_alloc);
+ erts_smp_release_system();
+ erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
+
+ BIF_RET(res);
+}
+
+BIF_RETTYPE purge_module_1(BIF_ALIST_1)
+{
+ int purge_res;
+
+ if (is_not_atom(BIF_ARG_1)) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+
+ erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ erts_smp_block_system(0);
+
+ erts_export_consolidate();
+ purge_res = purge_module(atom_val(BIF_ARG_1));
+
+ erts_smp_release_system();
+ erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
+
+ if (purge_res < 0) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+ BIF_RET(am_true);
+}
+
+BIF_RETTYPE code_is_module_native_1(BIF_ALIST_1)
+{
+ Module* modp;
+
+ if (is_not_atom(BIF_ARG_1)) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+ if ((modp = erts_get_module(BIF_ARG_1)) == NULL) {
+ return am_undefined;
+ }
+ return (is_native(modp->code) ||
+ (modp->old_code != 0 && is_native(modp->old_code))) ?
+ am_true : am_false;
+}
+
+BIF_RETTYPE code_make_stub_module_3(BIF_ALIST_3)
+{
+ Eterm res;
+
+ erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ erts_smp_block_system(0);
+
+ erts_export_consolidate();
+ res = erts_make_stub_module(BIF_P, BIF_ARG_1, BIF_ARG_2, BIF_ARG_3);
+
+ erts_smp_release_system();
+ erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ return res;
+}
+
+Eterm
+check_process_code_2(BIF_ALIST_2)
+{
+ Process* rp;
+ Module* modp;
+
+ if (is_not_atom(BIF_ARG_2)) {
+ goto error;
+ }
+ if (is_internal_pid(BIF_ARG_1)) {
+ Eterm res;
+ if (internal_pid_index(BIF_ARG_1) >= erts_max_processes)
+ goto error;
+ rp = erts_pid2proc_not_running(BIF_P, ERTS_PROC_LOCK_MAIN,
+ BIF_ARG_1, ERTS_PROC_LOCK_MAIN);
+ if (!rp) {
+ BIF_RET(am_false);
+ }
+ if (rp == ERTS_PROC_LOCK_BUSY) {
+ ERTS_BIF_YIELD2(bif_export[BIF_check_process_code_2], BIF_P,
+ BIF_ARG_1, BIF_ARG_2);
+ }
+ modp = erts_get_module(BIF_ARG_2);
+ res = check_process_code(rp, modp);
+#ifdef ERTS_SMP
+ if (BIF_P != rp)
+ erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_MAIN);
+#endif
+ BIF_RET(res);
+ }
+ else if (is_external_pid(BIF_ARG_1)
+ && external_pid_dist_entry(BIF_ARG_1) == erts_this_dist_entry) {
+ BIF_RET(am_false);
+ }
+
+ error:
+ BIF_ERROR(BIF_P, BADARG);
+}
+
+
+BIF_RETTYPE delete_module_1(BIF_ALIST_1)
+{
+ int res;
+
+ if (is_not_atom(BIF_ARG_1))
+ goto badarg;
+
+ erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ erts_smp_block_system(0);
+
+ {
+ Module *modp = erts_get_module(BIF_ARG_1);
+ if (!modp) {
+ res = am_undefined;
+ }
+ else if (modp->old_code != 0) {
+ erts_dsprintf_buf_t *dsbufp = erts_create_logger_dsbuf();
+ erts_dsprintf(dsbufp, "Module %T must be purged before loading\n",
+ BIF_ARG_1);
+ erts_send_error_to_logger(BIF_P->group_leader, dsbufp);
+ res = am_badarg;
+ }
+ else {
+ delete_export_references(BIF_ARG_1);
+ delete_code(BIF_P, 0, modp);
+ res = am_true;
+ }
+ }
+
+ erts_smp_release_system();
+ erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
+
+ if (res == am_badarg) {
+ badarg:
+ BIF_ERROR(BIF_P, BADARG);
+ }
+ BIF_RET(res);
+}
+
+BIF_RETTYPE module_loaded_1(BIF_ALIST_1)
+{
+ Module* modp;
+
+ if (is_not_atom(BIF_ARG_1)) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+ if ((modp = erts_get_module(BIF_ARG_1)) == NULL ||
+ modp->code == NULL ||
+ modp->code[MI_ON_LOAD_FUNCTION_PTR] != 0) {
+ BIF_RET(am_false);
+ }
+ BIF_RET(am_true);
+}
+
+BIF_RETTYPE pre_loaded_0(BIF_ALIST_0)
+{
+ return erts_preloaded(BIF_P);
+}
+
+BIF_RETTYPE loaded_0(BIF_ALIST_0)
+{
+ Eterm previous = NIL;
+ Eterm* hp;
+ int i;
+ int j = 0;
+
+ for (i = 0; i < module_code_size(); i++) {
+ if (module_code(i) != NULL &&
+ ((module_code(i)->code_length != 0) ||
+ (module_code(i)->old_code_length != 0))) {
+ j++;
+ }
+ }
+ if (j > 0) {
+ hp = HAlloc(BIF_P, j*2);
+
+ for (i = 0; i < module_code_size(); i++) {
+ if (module_code(i) != NULL &&
+ ((module_code(i)->code_length != 0) ||
+ (module_code(i)->old_code_length != 0))) {
+ previous = CONS(hp, make_atom(module_code(i)->module),
+ previous);
+ hp += 2;
+ }
+ }
+ }
+ BIF_RET(previous);
+}
+
+BIF_RETTYPE call_on_load_function_1(BIF_ALIST_1)
+{
+ Module* modp = erts_get_module(BIF_ARG_1);
+ Eterm on_load;
+
+ if (!modp || modp->code == 0) {
+ error:
+ BIF_ERROR(BIF_P, BADARG);
+ }
+ if ((on_load = modp->code[MI_ON_LOAD_FUNCTION_PTR]) == 0) {
+ goto error;
+ }
+ BIF_TRAP_CODE_PTR_0(BIF_P, on_load);
+}
+
+BIF_RETTYPE finish_after_on_load_2(BIF_ALIST_2)
+{
+ Module* modp = erts_get_module(BIF_ARG_1);
+ Eterm on_load;
+
+ if (!modp || modp->code == 0) {
+ error:
+ BIF_ERROR(BIF_P, BADARG);
+ }
+ if ((on_load = modp->code[MI_ON_LOAD_FUNCTION_PTR]) == 0) {
+ goto error;
+ }
+ if (BIF_ARG_2 != am_false && BIF_ARG_2 != am_true) {
+ goto error;
+ }
+
+ erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ erts_smp_block_system(0);
+
+ if (BIF_ARG_2 == am_true) {
+ int i;
+
+ /*
+	 * The on_load function succeeded. Fix up export entries.
+ */
+ for (i = 0; i < export_list_size(); i++) {
+ Export *ep = export_list(i);
+ if (ep != NULL &&
+ ep->code[0] == BIF_ARG_1 &&
+ ep->code[4] != 0) {
+ ep->address = (void *) ep->code[4];
+ ep->code[3] = 0;
+ ep->code[4] = 0;
+ }
+ }
+ modp->code[MI_ON_LOAD_FUNCTION_PTR] = 0;
+ set_default_trace_pattern(BIF_ARG_1);
+ } else if (BIF_ARG_2 == am_false) {
+ Eterm* code;
+ Eterm* end;
+
+ /*
+ * The on_load function failed. Remove the loaded code.
+	 * This is a combination of delete and purge. We purge
+ * the current code; the old code is not touched.
+ */
+ erts_total_code_size -= modp->code_length;
+ code = modp->code;
+ end = (Eterm *)((char *)code + modp->code_length);
+ erts_cleanup_funs_on_purge(code, end);
+ beam_catches_delmod(modp->catches, code, modp->code_length);
+ erts_free(ERTS_ALC_T_CODE, (void *) code);
+ modp->code = NULL;
+ modp->code_length = 0;
+ modp->catches = BEAM_CATCHES_NIL;
+ remove_from_address_table(code);
+ }
+ erts_smp_release_system();
+ erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ BIF_RET(am_true);
+}
+
+
+static void
+set_default_trace_pattern(Eterm module)
+{
+ int trace_pattern_is_on;
+ Binary *match_spec;
+ Binary *meta_match_spec;
+ struct trace_pattern_flags trace_pattern_flags;
+ Eterm meta_tracer_pid;
+
+ erts_get_default_trace_pattern(&trace_pattern_is_on,
+ &match_spec,
+ &meta_match_spec,
+ &trace_pattern_flags,
+ &meta_tracer_pid);
+ if (trace_pattern_is_on) {
+ Eterm mfa[1];
+ mfa[0] = module;
+ (void) erts_set_trace_pattern(mfa, 1,
+ match_spec,
+ meta_match_spec,
+ 1, trace_pattern_flags,
+ meta_tracer_pid);
+ }
+}
+
+static Eterm
+check_process_code(Process* rp, Module* modp)
+{
+ Eterm* start;
+ char* mod_start;
+ Uint mod_size;
+ Eterm* end;
+ Eterm* sp;
+#ifndef HYBRID /* FIND ME! */
+ ErlFunThing* funp;
+ int done_gc = 0;
+#endif
+
+#define INSIDE(a) (start <= (a) && (a) < end)
+ if (modp == NULL) { /* Doesn't exist. */
+ return am_false;
+ } else if (modp->old_code == NULL) { /* No old code. */
+ return am_false;
+ }
+
+ /*
+ * Pick up limits for the module.
+ */
+ start = modp->old_code;
+ end = (Eterm *)((char *)start + modp->old_code_length);
+ mod_start = (char *) start;
+ mod_size = modp->old_code_length;
+
+ /*
+ * Check if current instruction or continuation pointer points into module.
+ */
+ if (INSIDE(rp->i) || INSIDE(rp->cp)) {
+ return am_true;
+ }
+
+ /*
+ * Check all continuation pointers stored on the stack.
+ */
+ for (sp = rp->stop; sp < STACK_START(rp); sp++) {
+ if (is_CP(*sp) && INSIDE(cp_val(*sp))) {
+ return am_true;
+ }
+ }
+
+ /*
+ * Check all continuation pointers stored in stackdump
+ * and clear exception stackdump if there is a pointer
+ * to the module.
+ */
+ if (rp->ftrace != NIL) {
+ struct StackTrace *s;
+ ASSERT(is_list(rp->ftrace));
+ s = (struct StackTrace *) big_val(CDR(list_val(rp->ftrace)));
+ if ((s->pc && INSIDE(s->pc)) ||
+ (s->current && INSIDE(s->current))) {
+ rp->freason = EXC_NULL;
+ rp->fvalue = NIL;
+ rp->ftrace = NIL;
+ } else {
+ int i;
+ for (i = 0; i < s->depth; i++) {
+ if (INSIDE(s->trace[i])) {
+ rp->freason = EXC_NULL;
+ rp->fvalue = NIL;
+ rp->ftrace = NIL;
+ break;
+ }
+ }
+ }
+ }
+
+ /*
+ * See if there are funs that refer to the old version of the module.
+ */
+
+#ifndef HYBRID /* FIND ME! */
+ rescan:
+ for (funp = MSO(rp).funs; funp; funp = funp->next) {
+ Eterm* fun_code;
+
+ fun_code = funp->fe->address;
+
+	if (INSIDE(fun_code)) {
+ if (done_gc) {
+ return am_true;
+ } else {
+ /*
+ * Try to get rid of this fun by garbage collecting.
+ * Clear both fvalue and ftrace to make sure they
+ * don't hold any funs.
+ */
+ rp->freason = EXC_NULL;
+ rp->fvalue = NIL;
+ rp->ftrace = NIL;
+ done_gc = 1;
+ FLAGS(rp) |= F_NEED_FULLSWEEP;
+ (void) erts_garbage_collect(rp, 0, rp->arg_reg, rp->arity);
+ goto rescan;
+ }
+ }
+ }
+#endif
+
+ /*
+ * See if there are constants inside the module referenced by the process.
+ */
+ done_gc = 0;
+ for (;;) {
+ ErlMessage* mp;
+
+ if (any_heap_ref_ptrs(&rp->fvalue, &rp->fvalue+1, mod_start, mod_size)) {
+ rp->freason = EXC_NULL;
+ rp->fvalue = NIL;
+ rp->ftrace = NIL;
+ }
+ if (any_heap_ref_ptrs(rp->stop, rp->hend, mod_start, mod_size)) {
+ goto need_gc;
+ }
+ if (any_heap_refs(rp->heap, rp->htop, mod_start, mod_size)) {
+ goto need_gc;
+ }
+
+ if (any_heap_refs(rp->old_heap, rp->old_htop, mod_start, mod_size)) {
+ goto need_gc;
+ }
+
+ if (rp->dictionary != NULL) {
+ Eterm* start = rp->dictionary->data;
+ Eterm* end = start + rp->dictionary->used;
+
+ if (any_heap_ref_ptrs(start, end, mod_start, mod_size)) {
+ goto need_gc;
+ }
+ }
+
+ for (mp = rp->msg.first; mp != NULL; mp = mp->next) {
+ if (any_heap_ref_ptrs(mp->m, mp->m+2, mod_start, mod_size)) {
+ goto need_gc;
+ }
+ }
+ break;
+
+ need_gc:
+ if (done_gc) {
+ return am_true;
+ } else {
+ Eterm* literals;
+ Uint lit_size;
+
+ /*
+	     * Try to get rid of constants by garbage collecting.
+ * Clear both fvalue and ftrace.
+ */
+ rp->freason = EXC_NULL;
+ rp->fvalue = NIL;
+ rp->ftrace = NIL;
+ done_gc = 1;
+ FLAGS(rp) |= F_NEED_FULLSWEEP;
+ (void) erts_garbage_collect(rp, 0, rp->arg_reg, rp->arity);
+ literals = (Eterm *) modp->old_code[MI_LITERALS_START];
+ lit_size = (Eterm *) modp->old_code[MI_LITERALS_END] - literals;
+ erts_garbage_collect_literals(rp, literals, lit_size);
+ }
+ }
+ return am_false;
+#undef INSIDE
+}
+
+#define in_area(ptr,start,nbytes) \
+ ((unsigned long)((char*)(ptr) - (char*)(start)) < (nbytes))
+
+static int
+any_heap_ref_ptrs(Eterm* start, Eterm* end, char* mod_start, Uint mod_size)
+{
+ Eterm* p;
+ Eterm val;
+
+ for (p = start; p < end; p++) {
+ val = *p;
+ switch (primary_tag(val)) {
+ case TAG_PRIMARY_BOXED:
+ case TAG_PRIMARY_LIST:
+ if (in_area(val, mod_start, mod_size)) {
+ return 1;
+ }
+ break;
+ }
+ }
+ return 0;
+}
+
+static int
+any_heap_refs(Eterm* start, Eterm* end, char* mod_start, Uint mod_size)
+{
+ Eterm* p;
+ Eterm val;
+
+ for (p = start; p < end; p++) {
+ val = *p;
+ switch (primary_tag(val)) {
+ case TAG_PRIMARY_BOXED:
+ case TAG_PRIMARY_LIST:
+ if (in_area(val, mod_start, mod_size)) {
+ return 1;
+ }
+ break;
+ case TAG_PRIMARY_HEADER:
+ if (!header_is_transparent(val)) {
+ Eterm* new_p = p + thing_arityval(val);
+ ASSERT(start <= new_p && new_p < end);
+ p = new_p;
+ }
+ }
+ }
+ return 0;
+}
+
+#undef in_area
+
+
+static int
+purge_module(int module)
+{
+ Eterm* code;
+ Eterm* end;
+ Module* modp;
+
+ /*
+ * Correct module?
+ */
+
+ if ((modp = erts_get_module(make_atom(module))) == NULL) {
+ return -2;
+ }
+
+ /*
+ * Any code to purge?
+ */
+ if (modp->old_code == 0) {
+ if (display_loads) {
+ erts_printf("No code to purge for %T\n", make_atom(module));
+ }
+ return -1;
+ }
+
+ /*
+ * Unload any NIF library
+ */
+ if (modp->old_nif.handle != NULL) {
+ if (modp->old_nif.entry->unload != NULL) {
+ ErlNifEnv env;
+ env.nif_data = modp->old_nif.data;
+	    env.proc = NULL; /* BUGBUG: unload cannot access the calling process */
+ env.hp = NULL;
+ env.hp_end = NULL;
+ env.heap_frag_sz = 0;
+ env.fpe_was_unmasked = erts_block_fpe();
+ modp->old_nif.entry->unload(NULL, modp->old_nif.data);
+ erts_unblock_fpe(env.fpe_was_unmasked);
+ }
+ erts_sys_ddll_close(modp->old_nif.handle);
+ modp->old_nif.handle = NULL;
+ modp->old_nif.entry = NULL;
+ }
+
+ /*
+ * Remove the old code.
+ */
+ ASSERT(erts_total_code_size >= modp->old_code_length);
+ erts_total_code_size -= modp->old_code_length;
+ code = modp->old_code;
+ end = (Eterm *)((char *)code + modp->old_code_length);
+ erts_cleanup_funs_on_purge(code, end);
+ beam_catches_delmod(modp->old_catches, code, modp->old_code_length);
+ erts_free(ERTS_ALC_T_CODE, (void *) code);
+ modp->old_code = NULL;
+ modp->old_code_length = 0;
+ modp->old_catches = BEAM_CATCHES_NIL;
+ remove_from_address_table(code);
+ return 0;
+}
+
+static void
+remove_from_address_table(Eterm* code)
+{
+ int i;
+
+ for (i = 0; i < num_loaded_modules; i++) {
+ if (modules[i].start == code) {
+ num_loaded_modules--;
+ while (i < num_loaded_modules) {
+ modules[i] = modules[i+1];
+ i++;
+ }
+ mid_module = &modules[num_loaded_modules/2];
+ return;
+ }
+ }
+ ASSERT(0); /* Not found? */
+}
+
+
+/*
+ * Move code from current to old.
+ */
+
+static void
+delete_code(Process *c_p, ErtsProcLocks c_p_locks, Module* modp)
+{
+#ifdef ERTS_ENABLE_LOCK_CHECK
+#ifdef ERTS_SMP
+ if (c_p && c_p_locks)
+ erts_proc_lc_chk_only_proc_main(c_p);
+ else
+#endif
+ erts_lc_check_exact(NULL, 0);
+#endif
+
+ /*
+ * Clear breakpoints if any
+ */
+ if (modp->code != NULL && modp->code[MI_NUM_BREAKPOINTS] > 0) {
+ if (c_p && c_p_locks)
+ erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN);
+ erts_smp_block_system(0);
+ erts_clear_module_break(modp);
+ modp->code[MI_NUM_BREAKPOINTS] = 0;
+ erts_smp_release_system();
+ if (c_p && c_p_locks)
+ erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MAIN);
+ }
+ modp->old_code = modp->code;
+ modp->old_code_length = modp->code_length;
+ modp->old_catches = modp->catches;
+ modp->old_nif = modp->nif;
+ modp->code = NULL;
+ modp->code_length = 0;
+ modp->catches = BEAM_CATCHES_NIL;
+ modp->nif.handle = NULL;
+ modp->nif.entry = NULL;
+}
+
+
+/* Null all references in the export table for the module
+   identified by the given atom. */
+
+static void
+delete_export_references(Eterm module)
+{
+ int i;
+
+ ASSERT(is_atom(module));
+
+ for (i = 0; i < export_list_size(); i++) {
+ Export *ep = export_list(i);
+ if (ep != NULL && (ep->code[0] == module)) {
+ if (ep->address == ep->code+3 &&
+ (ep->code[3] == (Eterm) em_apply_bif)) {
+ continue;
+ }
+ ep->address = ep->code+3;
+ ep->code[3] = (Uint) em_call_error_handler;
+ ep->code[4] = 0;
+ MatchSetUnref(ep->match_prog_set);
+ ep->match_prog_set = NULL;
+ }
+ }
+}
+
+
+int
+beam_make_current_old(Process *c_p, ErtsProcLocks c_p_locks, Eterm module)
+{
+ Module* modp = erts_put_module(module);
+
+ /*
+     * Check if the previous code has already been deleted;
+ * if not, delete old code; error if old code already exists.
+ */
+
+ if (modp->code != NULL && modp->old_code != NULL) {
+ return -3;
+ } else if (modp->old_code == NULL) { /* Make the current version old. */
+ if (display_loads) {
+ erts_printf("saving old code\n");
+ }
+ delete_code(c_p, c_p_locks, modp);
+ delete_export_references(module);
+ }
+ return 0;
+}
+
+static int
+is_native(Eterm* code)
+{
+ return ((Eterm *)code[MI_FUNCTIONS])[1] != 0;
+}
+
+
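delete_code() and purge_module() above implement a two-generation code model: loading a new version first moves the current code aside as "old" (beam_make_current_old()), and a later purge frees the old generation once check_process_code() reports that no process still references it. A toy sketch of just that state machine, with simplified stand-in types rather than the real Module struct:

    #include <stdlib.h>

    /* Toy stand-in for the two code generations kept per module. */
    typedef struct {
        void *code;       /* current version */
        void *old_code;   /* previous version, possibly still referenced */
    } ToyModule;

    /* Mirrors beam_make_current_old(): refuse if an old version still
     * exists (the -3 / not_purged case), otherwise shift current -> old. */
    static int toy_delete(ToyModule *m)
    {
        if (m->code != NULL && m->old_code != NULL)
            return -3;              /* caller must purge first */
        if (m->old_code == NULL) {
            m->old_code = m->code;  /* current becomes old */
            m->code = NULL;
        }
        return 0;
    }

    /* Mirrors purge_module(): drop the old generation; the caller is
     * expected to have verified, via check_process_code() on every
     * process, that the old code is no longer referenced. */
    static int toy_purge(ToyModule *m)
    {
        if (m->old_code == NULL)
            return -1;              /* nothing to purge */
        free(m->old_code);
        m->old_code = NULL;
        return 0;
    }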
diff --git a/erts/emulator/beam/beam_bp.c b/erts/emulator/beam/beam_bp.c
new file mode 100644
index 0000000000..1abf1dc10c
--- /dev/null
+++ b/erts/emulator/beam/beam_bp.c
@@ -0,0 +1,785 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2000-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+
+#include "sys.h"
+#include "erl_vm.h"
+#include "global.h"
+#include "erl_process.h"
+#include "beam_load.h"
+#include "bif.h"
+#include "error.h"
+#include "erl_binary.h"
+#include "beam_bp.h"
+
+/* *************************************************************************
+** Macros
+*/
+
+/*
+** Memory allocation macros
+*/
+/* Breakpoint data */
+#define Alloc(SZ) erts_alloc(ERTS_ALC_T_BPD, (SZ))
+#define ReAlloc(P, SZ) erts_realloc(ERTS_ALC_T_BPD, (P), (SZ))
+#define Free(P) erts_free(ERTS_ALC_T_BPD, (P))
+
+/*
+** Doubly linked ring macros
+*/
+
+#define BpInit(a,i) \
+do { \
+ (a)->orig_instr = (i); \
+ (a)->next = (a); \
+ (a)->prev = (a); \
+} while (0)
+
+#define BpSpliceNext(a,b) \
+do { \
+ register BpData *c = (a), *d = (b), *e; \
+ e = c->next->prev; \
+ c->next->prev = d->next->prev; \
+ d->next->prev = e; \
+ e = c->next; \
+ c->next = d->next; \
+ d->next = e; \
+} while (0)
+
+#define BpSplicePrev(a,b) \
+do { \
+ register BpData *c = (a), *d = (b), *e; \
+ e = c->prev->next; \
+ c->prev->next = d->prev->next; \
+ d->prev->next = e; \
+ e = c->prev; \
+ c->prev = d->prev; \
+ d->prev = e; \
+} while (0)
+
+#ifdef DEBUG
+# define BpSingleton(a) ((a)->next == (a) && (a)->prev == (a))
+#else
+# define BpSingleton(a) ((a)->next == (a))
+#endif
+
+#define BpInitAndSpliceNext(a,i,b) \
+do { \
+ (a)->orig_instr = (i); \
+ (a)->prev = (b); \
+ (b)->next->prev = (a); \
+ (a)->next = (b)->next; \
+ (b)->next = (a); \
+} while (0)
+
+#define BpInitAndSplicePrev(a,i,b) \
+do { \
+ (a)->orig_instr = (i); \
+ (a)->next = (b); \
+ (b)->prev->next = (a); \
+ (a)->prev = (b)->prev; \
+ (b)->prev = (a); \
+} while (0)
+
+/* *************************************************************************
+** Local prototypes
+*/
+
+/*
+** Helpers
+*/
+
+static int set_break(Eterm mfa[3], int specified,
+ Binary *match_spec, Uint break_op,
+ enum erts_break_op count_op, Eterm tracer_pid);
+static int set_module_break(Module *modp, Eterm mfa[3], int specified,
+ Binary *match_spec, Uint break_op,
+ enum erts_break_op count_op, Eterm tracer_pid);
+static int set_function_break(Module *modp, Uint *pc,
+ Binary *match_spec, Uint break_op,
+ enum erts_break_op count_op, Eterm tracer_pid);
+
+static int clear_break(Eterm mfa[3], int specified,
+ Uint break_op);
+static int clear_module_break(Module *modp, Eterm mfa[3], int specified,
+ Uint break_op);
+static int clear_function_break(Module *modp, Uint *pc,
+ Uint break_op);
+
+static BpData *is_break(Uint *pc, Uint break_op);
+
+
+
+/* *************************************************************************
+** External interfaces
+*/
+
+erts_smp_spinlock_t erts_bp_lock;
+
+void
+erts_bp_init(void) {
+ erts_smp_spinlock_init(&erts_bp_lock, "breakpoints");
+}
+
+int
+erts_set_trace_break(Eterm mfa[3], int specified, Binary *match_spec,
+ Eterm tracer_pid) {
+ ERTS_SMP_LC_ASSERT(erts_smp_is_system_blocked(0));
+ return set_break(mfa, specified, match_spec,
+ (Uint) BeamOp(op_i_trace_breakpoint), 0, tracer_pid);
+}
+
+int
+erts_set_mtrace_break(Eterm mfa[3], int specified, Binary *match_spec,
+ Eterm tracer_pid) {
+ ERTS_SMP_LC_ASSERT(erts_smp_is_system_blocked(0));
+ return set_break(mfa, specified, match_spec,
+ (Uint) BeamOp(op_i_mtrace_breakpoint), 0, tracer_pid);
+}
+
+void
+erts_set_mtrace_bif(Uint *pc, Binary *match_spec, Eterm tracer_pid) {
+ BpDataTrace *bdt;
+ ERTS_SMP_LC_ASSERT(erts_smp_is_system_blocked(0));
+
+ bdt = (BpDataTrace *) pc[-4];
+ if (bdt) {
+ MatchSetUnref(bdt->match_spec);
+ MatchSetRef(match_spec);
+ bdt->match_spec = match_spec;
+ bdt->tracer_pid = tracer_pid;
+ } else {
+ bdt = Alloc(sizeof(BpDataTrace));
+ BpInit((BpData *) bdt, 0);
+ MatchSetRef(match_spec);
+ bdt->match_spec = match_spec;
+ bdt->tracer_pid = tracer_pid;
+ pc[-4] = (Uint) bdt;
+ }
+}
+
+int
+erts_set_debug_break(Eterm mfa[3], int specified) {
+ ERTS_SMP_LC_ASSERT(erts_smp_is_system_blocked(0));
+ return set_break(mfa, specified, NULL,
+ (Uint) BeamOp(op_i_debug_breakpoint), 0, NIL);
+}
+
+int
+erts_set_count_break(Eterm mfa[3], int specified, enum erts_break_op count_op) {
+ ERTS_SMP_LC_ASSERT(erts_smp_is_system_blocked(0));
+ return set_break(mfa, specified, NULL,
+ (Uint) BeamOp(op_i_count_breakpoint), count_op, NIL);
+}
+
+
+
+int
+erts_clear_trace_break(Eterm mfa[3], int specified) {
+ ERTS_SMP_LC_ASSERT(erts_smp_is_system_blocked(0));
+ return clear_break(mfa, specified,
+ (Uint) BeamOp(op_i_trace_breakpoint));
+}
+
+int
+erts_clear_mtrace_break(Eterm mfa[3], int specified) {
+ ERTS_SMP_LC_ASSERT(erts_smp_is_system_blocked(0));
+ return clear_break(mfa, specified,
+ (Uint) BeamOp(op_i_mtrace_breakpoint));
+}
+
+void
+erts_clear_mtrace_bif(Uint *pc) {
+ BpDataTrace *bdt;
+ ERTS_SMP_LC_ASSERT(erts_smp_is_system_blocked(0));
+
+ bdt = (BpDataTrace *) pc[-4];
+ if (bdt) {
+ if (bdt->match_spec) {
+ MatchSetUnref(bdt->match_spec);
+ }
+ Free(bdt);
+ }
+ pc[-4] = (Uint) NULL;
+}
+
+int
+erts_clear_debug_break(Eterm mfa[3], int specified) {
+ ERTS_SMP_LC_ASSERT(erts_smp_is_system_blocked(0));
+ return clear_break(mfa, specified,
+ (Uint) BeamOp(op_i_debug_breakpoint));
+}
+
+int
+erts_clear_count_break(Eterm mfa[3], int specified) {
+ ERTS_SMP_LC_ASSERT(erts_smp_is_system_blocked(0));
+ return clear_break(mfa, specified,
+ (Uint) BeamOp(op_i_count_breakpoint));
+}
+
+int
+erts_clear_break(Eterm mfa[3], int specified) {
+ ERTS_SMP_LC_ASSERT(erts_smp_is_system_blocked(0));
+ return clear_break(mfa, specified, 0);
+}
+
+int
+erts_clear_module_break(Module *modp) {
+ ERTS_SMP_LC_ASSERT(erts_smp_is_system_blocked(0));
+ ASSERT(modp);
+ return clear_module_break(modp, NULL, 0, 0);
+}
+
+int
+erts_clear_function_break(Module *modp, Uint *pc) {
+ ERTS_SMP_LC_ASSERT(erts_smp_is_system_blocked(0));
+ ASSERT(modp);
+ return clear_function_break(modp, pc, 0);
+}
+
+
+
+/*
+ * SMP NOTE: Process p may have become exiting on return!
+ */
+Uint
+erts_trace_break(Process *p, Uint *pc, Eterm *args,
+ Uint32 *ret_flags, Eterm *tracer_pid) {
+ Eterm tpid1, tpid2;
+ BpDataTrace *bdt = (BpDataTrace *) pc[-4];
+
+ ASSERT(pc[-5] == (Uint) BeamOp(op_i_func_info_IaaI));
+ ASSERT(bdt);
+ bdt = (BpDataTrace *) bdt->next;
+ ASSERT(bdt);
+ ASSERT(ret_flags);
+ ASSERT(tracer_pid);
+
+ ErtsSmpBPLock(bdt);
+ tpid1 = tpid2 = bdt->tracer_pid;
+ ErtsSmpBPUnlock(bdt);
+
+ *ret_flags = erts_call_trace(p, pc-3/*mfa*/, bdt->match_spec, args,
+ 1, &tpid2);
+ *tracer_pid = tpid2;
+ if (tpid1 != tpid2) {
+ ErtsSmpBPLock(bdt);
+ bdt->tracer_pid = tpid2;
+ ErtsSmpBPUnlock(bdt);
+ }
+ pc[-4] = (Uint) bdt;
+ return bdt->orig_instr;
+}
+
+
+
+/*
+ * SMP NOTE: Process p may have become exiting on return!
+ */
+Uint32
+erts_bif_mtrace(Process *p, Uint *pc, Eterm *args, int local,
+ Eterm *tracer_pid) {
+ BpDataTrace *bdt = (BpDataTrace *) pc[-4];
+
+ ASSERT(tracer_pid);
+ if (bdt) {
+ Eterm tpid1, tpid2;
+ Uint32 flags;
+
+ ErtsSmpBPLock(bdt);
+ tpid1 = tpid2 = bdt->tracer_pid;
+ ErtsSmpBPUnlock(bdt);
+
+ flags = erts_call_trace(p, pc-3/*mfa*/, bdt->match_spec, args,
+ local, &tpid2);
+ *tracer_pid = tpid2;
+ if (tpid1 != tpid2) {
+ ErtsSmpBPLock(bdt);
+ bdt->tracer_pid = tpid2;
+ ErtsSmpBPUnlock(bdt);
+ }
+ return flags;
+ }
+ *tracer_pid = NIL;
+ return 0;
+}
+
+
+
+int
+erts_is_trace_break(Uint *pc, Binary **match_spec_ret, Eterm *tracer_pid_ret) {
+ BpDataTrace *bdt =
+ (BpDataTrace *) is_break(pc, (Uint) BeamOp(op_i_trace_breakpoint));
+
+ if (bdt) {
+ if (match_spec_ret) {
+ *match_spec_ret = bdt->match_spec;
+ }
+ if (tracer_pid_ret) {
+ ErtsSmpBPLock(bdt);
+ *tracer_pid_ret = bdt->tracer_pid;
+ ErtsSmpBPUnlock(bdt);
+ }
+ return !0;
+ }
+ return 0;
+}
+
+int
+erts_is_mtrace_break(Uint *pc, Binary **match_spec_ret, Eterm *tracer_pid_ret) {
+ BpDataTrace *bdt =
+ (BpDataTrace *) is_break(pc, (Uint) BeamOp(op_i_mtrace_breakpoint));
+
+ if (bdt) {
+ if (match_spec_ret) {
+ *match_spec_ret = bdt->match_spec;
+ }
+ if (tracer_pid_ret) {
+ ErtsSmpBPLock(bdt);
+ *tracer_pid_ret = bdt->tracer_pid;
+ ErtsSmpBPUnlock(bdt);
+ }
+ return !0;
+ }
+ return 0;
+}
+
+int
+erts_is_mtrace_bif(Uint *pc, Binary **match_spec_ret, Eterm *tracer_pid_ret) {
+ BpDataTrace *bdt = (BpDataTrace *) pc[-4];
+
+ if (bdt) {
+ if (match_spec_ret) {
+ *match_spec_ret = bdt->match_spec;
+ }
+ if (tracer_pid_ret) {
+ ErtsSmpBPLock(bdt);
+ *tracer_pid_ret = bdt->tracer_pid;
+ ErtsSmpBPUnlock(bdt);
+ }
+ return !0;
+ }
+ return 0;
+}
+
+int
+erts_is_native_break(Uint *pc) {
+#ifdef HIPE
+ ASSERT(pc[-5] == (Uint) BeamOp(op_i_func_info_IaaI));
+ return pc[0] == (Uint) BeamOp(op_hipe_trap_call)
+ || pc[0] == (Uint) BeamOp(op_hipe_trap_call_closure);
+#else
+ return 0;
+#endif
+}
+
+int
+erts_is_count_break(Uint *pc, Sint *count_ret) {
+ BpDataCount *bdc =
+ (BpDataCount *) is_break(pc, (Uint) BeamOp(op_i_count_breakpoint));
+
+ if (bdc) {
+ if (count_ret) {
+ ErtsSmpBPLock(bdc);
+ *count_ret = bdc->count;
+ ErtsSmpBPUnlock(bdc);
+ }
+ return !0;
+ }
+ return 0;
+}
+
+Uint *
+erts_find_local_func(Eterm mfa[3]) {
+ Module *modp;
+ Uint** code_base;
+ Uint* code_ptr;
+ Uint i,n;
+
+ if ((modp = erts_get_module(mfa[0])) == NULL)
+ return NULL;
+ if ((code_base = (Uint **) modp->code) == NULL)
+ return NULL;
+ n = (Uint) code_base[MI_NUM_FUNCTIONS];
+ for (i = 0; i < n; ++i) {
+ code_ptr = code_base[MI_FUNCTIONS+i];
+ ASSERT(((Uint) BeamOp(op_i_func_info_IaaI)) == code_ptr[0]);
+ ASSERT(mfa[0] == ((Eterm) code_ptr[2]));
+ if (mfa[1] == ((Eterm) code_ptr[3]) &&
+ ((Uint) mfa[2]) == code_ptr[4]) {
+ return code_ptr + 5;
+ }
+ }
+ return NULL;
+}
+
+
+
+/* *************************************************************************
+** Local helpers
+*/
+
+
+static int set_break(Eterm mfa[3], int specified,
+		     Binary *match_spec, Uint break_op,
+ enum erts_break_op count_op, Eterm tracer_pid)
+{
+ Module *modp;
+ int num_processed = 0;
+ if (!specified) {
+ /* Find and process all modules in the system... */
+ int current;
+ int last = module_code_size();
+ for (current = 0; current < last; current++) {
+ modp = module_code(current);
+ ASSERT(modp != NULL);
+ num_processed +=
+ set_module_break(modp, mfa, specified,
+ match_spec, break_op, count_op,
+ tracer_pid);
+ }
+ } else {
+ /* Process a single module */
+ if ((modp = erts_get_module(mfa[0])) != NULL) {
+ num_processed +=
+ set_module_break(modp, mfa, specified,
+ match_spec, break_op, count_op,
+ tracer_pid);
+ }
+ }
+ return num_processed;
+}
+
+static int set_module_break(Module *modp, Eterm mfa[3], int specified,
+ Binary *match_spec, Uint break_op,
+ enum erts_break_op count_op, Eterm tracer_pid) {
+ Uint** code_base;
+ Uint* code_ptr;
+ int num_processed = 0;
+ Uint i,n;
+
+ ASSERT(break_op);
+ ASSERT(modp);
+ code_base = (Uint **) modp->code;
+ if (code_base == NULL) {
+ return 0;
+ }
+ n = (Uint) code_base[MI_NUM_FUNCTIONS];
+ for (i = 0; i < n; ++i) {
+ code_ptr = code_base[MI_FUNCTIONS+i];
+ ASSERT(code_ptr[0] == (Uint) BeamOp(op_i_func_info_IaaI));
+ if ((specified < 2 || mfa[1] == ((Eterm) code_ptr[3])) &&
+ (specified < 3 || ((int) mfa[2]) == ((int) code_ptr[4]))) {
+ Uint *pc = code_ptr+5;
+
+ num_processed +=
+ set_function_break(modp, pc, match_spec,
+ break_op, count_op, tracer_pid);
+ }
+ }
+ return num_processed;
+}
+
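+/* Set or update one breakpoint of type break_op on the function whose
+ * first executable instruction is at pc. An existing breakpoint of the
+ * same type is updated in place; otherwise a new ring element is
+ * allocated and spliced in so that a debug breakpoint, if present,
+ * stays last in the ring. Returns 1 if the function was processed and
+ * 0 for native code, which is not supported.
+ */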
+static int set_function_break(Module *modp, Uint *pc,
+ Binary *match_spec, Uint break_op,
+ enum erts_break_op count_op, Eterm tracer_pid) {
+ BpData *bd, **r;
+ size_t size;
+ Uint **code_base = (Uint **)modp->code;
+
+ ASSERT(code_base);
+ ASSERT(code_base <= (Uint **)pc);
+ ASSERT((Uint **)pc < code_base + (modp->code_length/sizeof(Uint *)));
+ /*
+ * Currently no trace support for native code.
+ */
+ if (erts_is_native_break(pc)) {
+ return 0;
+ }
+ /* Do not allow two breakpoints of the same kind */
+ if ( (bd = is_break(pc, break_op))) {
+ if (break_op == (Uint) BeamOp(op_i_trace_breakpoint)
+ || break_op == (Uint) BeamOp(op_i_mtrace_breakpoint)) {
+ BpDataTrace *bdt = (BpDataTrace *) bd;
+ Binary *old_match_spec;
+
+ /* Update match spec and tracer */
+ MatchSetRef(match_spec);
+ ErtsSmpBPLock(bdt);
+ old_match_spec = bdt->match_spec;
+ bdt->match_spec = match_spec;
+ bdt->tracer_pid = tracer_pid;
+ ErtsSmpBPUnlock(bdt);
+ MatchSetUnref(old_match_spec);
+ } else {
+ ASSERT(! match_spec);
+ ASSERT(is_nil(tracer_pid));
+ if (break_op == (Uint) BeamOp(op_i_count_breakpoint)) {
+ BpDataCount *bdc = (BpDataCount *) bd;
+
+ ErtsSmpBPLock(bdc);
+ if (count_op == erts_break_stop) {
+ if (bdc->count >= 0) {
+ bdc->count = -bdc->count-1; /* Stop call counter */
+ }
+ } else {
+ bdc->count = 0; /* Reset call counter */
+ }
+ ErtsSmpBPUnlock(bdc);
+ } else {
+	    ASSERT(! count_op);
+ }
+ }
+ return 1;
+ }
+ if (break_op == (Uint) BeamOp(op_i_trace_breakpoint) ||
+ break_op == (Uint) BeamOp(op_i_mtrace_breakpoint)) {
+ size = sizeof(BpDataTrace);
+ } else {
+ ASSERT(! match_spec);
+ ASSERT(is_nil(tracer_pid));
+ if (break_op == (Uint) BeamOp(op_i_count_breakpoint)) {
+ if (count_op == erts_break_reset
+ || count_op == erts_break_stop) {
+ /* Do not insert a new breakpoint */
+ return 1;
+ }
+ size = sizeof(BpDataCount);
+ } else {
+ ASSERT(! count_op);
+ ASSERT(break_op == (Uint) BeamOp(op_i_debug_breakpoint));
+ size = sizeof(BpDataDebug);
+ }
+ }
+ r = (BpData **) (pc-4);
+ if (! *r) {
+ ASSERT(*pc != (Uint) BeamOp(op_i_trace_breakpoint));
+ ASSERT(*pc != (Uint) BeamOp(op_i_mtrace_breakpoint));
+ ASSERT(*pc != (Uint) BeamOp(op_i_debug_breakpoint));
+ ASSERT(*pc != (Uint) BeamOp(op_i_count_breakpoint));
+ /* First breakpoint; create singleton ring */
+ bd = Alloc(size);
+ BpInit(bd, *pc);
+ *pc = break_op;
+ *r = bd;
+ } else {
+ ASSERT(*pc == (Uint) BeamOp(op_i_trace_breakpoint) ||
+ *pc == (Uint) BeamOp(op_i_mtrace_breakpoint) ||
+ *pc == (Uint) BeamOp(op_i_debug_breakpoint) ||
+ *pc == (Uint) BeamOp(op_i_count_breakpoint));
+ if (*pc == (Uint) BeamOp(op_i_debug_breakpoint)) {
+	    /* Debug bp must be last, so if it is also first,
+	     * it must be a singleton. */
+	    ASSERT(BpSingleton(*r));
+	    /* Insert the new bp first in the ring, i.e. second to last. */
+ bd = Alloc(size);
+ BpInitAndSpliceNext(bd, *pc, *r);
+ *pc = break_op;
+ } else if ((*r)->prev->orig_instr
+ == (Uint) BeamOp(op_i_debug_breakpoint)) {
+ /* Debug bp last in the ring; insert new second to last. */
+ bd = Alloc(size);
+ BpInitAndSplicePrev(bd, (*r)->prev->orig_instr, *r);
+ (*r)->prev->orig_instr = break_op;
+ } else {
+ /* Just insert last in the ring */
+ bd = Alloc(size);
+ BpInitAndSpliceNext(bd, (*r)->orig_instr, *r);
+ (*r)->orig_instr = break_op;
+ *r = bd;
+ }
+ }
+ /* Init the bp type specific data */
+ if (break_op == (Uint) BeamOp(op_i_trace_breakpoint) ||
+ break_op == (Uint) BeamOp(op_i_mtrace_breakpoint)) {
+
+ BpDataTrace *bdt = (BpDataTrace *) bd;
+
+ MatchSetRef(match_spec);
+ bdt->match_spec = match_spec;
+ bdt->tracer_pid = tracer_pid;
+ } else if (break_op == (Uint) BeamOp(op_i_count_breakpoint)) {
+ BpDataCount *bdc = (BpDataCount *) bd;
+
+ bdc->count = 0;
+ }
+ ++(*(Uint*)&code_base[MI_NUM_BREAKPOINTS]);
+ return 1;
+}
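+
+/* Illustrative only and compiled out (the guard macro is hypothetical):
+ * walk the doubly linked breakpoint ring built by set_function_break()
+ * above, starting from the element stored at pc[-4], and count its
+ * elements.
+ */
+#ifdef ERTS_BP_EXAMPLES
+static int bp_ring_length(BpData *last)
+{
+    BpData *bd = last;
+    int n = 0;
+
+    do {
+	n++;
+	bd = bd->next;
+    } while (bd != last);
+    return n;
+}
+#endif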
+
+static int clear_break(Eterm mfa[3], int specified, Uint break_op)
+{
+ int num_processed = 0;
+ Module *modp;
+
+ if (!specified) {
+ /* Iterate over all modules */
+ int current;
+ int last = module_code_size();
+
+ for (current = 0; current < last; current++) {
+ modp = module_code(current);
+ ASSERT(modp != NULL);
+ num_processed += clear_module_break(modp, mfa, specified, break_op);
+ }
+ } else {
+ /* Process a single module */
+ if ((modp = erts_get_module(mfa[0])) != NULL) {
+ num_processed +=
+ clear_module_break(modp, mfa, specified, break_op);
+ }
+ }
+ return num_processed;
+}
+
+static int clear_module_break(Module *m, Eterm mfa[3], int specified,
+ Uint break_op) {
+ Uint** code_base;
+ Uint* code_ptr;
+ int num_processed = 0;
+ Uint i,n;
+
+ ASSERT(m);
+ code_base = (Uint **) m->code;
+ if (code_base == NULL) {
+ return 0;
+ }
+ n = (Uint) code_base[MI_NUM_FUNCTIONS];
+ for (i = 0; i < n; ++i) {
+ code_ptr = code_base[MI_FUNCTIONS+i];
+ if ((specified < 2 || mfa[1] == ((Eterm) code_ptr[3])) &&
+ (specified < 3 || ((int) mfa[2]) == ((int) code_ptr[4]))) {
+ Uint *pc = code_ptr + 5;
+
+ num_processed +=
+ clear_function_break(m, pc, break_op);
+ }
+ }
+ return num_processed;
+}
+
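+/* Remove all breakpoints of type break_op (any type if break_op is 0)
+ * from the function whose first executable instruction is at pc,
+ * unsplicing ring elements and restoring the original instruction when
+ * the last one is removed. Returns 1 if the function was processed and
+ * 0 for native code.
+ */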
+static int clear_function_break(Module *m, Uint *pc, Uint break_op) {
+ BpData *bd;
+ Uint **code_base = (Uint **)m->code;
+
+ ASSERT(code_base);
+ ASSERT(code_base <= (Uint **)pc);
+ ASSERT((Uint **)pc < code_base + (m->code_length/sizeof(Uint *)));
+ /*
+ * Currently no trace support for native code.
+ */
+ if (erts_is_native_break(pc)) {
+ return 0;
+ }
+ while ( (bd = is_break(pc, break_op))) {
+ /* Remove all breakpoints of this type.
+ * There should be only one of each type,
+ * but break_op may be 0 which matches any type.
+ */
+ Uint op;
+ BpData **r = (BpData **) (pc-4);
+
+ ASSERT(*r);
+ /* Find opcode for this breakpoint */
+ if (break_op) {
+ op = break_op;
+ } else {
+ if (bd == (*r)->next) {
+ /* First breakpoint in ring */
+ op = *pc;
+ } else {
+ op = bd->prev->orig_instr;
+ }
+ }
+ if (BpSingleton(bd)) {
+ ASSERT(*r == bd);
+ /* Only one breakpoint to remove */
+ *r = NULL;
+ *pc = bd->orig_instr;
+ } else {
+ BpData *bd_prev = bd->prev;
+
+ BpSpliceNext(bd, bd_prev);
+ ASSERT(BpSingleton(bd));
+ if (bd == *r) {
+ /* We removed the last breakpoint in the ring */
+ *r = bd_prev;
+ bd_prev->orig_instr = bd->orig_instr;
+ } else if (bd_prev == *r) {
+ /* We removed the first breakpoint in the ring */
+ *pc = bd->orig_instr;
+ } else {
+ bd_prev->orig_instr = bd->orig_instr;
+ }
+ }
+ if (op == (Uint) BeamOp(op_i_trace_breakpoint) ||
+ op == (Uint) BeamOp(op_i_mtrace_breakpoint)) {
+
+ BpDataTrace *bdt = (BpDataTrace *) bd;
+
+ MatchSetUnref(bdt->match_spec);
+ }
+ Free(bd);
+ ASSERT(((Uint) code_base[MI_NUM_BREAKPOINTS]) > 0);
+ --(*(Uint*)&code_base[MI_NUM_BREAKPOINTS]);
+ }
+ return 1;
+}
+
+
+
+/*
+** Does a linear forward search of the breakpoint ring for a specified
+** opcode and returns a pointer to the breakpoint data structure, or
+** NULL if none is found. If the specified opcode is 0, the last
+** breakpoint is returned. The program counter must point to the first
+** executable (breakpoint) instruction of the function.
+*/
+static BpData *is_break(Uint *pc, Uint break_op) {
+ ASSERT(pc[-5] == (Uint) BeamOp(op_i_func_info_IaaI));
+ if (! erts_is_native_break(pc)) {
+ BpData *bd = (BpData *) pc[-4];
+
+ if (break_op == 0) {
+ return bd;
+ }
+ if (*pc == break_op) {
+ ASSERT(bd);
+ return bd->next;
+ }
+	if (! bd) {
+ return NULL;
+ }
+ bd = bd->next;
+ while (bd != (BpData *) pc[-4]) {
+ ASSERT(bd);
+ if (bd->orig_instr == break_op) {
+ bd = bd->next;
+ ASSERT(bd);
+ return bd;
+ } else {
+ bd = bd->next;
+ }
+ }
+ }
+ return NULL;
+}
diff --git a/erts/emulator/beam/beam_bp.h b/erts/emulator/beam/beam_bp.h
new file mode 100644
index 0000000000..44e6b294d8
--- /dev/null
+++ b/erts/emulator/beam/beam_bp.h
@@ -0,0 +1,165 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2000-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+
+#ifndef _BEAM_BP_H
+#define _BEAM_BP_H
+
+#include "sys.h"
+#include "erl_vm.h"
+#include "global.h"
+
+
+
+/*
+** Common struct to all bp_data_*
+**
+** Two gotchas:
+**
+** 1) The type of bp_data structure in the ring is deduced from the
+** orig_instr field of the structure _before_ in the ring, except for
+** the first structure in the ring that has its instruction in
+** pc[0] of the code to execute.
+**
+** 2) pc[-4] points to the _last_ structure in the ring before the
+** breakpoints start executing.
+**
+** So, as an example, when a breakpointed function starts to execute,
+** the breakpoint instruction at pc[0] finds its data at
+** ((BpData *) pc[-4])->next and has to cast that pointer to the
+** correct bp_data type.
+*/
+typedef struct bp_data {
+ struct bp_data *next; /* Doubly linked ring pointers */
+ struct bp_data *prev; /* -"- */
+ Uint orig_instr; /* The original instruction to execute */
+} BpData;
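+
+/* Minimal sketch of the gotchas above, illustrative only and compiled
+ * out (the guard macro is hypothetical): the breakpoint instruction at
+ * pc[0] starts from the _last_ ring element stored at pc[-4] and steps
+ * to ->next to reach its own data.
+ */
+#ifdef ERTS_BP_EXAMPLES
+static BpData *bp_ring_first(Uint *pc)
+{
+    return ((BpData *) pc[-4])->next; /* first structure in the ring */
+}
+#endif
+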
+/*
+** All the following bp_data_.. structs must begin the same way
+*/
+
+typedef struct bp_data_trace {
+ struct bp_data *next;
+ struct bp_data *prev;
+ Uint orig_instr;
+ Binary *match_spec;
+ Eterm tracer_pid;
+} BpDataTrace;
+
+typedef struct bp_data_debug {
+ struct bp_data *next;
+ struct bp_data *prev;
+ Uint orig_instr;
+} BpDataDebug;
+
+typedef struct bp_data_count { /* Call count */
+ struct bp_data *next;
+ struct bp_data *prev;
+ Uint orig_instr;
+ Sint count;
+} BpDataCount;
+
+extern erts_smp_spinlock_t erts_bp_lock;
+
+#ifdef ERTS_SMP
+#define ErtsSmpBPLock(BDC) erts_smp_spin_lock(&erts_bp_lock)
+#define ErtsSmpBPUnlock(BDC) erts_smp_spin_unlock(&erts_bp_lock)
+#else
+#define ErtsSmpBPLock(BDC)
+#define ErtsSmpBPUnlock(BDC)
+#endif
+
+#define ErtsCountBreak(pc,instr_result) \
+do { \
+ BpDataCount *bdc = (BpDataCount *) (pc)[-4]; \
+ \
+ ASSERT((pc)[-5] == (Uint) BeamOp(op_i_func_info_IaaI)); \
+ ASSERT(bdc); \
+ bdc = (BpDataCount *) bdc->next; \
+ ASSERT(bdc); \
+ (pc)[-4] = (Uint) bdc; \
+ ErtsSmpBPLock(bdc); \
+ if (bdc->count >= 0) bdc->count++; \
+ ErtsSmpBPUnlock(bdc); \
+ *(instr_result) = bdc->orig_instr; \
+} while (0)
+
+#define ErtsBreakSkip(pc,instr_result) \
+do { \
+ BpData *bd = (BpData *) (pc)[-4]; \
+ \
+ ASSERT((pc)[-5] == (Uint) BeamOp(op_i_func_info_IaaI)); \
+ ASSERT(bd); \
+ bd = bd->next; \
+ ASSERT(bd); \
+ (pc)[-4] = (Uint) bd; \
+ *(instr_result) = bd->orig_instr; \
+} while (0)
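+
+/* Hypothetical call-site sketch, mirroring how the emulator is expected
+ * to use the macros above: fetch the instruction hidden behind the
+ * breakpoint and continue executing it.
+ *
+ *	Uint real_instr;
+ *	ErtsCountBreak(I, &real_instr);
+ *	Goto(real_instr);
+ */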
+
+enum erts_break_op {
+ erts_break_nop = 0, /* Must be false */
+ erts_break_set = !0, /* Must be true */
+ erts_break_reset,
+ erts_break_stop
+};
+
+
+
+/*
+** Function interface exported from beam_bp.c
+*/
+
+void erts_bp_init(void);
+
+int erts_set_trace_break(Eterm mfa[3], int specified, Binary *match_spec,
+ Eterm tracer_pid);
+int erts_clear_trace_break(Eterm mfa[3], int specified);
+int erts_set_mtrace_break(Eterm mfa[3], int specified, Binary *match_spec,
+ Eterm tracer_pid);
+int erts_clear_mtrace_break(Eterm mfa[3], int specified);
+void erts_set_mtrace_bif(Uint *pc, Binary *match_spec,
+ Eterm tracer_pid);
+void erts_clear_mtrace_bif(Uint *pc);
+int erts_set_debug_break(Eterm mfa[3], int specified);
+int erts_clear_debug_break(Eterm mfa[3], int specified);
+int erts_set_count_break(Eterm mfa[3], int specified, enum erts_break_op);
+int erts_clear_count_break(Eterm mfa[3], int specified);
+
+
+int erts_clear_break(Eterm mfa[3], int specified);
+int erts_clear_module_break(Module *modp);
+int erts_clear_function_break(Module *modp, Uint *pc);
+
+Uint erts_trace_break(Process *p, Uint *pc, Eterm *args,
+ Uint32 *ret_flags, Eterm *tracer_pid);
+Uint32 erts_bif_mtrace(Process *p, Uint *pc, Eterm *args,
+ int local, Eterm *tracer_pid);
+
+int erts_is_trace_break(Uint *pc, Binary **match_spec_ret,
+ Eterm *tracer_pid_ret);
+int erts_is_mtrace_break(Uint *pc, Binary **match_spec_ret,
+			Eterm *tracer_pid_ret);
+int erts_is_mtrace_bif(Uint *pc, Binary **match_spec_ret,
+ Eterm *tracer_pid_ret);
+int erts_is_native_break(Uint *pc);
+int erts_is_count_break(Uint *pc, Sint *count_ret);
+
+Uint *erts_find_local_func(Eterm mfa[3]);
+
+#endif /* _BEAM_BP_H */
diff --git a/erts/emulator/beam/beam_catches.c b/erts/emulator/beam/beam_catches.c
new file mode 100644
index 0000000000..d5cef1cad2
--- /dev/null
+++ b/erts/emulator/beam/beam_catches.c
@@ -0,0 +1,102 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2000-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+#include "sys.h"
+#include "beam_catches.h"
+
+/* XXX: should use dynamic reallocation */
+#define TABSIZ (16*1024)
+static struct {
+ Eterm *cp;
+ unsigned cdr;
+} beam_catches[TABSIZ];
+
+static int free_list;
+static unsigned high_mark;
+
+void beam_catches_init(void)
+{
+ free_list = -1;
+ high_mark = 0;
+}
+
+unsigned beam_catches_cons(Eterm *cp, unsigned cdr)
+{
+ int i;
+
+ /*
+ * Allocate from free_list while it is non-empty.
+ * If free_list is empty, allocate at high_mark.
+ *
+ * This avoids the need to initialise the free list in
+ * beam_catches_init(), which would cost O(TABSIZ) time.
+ */
+ if( (i = free_list) >= 0 ) {
+ free_list = beam_catches[i].cdr;
+ } else if( (i = high_mark) < TABSIZ ) {
+ high_mark = i + 1;
+ } else {
+ fprintf(stderr, "beam_catches_cons: no free slots :-(\r\n");
+ exit(1);
+ }
+
+ beam_catches[i].cp = cp;
+ beam_catches[i].cdr = cdr;
+
+ return i;
+}
+
+Eterm *beam_catches_car(unsigned i)
+{
+ if( i >= TABSIZ ) {
+ fprintf(stderr,
+ "beam_catches_car: index %#x is out of range\r\n", i);
+ abort();
+ }
+ return beam_catches[i].cp;
+}
+
+void beam_catches_delmod(unsigned head, Eterm *code, unsigned code_bytes)
+{
+ unsigned i, cdr;
+
+ for(i = head; i != (unsigned)-1;) {
+ if( i >= TABSIZ ) {
+ fprintf(stderr,
+ "beam_catches_delmod: index %#x is out of range\r\n", i);
+ abort();
+ }
+ if( (char*)beam_catches[i].cp - (char*)code >= code_bytes ) {
+ fprintf(stderr,
+ "beam_catches_delmod: item %#x has cp %#lx which is not "
+ "in module's range [%#lx,%#lx[\r\n",
+ i, (long)beam_catches[i].cp,
+ (long)code, (long)((char*)code + code_bytes));
+ abort();
+ }
+ beam_catches[i].cp = 0;
+ cdr = beam_catches[i].cdr;
+ beam_catches[i].cdr = free_list;
+ free_list = i;
+ i = cdr;
+ }
+}
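+
+/* Illustrative only and compiled out (the guard macro is hypothetical):
+ * a loader could thread the catch indices of one module into a list
+ * through the cdr field, then release them in one sweep with
+ * beam_catches_delmod().
+ */
+#ifdef BEAM_CATCHES_EXAMPLES
+static unsigned example_load_catches(Eterm **cps, int n)
+{
+    unsigned head = (unsigned) BEAM_CATCHES_NIL;
+    int j;
+
+    for (j = 0; j < n; j++) {
+	head = beam_catches_cons(cps[j], head);
+    }
+    return head; /* pass as 'head' to beam_catches_delmod() */
+}
+#endif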
diff --git a/erts/emulator/beam/beam_catches.h b/erts/emulator/beam/beam_catches.h
new file mode 100644
index 0000000000..ccf33d5e86
--- /dev/null
+++ b/erts/emulator/beam/beam_catches.h
@@ -0,0 +1,32 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2000-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+#ifndef __BEAM_CATCHES_H
+#define __BEAM_CATCHES_H
+
+#define BEAM_CATCHES_NIL (-1)
+
+void beam_catches_init(void);
+unsigned beam_catches_cons(Eterm* cp, unsigned cdr);
+Eterm *beam_catches_car(unsigned i);
+void beam_catches_delmod(unsigned head, Eterm* code, unsigned code_bytes);
+
+#define catch_pc(x) beam_catches_car(catch_val((x)))
+
+#endif /* __BEAM_CATCHES_H */
diff --git a/erts/emulator/beam/beam_debug.c b/erts/emulator/beam/beam_debug.c
new file mode 100644
index 0000000000..4242a4161e
--- /dev/null
+++ b/erts/emulator/beam/beam_debug.c
@@ -0,0 +1,548 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 1998-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Purpose: Basic debugging support.
+ */
+
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+
+#include "sys.h"
+#include "erl_vm.h"
+#include "global.h"
+#include "erl_process.h"
+#include "error.h"
+#include "erl_driver.h"
+#include "bif.h"
+#include "big.h"
+#include "external.h"
+#include "beam_load.h"
+#include "beam_bp.h"
+#include "erl_binary.h"
+
+#ifdef ARCH_64
+# define HEXF "%016bpX"
+#else
+# define HEXF "%08bpX"
+#endif
+
+void dbg_bt(Process* p, Eterm* sp);
+void dbg_where(Eterm* addr, Eterm x0, Eterm* reg);
+
+static void print_big(int to, void *to_arg, Eterm* addr);
+static int print_op(int to, void *to_arg, int op, int size, Eterm* addr);
+
+Eterm
+erts_debug_same_2(Process* p, Eterm term1, Eterm term2)
+{
+ return (term1 == term2) ? am_true : am_false;
+}
+
+Eterm
+erts_debug_flat_size_1(Process* p, Eterm term)
+{
+ Uint size = size_object(term);
+
+ if (IS_USMALL(0, size)) {
+ BIF_RET(make_small(size));
+ } else {
+ Eterm* hp = HAlloc(p, BIG_UINT_HEAP_SIZE);
+ BIF_RET(uint_to_big(size, hp));
+ }
+}
+
+Eterm
+erts_debug_breakpoint_2(Process* p, Eterm MFA, Eterm boolean)
+{
+ Eterm* tp;
+ Eterm mfa[3];
+ int i;
+ int specified = 0;
+ Eterm res;
+
+    if (boolean != am_true && boolean != am_false)
+ goto error;
+
+ if (is_not_tuple(MFA)) {
+ goto error;
+ }
+ tp = tuple_val(MFA);
+ if (*tp != make_arityval(3)) {
+ goto error;
+ }
+ mfa[0] = tp[1];
+ mfa[1] = tp[2];
+ mfa[2] = tp[3];
+ if (!is_atom(mfa[0]) || !is_atom(mfa[1]) ||
+ (!is_small(mfa[2]) && mfa[2] != am_Underscore)) {
+ goto error;
+ }
+ for (i = 0; i < 3 && mfa[i] != am_Underscore; i++, specified++) {
+ /* Empty loop body */
+ }
+ for (i = specified; i < 3; i++) {
+ if (mfa[i] != am_Underscore) {
+ goto error;
+ }
+ }
+ if (is_small(mfa[2])) {
+ mfa[2] = signed_val(mfa[2]);
+ }
+
+ erts_smp_proc_unlock(p, ERTS_PROC_LOCK_MAIN);
+ erts_smp_block_system(0);
+
+    if (boolean == am_true) {
+ res = make_small(erts_set_debug_break(mfa, specified));
+ } else {
+ res = make_small(erts_clear_debug_break(mfa, specified));
+ }
+
+ erts_smp_release_system();
+ erts_smp_proc_lock(p, ERTS_PROC_LOCK_MAIN);
+
+ return res;
+
+ error:
+ BIF_ERROR(p, BADARG);
+}
+
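+/*
+ * Implements erts_debug:disassemble/1. The argument is either an
+ * instruction address or an {M,F,A} tuple. On success the result is
+ * {NextAddress, Listing, {M,F,A}}; an unrecognized address yields
+ * false and an unknown function yields undef.
+ */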
+Eterm
+erts_debug_disassemble_1(Process* p, Eterm addr)
+{
+ erts_dsprintf_buf_t *dsbufp;
+ Eterm* hp;
+ Eterm* tp;
+ Eterm bin;
+ Eterm mfa;
+ Eterm* funcinfo = NULL; /* Initialized to eliminate warning. */
+ Uint* code_base;
+ Uint* code_ptr = NULL; /* Initialized to eliminate warning. */
+ Uint instr;
+ Uint uaddr;
+ Uint hsz;
+ int i;
+
+ if (term_to_Uint(addr, &uaddr)) {
+ code_ptr = (Uint *) uaddr;
+ if ((funcinfo = find_function_from_pc(code_ptr)) == NULL) {
+ BIF_RET(am_false);
+ }
+ } else if (is_tuple(addr)) {
+ Module* modp;
+ Eterm mod;
+ Eterm name;
+ Export* ep;
+ Sint arity;
+ int n;
+
+ tp = tuple_val(addr);
+ if (tp[0] != make_arityval(3)) {
+ error:
+ BIF_ERROR(p, BADARG);
+ }
+ mod = tp[1];
+ name = tp[2];
+ if (!is_atom(mod) || !is_atom(name) || !is_small(tp[3])) {
+ goto error;
+ }
+ arity = signed_val(tp[3]);
+ modp = erts_get_module(mod);
+
+ /*
+ * Try the export entry first to allow disassembly of special functions
+ * such as erts_debug:apply/4. Then search for it in the module.
+ */
+
+ if ((ep = erts_find_function(mod, name, arity)) != NULL) {
+ /* XXX: add "&& ep->address != ep->code+3" condition?
+ * Consider a traced function.
+ * Its ep will have ep->address == ep->code+3.
+ * erts_find_function() will return the non-NULL ep.
+ * Below we'll try to derive a code_ptr from ep->address.
+ * But this code_ptr will point to the start of the Export,
+ * not the function's func_info instruction. BOOM !?
+ */
+ code_ptr = ((Eterm *) ep->address) - 5;
+ funcinfo = code_ptr+2;
+ } else if (modp == NULL || (code_base = modp->code) == NULL) {
+ BIF_RET(am_undef);
+ } else {
+ n = code_base[MI_NUM_FUNCTIONS];
+ for (i = 0; i < n; i++) {
+ code_ptr = (Uint *) code_base[MI_FUNCTIONS+i];
+ if (code_ptr[3] == name && code_ptr[4] == arity) {
+ funcinfo = code_ptr+2;
+ break;
+ }
+ }
+ if (i == n) {
+ BIF_RET(am_undef);
+ }
+ }
+ } else {
+ goto error;
+ }
+
+ dsbufp = erts_create_tmp_dsbuf(0);
+ erts_print(ERTS_PRINT_DSBUF, (void *) dsbufp, HEXF ": ", code_ptr);
+ instr = (Uint) code_ptr[0];
+ for (i = 0; i < NUM_SPECIFIC_OPS; i++) {
+ if (instr == (Uint) BeamOp(i) && opc[i].name[0] != '\0') {
+ code_ptr += print_op(ERTS_PRINT_DSBUF, (void *) dsbufp,
+ i, opc[i].sz-1, code_ptr+1) + 1;
+ break;
+ }
+ }
+ if (i >= NUM_SPECIFIC_OPS) {
+ erts_print(ERTS_PRINT_DSBUF, (void *) dsbufp,
+ "unknown " HEXF "\n", instr);
+ code_ptr++;
+ }
+ bin = new_binary(p, (byte *) dsbufp->str, (int) dsbufp->str_len);
+ erts_destroy_tmp_dsbuf(dsbufp);
+ hsz = 4+4;
+ (void) erts_bld_uint(NULL, &hsz, (Uint) code_ptr);
+ hp = HAlloc(p, hsz);
+ addr = erts_bld_uint(&hp, NULL, (Uint) code_ptr);
+ ASSERT(is_atom(funcinfo[0]));
+ ASSERT(is_atom(funcinfo[1]));
+ mfa = TUPLE3(hp, funcinfo[0], funcinfo[1], make_small(funcinfo[2]));
+ hp += 4;
+ return TUPLE3(hp, addr, bin, mfa);
+}
+
+void
+dbg_bt(Process* p, Eterm* sp)
+{
+ Eterm* stack = STACK_START(p);
+
+ while (sp < stack) {
+ if (is_CP(*sp)) {
+ Eterm* addr = find_function_from_pc(cp_val(*sp));
+ if (addr)
+ erts_fprintf(stderr,
+ HEXF ": %T:%T/%bpu\n",
+ addr, addr[0], addr[1], addr[2]);
+ }
+ sp++;
+ }
+}
+
+void
+dbg_where(Eterm* addr, Eterm x0, Eterm* reg)
+{
+ Eterm* f = find_function_from_pc(addr);
+
+ if (f == NULL) {
+ erts_fprintf(stderr, "???\n");
+ } else {
+ int arity;
+ int i;
+
+ addr = f;
+ arity = addr[2];
+ erts_fprintf(stderr, HEXF ": %T:%T(", addr, addr[0], addr[1]);
+ for (i = 0; i < arity; i++)
+ erts_fprintf(stderr, i ? ", %T" : "%T", i ? reg[i] : x0);
+ erts_fprintf(stderr, ")\n");
+ }
+}
+
+static int
+print_op(int to, void *to_arg, int op, int size, Eterm* addr)
+{
+ int i;
+ Uint tag;
+ char* sign;
+ char* start_prog; /* Start of program for packer. */
+ char* prog; /* Current position in packer program. */
+ Uint stack[8]; /* Stack for packer. */
+ Uint* sp = stack; /* Points to next free position. */
+ Uint packed = 0; /* Accumulator for packed operations. */
+ Uint args[8]; /* Arguments for this instruction. */
+ Uint* ap; /* Pointer to arguments. */
+
+ start_prog = opc[op].pack;
+
+ if (start_prog[0] == '\0') {
+ /*
+ * There is no pack program.
+ * Avoid copying because instructions containing bignum operands
+ * are bigger than actually declared.
+ */
+ ap = (Uint *) addr;
+ } else {
+ /*
+ * Copy all arguments to a local buffer for the unpacking.
+ */
+
+ ASSERT(size <= sizeof(args)/sizeof(args[0]));
+ ap = args;
+ for (i = 0; i < size; i++) {
+ *ap++ = addr[i];
+ }
+
+ /*
+	 * Undo any packing done by the loader. This is easily done by
+	 * running the packing program backwards, undoing each step in turn.
+ */
+
+ prog = start_prog + strlen(start_prog);
+ while (start_prog < prog) {
+ prog--;
+ switch (*prog) {
+ case 'g':
+ *ap++ = *--sp;
+ break;
+ case 'i': /* Initialize packing accumulator. */
+ *ap++ = packed;
+ break;
+ case 's':
+ *ap++ = packed & 0x3ff;
+ packed >>= 10;
+ break;
+ case '0': /* Tight shift */
+ *ap++ = packed & (BEAM_TIGHT_MASK / sizeof(Eterm));
+ packed >>= BEAM_TIGHT_SHIFT;
+ break;
+ case '6': /* Shift 16 steps */
+ *ap++ = packed & 0xffff;
+ packed >>= 16;
+ break;
+ case 'p':
+ *sp++ = *--ap;
+ break;
+ case 'P':
+ packed = *--sp;
+ break;
+ default:
+ ASSERT(0);
+ }
+ }
+ ap = args;
+ }
+
+ /*
+ * Print the name and all operands of the instructions.
+ */
+
+ erts_print(to, to_arg, "%s ", opc[op].name);
+ sign = opc[op].sign;
+ while (*sign) {
+ switch (*sign) {
+ case 'r': /* x(0) */
+ erts_print(to, to_arg, "x(0)");
+ break;
+ case 'x': /* x(N) */
+ if (reg_index(ap[0]) == 0) {
+ erts_print(to, to_arg, "X[0]");
+ } else {
+ erts_print(to, to_arg, "x(%d)", reg_index(ap[0]));
+ }
+ ap++;
+ break;
+ case 'y': /* y(N) */
+ erts_print(to, to_arg, "y(%d)", reg_index(ap[0]) - CP_SIZE);
+ ap++;
+ break;
+ case 'n': /* Nil */
+ erts_print(to, to_arg, "[]");
+ break;
+ case 's': /* Any source (tagged constant or register) */
+ tag = beam_reg_tag(*ap);
+ if (tag == X_REG_DEF) {
+ if (reg_index(*ap) == 0) {
+ erts_print(to, to_arg, "x[0]");
+ } else {
+ erts_print(to, to_arg, "x(%d)", reg_index(*ap));
+ }
+ ap++;
+ break;
+ } else if (tag == Y_REG_DEF) {
+ erts_print(to, to_arg, "y(%d)", reg_index(*ap) - CP_SIZE);
+ ap++;
+ break;
+ } else if (tag == R_REG_DEF) {
+ erts_print(to, to_arg, "x(0)");
+ ap++;
+ break;
+ }
+ /*FALLTHROUGH*/
+ case 'a': /* Tagged atom */
+ case 'i': /* Tagged integer */
+ case 'c': /* Tagged constant */
+ case 'q': /* Tagged literal */
+ erts_print(to, to_arg, "%T", *ap);
+ ap++;
+ break;
+ case 'A':
+ erts_print(to, to_arg, "%d", arityval(ap[0]));
+ ap++;
+ break;
+ case 'd': /* Destination (x(0), x(N), y(N)) */
+ switch (beam_reg_tag(*ap)) {
+ case X_REG_DEF:
+ if (reg_index(*ap) == 0) {
+ erts_print(to, to_arg, "x[0]");
+ } else {
+ erts_print(to, to_arg, "x(%d)", reg_index(*ap));
+ }
+ break;
+ case Y_REG_DEF:
+ erts_print(to, to_arg, "y(%d)", reg_index(*ap) - CP_SIZE);
+ break;
+ case R_REG_DEF:
+ erts_print(to, to_arg, "x(0)");
+ break;
+ }
+ ap++;
+ break;
+ case 'I': /* Untagged integer. */
+ case 't':
+ erts_print(to, to_arg, "%d", *ap);
+ ap++;
+ break;
+ case 'f': /* Destination label */
+ erts_print(to, to_arg, "f(%X)", *ap);
+ ap++;
+ break;
+ case 'p': /* Pointer (to label) */
+ {
+ Eterm* f = find_function_from_pc((Eterm *)*ap);
+
+ if (f+3 != (Eterm *) *ap) {
+ erts_print(to, to_arg, "p(%X)", *ap);
+ } else {
+ erts_print(to, to_arg, "%T:%T/%bpu", f[0], f[1], f[2]);
+ }
+ ap++;
+ }
+ break;
+ case 'j': /* Pointer (to label) */
+ erts_print(to, to_arg, "j(%X)", *ap);
+ ap++;
+ break;
+ case 'e': /* Export entry */
+ {
+ Export* ex = (Export *) *ap;
+ erts_print(to, to_arg,
+ "%T:%T/%bpu", ex->code[0], ex->code[1], ex->code[2]);
+ ap++;
+ }
+ break;
+ case 'F': /* Function definition */
+ break;
+ case 'b':
+ for (i = 0; i < BIF_SIZE; i++) {
+ BifFunction bif = (BifFunction) *ap;
+ if (bif == bif_table[i].f) {
+ break;
+ }
+ }
+ if (i == BIF_SIZE) {
+ erts_print(to, to_arg, "b(%d)", (Uint) *ap);
+ } else {
+ Eterm name = bif_table[i].name;
+ unsigned arity = bif_table[i].arity;
+ erts_print(to, to_arg, "%T/%u", name, arity);
+ }
+ ap++;
+ break;
+ case 'P': /* Byte offset into tuple (see beam_load.c) */
+ erts_print(to, to_arg, "%d", (*ap / sizeof(Eterm*)) - 1);
+ ap++;
+ break;
+ case 'l': /* fr(N) */
+ erts_print(to, to_arg, "fr(%d)", reg_index(ap[0]));
+ ap++;
+ break;
+ default:
+ erts_print(to, to_arg, "???");
+ ap++;
+ break;
+ }
+ erts_print(to, to_arg, " ");
+ sign++;
+ }
+
+ /*
+ * Print more information about certain instructions.
+ */
+
+ ap = addr + size;
+ switch (op) {
+ case op_i_select_val_sfI:
+ {
+ int n = ap[-1];
+
+ while (n > 0) {
+ erts_print(to, to_arg, "%T f(%X) ", ap[0], ap[1]);
+ ap += 2;
+ size += 2;
+ n--;
+ }
+ }
+ break;
+ case op_i_jump_on_val_sfII:
+ {
+ int n;
+ for (n = ap[-2]; n > 0; n--) {
+ erts_print(to, to_arg, "f(%X) ", ap[0]);
+ ap++;
+ size++;
+ }
+ }
+ break;
+ case op_i_select_big_sf:
+ while (ap[0]) {
+ int arity = thing_arityval(ap[0]);
+ print_big(to, to_arg, ap);
+ size += arity+1;
+ ap += arity+1;
+ erts_print(to, to_arg, " f(%X) ", ap[0]);
+ ap++;
+ size++;
+ }
+ ap++;
+ size++;
+ break;
+ }
+ erts_print(to, to_arg, "\n");
+
+ return size;
+}
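+
+/* Toy restatement, illustrative only and compiled out (the guard macro
+ * is hypothetical), of the "run the pack program backwards" idea in
+ * print_op() above: if two operands were packed as (hi << 10) | lo,
+ * unpacking masks off the low field and shifts, as in the 's' step.
+ */
+#ifdef BEAM_DEBUG_EXAMPLES
+static void toy_unpack(Uint packed, Uint *lo, Uint *hi)
+{
+    *lo = packed & 0x3ff;	/* low 10 bits, as in case 's' */
+    *hi = packed >> 10;
+}
+#endif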
+
+static void
+print_big(int to, void *to_arg, Eterm* addr)
+{
+ int i;
+ int k;
+
+ i = BIG_SIZE(addr);
+ if (BIG_SIGN(addr))
+ erts_print(to, to_arg, "-#integer(%d) = {", i);
+ else
+ erts_print(to, to_arg, "#integer(%d) = {", i);
+ erts_print(to, to_arg, "%d", BIG_DIGIT(addr, 0));
+ for (k = 1; k < i; k++)
+ erts_print(to, to_arg, ",%d", BIG_DIGIT(addr, k));
+ erts_print(to, to_arg, "}");
+}
diff --git a/erts/emulator/beam/beam_emu.c b/erts/emulator/beam/beam_emu.c
new file mode 100644
index 0000000000..dcaa43b51c
--- /dev/null
+++ b/erts/emulator/beam/beam_emu.c
@@ -0,0 +1,6198 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 1996-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+
+#include <stddef.h> /* offsetof() */
+#include "sys.h"
+#include "erl_vm.h"
+#include "global.h"
+#include "erl_process.h"
+#include "erl_nmgc.h"
+#include "error.h"
+#include "bif.h"
+#include "big.h"
+#include "beam_load.h"
+#include "erl_binary.h"
+#include "erl_bits.h"
+#include "dist.h"
+#include "beam_bp.h"
+#include "beam_catches.h"
+#ifdef HIPE
+#include "hipe_mode_switch.h"
+#include "hipe_bif1.h"
+#endif
+
+/* #define HARDDEBUG 1 */
+
+#if defined(NO_JUMP_TABLE)
+# define OpCase(OpCode) case op_##OpCode: lb_##OpCode
+# define CountCase(OpCode) case op_count_##OpCode
+# define OpCode(OpCode) ((Uint*)op_##OpCode)
+# define Goto(Rel) {Go = (int)(Rel); goto emulator_loop;}
+# define LabelAddr(Addr) &&##Addr
+#else
+# define OpCase(OpCode) lb_##OpCode
+# define CountCase(OpCode) lb_count_##OpCode
+# define Goto(Rel) goto *(Rel)
+# define LabelAddr(Label) &&Label
+# define OpCode(OpCode) (&&lb_##OpCode)
+#endif
+
+#ifdef ERTS_ENABLE_LOCK_CHECK
+# ifdef ERTS_SMP
+# define PROCESS_MAIN_CHK_LOCKS(P) \
+do { \
+ if ((P)) { \
+ erts_pix_lock_t *pix_lock__ = ERTS_PIX2PIXLOCK(internal_pid_index((P)->id));\
+ erts_proc_lc_chk_only_proc_main((P)); \
+ erts_pix_lock(pix_lock__); \
+ ASSERT(0 < (P)->lock.refc && (P)->lock.refc < erts_no_schedulers*5);\
+ erts_pix_unlock(pix_lock__); \
+ } \
+ else \
+ erts_lc_check_exact(NULL, 0); \
+ ERTS_SMP_LC_ASSERT(!ERTS_LC_IS_BLOCKING); \
+} while (0)
+# define ERTS_SMP_REQ_PROC_MAIN_LOCK(P) \
+ if ((P)) erts_proc_lc_require_lock((P), ERTS_PROC_LOCK_MAIN)
+# define ERTS_SMP_UNREQ_PROC_MAIN_LOCK(P) \
+ if ((P)) erts_proc_lc_unrequire_lock((P), ERTS_PROC_LOCK_MAIN)
+# else
+# define ERTS_SMP_REQ_PROC_MAIN_LOCK(P)
+# define ERTS_SMP_UNREQ_PROC_MAIN_LOCK(P)
+# define PROCESS_MAIN_CHK_LOCKS(P) erts_lc_check_exact(NULL, 0)
+# endif
+#else
+# define PROCESS_MAIN_CHK_LOCKS(P)
+# define ERTS_SMP_REQ_PROC_MAIN_LOCK(P)
+# define ERTS_SMP_UNREQ_PROC_MAIN_LOCK(P)
+#endif
+
+/*
+ * Define macros for deep checking of terms.
+ */
+
+#if defined(HARDDEBUG)
+
+# define CHECK_TERM(T) size_object(T)
+
+# define CHECK_ARGS(PC) \
+do { \
+ int i_; \
+ int Arity_ = PC[-1]; \
+ if (Arity_ > 0) { \
+ CHECK_TERM(r(0)); \
+ } \
+ for (i_ = 1; i_ < Arity_; i_++) { \
+ CHECK_TERM(x(i_)); \
+ } \
+} while (0)
+
+#else
+# define CHECK_TERM(T) ASSERT(!is_CP(T))
+# define CHECK_ARGS(T)
+#endif
+
+#ifndef MAX
+#define MAX(x, y) (((x) > (y)) ? (x) : (y))
+#endif
+
+#define GET_BIF_ADDRESS(p) ((BifFunction) (((Export *) p)->code[4]))
+
+
+/*
+ * We reuse some of the fields in the save area in the process structure.
+ * This is safe to do, since this space is only actively used when
+ * the process is switched out.
+ */
+#define REDS_IN(p) ((p)->def_arg_reg[5])
+
+/*
+ * Add a byte offset to a pointer to Eterm. This is useful when the
+ * loader has precalculated a byte offset.
+ */
+#define ADD_BYTE_OFFSET(ptr, offset) \
+ ((Eterm *) (((unsigned char *)ptr) + (offset)))
+
+/* We don't check the range if an ordinary switch is used */
+#ifdef NO_JUMP_TABLE
+#define VALID_INSTR(IP) (0 <= (int)(IP) && ((int)(IP) < (NUMBER_OF_OPCODES*2+10)))
+#else
+#define VALID_INSTR(IP) \
+ ((Sint)LabelAddr(emulator_loop) <= (Sint)(IP) && \
+ (Sint)(IP) < (Sint)LabelAddr(end_emulator_loop))
+#endif /* NO_JUMP_TABLE */
+
+#define SET_CP(p, ip) \
+ ASSERT(VALID_INSTR(*(ip))); \
+ (p)->cp = (ip)
+
+#define SET_I(ip) \
+ ASSERT(VALID_INSTR(* (Eterm *)(ip))); \
+ I = (ip)
+
+#define FetchArgs(S1, S2) tmp_arg1 = (S1); tmp_arg2 = (S2)
+
+/*
+ * Store a result into a register given a destination descriptor.
+ */
+
+#define StoreResult(Result, DestDesc) \
+ do { \
+ Eterm stb_reg; \
+ stb_reg = (DestDesc); \
+ CHECK_TERM(Result); \
+ switch (beam_reg_tag(stb_reg)) { \
+ case R_REG_DEF: \
+ r(0) = (Result); break; \
+ case X_REG_DEF: \
+ xb(x_reg_offset(stb_reg)) = (Result); break; \
+ default: \
+ yb(y_reg_offset(stb_reg)) = (Result); break; \
+ } \
+ } while (0)
+
+#define StoreSimpleDest(Src, Dest) Dest = (Src)
+
+/*
+ * Store a result into a register and execute the next instruction.
+ * Dst points to the word with a destination descriptor, which MUST
+ * be just before the next instruction.
+ */
+
+#define StoreBifResult(Dst, Result) \
+ do { \
+ Eterm* stb_next; \
+ Eterm stb_reg; \
+ stb_reg = Arg(Dst); \
+ I += (Dst) + 2; \
+ stb_next = (Eterm *) *I; \
+ CHECK_TERM(Result); \
+ switch (beam_reg_tag(stb_reg)) { \
+ case R_REG_DEF: \
+ r(0) = (Result); Goto(stb_next); \
+ case X_REG_DEF: \
+ xb(x_reg_offset(stb_reg)) = (Result); Goto(stb_next); \
+ default: \
+ yb(y_reg_offset(stb_reg)) = (Result); Goto(stb_next); \
+ } \
+ } while (0)
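+
+/* Hypothetical call-site sketch: for an instruction with operands
+ * (Src, Dst) a handler computes a result and stores it with
+ *
+ *	result = compute(Arg(0));
+ *	StoreBifResult(1, result);
+ *
+ * where Arg(1) holds the destination descriptor placed immediately
+ * before the next instruction, as required above.
+ */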
+
+#define ClauseFail() goto lb_jump_f
+
+#define SAVE_CP(X) \
+ do { \
+ *(X) = make_cp(c_p->cp); \
+ c_p->cp = 0; \
+ } while(0)
+
+#define RESTORE_CP(X) SET_CP(c_p, cp_val(*(X)))
+
+#define ISCATCHEND(instr) ((Eterm *) *(instr) == OpCode(catch_end_y))
+
+/*
+ * Special Beam instructions.
+ */
+
+Eterm beam_apply[2];
+Eterm beam_exit[1];
+Eterm beam_continue_exit[1];
+
+Eterm* em_call_error_handler;
+Eterm* em_apply_bif;
+Eterm* em_call_traced_function;
+
+
+/* NOTE: These should be the only variables containing trace instructions.
+** Sometimes tests are made on the instruction value, and sometimes
+** on the referring variable (one of these), and rogue references
+** will most likely cause chaos.
+*/
+Eterm beam_return_to_trace[1]; /* OpCode(i_return_to_trace) */
+Eterm beam_return_trace[1]; /* OpCode(i_return_trace) */
+Eterm beam_exception_trace[1]; /* UGLY also OpCode(i_return_trace) */
+
+/*
+ * All Beam instructions in numerical order.
+ */
+
+#ifndef NO_JUMP_TABLE
+void** beam_ops;
+#endif
+
+#ifndef ERTS_SMP /* Not supported with smp emulator */
+extern int count_instructions;
+#endif
+
+#if defined(HYBRID)
+#define SWAPIN \
+ g_htop = global_htop; \
+ g_hend = global_hend; \
+ HTOP = HEAP_TOP(c_p); \
+ E = c_p->stop
+
+#define SWAPOUT \
+ global_htop = g_htop; \
+ global_hend = g_hend; \
+ HEAP_TOP(c_p) = HTOP; \
+ c_p->stop = E
+
+#else
+#define SWAPIN \
+ HTOP = HEAP_TOP(c_p); \
+ E = c_p->stop
+
+#define SWAPOUT \
+ HEAP_TOP(c_p) = HTOP; \
+ c_p->stop = E
+
+/*
+ * Use LIGHT_SWAPOUT when the called function
+ * will call HeapOnlyAlloc() (and never HAlloc()).
+ */
+#ifdef DEBUG
+# /* The stack pointer is used in an assertion. */
+# define LIGHT_SWAPOUT SWAPOUT
+#else
+# define LIGHT_SWAPOUT HEAP_TOP(c_p) = HTOP
+#endif
+
+/*
+ * Use LIGHT_SWAPIN when we know that c_p->stop cannot
+ * have been updated (i.e. if there cannot have been
+ * a garbage-collection).
+ */
+
+#define LIGHT_SWAPIN HTOP = HEAP_TOP(c_p)
+
+#endif
+
+#define PRE_BIF_SWAPOUT(P) \
+ HEAP_TOP((P)) = HTOP; \
+ (P)->stop = E; \
+ PROCESS_MAIN_CHK_LOCKS((P)); \
+ ERTS_SMP_UNREQ_PROC_MAIN_LOCK((P))
+
+#if defined(HYBRID)
+# define POST_BIF_GC_SWAPIN_0(_p, _res) \
+ if (((_p)->mbuf) || (MSO(_p).overhead >= BIN_VHEAP_SZ(_p)) ) { \
+ _res = erts_gc_after_bif_call((_p), (_res), NULL, 0); \
+ } \
+ SWAPIN
+
+# define POST_BIF_GC_SWAPIN(_p, _res, _regs, _arity) \
+ if (((_p)->mbuf) || (MSO(_p).overhead >= BIN_VHEAP_SZ(_p)) ) { \
+ _regs[0] = r(0); \
+ _res = erts_gc_after_bif_call((_p), (_res), _regs, (_arity)); \
+ r(0) = _regs[0]; \
+ } \
+ SWAPIN
+#else
+# define POST_BIF_GC_SWAPIN_0(_p, _res) \
+ ERTS_SMP_REQ_PROC_MAIN_LOCK((_p)); \
+ PROCESS_MAIN_CHK_LOCKS((_p)); \
+ if (((_p)->mbuf) || (MSO(_p).overhead >= BIN_VHEAP_SZ(_p)) ) { \
+ _res = erts_gc_after_bif_call((_p), (_res), NULL, 0); \
+ E = (_p)->stop; \
+ } \
+ HTOP = HEAP_TOP((_p))
+
+# define POST_BIF_GC_SWAPIN(_p, _res, _regs, _arity) \
+ ERTS_SMP_REQ_PROC_MAIN_LOCK((_p)); \
+ PROCESS_MAIN_CHK_LOCKS((_p)); \
+ if (((_p)->mbuf) || (MSO(_p).overhead >= BIN_VHEAP_SZ(_p)) ) { \
+ _regs[0] = r(0); \
+ _res = erts_gc_after_bif_call((_p), (_res), _regs, (_arity)); \
+ r(0) = _regs[0]; \
+ E = (_p)->stop; \
+ } \
+ HTOP = HEAP_TOP((_p))
+#endif
+
+#define db(N) (N)
+#define tb(N) (N)
+#define xb(N) (*(Eterm *) (((unsigned char *)reg) + (N)))
+#define yb(N) (*(Eterm *) (((unsigned char *)E) + (N)))
+#define fb(N) (*(double *) (((unsigned char *)&(freg[0].fd)) + (N)))
+#define x(N) reg[N]
+#define y(N) E[N]
+#define r(N) x##N
+
+/*
+ * Makes sure that there are StackNeed + HeapNeed + 1 words available
+ * on the combined heap/stack segment, then allocates StackNeed + 1
+ * words on the stack and saves CP.
+ *
+ * M is number of live registers to preserve during garbage collection
+ */
+
+#define AH(StackNeed, HeapNeed, M) \
+ do { \
+ int needed; \
+ needed = (StackNeed) + 1; \
+ if (E - HTOP < (needed + (HeapNeed))) { \
+ SWAPOUT; \
+ reg[0] = r(0); \
+ PROCESS_MAIN_CHK_LOCKS(c_p); \
+ FCALLS -= erts_garbage_collect(c_p, needed + (HeapNeed), reg, (M)); \
+ PROCESS_MAIN_CHK_LOCKS(c_p); \
+ r(0) = reg[0]; \
+ SWAPIN; \
+ } \
+ E -= needed; \
+ SAVE_CP(E); \
+ } while (0)
+
+#define Allocate(Ns, Live) AH(Ns, 0, Live)
+
+#define AllocateZero(Ns, Live) \
+ do { Eterm* ptr; \
+ int i = (Ns); \
+ AH(i, 0, Live); \
+ for (ptr = E + i; ptr > E; ptr--) { \
+ make_blank(*ptr); \
+ } \
+ } while (0)
+
+#define AllocateHeap(Ns, Nh, Live) AH(Ns, Nh, Live)
+
+#define AllocateHeapZero(Ns, Nh, Live) \
+ do { Eterm* ptr; \
+ int i = (Ns); \
+ AH(i, Nh, Live); \
+ for (ptr = E + i; ptr > E; ptr--) { \
+ make_blank(*ptr); \
+ } \
+ } while (0)
+
+#define AllocateInit(Ns, Live, Y) \
+ do { AH(Ns, 0, Live); make_blank(Y); } while (0)
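+
+/* Example: Allocate(2, 1) guarantees room for two Y registers plus the
+ * saved CP, garbage collecting with one live X register (x0) if the
+ * heap/stack gap is too small, and then pushes CP.
+ */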
+
+/*
+ * Like the AH macro, but allocates no additional heap space.
+ */
+
+#define A(StackNeed, M) AH(StackNeed, 0, M)
+
+#define D(N) \
+ RESTORE_CP(E); \
+ E += (N) + 1;
+
+
+
+#define TestBinVHeap(VNh, Nh, Live) \
+ do { \
+ unsigned need = (Nh); \
+ if ((E - HTOP < need) || (MSO(c_p).overhead + (VNh) >= BIN_VHEAP_SZ(c_p))) {\
+ SWAPOUT; \
+ reg[0] = r(0); \
+ PROCESS_MAIN_CHK_LOCKS(c_p); \
+ FCALLS -= erts_garbage_collect(c_p, need, reg, (Live)); \
+ PROCESS_MAIN_CHK_LOCKS(c_p); \
+ r(0) = reg[0]; \
+ SWAPIN; \
+ } \
+ } while (0)
+
+
+
+/*
+ * Check if Nh words of heap are available; if not, do a garbage collection.
+ * Live is number of active argument registers to be preserved.
+ */
+
+#define TestHeap(Nh, Live) \
+ do { \
+ unsigned need = (Nh); \
+ if (E - HTOP < need) { \
+ SWAPOUT; \
+ reg[0] = r(0); \
+ PROCESS_MAIN_CHK_LOCKS(c_p); \
+ FCALLS -= erts_garbage_collect(c_p, need, reg, (Live)); \
+ PROCESS_MAIN_CHK_LOCKS(c_p); \
+ r(0) = reg[0]; \
+ SWAPIN; \
+ } \
+ } while (0)
+
+/*
+ * Check if Nh words of heap are available; if not, do a garbage collection.
+ * Live is number of active argument registers to be preserved.
+ * Takes special care to preserve Extra if a garbage collection occurs.
+ */
+
+#define TestHeapPreserve(Nh, Live, Extra) \
+ do { \
+ unsigned need = (Nh); \
+ if (E - HTOP < need) { \
+ SWAPOUT; \
+ reg[0] = r(0); \
+ reg[Live] = Extra; \
+ PROCESS_MAIN_CHK_LOCKS(c_p); \
+ FCALLS -= erts_garbage_collect(c_p, need, reg, (Live)+1); \
+ PROCESS_MAIN_CHK_LOCKS(c_p); \
+ if (Live > 0) { \
+ r(0) = reg[0]; \
+ } \
+ Extra = reg[Live]; \
+ SWAPIN; \
+ } \
+ } while (0)
+
+#ifdef HYBRID
+#ifdef INCREMENTAL
+#define TestGlobalHeap(Nh, Live, hp) \
+ do { \
+ unsigned need = (Nh); \
+ ASSERT(global_heap <= g_htop && g_htop <= global_hend); \
+ SWAPOUT; \
+ reg[0] = r(0); \
+ FCALLS -= need; \
+ (hp) = IncAlloc(c_p,need,reg,(Live)); \
+ r(0) = reg[0]; \
+ SWAPIN; \
+ } while (0)
+#else
+#define TestGlobalHeap(Nh, Live, hp) \
+ do { \
+ unsigned need = (Nh); \
+ ASSERT(global_heap <= g_htop && g_htop <= global_hend); \
+ if (g_hend - g_htop < need) { \
+ SWAPOUT; \
+ reg[0] = r(0); \
+ FCALLS -= erts_global_garbage_collect(c_p, need, reg, (Live)); \
+ r(0) = reg[0]; \
+ SWAPIN; \
+ } \
+ (hp) = global_htop; \
+ } while (0)
+#endif
+#endif /* HYBRID */
+
+#define Init(N) make_blank(yb(N))
+
+#define Init2(Y1, Y2) do { make_blank(Y1); make_blank(Y2); } while (0)
+#define Init3(Y1, Y2, Y3) \
+ do { make_blank(Y1); make_blank(Y2); make_blank(Y3); } while (0)
+
+#define MakeFun(FunP, NumFree) \
+ do { \
+ SWAPOUT; \
+ reg[0] = r(0); \
+ r(0) = new_fun(c_p, reg, (ErlFunEntry *) FunP, NumFree); \
+ SWAPIN; \
+ } while (0)
+
+
+/*
+ * Check that we haven't used up all reductions, then jump to the
+ * function pointed to by the I register. If we are out of reductions,
+ * do a context switch.
+ */
+
+#define DispatchMacro() \
+ do { \
+ Eterm* dis_next; \
+ dis_next = (Eterm *) *I; \
+ CHECK_ARGS(I); \
+ if (FCALLS > 0 || FCALLS > neg_o_reds) { \
+ FCALLS--; \
+ Goto(dis_next); \
+ } else { \
+ goto context_switch; \
+ } \
+ } while (0)
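+
+/* FCALLS normally counts down from the reduction quota towards zero;
+ * when call saving is active it instead runs from zero down towards
+ * neg_o_reds (the negated original reduction count declared in
+ * process_main()), which is why the test above has two sides.
+ */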
+
+#define DispatchMacroFun() \
+ do { \
+ Eterm* dis_next; \
+ dis_next = (Eterm *) *I; \
+ CHECK_ARGS(I); \
+ if (FCALLS > 0 || FCALLS > neg_o_reds) { \
+ FCALLS--; \
+ Goto(dis_next); \
+ } else { \
+ goto context_switch_fun; \
+ } \
+ } while (0)
+
+#define DispatchMacrox() \
+ do { \
+ if (FCALLS > 0) { \
+ Eterm* dis_next; \
+ SET_I(((Export *) Arg(0))->address); \
+ dis_next = (Eterm *) *I; \
+ FCALLS--; \
+ CHECK_ARGS(I); \
+ Goto(dis_next); \
+ } else if (ERTS_PROC_GET_SAVED_CALLS_BUF(c_p) \
+ && FCALLS > neg_o_reds) { \
+ goto save_calls1; \
+ } else { \
+ SET_I(((Export *) Arg(0))->address); \
+ CHECK_ARGS(I); \
+ goto context_switch; \
+ } \
+ } while (0)
+
+#ifdef DEBUG
+/*
+ * To simplify breakpoint setting, put the code in one place only and jump to it.
+ */
+# define Dispatch() goto do_dispatch
+# define Dispatchx() goto do_dispatchx
+# define Dispatchfun() goto do_dispatchfun
+#else
+/*
+ * Inline for speed.
+ */
+# define Dispatch() DispatchMacro()
+# define Dispatchx() DispatchMacrox()
+# define Dispatchfun() DispatchMacroFun()
+#endif
+
+#define Self(R) R = c_p->id
+#define Node(R) R = erts_this_node->sysname
+
+#define Arg(N) I[(N)+1]
+#define Next(N) \
+ I += (N) + 1; \
+ ASSERT(VALID_INSTR(*I)); \
+ Goto(*I)
+
+#define PreFetch(N, Dst) do { Dst = (Eterm *) *(I + N + 1); } while (0)
+#define NextPF(N, Dst) \
+ I += N + 1; \
+ ASSERT(VALID_INSTR(Dst)); \
+ Goto(Dst)
+
+#define GetR(pos, tr) \
+ do { \
+ tr = Arg(pos); \
+ switch (beam_reg_tag(tr)) { \
+ case R_REG_DEF: tr = r(0); break; \
+ case X_REG_DEF: tr = xb(x_reg_offset(tr)); break; \
+ case Y_REG_DEF: ASSERT(y_reg_offset(tr) >= 1); tr = yb(y_reg_offset(tr)); break; \
+ } \
+ CHECK_TERM(tr); \
+ } while (0)
+
+#define GetArg1(N, Dst) GetR((N), Dst)
+
+#define GetArg2(N, Dst1, Dst2) \
+ do { \
+ GetR(N, Dst1); \
+ GetR((N)+1, Dst2); \
+ } while (0)
+
+#define PutList(H, T, Dst, Store) \
+ do { \
+ HTOP[0] = (H); HTOP[1] = (T); \
+ Store(make_list(HTOP), Dst); \
+ HTOP += 2; \
+ } while (0)
+
+#define Move(Src, Dst, Store) \
+ do { \
+ Eterm term = (Src); \
+ Store(term, Dst); \
+ } while (0)
+
+#define Move2(src1, dst1, src2, dst2) dst1 = (src1); dst2 = (src2)
+
+#define MoveGenDest(src, dstp) \
+ if ((dstp) == NULL) { r(0) = (src); } else { *(dstp) = src; }
+
+#define MoveReturn(Src, Dest) \
+ (Dest) = (Src); \
+ I = c_p->cp; \
+ ASSERT(VALID_INSTR(*c_p->cp)); \
+ c_p->cp = 0; \
+ CHECK_TERM(r(0)); \
+ Goto(*I)
+
+#define DeallocateReturn(Deallocate) \
+ do { \
+ int words_to_pop = (Deallocate); \
+ SET_I(cp_val(*E)); \
+ E = ADD_BYTE_OFFSET(E, words_to_pop); \
+ CHECK_TERM(r(0)); \
+ Goto(*I); \
+ } while (0)
+
+#define MoveDeallocateReturn(Src, Dest, Deallocate) \
+ (Dest) = (Src); \
+ DeallocateReturn(Deallocate)
+
+#define MoveCall(Src, Dest, CallDest, Size) \
+ (Dest) = (Src); \
+ SET_CP(c_p, I+Size+1); \
+ SET_I((Eterm *) CallDest); \
+ Dispatch();
+
+#define MoveCallLast(Src, Dest, CallDest, Deallocate) \
+ (Dest) = (Src); \
+ RESTORE_CP(E); \
+ E = ADD_BYTE_OFFSET(E, (Deallocate)); \
+ SET_I((Eterm *) CallDest); \
+ Dispatch();
+
+#define MoveCallOnly(Src, Dest, CallDest) \
+ (Dest) = (Src); \
+ SET_I((Eterm *) CallDest); \
+ Dispatch();
+
+#define GetList(Src, H, T) do { \
+ Eterm* tmp_ptr = list_val(Src); \
+ H = CAR(tmp_ptr); \
+ T = CDR(tmp_ptr); } while (0)
+
+#define GetTupleElement(Src, Element, Dest) \
+ do { \
+ tmp_arg1 = (Eterm) (((unsigned char *) tuple_val(Src)) + (Element)); \
+ (Dest) = (*(Eterm *)tmp_arg1); \
+ } while (0)
+
+#define ExtractNextElement(Dest) \
+ tmp_arg1 += sizeof(Eterm); \
+ (Dest) = (* (Eterm *) (((unsigned char *) tmp_arg1)))
+
+#define ExtractNextElement2(Dest) \
+ do { \
+ Eterm* ene_dstp = &(Dest); \
+ ene_dstp[0] = ((Eterm *) tmp_arg1)[1]; \
+ ene_dstp[1] = ((Eterm *) tmp_arg1)[2]; \
+ tmp_arg1 += sizeof(Eterm) + sizeof(Eterm); \
+ } while (0)
+
+#define ExtractNextElement3(Dest) \
+ do { \
+ Eterm* ene_dstp = &(Dest); \
+ ene_dstp[0] = ((Eterm *) tmp_arg1)[1]; \
+ ene_dstp[1] = ((Eterm *) tmp_arg1)[2]; \
+ ene_dstp[2] = ((Eterm *) tmp_arg1)[3]; \
+ tmp_arg1 += 3*sizeof(Eterm); \
+ } while (0)
+
+#define ExtractNextElement4(Dest) \
+ do { \
+ Eterm* ene_dstp = &(Dest); \
+ ene_dstp[0] = ((Eterm *) tmp_arg1)[1]; \
+ ene_dstp[1] = ((Eterm *) tmp_arg1)[2]; \
+ ene_dstp[2] = ((Eterm *) tmp_arg1)[3]; \
+ ene_dstp[3] = ((Eterm *) tmp_arg1)[4]; \
+ tmp_arg1 += 4*sizeof(Eterm); \
+ } while (0)
+
+#define ExtractElement(Element, Dest) \
+ do { \
+ tmp_arg1 += (Element); \
+ (Dest) = (* (Eterm *) tmp_arg1); \
+ } while (0)
+
+#define PutTuple(Arity, Src, Dest) \
+ ASSERT(is_arity_value(Arity)); \
+ Dest = make_tuple(HTOP); \
+ HTOP[0] = (Arity); \
+ HTOP[1] = (Src); \
+ HTOP += 2
+
+#define Put(Word) *HTOP++ = (Word)
+
+#define EqualImmed(X, Y, Action) if (X != Y) { Action; }
+
+#define IsFloat(Src, Fail) if (is_not_float(Src)) { Fail; }
+
+#define IsInteger(Src, Fail) if (is_not_integer(Src)) { Fail; }
+
+#define IsNumber(X, Fail) if (is_not_integer(X) && is_not_float(X)) { Fail; }
+
+#define IsAtom(Src, Fail) if (is_not_atom(Src)) { Fail; }
+
+#define IsIntegerAllocate(Src, Need, Alive, Fail) \
+ if (is_not_integer(Src)) { Fail; } \
+ A(Need, Alive)
+
+#define IsNil(Src, Fail) if (is_not_nil(Src)) { Fail; }
+
+#define IsList(Src, Fail) if (is_not_list(Src) && is_not_nil(Src)) { Fail; }
+
+#define IsNonemptyList(Src, Fail) if (is_not_list(Src)) { Fail; }
+
+#define IsNonemptyListAllocate(Src, Need, Alive, Fail) \
+ if (is_not_list(Src)) { Fail; } \
+ A(Need, Alive)
+
+#define IsNonemptyListTestHeap(Src, Need, Alive, Fail) \
+ if (is_not_list(Src)) { Fail; } \
+ TestHeap(Need, Alive)
+
+#define IsTuple(X, Action) if (is_not_tuple(X)) Action
+
+#define IsArity(Pointer, Arity, Fail) \
+ if (*(Eterm *)(tmp_arg1 = (Eterm)tuple_val(Pointer)) != (Arity)) { Fail; }
+
+#define IsFunction(X, Action) \
+ do { \
+ if ( !(is_any_fun(X)) ) { \
+ Action; \
+ } \
+ } while (0)
+
+#define IsFunction2(F, A, Action) \
+ do { \
+ if (is_function_2(c_p, F, A) != am_true ) {\
+ Action; \
+ } \
+ } while (0)
+
+#define IsTupleOfArity(Src, Arity, Fail) \
+ do { \
+ if (is_not_tuple(Src) || *(Eterm *)(tmp_arg1 = (Eterm) tuple_val(Src)) != Arity) { \
+ Fail; \
+ } \
+ } while (0)
+
+#define IsBoolean(X, Fail) if ((X) != am_true && (X) != am_false) { Fail; }
+
+#define IsBinary(Src, Fail) \
+ if (is_not_binary(Src) || binary_bitsize(Src) != 0) { Fail; }
+
+#define IsBitstring(Src, Fail) \
+ if (is_not_binary(Src)) { Fail; }
+
+#ifdef ARCH_64
+#define BsSafeMul(A, B, Fail, Target) \
+ do { Uint64 _res = (A) * (B); \
+ if (_res / B != A) { Fail; } \
+ Target = _res; \
+ } while (0)
+#else
+#define BsSafeMul(A, B, Fail, Target) \
+ do { Uint64 _res = (Uint64)(A) * (Uint64)(B); \
+ if ((_res >> (8*sizeof(Uint))) != 0) { Fail; } \
+ Target = _res; \
+ } while (0)
+#endif
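+
+/* Worked example for the 32-bit branch of BsSafeMul: A = 0x10000 and
+ * B = 0x10000 give a 64-bit product of 0x100000000; its upper 32 bits
+ * are nonzero, so Fail is taken instead of silently truncating the
+ * size.
+ */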
+
+#define BsGetFieldSize(Bits, Unit, Fail, Target) \
+ do { \
+ Sint _signed_size; Uint _uint_size; \
+ if (is_small(Bits)) { \
+ _signed_size = signed_val(Bits); \
+ if (_signed_size < 0) { Fail; } \
+ _uint_size = (Uint) _signed_size; \
+ } else { \
+ if (!term_to_Uint(Bits, &temp_bits)) { Fail; } \
+ _uint_size = temp_bits; \
+ } \
+ BsSafeMul(_uint_size, Unit, Fail, Target); \
+ } while (0)
+
+#define BsGetUncheckedFieldSize(Bits, Unit, Fail, Target) \
+ do { \
+ Sint _signed_size; Uint _uint_size; \
+ if (is_small(Bits)) { \
+ _signed_size = signed_val(Bits); \
+ if (_signed_size < 0) { Fail; } \
+ _uint_size = (Uint) _signed_size; \
+ } else { \
+ if (!term_to_Uint(Bits, &temp_bits)) { Fail; } \
+ _uint_size = (Uint) temp_bits; \
+ } \
+ Target = _uint_size * Unit; \
+ } while (0)
+
+#define BsGetFloat2(Ms, Live, Sz, Flags, Dst, Store, Fail) \
+ do { \
+ ErlBinMatchBuffer *_mb; \
+ Eterm _result; Sint _size; \
+ if (!is_small(Sz) || (_size = unsigned_val(Sz)) > 64) { Fail; } \
+ _size *= ((Flags) >> 3); \
+ TestHeap(FLOAT_SIZE_OBJECT, Live); \
+ _mb = ms_matchbuffer(Ms); \
+ LIGHT_SWAPOUT; \
+ _result = erts_bs_get_float_2(c_p, _size, (Flags), _mb); \
+ LIGHT_SWAPIN; \
+ if (is_non_value(_result)) { Fail; } \
+ else { Store(_result, Dst); } \
+ } while (0)
+
+#define BsGetBinaryImm_2(Ms, Live, Sz, Flags, Dst, Store, Fail) \
+ do { \
+ ErlBinMatchBuffer *_mb; \
+ Eterm _result; \
+ TestHeap(heap_bin_size(ERL_ONHEAP_BIN_LIMIT), Live); \
+ _mb = ms_matchbuffer(Ms); \
+ LIGHT_SWAPOUT; \
+ _result = erts_bs_get_binary_2(c_p, (Sz), (Flags), _mb); \
+ LIGHT_SWAPIN; \
+ if (is_non_value(_result)) { Fail; } \
+ else { Store(_result, Dst); } \
+ } while (0)
+
+#define BsGetBinary_2(Ms, Live, Sz, Flags, Dst, Store, Fail) \
+ do { \
+ ErlBinMatchBuffer *_mb; \
+ Eterm _result; Uint _size; \
+ BsGetFieldSize(Sz, ((Flags) >> 3), Fail, _size); \
+ TestHeap(ERL_SUB_BIN_SIZE, Live); \
+ _mb = ms_matchbuffer(Ms); \
+ LIGHT_SWAPOUT; \
+ _result = erts_bs_get_binary_2(c_p, _size, (Flags), _mb); \
+ LIGHT_SWAPIN; \
+ if (is_non_value(_result)) { Fail; } \
+ else { Store(_result, Dst); } \
+ } while (0)
+
+#define BsGetBinaryAll_2(Ms, Live, Unit, Dst, Store, Fail) \
+ do { \
+ ErlBinMatchBuffer *_mb; \
+ Eterm _result; \
+ TestHeap(ERL_SUB_BIN_SIZE, Live); \
+ _mb = ms_matchbuffer(Ms); \
+ if (((_mb->size - _mb->offset) % Unit) == 0) { \
+ LIGHT_SWAPOUT; \
+ _result = erts_bs_get_binary_all_2(c_p, _mb); \
+ LIGHT_SWAPIN; \
+ ASSERT(is_value(_result)); \
+ Store(_result, Dst); \
+ } else { Fail; } \
+ } while (0)
+
+#define BsSkipBits2(Ms, Bits, Unit, Fail) \
+ do { \
+ ErlBinMatchBuffer *_mb; \
+ size_t new_offset; \
+ Uint _size; \
+ _mb = ms_matchbuffer(Ms); \
+ BsGetFieldSize(Bits, Unit, Fail, _size); \
+ new_offset = _mb->offset + _size; \
+ if (new_offset <= _mb->size) { _mb->offset = new_offset; } \
+ else { Fail; } \
+ } while (0)
+
+#define BsSkipBitsAll2(Ms, Unit, Fail) \
+ do { \
+ ErlBinMatchBuffer *_mb; \
+ _mb = ms_matchbuffer(Ms); \
+ if (((_mb->size - _mb->offset) % Unit) == 0) {_mb->offset = _mb->size; } \
+ else { Fail; } \
+ } while (0)
+
+#define BsSkipBitsImm2(Ms, Bits, Fail) \
+ do { \
+ ErlBinMatchBuffer *_mb; \
+ size_t new_offset; \
+ _mb = ms_matchbuffer(Ms); \
+ new_offset = _mb->offset + (Bits); \
+ if (new_offset <= _mb->size) { _mb->offset = new_offset; } \
+ else { Fail; } \
+ } while (0)
+
+#define NewBsPutIntegerImm(Sz, Flags, Src) \
+ do { \
+ if (!erts_new_bs_put_integer(ERL_BITS_ARGS_3((Src), (Sz), (Flags)))) { goto badarg; } \
+ } while (0)
+
+#define NewBsPutInteger(Sz, Flags, Src) \
+ do { \
+ Sint _size; \
+ BsGetUncheckedFieldSize(Sz, ((Flags) >> 3), goto badarg, _size); \
+ if (!erts_new_bs_put_integer(ERL_BITS_ARGS_3((Src), _size, (Flags)))) \
+ { goto badarg; } \
+ } while (0)
+
+#define NewBsPutFloatImm(Sz, Flags, Src) \
+ do { \
+ if (!erts_new_bs_put_float(c_p, (Src), (Sz), (Flags))) { goto badarg; } \
+ } while (0)
+
+#define NewBsPutFloat(Sz, Flags, Src) \
+ do { \
+ Sint _size; \
+ BsGetUncheckedFieldSize(Sz, ((Flags) >> 3), goto badarg, _size); \
+ if (!erts_new_bs_put_float(c_p, (Src), _size, (Flags))) { goto badarg; } \
+ } while (0)
+
+#define NewBsPutBinary(Sz, Flags, Src) \
+ do { \
+ Sint _size; \
+ BsGetUncheckedFieldSize(Sz, ((Flags) >> 3), goto badarg, _size); \
+ if (!erts_new_bs_put_binary(ERL_BITS_ARGS_2((Src), _size))) { goto badarg; } \
+ } while (0)
+
+#define NewBsPutBinaryImm(Sz, Src) \
+ do { \
+ if (!erts_new_bs_put_binary(ERL_BITS_ARGS_2((Src), (Sz)))) { goto badarg; } \
+ } while (0)
+
+#define NewBsPutBinaryAll(Src, Unit) \
+ do { \
+ if (!erts_new_bs_put_binary_all(ERL_BITS_ARGS_2((Src), (Unit)))) { goto badarg; } \
+ } while (0)
+
+
+#define IsPort(Src, Fail) if (is_not_port(Src)) { Fail; }
+#define IsPid(Src, Fail) if (is_not_pid(Src)) { Fail; }
+#define IsRef(Src, Fail) if (is_not_ref(Src)) { Fail; }
+
+static BifFunction translate_gc_bif(void* gcf);
+static Eterm* handle_error(Process* c_p, Eterm* pc, Eterm* reg, BifFunction bf);
+static Eterm* next_catch(Process* c_p, Eterm *reg);
+static void terminate_proc(Process* c_p, Eterm Value);
+static Eterm add_stacktrace(Process* c_p, Eterm Value, Eterm exc);
+static void save_stacktrace(Process* c_p, Eterm* pc, Eterm* reg,
+ BifFunction bf, Eterm args);
+static struct StackTrace * get_trace_from_exc(Eterm exc);
+static Eterm make_arglist(Process* c_p, Eterm* reg, int a);
+static Eterm call_error_handler(Process* p, Eterm* ip, Eterm* reg);
+static Eterm call_breakpoint_handler(Process* p, Eterm* fi, Eterm* reg);
+static Uint* fixed_apply(Process* p, Eterm* reg, Uint arity);
+static Eterm* apply(Process* p, Eterm module, Eterm function,
+ Eterm args, Eterm* reg);
+static int hibernate(Process* c_p, Eterm module, Eterm function,
+ Eterm args, Eterm* reg);
+static Eterm* call_fun(Process* p, int arity, Eterm* reg, Eterm args);
+static Eterm* apply_fun(Process* p, Eterm fun, Eterm args, Eterm* reg);
+static Eterm new_fun(Process* p, Eterm* reg, ErlFunEntry* fe, int num_free);
+static BIF_RETTYPE nif_dispatcher_0(Process* p, Uint* I);
+static BIF_RETTYPE nif_dispatcher_1(Process* p, Eterm arg1, Uint* I);
+static BIF_RETTYPE nif_dispatcher_2(Process* p, Eterm arg1, Eterm arg2, Uint* I);
+static BIF_RETTYPE nif_dispatcher_3(Process* p, Eterm arg1, Eterm arg2, Eterm arg3, Uint* I);
+
+#if defined(_OSE_) || defined(VXWORKS)
+static int init_done;
+#endif
+
+void
+init_emulator(void)
+{
+#if defined(_OSE_) || defined(VXWORKS)
+ init_done = 0;
+#endif
+ process_main();
+}
+
+/*
+ * On certain platforms, make sure that the main variables really are placed
+ * in registers.
+ */
+
+#if defined(__GNUC__) && defined(sparc) && !defined(DEBUG)
+# define REG_x0 asm("%l0")
+# define REG_xregs asm("%l1")
+# define REG_htop asm("%l2")
+# define REG_stop asm("%l3")
+# define REG_I asm("%l4")
+# define REG_fcalls asm("%l5")
+# define REG_tmp_arg1 asm("%l6")
+# define REG_tmp_arg2 asm("%l7")
+#else
+# define REG_x0
+# define REG_xregs
+# define REG_htop
+# define REG_stop
+# define REG_I
+# define REG_fcalls
+# define REG_tmp_arg1
+# define REG_tmp_arg2
+#endif
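+
+/*
+ * Illustration (sketch): with the GCC/SPARC definitions above, the
+ * declaration "register Eterm x0 REG_x0 = NIL;" below expands to
+ * "register Eterm x0 asm("%l0") = NIL;", pinning x0 to the local
+ * register %l0 for the whole of process_main(). On other platforms the
+ * REG_* macros expand to nothing and register allocation is left to
+ * the compiler.
+ */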
+
+/*
+ * process_main() is called twice:
+ * The first call performs some initialisation, including exporting
+ * the instructions' C labels to the loader.
+ * The second call starts execution of BEAM code. This call never returns.
+ */
+void process_main(void)
+{
+#if !defined(_OSE_) && !defined(VXWORKS)
+ static int init_done = 0;
+#endif
+ Process* c_p = NULL;
+ int reds_used;
+#ifdef DEBUG
+ Eterm pid;
+#endif
+
+ /*
+ * X register zero; also called r(0)
+ */
+ register Eterm x0 REG_x0 = NIL;
+
+ /* Pointer to X registers: x(1)..x(N). reg[0] is used only when doing GC;
+ * in all other cases x0 is used.
+ */
+ register Eterm* reg REG_xregs = NULL;
+
+ /*
+ * Top of heap (next free location); grows upwards.
+ */
+ register Eterm* HTOP REG_htop = NULL;
+
+
+#ifdef HYBRID
+ Eterm *g_htop;
+ Eterm *g_hend;
+#endif
+
+ /* Stack pointer. Grows downwards; points
+ * to last item pushed (normally a saved
+ * continuation pointer).
+ */
+ register Eterm* E REG_stop = NULL;
+
+ /*
+ * Pointer to next threaded instruction.
+ */
+ register Eterm *I REG_I = NULL;
+
+ /* Number of reductions left. This function
+ * returns to the scheduler when FCALLS reaches zero.
+ */
+ register Sint FCALLS REG_fcalls = 0;
+
+ /*
+ * Temporaries used for picking up arguments for instructions.
+ */
+ register Eterm tmp_arg1 REG_tmp_arg1 = NIL;
+ register Eterm tmp_arg2 REG_tmp_arg2 = NIL;
+ Eterm tmp_big[2]; /* Temporary buffer for small bignums. */
+
+#ifndef ERTS_SMP
+ static Eterm save_reg[ERTS_X_REGS_ALLOCATED];
+ /* X registers -- not used directly, but
+ * through 'reg', because using it directly
+ * needs two instructions on a SPARC,
+ * while using it through reg needs only
+ * one.
+ */
+
+ /*
+ * Floating point registers.
+ */
+ static FloatDef freg[MAX_REG];
+#else
+ /* X regisers and floating point registers are located in
+ * scheduler specific data.
+ */
+ register FloatDef *freg;
+#endif
+
+ /*
+ * For keeping the negative old value of 'reds' when call saving is active.
+ */
+ int neg_o_reds = 0;
+
+ Eterm (*arith_func)(Process* p, Eterm* reg, Uint live);
+
+#ifndef NO_JUMP_TABLE
+ static void* opcodes[] = { DEFINE_OPCODES };
+#ifdef ERTS_OPCODE_COUNTER_SUPPORT
+ static void* counting_opcodes[] = { DEFINE_COUNTING_OPCODES };
+#endif
+#else
+ int Go;
+#endif
+
+ Uint temp_bits; /* Temporary used by BsSkipBits2 & BsGetInteger2 */
+
+ ERL_BITS_DECLARE_STATEP; /* Has to be last declaration */
+
+
+ /*
+ * Note: In this function, we attempt to place rarely executed code towards
+ * the end of the function, in the hope that the cache hit rate will be better.
+ * The initialization code is only run once, so it is at the very end.
+ *
+ * Note: c_p->arity must be set to reflect the number of useful terms in
+ * c_p->arg_reg before calling the scheduler.
+ */
+
+ if (!init_done) {
+ init_done = 1;
+ goto init_emulator;
+ }
+#ifndef ERTS_SMP
+ reg = save_reg; /* XXX: probably wastes a register on x86 */
+#endif
+ c_p = NULL;
+ reds_used = 0;
+ goto do_schedule1;
+
+ do_schedule:
+ reds_used = REDS_IN(c_p) - FCALLS;
+ do_schedule1:
+ PROCESS_MAIN_CHK_LOCKS(c_p);
+ ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p);
+ c_p = schedule(c_p, reds_used);
+#ifdef DEBUG
+ pid = c_p->id;
+#endif
+ ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p);
+ PROCESS_MAIN_CHK_LOCKS(c_p);
+#ifdef ERTS_SMP
+ reg = c_p->scheduler_data->save_reg;
+ freg = c_p->scheduler_data->freg;
+#endif
+ ERL_BITS_RELOAD_STATEP(c_p);
+ {
+ int reds;
+ Eterm* argp;
+ Eterm* next;
+ int i;
+
+ argp = c_p->arg_reg;
+ for (i = c_p->arity - 1; i > 0; i--) {
+ reg[i] = argp[i];
+ CHECK_TERM(reg[i]);
+ }
+
+ /*
+ * We put the original reduction count in the process structure, to reduce
+ * the code size (referencing a field in a struct through a pointer stored
+ * in a register gives smaller code than referencing a global variable).
+ */
+
+ SET_I(c_p->i);
+
+ reds = c_p->fcalls;
+ if (ERTS_PROC_GET_SAVED_CALLS_BUF(c_p)
+ && (c_p->trace_flags & F_SENSITIVE) == 0) {
+ neg_o_reds = -reds;
+ FCALLS = REDS_IN(c_p) = 0;
+ } else {
+ neg_o_reds = 0;
+ FCALLS = REDS_IN(c_p) = reds;
+ }
+
+ next = (Eterm *) *I;
+ r(0) = c_p->arg_reg[0];
+#ifdef HARDDEBUG
+ if (c_p->arity > 0) {
+ CHECK_TERM(r(0));
+ }
+#endif
+ SWAPIN;
+ ASSERT(VALID_INSTR(next));
+ Goto(next);
+ }
+
+#if defined(DEBUG) || defined(NO_JUMP_TABLE)
+ emulator_loop:
+#endif
+
+#ifdef NO_JUMP_TABLE
+ switch (Go) {
+#endif
+#include "beam_hot.h"
+
+#define STORE_ARITH_RESULT(res) StoreBifResult(2, (res));
+#define ARITH_FUNC(name) erts_gc_##name
+
+ OpCase(i_plus_jId):
+ {
+ Eterm result;
+
+ if (is_both_small(tmp_arg1, tmp_arg2)) {
+ Sint i = signed_val(tmp_arg1) + signed_val(tmp_arg2);
+ ASSERT(MY_IS_SSMALL(i) == IS_SSMALL(i));
+ if (MY_IS_SSMALL(i)) {
+ result = make_small(i);
+ STORE_ARITH_RESULT(result);
+ }
+
+ }
+ arith_func = ARITH_FUNC(mixed_plus);
+ goto do_big_arith2;
+ }
+
+ OpCase(i_minus_jId):
+ {
+ Eterm result;
+
+ if (is_both_small(tmp_arg1, tmp_arg2)) {
+ Sint i = signed_val(tmp_arg1) - signed_val(tmp_arg2);
+ ASSERT(MY_IS_SSMALL(i) == IS_SSMALL(i));
+ if (MY_IS_SSMALL(i)) {
+ result = make_small(i);
+ STORE_ARITH_RESULT(result);
+ }
+ }
+ arith_func = ARITH_FUNC(mixed_minus);
+ goto do_big_arith2;
+ }
+
+ OpCase(i_is_lt_f):
+ if (CMP_GE(tmp_arg1, tmp_arg2)) {
+ ClauseFail();
+ }
+ Next(1);
+
+ OpCase(i_is_ge_f):
+ if (CMP_LT(tmp_arg1, tmp_arg2)) {
+ ClauseFail();
+ }
+ Next(1);
+
+ OpCase(i_is_eq_f):
+ if (CMP_NE(tmp_arg1, tmp_arg2)) {
+ ClauseFail();
+ }
+ Next(1);
+
+ OpCase(i_is_ne_f):
+ if (CMP_EQ(tmp_arg1, tmp_arg2)) {
+ ClauseFail();
+ }
+ Next(1);
+
+ OpCase(i_is_eq_exact_f):
+ if (!EQ(tmp_arg1, tmp_arg2)) {
+ ClauseFail();
+ }
+ Next(1);
+
+ OpCase(i_move_call_only_fcr): {
+ r(0) = Arg(1);
+ }
+ /* FALL THROUGH */
+ OpCase(i_call_only_f): {
+ SET_I((Eterm *) Arg(0));
+ Dispatch();
+ }
+
+ OpCase(i_move_call_last_fPcr): {
+ r(0) = Arg(2);
+ }
+ /* FALL THROUGH */
+ OpCase(i_call_last_fP): {
+ RESTORE_CP(E);
+ E = ADD_BYTE_OFFSET(E, Arg(1));
+ SET_I((Eterm *) Arg(0));
+ Dispatch();
+ }
+
+ OpCase(i_move_call_crf): {
+ r(0) = Arg(0);
+ I++;
+ }
+ /* FALL THROUGH */
+ OpCase(i_call_f): {
+ SET_CP(c_p, I+2);
+ SET_I((Eterm *) Arg(0));
+ Dispatch();
+ }
+
+ OpCase(i_move_call_ext_last_ePcr): {
+ r(0) = Arg(2);
+ }
+ /* FALL THROUGH */
+ OpCase(i_call_ext_last_eP):
+ RESTORE_CP(E);
+ E = ADD_BYTE_OFFSET(E, Arg(1));
+
+ /*
+ * Note: The pointer to the export entry is never NULL; if the module
+ * is not loaded, it points to code which will invoke the error handler
+ * (see lb_call_error_handler below).
+ */
+ Dispatchx();
+
+ OpCase(i_move_call_ext_cre): {
+ r(0) = Arg(0);
+ I++;
+ }
+ /* FALL THROUGH */
+ OpCase(i_call_ext_e):
+ SET_CP(c_p, I+2);
+ Dispatchx();
+
+ OpCase(i_move_call_ext_only_ecr): {
+ r(0) = Arg(1);
+ }
+ /* FALL THROUGH */
+ OpCase(i_call_ext_only_e):
+ Dispatchx();
+
+ OpCase(init_y): {
+ Eterm* next;
+
+ PreFetch(1, next);
+ make_blank(yb(Arg(0)));
+ NextPF(1, next);
+ }
+
+ OpCase(i_trim_I): {
+ Eterm* next;
+ Uint words;
+ Uint cp;
+
+ words = Arg(0);
+ cp = E[0];
+ PreFetch(1, next);
+ E += words;
+ E[0] = cp;
+ NextPF(1, next);
+ }
+
+ OpCase(return): {
+ SET_I(c_p->cp);
+ /*
+ * We must clear the CP to make sure that a stale value does not
+ * create a false module dependency preventing code upgrades.
+ * It also means that we can use the CP in stack backtraces.
+ */
+ c_p->cp = 0;
+ CHECK_TERM(r(0));
+ Goto(*I);
+ }
+
+ OpCase(test_heap_1_put_list_Iy): {
+ Eterm* next;
+
+ PreFetch(2, next);
+ TestHeap(Arg(0), 1);
+ PutList(yb(Arg(1)), r(0), r(0), StoreSimpleDest);
+ CHECK_TERM(r(0));
+ NextPF(2, next);
+ }
+
+ OpCase(put_string_IId):
+ {
+ unsigned char* s;
+ int len;
+ Eterm result;
+
+ len = Arg(0); /* Length. */
+ result = NIL;
+ for (s = (unsigned char *) Arg(1); len > 0; s--, len--) {
+ PutList(make_small(*s), result, result, StoreSimpleDest);
+ }
+ StoreBifResult(2, result);
+ }
+
+ /*
+ * Send is almost a standard call-BIF with two arguments, except for:
+ * 1) It cannot be traced.
+ * 2) There is no pointer to the send_2 function stored in
+ * the instruction.
+ */
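+
+ /*
+ * Illustration (sketch): Pid ! Msg arrives here with Pid in r(0) and
+ * Msg in x(1); the send instruction below calls send_2() directly
+ * rather than through a stored BIF pointer.
+ */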
+
+ OpCase(send): {
+ Eterm* next;
+ Eterm result;
+
+ PRE_BIF_SWAPOUT(c_p);
+ c_p->fcalls = FCALLS - 1;
+ result = send_2(c_p, r(0), x(1));
+ PreFetch(0, next);
+ POST_BIF_GC_SWAPIN(c_p, result, reg, 2);
+ FCALLS = c_p->fcalls;
+ if (is_value(result)) {
+ r(0) = result;
+ CHECK_TERM(r(0));
+ NextPF(0, next);
+ } else if (c_p->freason == TRAP) {
+ SET_CP(c_p, I+1);
+ SET_I((Eterm *) c_p->def_arg_reg[3]);
+ SWAPIN;
+ r(0) = c_p->def_arg_reg[0];
+ x(1) = c_p->def_arg_reg[1];
+ Dispatch();
+ }
+ goto find_func_info;
+ }
+
+ OpCase(i_element_jssd): {
+ Eterm index;
+ Eterm tuple;
+
+ /*
+ * Inlined version of element/2 for speed.
+ */
+ GetArg2(1, index, tuple);
+ if (is_small(index) && is_tuple(tuple)) {
+ Eterm* tp = tuple_val(tuple);
+
+ if ((signed_val(index) >= 1) &&
+ (signed_val(index) <= arityval(*tp))) {
+ Eterm result = tp[signed_val(index)];
+ StoreBifResult(3, result);
+ }
+ }
+ }
+ /* Fall through */
+
+ OpCase(badarg_j):
+ badarg:
+ c_p->freason = BADARG;
+ goto lb_Cl_error;
+
+ OpCase(i_fast_element_jIsd): {
+ Eterm tuple;
+
+ /*
+ * Inlined version of element/2 for even more speed.
+ * The first argument is an untagged integer >= 1.
+ * The second argument is guaranteed to be a register operand.
+ */
+ GetArg1(2, tuple);
+ if (is_tuple(tuple)) {
+ Eterm* tp = tuple_val(tuple);
+ tmp_arg2 = Arg(1);
+ if (tmp_arg2 <= arityval(*tp)) {
+ Eterm result = tp[tmp_arg2];
+ StoreBifResult(3, result);
+ }
+ }
+ goto badarg;
+ }
+
+ OpCase(catch_yf):
+ c_p->catches++;
+ yb(Arg(0)) = Arg(1);
+ Next(2);
+
+ OpCase(catch_end_y): {
+ c_p->catches--;
+ make_blank(yb(Arg(0)));
+ if (is_non_value(r(0))) {
+ if (x(1) == am_throw) {
+ r(0) = x(2);
+ } else {
+ if (x(1) == am_error) {
+ SWAPOUT;
+ x(2) = add_stacktrace(c_p, x(2), x(3));
+ SWAPIN;
+ }
+ /* only x(2) is included in the rootset here */
+ if (E - HTOP < 3 || c_p->mbuf) { /* Force GC in case add_stacktrace()
+ * created heap fragments */
+ SWAPOUT;
+ PROCESS_MAIN_CHK_LOCKS(c_p);
+ FCALLS -= erts_garbage_collect(c_p, 3, reg+2, 1);
+ PROCESS_MAIN_CHK_LOCKS(c_p);
+ SWAPIN;
+ }
+ r(0) = TUPLE2(HTOP, am_EXIT, x(2));
+ HTOP += 3;
+ }
+ }
+ CHECK_TERM(r(0));
+ Next(1);
+ }
+
+ OpCase(try_end_y): {
+ c_p->catches--;
+ make_blank(yb(Arg(0)));
+ if (is_non_value(r(0))) {
+ r(0) = x(1);
+ x(1) = x(2);
+ x(2) = x(3);
+ }
+ Next(1);
+ }
+
+ /*
+ * Skeleton for receive statement:
+ *
+ * L1: <-------------------+
+ * <-----------+ |
+ * | |
+ * loop_rec L2 ------+---+ |
+ * ... | | |
+ * remove_message | | |
+ * jump L3 | | |
+ * ... | | |
+ * loop_rec_end L1 --+ | |
+ * L2: <---------------+ |
+ * wait L1 -----------------+ or wait_timeout
+ * timeout
+ *
+ * L3: Code after receive...
+ *
+ *
+ */
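+
+ /*
+ * Illustration (sketch; actual code generation may differ): a receive
+ * such as
+ *
+ * receive
+ * {ok, V} -> V
+ * after 1000 -> timeout
+ * end
+ *
+ * follows the skeleton above: loop_rec picks up a message; matching
+ * code either runs remove_message and jumps past the receive, or
+ * falls through to loop_rec_end to try the next message; wait_timeout
+ * suspends the process, and timeout resets the save pointer when the
+ * 1000 ms expire.
+ */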
+
+ /*
+ * Pick up the next message and place it in x(0).
+ * If no message, jump to a wait or wait_timeout instruction.
+ */
+ OpCase(i_loop_rec_fr):
+ {
+ Eterm* next;
+ ErlMessage* msgp;
+
+ loop_rec__:
+
+ PROCESS_MAIN_CHK_LOCKS(c_p);
+
+ msgp = PEEK_MESSAGE(c_p);
+
+ if (!msgp) {
+#ifdef ERTS_SMP
+ erts_smp_proc_lock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE);
+ /* Make sure messages won't pass exit signals... */
+ if (ERTS_PROC_PENDING_EXIT(c_p)) {
+ erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE);
+ SWAPOUT;
+ goto do_schedule; /* Will be rescheduled for exit */
+ }
+ ERTS_SMP_MSGQ_MV_INQ2PRIVQ(c_p);
+ msgp = PEEK_MESSAGE(c_p);
+ if (msgp)
+ erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE);
+ else {
+#endif
+ SET_I((Eterm *) Arg(0));
+ Goto(*I); /* Jump to a wait or wait_timeout instruction */
+#ifdef ERTS_SMP
+ }
+#endif
+ }
+ ErtsMoveMsgAttachmentIntoProc(msgp, c_p, E, HTOP, FCALLS,
+ {
+ SWAPOUT;
+ reg[0] = r(0);
+ PROCESS_MAIN_CHK_LOCKS(c_p);
+ },
+ {
+ PROCESS_MAIN_CHK_LOCKS(c_p);
+ r(0) = reg[0];
+ SWAPIN;
+ });
+ if (is_non_value(ERL_MESSAGE_TERM(msgp))) {
+ /*
+ * A corrupt distribution message that we weren't able to decode;
+ * remove it...
+ */
+ ASSERT(!msgp->data.attached);
+ UNLINK_MESSAGE(c_p, msgp);
+ free_message(msgp);
+ goto loop_rec__;
+ }
+ PreFetch(1, next);
+ r(0) = ERL_MESSAGE_TERM(msgp);
+ NextPF(1, next);
+ }
+
+ /*
+ * Remove a (matched) message from the message queue.
+ */
+ OpCase(remove_message): {
+ Eterm* next;
+ ErlMessage* msgp;
+
+ PROCESS_MAIN_CHK_LOCKS(c_p);
+
+ PreFetch(0, next);
+ msgp = PEEK_MESSAGE(c_p);
+
+ if (ERTS_PROC_GET_SAVED_CALLS_BUF(c_p)) {
+ save_calls(c_p, &exp_receive);
+ }
+ if (ERL_MESSAGE_TOKEN(msgp) == NIL) {
+ SEQ_TRACE_TOKEN(c_p) = NIL;
+ } else if (ERL_MESSAGE_TOKEN(msgp) != am_undefined) {
+ Eterm msg;
+ SEQ_TRACE_TOKEN(c_p) = ERL_MESSAGE_TOKEN(msgp);
+ ASSERT(is_tuple(SEQ_TRACE_TOKEN(c_p)));
+ ASSERT(SEQ_TRACE_TOKEN_ARITY(c_p) == 5);
+ ASSERT(is_small(SEQ_TRACE_TOKEN_SERIAL(c_p)));
+ ASSERT(is_small(SEQ_TRACE_TOKEN_LASTCNT(c_p)));
+ ASSERT(is_small(SEQ_TRACE_TOKEN_FLAGS(c_p)));
+ ASSERT(is_pid(SEQ_TRACE_TOKEN_SENDER(c_p)));
+ c_p->seq_trace_lastcnt = unsigned_val(SEQ_TRACE_TOKEN_SERIAL(c_p));
+ if (c_p->seq_trace_clock < unsigned_val(SEQ_TRACE_TOKEN_SERIAL(c_p))) {
+ c_p->seq_trace_clock = unsigned_val(SEQ_TRACE_TOKEN_SERIAL(c_p));
+ }
+ msg = ERL_MESSAGE_TERM(msgp);
+ seq_trace_output(SEQ_TRACE_TOKEN(c_p), msg, SEQ_TRACE_RECEIVE,
+ c_p->id, c_p);
+ }
+ UNLINK_MESSAGE(c_p, msgp);
+ JOIN_MESSAGE(c_p);
+ CANCEL_TIMER(c_p);
+ free_message(msgp);
+
+ PROCESS_MAIN_CHK_LOCKS(c_p);
+
+ NextPF(0, next);
+ }
+
+ /*
+ * Advance the save pointer to the next message (the current
+ * message didn't match), then jump to the loop_rec instruction.
+ */
+ OpCase(loop_rec_end_f): {
+ SET_I((Eterm *) Arg(0));
+ SAVE_MESSAGE(c_p);
+ goto loop_rec__;
+ }
+ /*
+ * Prepare to wait for a message or a timeout, whichever occurs first.
+ *
+ * Note: In order to keep compatibility between 32-bit and 64-bit
+ * emulators, only timeout values that can be represented in 32 bits
+ * (unsigned) or less are allowed.
+ */
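+
+ /*
+ * For example: receive ... after 16#FFFFFFFF -> ... end uses the
+ * largest accepted timeout; any larger integer ends up in
+ * i_wait_error below and raises a timeout_value exception.
+ */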
+
+
+ OpCase(i_wait_timeout_fs): {
+ erts_smp_proc_lock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE);
+
+ /* Fall through */
+ }
+ OpCase(i_wait_timeout_locked_fs): {
+ Eterm timeout_value;
+
+ /*
+ * If we have already set the timer, we must NOT set it again. Therefore,
+ * we must test the F_INSLPQUEUE flag as well as the F_TIMO flag.
+ */
+ if (c_p->flags & (F_INSLPQUEUE | F_TIMO)) {
+ goto wait2;
+ }
+ GetArg1(1, timeout_value);
+ if (timeout_value != make_small(0)) {
+#if !defined(ARCH_64)
+ Uint time_val;
+#endif
+
+ if (is_small(timeout_value) && signed_val(timeout_value) > 0 &&
+#if defined(ARCH_64)
+ ((unsigned_val(timeout_value) >> 32) == 0)
+#else
+ 1
+#endif
+ ) {
+ /*
+ * The timer routine will set c_p->i to the value in
+ * c_p->def_arg_reg[0]. Note that it is safe to use this
+ * location because there are no living x registers in
+ * a receive statement.
+ */
+ c_p->def_arg_reg[0] = (Eterm) (I+3);
+ set_timer(c_p, unsigned_val(timeout_value));
+ } else if (timeout_value == am_infinity) {
+ c_p->flags |= F_TIMO;
+#if !defined(ARCH_64)
+ } else if (term_to_Uint(timeout_value, &time_val)) {
+ c_p->def_arg_reg[0] = (Eterm) (I+3);
+ set_timer(c_p, time_val);
+#endif
+ } else { /* Wrong time */
+ OpCase(i_wait_error_locked): {
+ erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE);
+ /* Fall through */
+ }
+ OpCase(i_wait_error): {
+ c_p->freason = EXC_TIMEOUT_VALUE;
+ goto find_func_info;
+ }
+ }
+
+ /*
+ * Prepare to wait indefinitely for a new message to arrive
+ * (or the time set above if falling through from above).
+ *
+ * When a new message arrives, control will be transferred to
+ * the loop_rec instruction (at label L1). In case of
+ * timeout, control will be transferred to the timeout
+ * instruction following the wait_timeout instruction.
+ */
+
+ OpCase(wait_locked_f):
+ OpCase(wait_f):
+
+ wait2: {
+ ASSERT(!ERTS_PROC_IS_EXITING(c_p));
+ c_p->i = (Eterm *) Arg(0); /* L1 */
+ SWAPOUT;
+ c_p->arity = 0;
+ c_p->status = P_WAITING;
+ erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE);
+ c_p->current = NULL;
+ goto do_schedule;
+ }
+ OpCase(wait_unlocked_f): {
+ erts_smp_proc_lock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE);
+ goto wait2;
+ }
+ }
+ erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE);
+ Next(2);
+ }
+
+ OpCase(i_wait_timeout_fI): {
+ erts_smp_proc_lock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE);
+ }
+
+ OpCase(i_wait_timeout_locked_fI):
+ {
+ /*
+ * If we have already set the timer, we must NOT set it again. Therefore,
+ * we must test the F_INSLPQUEUE flag as well as the F_TIMO flag.
+ */
+ if ((c_p->flags & (F_INSLPQUEUE | F_TIMO)) == 0) {
+ c_p->def_arg_reg[0] = (Eterm) (I+3);
+ set_timer(c_p, Arg(1));
+ }
+ goto wait2;
+ }
+
+ /*
+ * A timeout has occurred. Reset the save pointer so that the next
+ * receive statement will examine the first message first.
+ */
+ OpCase(timeout_locked): {
+ erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE);
+ }
+
+ OpCase(timeout): {
+ Eterm* next;
+
+ PreFetch(0, next);
+ if (IS_TRACED_FL(c_p, F_TRACE_RECEIVE)) {
+ trace_receive(c_p, am_timeout);
+ }
+ if (ERTS_PROC_GET_SAVED_CALLS_BUF(c_p)) {
+ save_calls(c_p, &exp_timeout);
+ }
+ c_p->flags &= ~F_TIMO;
+ JOIN_MESSAGE(c_p);
+ NextPF(0, next);
+ }
+
+ OpCase(i_select_val_sfI):
+ GetArg1(0, tmp_arg1);
+
+ do_binary_search:
+ {
+ struct Pairs {
+ Eterm val;
+ Eterm* addr;
+ };
+ struct Pairs* low;
+ struct Pairs* high;
+ struct Pairs* mid;
+ int bdiff; /* int not long because the arrays aren't that large */
+
+ low = (struct Pairs *) &Arg(3);
+ high = low + Arg(2);
+
+ /* The pointer subtraction (high-low) below must produce
+ * a signed result because, as far as the compiler knows,
+ * high could be < low. That
+ * requires the compiler to insert quite a bit of code.
+ *
+ * However, high will be > low so the result will be
+ * positive. We can use that knowledge to optimise the
+ * entire sequence, from the initial comparison to the
+ * computation of mid.
+ *
+ * -- Mikael Pettersson, Acumem AB
+ *
+ * Original loop control code:
+ *
+ * while (low < high) {
+ * mid = low + (high-low) / 2;
+ *
+ */
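+ /*
+ * Illustration (sketch): sizeof(struct Pairs) is a power of two
+ * here (two words), so halving the byte difference and masking
+ * with ~(sizeof(struct Pairs)-1) rounds down to a whole number
+ * of pairs, keeping mid on a pair boundary.
+ */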
+ while ((bdiff = (int)((char*)high - (char*)low)) > 0) {
+ unsigned int boffset = ((unsigned int)bdiff >> 1) & ~(sizeof(struct Pairs)-1);
+
+ mid = (struct Pairs*)((char*)low + boffset);
+ if (tmp_arg1 < mid->val) {
+ high = mid;
+ } else if (tmp_arg1 > mid->val) {
+ low = mid + 1;
+ } else {
+ SET_I(mid->addr);
+ Goto(*I);
+ }
+ }
+ SET_I((Eterm *) Arg(1));
+ Goto(*I);
+ }
+
+ OpCase(i_jump_on_val_zero_sfI):
+ {
+ Eterm index;
+
+ GetArg1(0, index);
+ if (is_small(index)) {
+ index = signed_val(index);
+ if (index < Arg(2)) {
+ SET_I((Eterm *) (&Arg(3))[index]);
+ Goto(*I);
+ }
+ }
+ SET_I((Eterm *) Arg(1));
+ Goto(*I);
+ }
+
+ OpCase(i_jump_on_val_sfII):
+ {
+ Eterm index;
+
+ GetArg1(0, index);
+ if (is_small(index)) {
+ index = (Uint) (signed_val(index) - Arg(3));
+ if (index < Arg(2)) {
+ SET_I((Eterm *) (&Arg(4))[index]);
+ Goto(*I);
+ }
+ }
+ SET_I((Eterm *) Arg(1));
+ Goto(*I);
+ }
+
+ /*
+ * All guard BIFs with zero arguments have special instructions:
+ * self/0
+ * node/0
+ *
+ * All other guard BIFs take one or two arguments.
+ */
+
+ /*
+ * Guard BIF in head. On failure, ignore the error and jump
+ * to the code for the next clause. We don't support tracing
+ * of guard BIFs.
+ */
+
+ OpCase(bif1_fbsd):
+ {
+ Eterm (*bf)(Process*, Eterm);
+ Eterm arg;
+ Eterm result;
+
+ GetArg1(2, arg);
+ bf = (BifFunction) Arg(1);
+ c_p->fcalls = FCALLS;
+ PROCESS_MAIN_CHK_LOCKS(c_p);
+ ASSERT(!ERTS_PROC_IS_EXITING(c_p));
+ result = (*bf)(c_p, arg);
+ ASSERT(!ERTS_PROC_IS_EXITING(c_p) || is_non_value(result));
+ PROCESS_MAIN_CHK_LOCKS(c_p);
+ ERTS_HOLE_CHECK(c_p);
+ FCALLS = c_p->fcalls;
+ if (is_value(result)) {
+ StoreBifResult(3, result);
+ }
+ SET_I((Eterm *) Arg(0));
+ Goto(*I);
+ }
+
+ /*
+ * Guard BIF in body. It can fail like any BIF. No trace support.
+ */
+
+ OpCase(bif1_body_bsd):
+ {
+ Eterm (*bf)(Process*, Eterm);
+
+ Eterm arg;
+ Eterm result;
+
+ GetArg1(1, arg);
+ bf = (BifFunction) Arg(0);
+ c_p->fcalls = FCALLS;
+ PROCESS_MAIN_CHK_LOCKS(c_p);
+ ASSERT(!ERTS_PROC_IS_EXITING(c_p));
+ result = (*bf)(c_p, arg);
+ ASSERT(!ERTS_PROC_IS_EXITING(c_p) || is_non_value(result));
+ PROCESS_MAIN_CHK_LOCKS(c_p);
+ ERTS_HOLE_CHECK(c_p);
+ FCALLS = c_p->fcalls;
+ if (is_value(result)) {
+ StoreBifResult(2, result);
+ }
+ reg[0] = arg;
+ SWAPOUT;
+ I = handle_error(c_p, I, reg, bf);
+ goto post_error_handling;
+ }
+
+ OpCase(i_gc_bif1_jIsId):
+ {
+ typedef Eterm (*GcBifFunction)(Process*, Eterm*, Uint);
+ GcBifFunction bf;
+ Eterm arg;
+ Eterm result;
+ Uint live = Arg(3);
+
+ GetArg1(2, arg);
+ reg[0] = r(0);
+ reg[live] = arg;
+ bf = (GcBifFunction) Arg(1);
+ c_p->fcalls = FCALLS;
+ SWAPOUT;
+ PROCESS_MAIN_CHK_LOCKS(c_p);
+ ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p);
+ result = (*bf)(c_p, reg, live);
+ ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p);
+ PROCESS_MAIN_CHK_LOCKS(c_p);
+ SWAPIN;
+ r(0) = reg[0];
+ ERTS_HOLE_CHECK(c_p);
+ FCALLS = c_p->fcalls;
+ if (is_value(result)) {
+ StoreBifResult(4, result);
+ }
+ if (Arg(0) != 0) {
+ SET_I((Eterm *) Arg(0));
+ Goto(*I);
+ }
+ reg[0] = arg;
+ I = handle_error(c_p, I, reg, translate_gc_bif((void *) bf));
+ goto post_error_handling;
+ }
+
+ /*
+ * Guard BIFs and, or, xor in guards.
+ */
+ OpCase(i_bif2_fbd):
+ {
+ Eterm (*bf)(Process*, Eterm, Eterm);
+ Eterm result;
+
+ bf = (BifFunction) Arg(1);
+ c_p->fcalls = FCALLS;
+ PROCESS_MAIN_CHK_LOCKS(c_p);
+ ASSERT(!ERTS_PROC_IS_EXITING(c_p));
+ result = (*bf)(c_p, tmp_arg1, tmp_arg2);
+ ASSERT(!ERTS_PROC_IS_EXITING(c_p) || is_non_value(result));
+ PROCESS_MAIN_CHK_LOCKS(c_p);
+ ERTS_HOLE_CHECK(c_p);
+ FCALLS = c_p->fcalls;
+ if (is_value(result)) {
+ StoreBifResult(2, result);
+ }
+ SET_I((Eterm *) Arg(0));
+ Goto(*I);
+ }
+
+ /*
+ * Guard BIFs and, or, xor, and relational operators in body.
+ */
+ OpCase(i_bif2_body_bd):
+ {
+ Eterm (*bf)(Process*, Eterm, Eterm);
+ Eterm result;
+
+ bf = (BifFunction) Arg(0);
+ PROCESS_MAIN_CHK_LOCKS(c_p);
+ ASSERT(!ERTS_PROC_IS_EXITING(c_p));
+ result = (*bf)(c_p, tmp_arg1, tmp_arg2);
+ ASSERT(!ERTS_PROC_IS_EXITING(c_p) || is_non_value(result));
+ PROCESS_MAIN_CHK_LOCKS(c_p);
+ ERTS_HOLE_CHECK(c_p);
+ if (is_value(result)) {
+ ASSERT(!is_CP(result));
+ StoreBifResult(1, result);
+ }
+ reg[0] = tmp_arg1;
+ reg[1] = tmp_arg2;
+ SWAPOUT;
+ I = handle_error(c_p, I, reg, bf);
+ goto post_error_handling;
+ }
+
+ /*
+ * The most general BIF call. The BIF may build any amount of data
+ * on the heap. The result is always returned in r(0).
+ */
+ OpCase(call_bif0_e):
+ {
+ Eterm (*bf)(Process*, Uint*) = GET_BIF_ADDRESS(Arg(0));
+
+ PRE_BIF_SWAPOUT(c_p);
+ c_p->fcalls = FCALLS - 1;
+ if (FCALLS <= 0) {
+ save_calls(c_p, (Export *) Arg(0));
+ }
+
+ ASSERT(!ERTS_PROC_IS_EXITING(c_p));
+ r(0) = (*bf)(c_p, I);
+ ASSERT(!ERTS_PROC_IS_EXITING(c_p) || is_non_value(r(0)));
+ ERTS_HOLE_CHECK(c_p);
+ POST_BIF_GC_SWAPIN_0(c_p, r(0));
+ FCALLS = c_p->fcalls;
+ if (is_value(r(0))) {
+ CHECK_TERM(r(0));
+ Next(1);
+ }
+ else if (c_p->freason == TRAP) {
+ goto call_bif_trap3;
+ }
+
+ /*
+ * Error handling. SWAPOUT is not needed because it was done above.
+ */
+ ASSERT(c_p->stop == E);
+ reg[0] = r(0);
+ I = handle_error(c_p, I, reg, bf);
+ goto post_error_handling;
+ }
+
+ OpCase(call_bif1_e):
+ {
+ Eterm (*bf)(Process*, Eterm, Uint*) = GET_BIF_ADDRESS(Arg(0));
+ Eterm result;
+ Eterm* next;
+
+ c_p->fcalls = FCALLS - 1;
+ if (FCALLS <= 0) {
+ save_calls(c_p, (Export *) Arg(0));
+ }
+ PreFetch(1, next);
+ PRE_BIF_SWAPOUT(c_p);
+ ASSERT(!ERTS_PROC_IS_EXITING(c_p));
+ result = (*bf)(c_p, r(0), I);
+ ASSERT(!ERTS_PROC_IS_EXITING(c_p) || is_non_value(result));
+ ERTS_HOLE_CHECK(c_p);
+ POST_BIF_GC_SWAPIN(c_p, result, reg, 1);
+ FCALLS = c_p->fcalls;
+ if (is_value(result)) {
+ r(0) = result;
+ CHECK_TERM(r(0));
+ NextPF(1, next);
+ } else if (c_p->freason == TRAP) {
+ goto call_bif_trap3;
+ }
+
+ /*
+ * Error handling. SWAPOUT is not needed because it was done above.
+ */
+ ASSERT(c_p->stop == E);
+ reg[0] = r(0);
+ I = handle_error(c_p, I, reg, bf);
+ goto post_error_handling;
+ }
+
+ OpCase(call_bif2_e):
+ {
+ Eterm (*bf)(Process*, Eterm, Eterm, Uint*) = GET_BIF_ADDRESS(Arg(0));
+ Eterm result;
+ Eterm* next;
+
+ PRE_BIF_SWAPOUT(c_p);
+ c_p->fcalls = FCALLS - 1;
+ if (FCALLS <= 0) {
+ save_calls(c_p, (Export *) Arg(0));
+ }
+ PreFetch(1, next);
+ CHECK_TERM(r(0));
+ CHECK_TERM(x(1));
+ ASSERT(!ERTS_PROC_IS_EXITING(c_p));
+ result = (*bf)(c_p, r(0), x(1), I);
+ ASSERT(!ERTS_PROC_IS_EXITING(c_p) || is_non_value(result));
+ ERTS_HOLE_CHECK(c_p);
+ POST_BIF_GC_SWAPIN(c_p, result, reg, 2);
+ FCALLS = c_p->fcalls;
+ if (is_value(result)) {
+ r(0) = result;
+ CHECK_TERM(r(0));
+ NextPF(1, next);
+ } else if (c_p->freason == TRAP) {
+ goto call_bif_trap3;
+ }
+
+ /*
+ * Error handling. SWAPOUT is not needed because it was done above.
+ */
+ ASSERT(c_p->stop == E);
+ reg[0] = r(0);
+ I = handle_error(c_p, I, reg, bf);
+ goto post_error_handling;
+ }
+
+ OpCase(call_bif3_e):
+ {
+ Eterm (*bf)(Process*, Eterm, Eterm, Eterm, Uint*) = GET_BIF_ADDRESS(Arg(0));
+ Eterm result;
+ Eterm* next;
+
+ PRE_BIF_SWAPOUT(c_p);
+ c_p->fcalls = FCALLS - 1;
+ if (FCALLS <= 0) {
+ save_calls(c_p, (Export *) Arg(0));
+ }
+ PreFetch(1, next);
+ ASSERT(!ERTS_PROC_IS_EXITING(c_p));
+ result = (*bf)(c_p, r(0), x(1), x(2), I);
+ ASSERT(!ERTS_PROC_IS_EXITING(c_p) || is_non_value(result));
+ ERTS_HOLE_CHECK(c_p);
+ POST_BIF_GC_SWAPIN(c_p, result, reg, 3);
+ FCALLS = c_p->fcalls;
+ if (is_value(result)) {
+ r(0) = result;
+ CHECK_TERM(r(0));
+ NextPF(1, next);
+ } else if (c_p->freason == TRAP) {
+ call_bif_trap3:
+ SET_CP(c_p, I+2);
+ SET_I((Eterm *)c_p->def_arg_reg[3]);
+ SWAPIN;
+ r(0) = c_p->def_arg_reg[0];
+ x(1) = c_p->def_arg_reg[1];
+ x(2) = c_p->def_arg_reg[2];
+ Dispatch();
+ }
+
+ /*
+ * Error handling. SWAPOUT is not needed because it was done above.
+ */
+ ASSERT(c_p->stop == E);
+ reg[0] = r(0);
+ I = handle_error(c_p, I, reg, bf);
+ goto post_error_handling;
+ }
+
+ /*
+ * Arithmetic operations.
+ */
+
+ OpCase(i_times_jId):
+ {
+ arith_func = ARITH_FUNC(mixed_times);
+ goto do_big_arith2;
+ }
+
+ OpCase(i_m_div_jId):
+ {
+ arith_func = ARITH_FUNC(mixed_div);
+ goto do_big_arith2;
+ }
+
+ OpCase(i_int_div_jId):
+ {
+ Eterm result;
+
+ if (tmp_arg2 == SMALL_ZERO) {
+ goto badarith;
+ } else if (is_both_small(tmp_arg1, tmp_arg2)) {
+ Sint ires = signed_val(tmp_arg1) / signed_val(tmp_arg2);
+ if (MY_IS_SSMALL(ires)) {
+ result = make_small(ires);
+ STORE_ARITH_RESULT(result);
+ }
+ }
+ arith_func = ARITH_FUNC(int_div);
+ goto do_big_arith2;
+ }
+
+ OpCase(i_rem_jId):
+ {
+ Eterm result;
+
+ if (tmp_arg2 == SMALL_ZERO) {
+ goto badarith;
+ } else if (is_both_small(tmp_arg1, tmp_arg2)) {
+ result = make_small(signed_val(tmp_arg1) % signed_val(tmp_arg2));
+ STORE_ARITH_RESULT(result);
+ } else {
+ arith_func = ARITH_FUNC(int_rem);
+ goto do_big_arith2;
+ }
+ }
+
+ OpCase(i_band_jId):
+ {
+ Eterm result;
+
+ if (is_both_small(tmp_arg1, tmp_arg2)) {
+ /*
+ * No need to untag -- TAG & TAG == TAG.
+ */
+ result = tmp_arg1 & tmp_arg2;
+ STORE_ARITH_RESULT(result);
+ }
+ arith_func = ARITH_FUNC(band);
+ goto do_big_arith2;
+ }
+
+ do_big_arith2:
+ {
+ Eterm result;
+ Uint live = Arg(1);
+
+ SWAPOUT;
+ reg[0] = r(0);
+ reg[live] = tmp_arg1;
+ reg[live+1] = tmp_arg2;
+ result = arith_func(c_p, reg, live);
+ r(0) = reg[0];
+ SWAPIN;
+ ERTS_HOLE_CHECK(c_p);
+ if (is_value(result)) {
+ STORE_ARITH_RESULT(result);
+ }
+ goto lb_Cl_error;
+ }
+
+ /*
+ * An error occurred in an arithmetic operation or test that could
+ * appear either in a head or in a body.
+ * In a head, execution should continue at the failure address in Arg(0).
+ * In a body, Arg(0) == 0 and an exception should be raised.
+ */
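+
+ /*
+ * For example: a failing X + Y in a guard arrives here with Arg(0)
+ * holding the next clause's address, while the same expression in a
+ * body arrives with Arg(0) == 0 and raises badarith.
+ */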
+ lb_Cl_error: {
+ if (Arg(0) != 0) {
+ OpCase(jump_f): {
+ SET_I((Eterm *) Arg(0));
+ Goto(*I);
+ }
+ }
+ ASSERT(c_p->freason != BADMATCH || is_value(c_p->fvalue));
+ goto find_func_info;
+ }
+
+ OpCase(i_bor_jId):
+ {
+ Eterm result;
+
+ if (is_both_small(tmp_arg1, tmp_arg2)) {
+ /*
+ * No need to untag -- TAG | TAG == TAG.
+ */
+ result = tmp_arg1 | tmp_arg2;
+ STORE_ARITH_RESULT(result);
+ }
+ arith_func = ARITH_FUNC(bor);
+ goto do_big_arith2;
+ }
+
+ OpCase(i_bxor_jId):
+ {
+ Eterm result;
+
+ if (is_both_small(tmp_arg1, tmp_arg2)) {
+ /*
+ * We could extract the tag from one argument, but a tag extraction
+ * could mean a shift. Therefore, play it safe here.
+ */
+ result = make_small(signed_val(tmp_arg1) ^ signed_val(tmp_arg2));
+ STORE_ARITH_RESULT(result);
+ }
+ arith_func = ARITH_FUNC(bxor);
+ goto do_big_arith2;
+ }
+
+ {
+ Sint i;
+ Sint ires;
+ Eterm* bigp;
+
+ OpCase(i_bsr_jId):
+ if (is_small(tmp_arg2)) {
+ i = -signed_val(tmp_arg2);
+ if (is_small(tmp_arg1)) {
+ goto small_shift;
+ } else if (is_big(tmp_arg1)) {
+ if (i == 0) {
+ StoreBifResult(2, tmp_arg1);
+ }
+ goto big_shift;
+ }
+ } else if (is_big(tmp_arg2)) {
+ /*
+ * N bsr NegativeBigNum == N bsl MAX_SMALL
+ * N bsr PositiveBigNum == N bsl MIN_SMALL
+ */
+ tmp_arg2 = make_small(bignum_header_is_neg(*big_val(tmp_arg2)) ?
+ MAX_SMALL : MIN_SMALL);
+ goto do_bsl;
+ }
+ goto badarith;
+
+ OpCase(i_bsl_jId):
+ do_bsl:
+ if (is_small(tmp_arg2)) {
+ i = signed_val(tmp_arg2);
+
+ if (is_small(tmp_arg1)) {
+ small_shift:
+ ires = signed_val(tmp_arg1);
+
+ if (i == 0 || ires == 0) {
+ StoreBifResult(2, tmp_arg1);
+ } else if (i < 0) { /* Right shift */
+ i = -i;
+ if (i >= SMALL_BITS-1) {
+ tmp_arg1 = (ires < 0) ? SMALL_MINUS_ONE : SMALL_ZERO;
+ } else {
+ tmp_arg1 = make_small(ires >> i);
+ }
+ StoreBifResult(2, tmp_arg1);
+ } else if (i < SMALL_BITS-1) { /* Left shift */
+ if ((ires > 0 && ((~(Uint)0 << ((SMALL_BITS-1)-i)) & ires) == 0) ||
+ ((~(Uint)0 << ((SMALL_BITS-1)-i)) & ~ires) == 0) {
+ tmp_arg1 = make_small(ires << i);
+ StoreBifResult(2, tmp_arg1);
+ }
+ }
+ tmp_arg1 = small_to_big(ires, tmp_big);
+
+ big_shift:
+ if (i > 0) { /* Left shift. */
+ ires = big_size(tmp_arg1) + (i / D_EXP);
+ } else { /* Right shift. */
+ ires = big_size(tmp_arg1);
+ if (ires <= (-i / D_EXP))
+ ires = 3; /* ??? */
+ else
+ ires -= (-i / D_EXP);
+ }
+ {
+ ires = BIG_NEED_SIZE(ires+1);
+ /*
+ * A slightly conservative check of the size to avoid
+ * allocating huge amounts of memory for bignums that
+ * clearly would overflow the arity in the header
+ * word.
+ */
+ if (ires-8 > BIG_ARITY_MAX) {
+ c_p->freason = SYSTEM_LIMIT;
+ goto lb_Cl_error;
+ }
+ TestHeapPreserve(ires+1, Arg(1), tmp_arg1);
+ bigp = HTOP;
+ tmp_arg1 = big_lshift(tmp_arg1, i, bigp);
+ if (is_big(tmp_arg1)) {
+ HTOP += bignum_header_arity(*HTOP) + 1;
+ }
+ if (is_nil(tmp_arg1)) {
+ /*
+ * This result must have been only slightly larger
+ * than allowed since it wasn't caught by the
+ * previous test.
+ */
+ c_p->freason = SYSTEM_LIMIT;
+ goto lb_Cl_error;
+ }
+ ERTS_HOLE_CHECK(c_p);
+ StoreBifResult(2, tmp_arg1);
+ }
+ } else if (is_big(tmp_arg1)) {
+ if (i == 0) {
+ StoreBifResult(2, tmp_arg1);
+ }
+ goto big_shift;
+ }
+ } else if (is_big(tmp_arg2)) {
+ if (bignum_header_is_neg(*big_val(tmp_arg2))) {
+ /*
+ * N bsl NegativeBigNum is either 0 or -1, depending on
+ * the sign of N. Since we don't believe this case
+ * is common, do the calculation with the minimum
+ * amount of code.
+ */
+ tmp_arg2 = make_small(MIN_SMALL);
+ goto do_bsl;
+ } else if (is_small(tmp_arg1) || is_big(tmp_arg1)) {
+ /*
+ * N bsl PositiveBigNum is too large to represent.
+ */
+ c_p->freason = SYSTEM_LIMIT;
+ goto lb_Cl_error;
+ }
+ /* Fall through if the left argument is not an integer. */
+ }
+ /*
+ * One or more non-integer arguments.
+ */
+ goto badarith;
+ }
+
+ OpCase(i_int_bnot_jsId):
+ {
+ GetArg1(1, tmp_arg1);
+ if (is_small(tmp_arg1)) {
+ tmp_arg1 = make_small(~signed_val(tmp_arg1));
+ } else {
+ Uint live = Arg(2);
+ SWAPOUT;
+ reg[0] = r(0);
+ reg[live] = tmp_arg1;
+ tmp_arg1 = erts_gc_bnot(c_p, reg, live);
+ r(0) = reg[0];
+ SWAPIN;
+ ERTS_HOLE_CHECK(c_p);
+ if (is_nil(tmp_arg1)) {
+ goto lb_Cl_error;
+ }
+ }
+ StoreBifResult(3, tmp_arg1);
+ }
+
+ badarith:
+ c_p->freason = BADARITH;
+ goto lb_Cl_error;
+
+ OpCase(i_apply): {
+ Eterm* next;
+ SWAPOUT;
+ next = apply(c_p, r(0), x(1), x(2), reg);
+ SWAPIN;
+ if (next != NULL) {
+ r(0) = reg[0];
+ SET_CP(c_p, I+1);
+ SET_I(next);
+ Dispatch();
+ }
+ I = handle_error(c_p, I, reg, apply_3);
+ goto post_error_handling;
+ }
+
+ OpCase(i_apply_last_P): {
+ Eterm* next;
+ SWAPOUT;
+ next = apply(c_p, r(0), x(1), x(2), reg);
+ SWAPIN;
+ if (next != NULL) {
+ r(0) = reg[0];
+ SET_CP(c_p, (Eterm *) E[0]);
+ E = ADD_BYTE_OFFSET(E, Arg(0));
+ SET_I(next);
+ Dispatch();
+ }
+ I = handle_error(c_p, I, reg, apply_3);
+ goto post_error_handling;
+ }
+
+ OpCase(i_apply_only): {
+ Eterm* next;
+ SWAPOUT;
+ next = apply(c_p, r(0), x(1), x(2), reg);
+ SWAPIN;
+ if (next != NULL) {
+ r(0) = reg[0];
+ SET_I(next);
+ Dispatch();
+ }
+ I = handle_error(c_p, I, reg, apply_3);
+ goto post_error_handling;
+ }
+
+ OpCase(apply_I): {
+ Eterm* next;
+
+ reg[0] = r(0);
+ SWAPOUT;
+ next = fixed_apply(c_p, reg, Arg(0));
+ SWAPIN;
+ if (next != NULL) {
+ r(0) = reg[0];
+ SET_CP(c_p, I+2);
+ SET_I(next);
+ Dispatch();
+ }
+ I = handle_error(c_p, I, reg, apply_3);
+ goto post_error_handling;
+ }
+
+ OpCase(apply_last_IP): {
+ Eterm* next;
+
+ reg[0] = r(0);
+ SWAPOUT;
+ next = fixed_apply(c_p, reg, Arg(0));
+ SWAPIN;
+ if (next != NULL) {
+ r(0) = reg[0];
+ SET_CP(c_p, (Eterm *) E[0]);
+ E = ADD_BYTE_OFFSET(E, Arg(1));
+ SET_I(next);
+ Dispatch();
+ }
+ I = handle_error(c_p, I, reg, apply_3);
+ goto post_error_handling;
+ }
+
+ OpCase(i_apply_fun): {
+ Eterm* next;
+
+ SWAPOUT;
+ next = apply_fun(c_p, r(0), x(1), reg);
+ SWAPIN;
+ if (next != NULL) {
+ r(0) = reg[0];
+ SET_CP(c_p, I+1);
+ SET_I(next);
+ Dispatchfun();
+ }
+ goto find_func_info;
+ }
+
+ OpCase(i_apply_fun_last_P): {
+ Eterm* next;
+
+ SWAPOUT;
+ next = apply_fun(c_p, r(0), x(1), reg);
+ SWAPIN;
+ if (next != NULL) {
+ r(0) = reg[0];
+ SET_CP(c_p, (Eterm *) E[0]);
+ E = ADD_BYTE_OFFSET(E, Arg(0));
+ SET_I(next);
+ Dispatchfun();
+ }
+ goto find_func_info;
+ }
+
+ OpCase(i_apply_fun_only): {
+ Eterm* next;
+
+ SWAPOUT;
+ next = apply_fun(c_p, r(0), x(1), reg);
+ SWAPIN;
+ if (next != NULL) {
+ r(0) = reg[0];
+ SET_I(next);
+ Dispatchfun();
+ }
+ goto find_func_info;
+ }
+
+ OpCase(i_call_fun_I): {
+ Eterm* next;
+
+ SWAPOUT;
+ reg[0] = r(0);
+ next = call_fun(c_p, Arg(0), reg, THE_NON_VALUE);
+ SWAPIN;
+ if (next != NULL) {
+ r(0) = reg[0];
+ SET_CP(c_p, I+2);
+ SET_I(next);
+ Dispatchfun();
+ }
+ goto find_func_info;
+ }
+
+ OpCase(i_call_fun_last_IP): {
+ Eterm* next;
+
+ SWAPOUT;
+ reg[0] = r(0);
+ next = call_fun(c_p, Arg(0), reg, THE_NON_VALUE);
+ SWAPIN;
+ if (next != NULL) {
+ r(0) = reg[0];
+ SET_CP(c_p, (Eterm *) E[0]);
+ E = ADD_BYTE_OFFSET(E, Arg(1));
+ SET_I(next);
+ Dispatchfun();
+ }
+ goto find_func_info;
+ }
+
+#ifdef DEBUG
+ /*
+ * Set a breakpoint here to get control just after a call instruction.
+ * I points to the first instruction in the called function.
+ *
+ * In gdb, use 'call dis(I-5, 1)' to show the name of the function.
+ */
+ do_dispatch:
+ DispatchMacro();
+
+ do_dispatchx:
+ DispatchMacrox();
+
+ do_dispatchfun:
+ DispatchMacroFun();
+
+#endif
+
+ /*
+ * Jumped to from the Dispatch() macro when the reductions are used up.
+ *
+ * Since the I register points just beyond the FuncBegin instruction, we
+ * can get the module, function, and arity for the function being
+ * called from I[-3], I[-2], and I[-1] respectively.
+ */
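+
+ /*
+ * Illustration (sketch): the code around a function entry is laid
+ * out roughly as
+ *
+ * I[-3] Module (atom)
+ * I[-2] Function (atom)
+ * I[-1] Arity (untagged integer)
+ * I[0] first instruction of the function
+ *
+ * which is why c_p->current is set to I-3 below.
+ */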
+ context_switch_fun:
+ c_p->arity = I[-1] + 1;
+ goto context_switch2;
+
+ context_switch:
+ c_p->arity = I[-1];
+
+ context_switch2: /* Entry for fun calls. */
+ c_p->current = I-3; /* Pointer to Mod, Func, Arity */
+
+ {
+ Eterm* argp;
+ int i;
+
+ /*
+ * Make sure that there is enough room for the argument registers to be saved.
+ */
+ if (c_p->arity > c_p->max_arg_reg) {
+ /*
+ * Yes, this is an expensive operation, but you only pay it the first
+ * time you call a function with more than 6 arguments that is
+ * scheduled out. This is better than paying for 26 words of wasted
+ * space for most processes which never call functions with more than
+ * 6 arguments.
+ */
+ Uint size = c_p->arity * sizeof(c_p->arg_reg[0]);
+ if (c_p->arg_reg != c_p->def_arg_reg) {
+ c_p->arg_reg = (Eterm *) erts_realloc(ERTS_ALC_T_ARG_REG,
+ (void *) c_p->arg_reg,
+ size);
+ } else {
+ c_p->arg_reg = (Eterm *) erts_alloc(ERTS_ALC_T_ARG_REG, size);
+ }
+ c_p->max_arg_reg = c_p->arity;
+ }
+
+ /*
+ * Since REDS_IN(c_p) is stored in the save area (c_p->arg_reg) we must read it
+ * now before saving registers.
+ *
+ * The '+ 1' compensates for the last increment which was not done
+ * (because the code for the Dispatch() macro becomes shorter that way).
+ */
+
+ reds_used = REDS_IN(c_p) - FCALLS + 1;
+
+ /*
+ * Save the argument registers and everything else.
+ */
+
+ argp = c_p->arg_reg;
+ for (i = c_p->arity - 1; i > 0; i--) {
+ argp[i] = reg[i];
+ }
+ c_p->arg_reg[0] = r(0);
+ SWAPOUT;
+ c_p->i = I;
+ erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_STATUS);
+ if (c_p->status != P_SUSPENDED)
+ erts_add_to_runq(c_p);
+ erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_STATUS);
+ goto do_schedule1;
+ }
+
+ OpCase(i_select_tuple_arity_sfI):
+ {
+ GetArg1(0, tmp_arg1);
+
+ if (is_tuple(tmp_arg1)) {
+ tmp_arg1 = *tuple_val(tmp_arg1);
+ goto do_binary_search;
+ }
+ SET_I((Eterm *) Arg(1));
+ Goto(*I);
+ }
+
+ OpCase(i_select_big_sf):
+ {
+ Eterm* bigp;
+ Uint arity;
+ Eterm* given;
+ Uint given_arity;
+ Uint given_size;
+
+ GetArg1(0, tmp_arg1);
+ if (is_big(tmp_arg1)) {
+
+ /*
+ * The loader has sorted the bignumbers in descending order
+ * on the arity word. Therefore, we know that the search
+ * has failed as soon as we encounter an arity word less than
+ * the arity word of the given number. There is a zero word
+ * (less than any valid arity word) stored after the last bignumber.
+ */
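+
+ /*
+ * Sketch of the operand layout assumed below:
+ *
+ * ArityWord1 Digit... Addr1
+ * ArityWord2 Digit... Addr2
+ * ...
+ * 0 (terminator)
+ *
+ * so each entry occupies thing_arityval(ArityWord) + 2 words,
+ * matching the bigp increments in the loops.
+ */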
+
+ given = big_val(tmp_arg1);
+ given_arity = given[0];
+ given_size = thing_arityval(given_arity);
+ bigp = &Arg(2);
+ while ((arity = bigp[0]) > given_arity) {
+ bigp += thing_arityval(arity) + 2;
+ }
+ while (bigp[0] == given_arity) {
+ if (memcmp(bigp+1, given+1, sizeof(Eterm)*given_size) == 0) {
+ SET_I((Eterm *) bigp[given_size+1]);
+ Goto(*I);
+ }
+ bigp += thing_arityval(arity) + 2;
+ }
+ }
+
+ /*
+ * Failed.
+ */
+
+ SET_I((Eterm *) Arg(1));
+ Goto(*I);
+ }
+
+#ifdef ARCH_64
+ OpCase(i_select_float_sfI):
+ {
+ Uint f;
+ int n;
+ struct ValLabel {
+ Uint f;
+ Eterm* addr;
+ };
+ struct ValLabel* ptr;
+
+ GetArg1(0, tmp_arg1);
+ ASSERT(is_float(tmp_arg1));
+ f = float_val(tmp_arg1)[1];
+ n = Arg(2);
+ ptr = (struct ValLabel *) &Arg(3);
+ while (n-- > 0) {
+ if (ptr->f == f) {
+ SET_I(ptr->addr);
+ Goto(*I);
+ }
+ ptr++;
+ }
+ SET_I((Eterm *) Arg(1));
+ Goto(*I);
+ }
+#else
+ OpCase(i_select_float_sfI):
+ {
+ Uint fpart1;
+ Uint fpart2;
+ int n;
+ struct ValLabel {
+ Uint fpart1;
+ Uint fpart2;
+ Eterm* addr;
+ };
+ struct ValLabel* ptr;
+
+ GetArg1(0, tmp_arg1);
+ ASSERT(is_float(tmp_arg1));
+ fpart1 = float_val(tmp_arg1)[1];
+ fpart2 = float_val(tmp_arg1)[2];
+
+ n = Arg(2);
+ ptr = (struct ValLabel *) &Arg(3);
+ while (n-- > 0) {
+ if (ptr->fpart1 == fpart1 && ptr->fpart2 == fpart2) {
+ SET_I(ptr->addr);
+ Goto(*I);
+ }
+ ptr++;
+ }
+ SET_I((Eterm *) Arg(1));
+ Goto(*I);
+ }
+#endif
+
+ OpCase(set_tuple_element_sdP): {
+ Eterm element;
+ Eterm tuple;
+ Eterm* next;
+ Eterm* p;
+
+ PreFetch(3, next);
+ GetArg2(0, element, tuple);
+ ASSERT(is_tuple(tuple));
+ p = (Eterm *) ((unsigned char *) tuple_val(tuple) + Arg(2));
+ *p = element;
+ NextPF(3, next);
+ }
+
+ OpCase(i_is_ne_exact_f):
+ if (EQ(tmp_arg1, tmp_arg2)) {
+ ClauseFail();
+ }
+ Next(1);
+
+ OpCase(normal_exit): {
+ SWAPOUT;
+ c_p->freason = EXC_NORMAL;
+ c_p->arity = 0; /* In case this process will ever be garbed again. */
+ ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p);
+ erts_do_exit_process(c_p, am_normal);
+ ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p);
+ goto do_schedule;
+ }
+
+ OpCase(continue_exit): {
+ ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p);
+ erts_continue_exit_process(c_p);
+ ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p);
+ goto do_schedule;
+ }
+
+ OpCase(raise_ss): {
+ /* This was not done very well in R10-0; then, we passed the tag in
+ the first argument and hoped that the existing c_p->ftrace was
+ still correct. But the ftrace-object already includes the tag
+ (or rather, the freason). Now, we pass the original ftrace in
+ the first argument. We also handle atom tags in the first
+ argument for backwards compatibility.
+ */
+ GetArg2(0, tmp_arg1, tmp_arg2);
+ c_p->fvalue = tmp_arg2;
+ if (c_p->freason == EXC_NULL) {
+ /* a safety check for the R10-0 case; should not happen */
+ c_p->ftrace = NIL;
+ c_p->freason = EXC_ERROR;
+ }
+ /* for R10-0 code, keep existing c_p->ftrace and hope it's correct */
+ switch (tmp_arg1) {
+ case am_throw:
+ c_p->freason = EXC_THROWN & ~EXF_SAVETRACE;
+ break;
+ case am_error:
+ c_p->freason = EXC_ERROR & ~EXF_SAVETRACE;
+ break;
+ case am_exit:
+ c_p->freason = EXC_EXIT & ~EXF_SAVETRACE;
+ break;
+ default:
+ {/* R10-1 and later
+ XXX note: should do sanity check on given trace if it can be
+ passed from a user! Currently only expecting generated calls.
+ */
+ struct StackTrace *s;
+ c_p->ftrace = tmp_arg1;
+ s = get_trace_from_exc(tmp_arg1);
+ if (s == NULL) {
+ c_p->freason = EXC_ERROR;
+ } else {
+ c_p->freason = PRIMARY_EXCEPTION(s->freason);
+ }
+ }
+ }
+ goto find_func_info;
+ }
+
+ OpCase(badmatch_s): {
+ GetArg1(0, tmp_arg1);
+ c_p->fvalue = tmp_arg1;
+ c_p->freason = BADMATCH;
+ }
+ /* Fall through here */
+
+ find_func_info: {
+ reg[0] = r(0);
+ SWAPOUT;
+ I = handle_error(c_p, I, reg, NULL);
+ goto post_error_handling;
+ }
+
+ OpCase(call_error_handler):
+ /*
+ * At this point, I points to the code[3] in the export entry for
+ * a function which is not loaded.
+ *
+ * code[0]: Module
+ * code[1]: Function
+ * code[2]: Arity
+ * code[3]: &&call_error_handler
+ * code[4]: Not used
+ */
+ SWAPOUT;
+ reg[0] = r(0);
+ tmp_arg1 = call_error_handler(c_p, I-3, reg);
+ r(0) = reg[0];
+ SWAPIN;
+ if (tmp_arg1) {
+ SET_I(c_p->i);
+ Dispatch();
+ }
+
+ /* Fall through */
+ OpCase(error_action_code): {
+ no_error_handler:
+ reg[0] = r(0);
+ SWAPOUT;
+ I = handle_error(c_p, NULL, reg, NULL);
+ post_error_handling:
+ if (I == 0) {
+ goto do_schedule;
+ } else {
+ r(0) = reg[0];
+ ASSERT(!is_value(r(0)));
+ if (c_p->mbuf) {
+ erts_garbage_collect(c_p, 0, reg+1, 3);
+ }
+ SWAPIN;
+ Goto(*I);
+ }
+ }
+
+ OpCase(call_nif):
+ {
+ static void* const dispatchers[4] = {
+ nif_dispatcher_0, nif_dispatcher_1, nif_dispatcher_2, nif_dispatcher_3
+ };
+ BifFunction vbf = dispatchers[I[-1]];
+ goto apply_bif_or_nif;
+
+ OpCase(apply_bif):
+ /*
+ * At this point, I points to the code[3] in the export entry for
+ * the BIF:
+ *
+ * code[0]: Module
+ * code[1]: Function
+ * code[2]: Arity
+ * code[3]: &&apply_bif
+ * code[4]: Function pointer to BIF function
+ */
+ vbf = (BifFunction) Arg(0);
+
+ apply_bif_or_nif:
+ c_p->current = I-3; /* In case we apply process_info/1,2 or load_nif/1 */
+ c_p->i = I; /* In case we apply check_process_code/2. */
+ c_p->arity = 0; /* To allow garbage collection on ourselves
+ * (check_process_code/2).
+ */
+
+ SWAPOUT;
+ c_p->fcalls = FCALLS - 1;
+ PROCESS_MAIN_CHK_LOCKS(c_p);
+ tmp_arg2 = I[-1];
+ ASSERT(tmp_arg2 <= 3);
+ ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p);
+ switch (tmp_arg2) {
+ case 3:
+ {
+ Eterm (*bf)(Process*, Eterm, Eterm, Eterm, Uint*) = vbf;
+ ASSERT(!ERTS_PROC_IS_EXITING(c_p));
+ tmp_arg1 = (*bf)(c_p, r(0), x(1), x(2), I);
+ ASSERT(!ERTS_PROC_IS_EXITING(c_p) || is_non_value(tmp_arg1));
+ PROCESS_MAIN_CHK_LOCKS(c_p);
+ }
+ break;
+ case 2:
+ {
+ Eterm (*bf)(Process*, Eterm, Eterm, Uint*) = vbf;
+ ASSERT(!ERTS_PROC_IS_EXITING(c_p));
+ tmp_arg1 = (*bf)(c_p, r(0), x(1), I);
+ ASSERT(!ERTS_PROC_IS_EXITING(c_p) || is_non_value(tmp_arg1));
+ PROCESS_MAIN_CHK_LOCKS(c_p);
+ }
+ break;
+ case 1:
+ {
+ Eterm (*bf)(Process*, Eterm, Uint*) = vbf;
+ ASSERT(!ERTS_PROC_IS_EXITING(c_p));
+ tmp_arg1 = (*bf)(c_p, r(0), I);
+ ASSERT(!ERTS_PROC_IS_EXITING(c_p) || is_non_value(tmp_arg1));
+ PROCESS_MAIN_CHK_LOCKS(c_p);
+ }
+ break;
+ case 0:
+ {
+ Eterm (*bf)(Process*, Uint*) = vbf;
+ ASSERT(!ERTS_PROC_IS_EXITING(c_p));
+ tmp_arg1 = (*bf)(c_p, I);
+ ASSERT(!ERTS_PROC_IS_EXITING(c_p) || is_non_value(tmp_arg1));
+ PROCESS_MAIN_CHK_LOCKS(c_p);
+ break;
+ }
+ }
+ ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p);
+ ERTS_HOLE_CHECK(c_p);
+ if (c_p->mbuf) {
+ reg[0] = r(0);
+ tmp_arg1 = erts_gc_after_bif_call(c_p, tmp_arg1, reg, tmp_arg2);
+ r(0) = reg[0];
+ }
+ SWAPIN; /* There might have been a garbage collection. */
+ FCALLS = c_p->fcalls;
+ if (is_value(tmp_arg1)) {
+ r(0) = tmp_arg1;
+ CHECK_TERM(r(0));
+ SET_I(c_p->cp);
+ Goto(*I);
+ } else if (c_p->freason == TRAP) {
+ SET_I((Eterm *)c_p->def_arg_reg[3]);
+ r(0) = c_p->def_arg_reg[0];
+ x(1) = c_p->def_arg_reg[1];
+ x(2) = c_p->def_arg_reg[2];
+ Dispatch();
+ }
+ reg[0] = r(0);
+ I = handle_error(c_p, c_p->cp, reg, vbf);
+ goto post_error_handling;
+ }
+
+ OpCase(i_get_sd):
+ {
+ Eterm arg;
+ Eterm result;
+
+ GetArg1(0, arg);
+ result = erts_pd_hash_get(c_p, arg);
+ StoreBifResult(1, result);
+ }
+
+ OpCase(i_put_tuple_only_Ad): {
+ tmp_arg1 = make_tuple(HTOP);
+ *HTOP++ = Arg(0);
+ StoreBifResult(1, tmp_arg1);
+ }
+
+ OpCase(case_end_s):
+ GetArg1(0, tmp_arg1);
+ c_p->fvalue = tmp_arg1;
+ c_p->freason = EXC_CASE_CLAUSE;
+ goto find_func_info;
+
+ OpCase(if_end):
+ c_p->freason = EXC_IF_CLAUSE;
+ goto find_func_info;
+
+ OpCase(i_func_info_IaaI): {
+ c_p->freason = EXC_FUNCTION_CLAUSE;
+ c_p->current = I + 2;
+ goto lb_error_action_code;
+ }
+
+ OpCase(try_case_end_s):
+ GetArg1(0, tmp_arg1);
+ c_p->fvalue = tmp_arg1;
+ c_p->freason = EXC_TRY_CLAUSE;
+ goto find_func_info;
+
+ /*
+ * Construction of binaries using new instructions.
+ */
+ {
+ Eterm new_binary;
+ Eterm num_bits_term;
+ Uint num_bits;
+ Uint alloc;
+ Uint num_bytes;
+
+ OpCase(i_bs_init_bits_heap_IIId): {
+ num_bits = Arg(0);
+ alloc = Arg(1);
+ I++;
+ goto do_bs_init_bits_known;
+ }
+
+ OpCase(i_bs_init_bits_IId): {
+ num_bits = Arg(0);
+ alloc = 0;
+ goto do_bs_init_bits_known;
+ }
+
+ OpCase(i_bs_init_bits_fail_heap_IjId): {
+ /* tmp_arg1 was fetched by an i_fetch instruction */
+ num_bits_term = tmp_arg1;
+ alloc = Arg(0);
+ I++;
+ goto do_bs_init_bits;
+ }
+
+ OpCase(i_bs_init_bits_fail_rjId): {
+ num_bits_term = r(0);
+ alloc = 0;
+ goto do_bs_init_bits;
+ }
+ OpCase(i_bs_init_bits_fail_yjId): {
+ num_bits_term = yb(Arg(0));
+ I++;
+ alloc = 0;
+ goto do_bs_init_bits;
+ }
+ OpCase(i_bs_init_bits_fail_xjId): {
+ num_bits_term = xb(Arg(0));
+ I++;
+ alloc = 0;
+ /* FALL THROUGH */
+ }
+
+ /* num_bits_term = Term for number of bits to build (small/big)
+ * alloc = Number of words to allocate on heap
+ * Operands: Fail Live Dst
+ */
+
+ do_bs_init_bits:
+ if (is_small(num_bits_term)) {
+ Sint size = signed_val(num_bits_term);
+ if (size < 0) {
+ goto badarg;
+ }
+ num_bits = (Uint) size;
+ } else {
+ Uint bits;
+
+ if (!term_to_Uint(num_bits_term, &bits)) {
+ c_p->freason = bits;
+ goto lb_Cl_error;
+
+ }
+ num_bits = (Eterm) bits;
+ }
+
+ /* num_bits = Number of bits to build
+ * alloc = Number of extra words to allocate on heap
+ * Operands: NotUsed Live Dst
+ */
+ do_bs_init_bits_known:
+ num_bytes = (num_bits+7) >> 3;
+ if (num_bits & 7) {
+ alloc += ERL_SUB_BIN_SIZE;
+ }
+ if (num_bytes <= ERL_ONHEAP_BIN_LIMIT) {
+ alloc += heap_bin_size(num_bytes);
+ } else {
+ alloc += PROC_BIN_SIZE;
+ }
+ TestHeap(alloc, Arg(1));
+
+ /* num_bits = Number of bits to build
+ * num_bytes = Number of bytes to allocate in the binary
+ * alloc = Total number of words to allocate on heap
+ * Operands: NotUsed NotUsed Dst
+ */
+ if (num_bytes <= ERL_ONHEAP_BIN_LIMIT) {
+ ErlHeapBin* hb;
+
+ erts_bin_offset = 0;
+ erts_writable_bin = 0;
+ hb = (ErlHeapBin *) HTOP;
+ HTOP += heap_bin_size(num_bytes);
+ hb->thing_word = header_heap_bin(num_bytes);
+ hb->size = num_bytes;
+ erts_current_bin = (byte *) hb->data;
+ new_binary = make_binary(hb);
+
+ do_bits_sub_bin:
+ if (num_bits & 7) {
+ ErlSubBin* sb;
+
+ sb = (ErlSubBin *) HTOP;
+ HTOP += ERL_SUB_BIN_SIZE;
+ sb->thing_word = HEADER_SUB_BIN;
+ sb->size = num_bytes - 1;
+ sb->bitsize = num_bits & 7;
+ sb->offs = 0;
+ sb->bitoffs = 0;
+ sb->is_writable = 0;
+ sb->orig = new_binary;
+ new_binary = make_binary(sb);
+ }
+ StoreBifResult(2, new_binary);
+ } else {
+ Binary* bptr;
+ ProcBin* pb;
+
+ erts_bin_offset = 0;
+ erts_writable_bin = 0;
+
+ /*
+ * Allocate the binary struct itself.
+ */
+ bptr = erts_bin_nrml_alloc(num_bytes);
+ bptr->flags = 0;
+ bptr->orig_size = num_bytes;
+ erts_refc_init(&bptr->refc, 1);
+ erts_current_bin = (byte *) bptr->orig_bytes;
+
+ /*
+ * Now allocate the ProcBin on the heap.
+ */
+ pb = (ProcBin *) HTOP;
+ HTOP += PROC_BIN_SIZE;
+ pb->thing_word = HEADER_PROC_BIN;
+ pb->size = num_bytes;
+ pb->next = MSO(c_p).mso;
+ MSO(c_p).mso = pb;
+ pb->val = bptr;
+ pb->bytes = (byte*) bptr->orig_bytes;
+ pb->flags = 0;
+ MSO(c_p).overhead += pb->size / sizeof(Eterm);
+ new_binary = make_binary(pb);
+ goto do_bits_sub_bin;
+ }
+ }
+
+ {
+ OpCase(i_bs_init_fail_heap_IjId): {
+ /* tmp_arg1 was fetched by an i_fetch instruction */
+ tmp_arg2 = Arg(0);
+ I++;
+ goto do_bs_init;
+ }
+
+ OpCase(i_bs_init_fail_rjId): {
+ tmp_arg1 = r(0);
+ tmp_arg2 = 0;
+ goto do_bs_init;
+ }
+
+ OpCase(i_bs_init_fail_yjId): {
+ tmp_arg1 = yb(Arg(0));
+ tmp_arg2 = 0;
+ I++;
+ goto do_bs_init;
+ }
+
+ OpCase(i_bs_init_fail_xjId): {
+ tmp_arg1 = xb(Arg(0));
+ tmp_arg2 = 0;
+ I++;
+ }
+ /* FALL THROUGH */
+ do_bs_init:
+ if (is_small(tmp_arg1)) {
+ Sint size = signed_val(tmp_arg1);
+ if (size < 0) {
+ goto badarg;
+ }
+ tmp_arg1 = (Eterm) size;
+ } else {
+ Uint bytes;
+
+ if (!term_to_Uint(tmp_arg1, &bytes)) {
+ c_p->freason = bytes;
+ goto lb_Cl_error;
+ }
+ if ((bytes >> (8*sizeof(Uint)-3)) != 0) {
+ goto system_limit;
+ }
+ tmp_arg1 = (Eterm) bytes;
+ }
+ if (tmp_arg1 <= ERL_ONHEAP_BIN_LIMIT) {
+ goto do_heap_bin_alloc;
+ } else {
+ goto do_proc_bin_alloc;
+ }
+
+
+ OpCase(i_bs_init_heap_IIId): {
+ tmp_arg1 = Arg(0);
+ tmp_arg2 = Arg(1);
+ I++;
+ goto do_proc_bin_alloc;
+ }
+
+ OpCase(i_bs_init_IId): {
+ tmp_arg1 = Arg(0);
+ tmp_arg2 = 0;
+ }
+ /* FALL THROUGH */
+ do_proc_bin_alloc: {
+ Binary* bptr;
+ ProcBin* pb;
+
+ erts_bin_offset = 0;
+ erts_writable_bin = 0;
+ TestBinVHeap(tmp_arg1 / sizeof(Eterm),
+ tmp_arg2 + PROC_BIN_SIZE + ERL_SUB_BIN_SIZE, Arg(1));
+
+ /*
+ * Allocate the binary struct itself.
+ */
+ bptr = erts_bin_nrml_alloc(tmp_arg1);
+ bptr->flags = 0;
+ bptr->orig_size = tmp_arg1;
+ erts_refc_init(&bptr->refc, 1);
+ erts_current_bin = (byte *) bptr->orig_bytes;
+
+ /*
+ * Now allocate the ProcBin on the heap.
+ */
+ pb = (ProcBin *) HTOP;
+ HTOP += PROC_BIN_SIZE;
+ pb->thing_word = HEADER_PROC_BIN;
+ pb->size = tmp_arg1;
+ pb->next = MSO(c_p).mso;
+ MSO(c_p).mso = pb;
+ pb->val = bptr;
+ pb->bytes = (byte*) bptr->orig_bytes;
+ pb->flags = 0;
+
+ MSO(c_p).overhead += tmp_arg1 / sizeof(Eterm);
+
+ StoreBifResult(2, make_binary(pb));
+ }
+
+ OpCase(i_bs_init_heap_bin_heap_IIId): {
+ tmp_arg1 = Arg(0);
+ tmp_arg2 = Arg(1);
+ I++;
+ goto do_heap_bin_alloc;
+ }
+
+ OpCase(i_bs_init_heap_bin_IId): {
+ tmp_arg1 = Arg(0);
+ tmp_arg2 = 0;
+ }
+ /* Fall through */
+ do_heap_bin_alloc:
+ {
+ ErlHeapBin* hb;
+ Uint bin_need;
+
+ bin_need = heap_bin_size(tmp_arg1);
+ erts_bin_offset = 0;
+ erts_writable_bin = 0;
+ TestHeap(bin_need+tmp_arg2+ERL_SUB_BIN_SIZE, Arg(1));
+ hb = (ErlHeapBin *) HTOP;
+ HTOP += bin_need;
+ hb->thing_word = header_heap_bin(tmp_arg1);
+ hb->size = tmp_arg1;
+ erts_current_bin = (byte *) hb->data;
+ tmp_arg1 = make_binary(hb);
+ StoreBifResult(2, tmp_arg1);
+ }
+ }
+
+ OpCase(i_bs_bits_to_bytes_rjd): {
+ tmp_arg1 = r(0);
+ goto do_bits_to_bytes;
+ }
+
+ OpCase(i_bs_bits_to_bytes_yjd): {
+ tmp_arg1 = yb(Arg(0));
+ I++;
+ goto do_bits_to_bytes;
+
+ OpCase(i_bs_bits_to_bytes_xjd): {
+ tmp_arg1 = xb(Arg(0));
+ I++;
+ }
+
+ do_bits_to_bytes:
+ {
+ if (is_valid_bit_size(tmp_arg1)) {
+ tmp_arg1 = make_small(unsigned_val(tmp_arg1) >> 3);
+ } else {
+ Uint bytes;
+ if (!term_to_Uint(tmp_arg1, &bytes)) {
+ goto badarg;
+ }
+ tmp_arg1 = bytes;
+ if ((tmp_arg1 & 0x07) != 0) {
+ goto badarg;
+ }
+ SWAPOUT;
+ tmp_arg1 = erts_make_integer(tmp_arg1 >> 3, c_p);
+ HTOP = HEAP_TOP(c_p);
+ }
+ StoreBifResult(1, tmp_arg1);
+ }
+ }
+
+ OpCase(i_bs_add_jId): {
+ Uint Unit = Arg(1);
+ if (is_both_small(tmp_arg1, tmp_arg2)) {
+ Sint Arg1 = signed_val(tmp_arg1);
+ Sint Arg2 = signed_val(tmp_arg2);
+
+ if (Arg1 >= 0 && Arg2 >= 0) {
+ BsSafeMul(Arg2, Unit, goto system_limit, tmp_arg1);
+ tmp_arg1 += Arg1;
+
+ store_bs_add_result:
+ if (MY_IS_SSMALL((Sint) tmp_arg1)) {
+ tmp_arg1 = make_small(tmp_arg1);
+ } else {
+ /*
+ * May generate a heap fragment, but in this
+ * particular case it is OK, since the value will be
+ * stored into an x register (the GC will scan x
+ * registers for references to heap fragments) and
+ * there is no risk that the value can be stored into a
+ * location that is not scanned for heap-fragment
+ * references (such as the heap).
+ */
+ SWAPOUT;
+ tmp_arg1 = erts_make_integer(tmp_arg1, c_p);
+ HTOP = HEAP_TOP(c_p);
+ }
+ StoreBifResult(2, tmp_arg1);
+ }
+ goto badarg;
+ } else {
+ Uint a;
+ Uint b;
+ Uint c;
+
+ /*
+ * Now we know that one of the arguments is
+ * not a small. We must convert both arguments
+ * to Uints and check for errors at the same time.
+ *
+ * Error checking is tricky.
+ *
+ * If one of the arguments is not numeric or
+ * not positive, the error reason is BADARG.
+ *
+ * Otherwise if both arguments are numeric,
+ * but at least one argument does not fit in
+ * a Uint, the reason is SYSTEM_LIMIT.
+ */
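+
+ /*
+ * For example: term_to_Uint() fails with BADARG in its out
+ * parameter for a non-integer or negative argument, and with
+ * SYSTEM_LIMIT for a bignum that does not fit in a Uint; the
+ * tests below pick the right reason when either argument fails.
+ */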
+
+ if (!term_to_Uint(tmp_arg1, &a)) {
+ if (a == BADARG) {
+ goto badarg;
+ }
+ if (!term_to_Uint(tmp_arg2, &b)) {
+ c_p->freason = b;
+ goto lb_Cl_error;
+ }
+ goto system_limit;
+ } else if (!term_to_Uint(tmp_arg2, &b)) {
+ c_p->freason = b;
+ goto lb_Cl_error;
+ }
+
+ /*
+ * The arguments are now correct and stored in a and b.
+ */
+
+ BsSafeMul(b, Unit, goto system_limit, c);
+ tmp_arg1 = a + c;
+ if (tmp_arg1 < a) {
+ /*
+ * If the result is less than one of the
+ * arguments, there must have been an overflow.
+ */
+ goto system_limit;
+ }
+ goto store_bs_add_result;
+ }
+ /* No fallthrough */
+ ASSERT(0);
+ }
+
+ OpCase(bs_put_string_II):
+ {
+ Eterm* next;
+ PreFetch(2, next);
+ erts_new_bs_put_string(ERL_BITS_ARGS_2((byte *) Arg(1), Arg(0)));
+ NextPF(2, next);
+ }
+
+ /*
+ * tmp_arg1 = Number of bytes to build
+ * tmp_arg2 = Source binary
+ * Operands: Fail ExtraHeap Live Unit Dst
+ */
+
+ OpCase(i_bs_append_jIIId): {
+ Uint live = Arg(2);
+ Uint res;
+
+ SWAPOUT;
+ reg[0] = r(0);
+ reg[live] = tmp_arg2;
+ res = erts_bs_append(c_p, reg, live, tmp_arg1, Arg(1), Arg(3));
+ r(0) = reg[0];
+ SWAPIN;
+ if (is_non_value(res)) {
+ /* c_p->freason is already set (may be either BADARG or SYSTEM_LIMIT). */
+ goto lb_Cl_error;
+ }
+ StoreBifResult(4, res);
+ }
+
+ /*
+ * tmp_arg1 = Number of bytes to build
+ * tmp_arg2 = Source binary
+ * Operands: Fail Unit Dst
+ */
+ OpCase(i_bs_private_append_jId): {
+ Eterm res;
+
+ res = erts_bs_private_append(c_p, tmp_arg2, tmp_arg1, Arg(1));
+ if (is_non_value(res)) {
+ /* c_p->freason is already set (may be either BADARG or SYSTEM_LIMIT). */
+ goto lb_Cl_error;
+ }
+ StoreBifResult(2, res);
+ }
+
+ /*
+ * tmp_arg1 = Initial size of writable binary
+ * Operands: Live Dst
+ */
+ OpCase(bs_init_writable): {
+ SWAPOUT;
+ r(0) = erts_bs_init_writable(c_p, r(0));
+ SWAPIN;
+ Next(0);
+ }
+
+ /*
+ * Calculate the number of bytes needed to encode the source
+ * operand to UTF-8. If the source operand is invalid (e.g. wrong
+ * type or range) we return a nonsense integer result (between 1 and 4). We
+ * can get away with that because we KNOW that bs_put_utf8 will do
+ * full error checking.
+ */
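+
+ /*
+ * For example: code point 16#E5 needs 2 bytes, 16#20AC needs 3,
+ * and 16#1F600 needs 4, matching the 16#80/16#800/16#10000
+ * boundaries tested below.
+ */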
+ OpCase(i_bs_utf8_size_sd): {
+ Eterm arg;
+ Eterm result;
+
+ GetArg1(0, arg);
+ if (arg < make_small(0x80UL)) {
+ result = make_small(1);
+ } else if (arg < make_small(0x800UL)) {
+ result = make_small(2);
+ } else if (arg < make_small(0x10000UL)) {
+ result = make_small(3);
+ } else {
+ result = make_small(4);
+ }
+ StoreBifResult(1, result);
+ }
+
+ OpCase(i_bs_put_utf8_js): {
+ Eterm arg;
+
+ GetArg1(1, arg);
+ if (!erts_bs_put_utf8(ERL_BITS_ARGS_1(arg))) {
+ goto badarg;
+ }
+ Next(2);
+ }
+
+ /*
+ * Calculate the number of bytes needed to encode the source
+ * operand to UTF-16. If the source operand is invalid (e.g. wrong
+ * type or range) we return a nonsense integer result (2 or 4). We
+ * can get away with that because we KNOW that bs_put_utf16 will do
+ * full error checking.
+ */
+
+ OpCase(i_bs_utf16_size_sd): {
+ Eterm arg;
+ Eterm result = make_small(2);
+
+ GetArg1(0, arg);
+ if (arg >= make_small(0x10000UL)) {
+ result = make_small(4);
+ }
+ StoreBifResult(1, result);
+ }
+
+ OpCase(i_bs_put_utf16_jIs): {
+ Eterm arg;
+
+ GetArg1(2, arg);
+ if (!erts_bs_put_utf16(ERL_BITS_ARGS_2(arg, Arg(1)))) {
+ goto badarg;
+ }
+ Next(3);
+ }
+
+ /*
+ * Only used for validating a value about to be stored in a binary.
+ */
+ OpCase(i_bs_validate_unicode_js): {
+ Eterm val;
+
+ GetArg1(1, val);
+
+ /*
+ * There is no need to untag the integer, but it IS necessary
+ * to make sure it is small (if the term is a bignum, it could
+ * slip through the test, and there is no further test that
+ * would catch it, since bit syntax construction silently masks
+ * too big numbers).
+ */
+ if (is_not_small(val) || val > make_small(0x10FFFFUL) ||
+ (make_small(0xD800UL) <= val && val <= make_small(0xDFFFUL)) ||
+ val == make_small(0xFFFEUL) || val == make_small(0xFFFFUL)) {
+ goto badarg;
+ }
+ Next(2);
+ }
+
+ /*
+ * Only used for validating a value matched out.
+ *
+ * tmp_arg1 = Integer to validate
+ * tmp_arg2 = Match context
+ */
+ OpCase(i_bs_validate_unicode_retract_j): {
+ /*
+ * There is no need to untag the integer, but it IS necessary
+ * to make sure it is small (a bignum pointer could fall in
+ * the valid range).
+ */
+ if (is_not_small(tmp_arg1) || tmp_arg1 > make_small(0x10FFFFUL) ||
+ (make_small(0xD800UL) <= tmp_arg1 && tmp_arg1 <= make_small(0xDFFFUL)) ||
+ tmp_arg1 == make_small(0xFFFEUL) || tmp_arg1 == make_small(0xFFFFUL)) {
+ ErlBinMatchBuffer *mb = ms_matchbuffer(tmp_arg2);
+
+ mb->offset -= 32;
+ goto badarg;
+ }
+ Next(1);
+ }
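+
+ /*
+ * The retract above presumably backs out of a utf32 match: the
+ * value was matched as a plain 32-bit integer, so on validation
+ * failure the 32 bits just consumed are handed back
+ * (mb->offset -= 32) before the clause fails, leaving the match
+ * context positioned as it was for the next clause.
+ */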
+
+ /*
+ * Matching of binaries.
+ */
+
+ {
+ Eterm header;
+ Eterm* next;
+ Uint slots;
+
+ OpCase(i_bs_start_match2_rfIId): {
+ tmp_arg1 = r(0);
+
+ do_start_match:
+ slots = Arg(2);
+ if (!is_boxed(tmp_arg1)) {
+ ClauseFail();
+ }
+ PreFetch(4, next);
+ header = *boxed_val(tmp_arg1);
+ if (header_is_bin_matchstate(header)) {
+ ErlBinMatchState* ms = (ErlBinMatchState *) boxed_val(tmp_arg1);
+ Uint actual_slots = HEADER_NUM_SLOTS(header);
+ ms->save_offset[0] = ms->mb.offset;
+ if (actual_slots < slots) {
+ ErlBinMatchState* dst;
+ Uint live = Arg(1);
+ Uint wordsneeded = ERL_BIN_MATCHSTATE_SIZE(slots);
+
+ TestHeapPreserve(wordsneeded, live, tmp_arg1);
+ ms = (ErlBinMatchState *) boxed_val(tmp_arg1);
+ dst = (ErlBinMatchState *) HTOP;
+ *dst = *ms;
+ *HTOP = HEADER_BIN_MATCHSTATE(slots);
+ HTOP += wordsneeded;
+ StoreResult(make_matchstate(dst), Arg(3));
+ }
+ } else if (is_binary_header(header)) {
+ Eterm result;
+ Uint live = Arg(1);
+ Uint wordsneeded = ERL_BIN_MATCHSTATE_SIZE(slots);
+ TestHeapPreserve(wordsneeded, live, tmp_arg1);
+ HEAP_TOP(c_p) = HTOP;
+#ifdef DEBUG
+ c_p->stop = E; /* Needed for checking in HeapOnlyAlloc(). */
+#endif
+ result = erts_bs_start_match_2(c_p, tmp_arg1, slots);
+ HTOP = HEAP_TOP(c_p);
+ if (is_non_value(result)) {
+ ClauseFail();
+ } else {
+ StoreResult(result, Arg(3));
+ }
+ } else {
+ ClauseFail();
+ }
+ NextPF(4, next);
+ }
+ OpCase(i_bs_start_match2_xfIId): {
+ tmp_arg1 = xb(Arg(0));
+ I++;
+ goto do_start_match;
+ }
+ OpCase(i_bs_start_match2_yfIId): {
+ tmp_arg1 = yb(Arg(0));
+ I++;
+ goto do_start_match;
+ }
+ }
+
+ OpCase(bs_test_zero_tail2_fr): {
+ Eterm* next;
+ ErlBinMatchBuffer *_mb;
+
+ PreFetch(1, next);
+ _mb = (ErlBinMatchBuffer*) ms_matchbuffer(r(0));
+ if (_mb->size != _mb->offset) {
+ ClauseFail();
+ }
+ NextPF(1, next);
+ }
+
+ OpCase(bs_test_zero_tail2_fx): {
+ Eterm* next;
+ ErlBinMatchBuffer *_mb;
+
+ PreFetch(2, next);
+ _mb = (ErlBinMatchBuffer*) ms_matchbuffer(xb(Arg(1)));
+ if (_mb->size != _mb->offset) {
+ ClauseFail();
+ }
+ NextPF(2, next);
+ }
+
+ OpCase(bs_test_tail_imm2_frI): {
+ Eterm* next;
+ ErlBinMatchBuffer *_mb;
+ PreFetch(2, next);
+ _mb = ms_matchbuffer(r(0));
+ if (_mb->size - _mb->offset != Arg(1)) {
+ ClauseFail();
+ }
+ NextPF(2, next);
+ }
+ OpCase(bs_test_tail_imm2_fxI): {
+ Eterm* next;
+ ErlBinMatchBuffer *_mb;
+ PreFetch(3, next);
+ _mb = ms_matchbuffer(xb(Arg(1)));
+ if (_mb->size - _mb->offset != Arg(2)) {
+ ClauseFail();
+ }
+ NextPF(3, next);
+ }
+
+ OpCase(bs_test_unit_frI): {
+ Eterm* next;
+ ErlBinMatchBuffer *_mb;
+ PreFetch(2, next);
+ _mb = ms_matchbuffer(r(0));
+ if ((_mb->size - _mb->offset) % Arg(1)) {
+ ClauseFail();
+ }
+ NextPF(2, next);
+ }
+ OpCase(bs_test_unit_fxI): {
+ Eterm* next;
+ ErlBinMatchBuffer *_mb;
+ PreFetch(3, next);
+ _mb = ms_matchbuffer(xb(Arg(1)));
+ if ((_mb->size - _mb->offset) % Arg(2)) {
+ ClauseFail();
+ }
+ NextPF(3, next);
+ }
+
+ OpCase(bs_test_unit8_fr): {
+ Eterm* next;
+ ErlBinMatchBuffer *_mb;
+ PreFetch(1, next);
+ _mb = ms_matchbuffer(r(0));
+ if ((_mb->size - _mb->offset) & 7) {
+ ClauseFail();
+ }
+ NextPF(1, next);
+ }
+ OpCase(bs_test_unit8_fx): {
+ Eterm* next;
+ ErlBinMatchBuffer *_mb;
+ PreFetch(2, next);
+ _mb = ms_matchbuffer(xb(Arg(1)));
+ if ((_mb->size - _mb->offset) & 7) {
+ ClauseFail();
+ }
+ NextPF(2, next);
+ }
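+
+ /*
+ * For the power-of-two unit the remainder test reduces to a mask;
+ * the two instructions above use (size & 7) where the generic
+ * bs_test_unit instructions use (size % unit). Sketch:
+ *
+ * (n % 8) == (n & 7) for any unsigned n
+ */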
+
+ OpCase(i_bs_get_integer_8_rfd): {
+ tmp_arg1 = r(0);
+ goto do_bs_get_integer_8;
+ }
+
+ OpCase(i_bs_get_integer_8_xfd): {
+ tmp_arg1 = xb(Arg(0));
+ I++;
+ }
+
+ do_bs_get_integer_8: {
+ ErlBinMatchBuffer *_mb;
+ Eterm _result;
+ _mb = ms_matchbuffer(tmp_arg1);
+ if (_mb->size - _mb->offset < 8) {
+ ClauseFail();
+ }
+ if (BIT_OFFSET(_mb->offset) != 0) {
+ _result = erts_bs_get_integer_2(c_p, 8, 0, _mb);
+ } else {
+ _result = make_small(_mb->base[BYTE_OFFSET(_mb->offset)]);
+ _mb->offset += 8;
+ }
+ StoreBifResult(1, _result);
+ }
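+
+ /*
+ * The byte-aligned case above is served straight from the match
+ * buffer (one load plus a tag), while unaligned offsets fall back
+ * to the generic erts_bs_get_integer_2(). A sketch of the fast
+ * path, assuming BIT_OFFSET(mb->offset) == 0:
+ *
+ * byte b = mb->base[mb->offset / 8];
+ * Eterm res = make_small(b); (0..255 always fits in a small)
+ * mb->offset += 8;
+ */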
+
+ OpCase(i_bs_get_integer_16_rfd): {
+ tmp_arg1 = r(0);
+ goto do_bs_get_integer_16;
+ }
+
+ OpCase(i_bs_get_integer_16_xfd): {
+ tmp_arg1 = xb(Arg(0));
+ I++;
+ }
+
+ do_bs_get_integer_16: {
+ ErlBinMatchBuffer *_mb;
+ Eterm _result;
+ _mb = ms_matchbuffer(tmp_arg1);
+ if (_mb->size - _mb->offset < 16) {
+ ClauseFail();
+ }
+ if (BIT_OFFSET(_mb->offset) != 0) {
+ _result = erts_bs_get_integer_2(c_p, 16, 0, _mb);
+ } else {
+ _result = make_small(get_int16(_mb->base+BYTE_OFFSET(_mb->offset)));
+ _mb->offset += 16;
+ }
+ StoreBifResult(1, _result);
+ }
+
+ OpCase(i_bs_get_integer_32_rfId): {
+ tmp_arg1 = r(0);
+ goto do_bs_get_integer_32;
+ }
+
+ OpCase(i_bs_get_integer_32_xfId): {
+ tmp_arg1 = xb(Arg(0));
+ I++;
+ }
+
+ do_bs_get_integer_32: {
+ ErlBinMatchBuffer *_mb;
+ Uint32 _integer;
+ Eterm _result;
+ _mb = ms_matchbuffer(tmp_arg1);
+ if (_mb->size - _mb->offset < 32) { ClauseFail(); }
+ if (BIT_OFFSET(_mb->offset) != 0) {
+ _integer = erts_bs_get_unaligned_uint32(_mb);
+ } else {
+ _integer = get_int32(_mb->base + _mb->offset/8);
+ }
+ _mb->offset += 32;
+#ifndef ARCH_64
+ if (IS_USMALL(0, _integer)) {
+#endif
+ _result = make_small(_integer);
+#ifndef ARCH_64
+ } else {
+ TestHeap(BIG_UINT_HEAP_SIZE, Arg(1));
+ _result = uint_to_big((Uint) _integer, HTOP);
+ HTOP += BIG_UINT_HEAP_SIZE;
+ }
+#endif
+ StoreBifResult(2, _result);
+ }
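+
+ /*
+ * On 32-bit emulators a 32-bit unsigned value does not always fit
+ * in a tagged small, so the code above boxes large values as a
+ * one-digit bignum (BIG_UINT_HEAP_SIZE words) after reserving heap
+ * with TestHeap. On 64-bit builds every Uint32 fits in a small,
+ * so the bignum path is compiled out.
+ */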
+
+ /* Operands: Size Live Fail Flags Dst */
+ OpCase(i_bs_get_integer_imm_rIIfId): {
+ tmp_arg1 = r(0);
+ /* Operands: Size Live Fail Flags Dst */
+ goto do_bs_get_integer_imm_test_heap;
+ }
+
+ /* Operands: x(Reg) Size Live Fail Flags Dst */
+ OpCase(i_bs_get_integer_imm_xIIfId): {
+ tmp_arg1 = xb(Arg(0));
+ I++;
+ /* Operands: Size Live Fail Flags Dst */
+ goto do_bs_get_integer_imm_test_heap;
+ }
+
+ /*
+ * tmp_arg1 = match context
+ * Operands: Size Live Fail Flags Dst
+ */
+ do_bs_get_integer_imm_test_heap: {
+ Uint wordsneeded;
+ tmp_arg2 = Arg(0);
+ wordsneeded = 1+WSIZE(NBYTES(tmp_arg2));
+ TestHeapPreserve(wordsneeded, Arg(1), tmp_arg1);
+ I += 2;
+ /* Operands: Fail Flags Dst */
+ goto do_bs_get_integer_imm;
+ }
+
+ /* Operands: Size Fail Flags Dst */
+ OpCase(i_bs_get_integer_small_imm_rIfId): {
+ tmp_arg1 = r(0);
+ tmp_arg2 = Arg(0);
+ I++;
+ /* Operands: Fail Flags Dst */
+ goto do_bs_get_integer_imm;
+ }
+
+ /* Operands: x(Reg) Size Fail Flags Dst */
+ OpCase(i_bs_get_integer_small_imm_xIfId): {
+ tmp_arg1 = xb(Arg(0));
+ tmp_arg2 = Arg(1);
+ I += 2;
+ /* Operands: Fail Flags Dst */
+ goto do_bs_get_integer_imm;
+ }
+
+ /*
+ * tmp_arg1 = match context
+ * tmp_arg2 = size of field
+ * Operands: Fail Flags Dst
+ */
+ do_bs_get_integer_imm: {
+ ErlBinMatchBuffer* mb;
+ Eterm result;
+
+ mb = ms_matchbuffer(tmp_arg1);
+ LIGHT_SWAPOUT;
+ result = erts_bs_get_integer_2(c_p, tmp_arg2, Arg(1), mb);
+ LIGHT_SWAPIN;
+ if (is_non_value(result)) {
+ ClauseFail();
+ }
+ StoreBifResult(2, result);
+ }
+
+ /*
+ * tmp_arg1 = Match context
+ * tmp_arg2 = Size field
+ * Operands: Fail Live FlagsAndUnit Dst
+ */
+ OpCase(i_bs_get_integer_fIId): {
+ Uint flags;
+ Uint size;
+ ErlBinMatchBuffer* mb;
+ Eterm result;
+
+ flags = Arg(2);
+ BsGetFieldSize(tmp_arg2, (flags >> 3), ClauseFail(), size);
+ if (size >= SMALL_BITS) {
+ Uint wordsneeded = 1+WSIZE(NBYTES((Uint) size));
+ TestHeapPreserve(wordsneeded, Arg(1), tmp_arg1);
+ }
+ mb = ms_matchbuffer(tmp_arg1);
+ LIGHT_SWAPOUT;
+ result = erts_bs_get_integer_2(c_p, size, flags, mb);
+ LIGHT_SWAPIN;
+ if (is_non_value(result)) {
+ ClauseFail();
+ }
+ StoreBifResult(3, result);
+ }
+
+ /* Operands: MatchContext Fail Dst */
+ OpCase(i_bs_get_utf8_rfd): {
+ tmp_arg1 = r(0);
+ goto do_bs_get_utf8;
+ }
+
+ OpCase(i_bs_get_utf8_xfd): {
+ tmp_arg1 = xb(Arg(0));
+ I++;
+ }
+
+ /*
+ * tmp_arg1 = match_context
+ * Operands: Fail Dst
+ */
+
+ do_bs_get_utf8: {
+ Eterm result = erts_bs_get_utf8(ms_matchbuffer(tmp_arg1));
+ if (is_non_value(result)) {
+ ClauseFail();
+ }
+ StoreBifResult(1, result);
+ }
+
+ /* Operands: MatchContext Fail Flags Dst */
+ OpCase(i_bs_get_utf16_rfId): {
+ tmp_arg1 = r(0);
+ goto do_bs_get_utf16;
+ }
+
+ OpCase(i_bs_get_utf16_xfId): {
+ tmp_arg1 = xb(Arg(0));
+ I++;
+ }
+
+ /*
+ * tmp_arg1 = match_context
+ * Operands: Fail Flags Dst
+ */
+ do_bs_get_utf16: {
+ Eterm result = erts_bs_get_utf16(ms_matchbuffer(tmp_arg1), Arg(1));
+ if (is_non_value(result)) {
+ ClauseFail();
+ }
+ StoreBifResult(2, result);
+ }
+
+ {
+ ErlBinMatchBuffer* mb;
+ ErlSubBin* sb;
+ Uint size;
+ Uint offs;
+ Uint orig;
+ Uint hole_size;
+
+ OpCase(bs_context_to_binary_r): {
+ tmp_arg1 = x0;
+ I -= 2;
+ goto do_context_to_binary;
+ }
+
+ /* Unfortunately, inlining can generate this instruction. */
+ OpCase(bs_context_to_binary_y): {
+ tmp_arg1 = yb(Arg(0));
+ goto do_context_to_binary0;
+ }
+
+ OpCase(bs_context_to_binary_x): {
+ tmp_arg1 = xb(Arg(0));
+
+ do_context_to_binary0:
+ I--;
+ }
+
+ do_context_to_binary:
+ if (is_boxed(tmp_arg1) && header_is_bin_matchstate(*boxed_val(tmp_arg1))) {
+ ErlBinMatchState* ms = (ErlBinMatchState *) boxed_val(tmp_arg1);
+ mb = &ms->mb;
+ offs = ms->save_offset[0];
+ size = mb->size - offs;
+ goto do_bs_get_binary_all_reuse_common;
+ }
+ Next(2);
+
+ OpCase(i_bs_get_binary_all_reuse_rfI): {
+ tmp_arg1 = x0;
+ goto do_bs_get_binary_all_reuse;
+ }
+
+ OpCase(i_bs_get_binary_all_reuse_xfI): {
+ tmp_arg1 = xb(Arg(0));
+ I++;
+ }
+
+ do_bs_get_binary_all_reuse:
+ mb = ms_matchbuffer(tmp_arg1);
+ size = mb->size - mb->offset;
+ if (size % Arg(1) != 0) {
+ ClauseFail();
+ }
+ offs = mb->offset;
+
+ do_bs_get_binary_all_reuse_common:
+ orig = mb->orig;
+ sb = (ErlSubBin *) boxed_val(tmp_arg1);
+ hole_size = 1 + header_arity(sb->thing_word) - ERL_SUB_BIN_SIZE;
+ sb->thing_word = HEADER_SUB_BIN;
+ sb->size = BYTE_OFFSET(size);
+ sb->bitsize = BIT_OFFSET(size);
+ sb->offs = BYTE_OFFSET(offs);
+ sb->bitoffs = BIT_OFFSET(offs);
+ sb->is_writable = 0;
+ sb->orig = orig;
+ if (hole_size) {
+ sb[1].thing_word = make_pos_bignum_header(hole_size-1);
+ }
+ Next(2);
+ }
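+
+ /*
+ * The block above reuses the match-state heap object as the
+ * resulting sub binary. A match state is at least as large as an
+ * ErlSubBin, so any leftover words (the "hole") are overwritten
+ * with a bignum header, making them look like a harmless boxed
+ * object and keeping the heap walkable for the garbage collector.
+ */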
+
+ {
+ OpCase(i_bs_match_string_rfII): {
+ tmp_arg1 = r(0);
+ goto do_bs_match_string;
+ }
+ OpCase(i_bs_match_string_xfII): {
+ tmp_arg1 = xb(Arg(0));
+ I++;
+ }
+
+ do_bs_match_string:
+ {
+ Eterm* next;
+ byte* bytes;
+ Uint bits;
+ ErlBinMatchBuffer* mb;
+ Uint offs;
+
+ PreFetch(3, next);
+ bits = Arg(1);
+ bytes = (byte *) Arg(2);
+ mb = ms_matchbuffer(tmp_arg1);
+ if (mb->size - mb->offset < bits) {
+ ClauseFail();
+ }
+ offs = mb->offset & 7;
+ if (offs == 0 && (bits & 7) == 0) {
+ if (sys_memcmp(bytes, mb->base+(mb->offset>>3), bits>>3)) {
+ ClauseFail();
+ }
+ } else if (erts_cmp_bits(bytes, 0, mb->base+(mb->offset>>3), mb->offset & 7, bits)) {
+ ClauseFail();
+ }
+ mb->offset += bits;
+ NextPF(3, next);
+ }
+ }
+
+ OpCase(i_bs_save2_rI): {
+ Eterm* next;
+ ErlBinMatchState *_ms;
+ PreFetch(1, next);
+ _ms = (ErlBinMatchState*) boxed_val((Eterm) r(0));
+ _ms->save_offset[Arg(0)] = _ms->mb.offset;
+ NextPF(1, next);
+ }
+ OpCase(i_bs_save2_xI): {
+ Eterm* next;
+ ErlBinMatchState *_ms;
+ PreFetch(2, next);
+ _ms = (ErlBinMatchState*) boxed_val((Eterm) xb(Arg(0)));
+ _ms->save_offset[Arg(1)] = _ms->mb.offset;
+ NextPF(2, next);
+ }
+
+ OpCase(i_bs_restore2_rI): {
+ Eterm* next;
+ ErlBinMatchState *_ms;
+ PreFetch(1, next);
+ _ms = (ErlBinMatchState*) boxed_val((Eterm) r(0));
+ _ms->mb.offset = _ms->save_offset[Arg(0)];
+ NextPF(1, next);
+ }
+ OpCase(i_bs_restore2_xI): {
+ Eterm* next;
+ ErlBinMatchState *_ms;
+ PreFetch(2, next);
+ _ms = (ErlBinMatchState*) boxed_val((Eterm) xb(Arg(0)));
+ _ms->mb.offset = _ms->save_offset[Arg(1)];
+ NextPF(2, next);
+ }
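+
+ /*
+ * i_bs_save2/i_bs_restore2 implement backtracking in binary
+ * matching: each match state carries an array of saved bit
+ * offsets, save2 records the current position in a numbered slot,
+ * and restore2 rewinds to it so that after a failed clause the
+ * next clause can re-match from the same point in the binary.
+ */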
+
+#include "beam_cold.h"
+
+
+ /*
+ * This instruction is probably never used (because it is combined with a
+ * return). However, a future compiler might for some reason emit a
+ * deallocate not followed by a return, and that should work.
+ */
+ OpCase(deallocate_I): {
+ Eterm* next;
+
+ PreFetch(1, next);
+ D(Arg(0));
+ NextPF(1, next);
+ }
+
+ /*
+ * Trace and debugging support.
+ */
+
+ /*
+ * At this point, I points to the code[3] in the export entry for
+ * a trace-enabled function.
+ *
+ * code[0]: Module
+ * code[1]: Function
+ * code[2]: Arity
+ * code[3]: &&call_traced_function
+ * code[4]: Address of function.
+ */
+ OpCase(call_traced_function): {
+ if (IS_TRACED_FL(c_p, F_TRACE_CALLS)) {
+ unsigned offset = offsetof(Export, code) + 3*sizeof(Eterm);
+ Export* ep = (Export *) (((char *)I)-offset);
+ Uint32 flags;
+
+ SWAPOUT;
+ reg[0] = r(0);
+ PROCESS_MAIN_CHK_LOCKS(c_p);
+ ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p);
+ flags = erts_call_trace(c_p, ep->code, ep->match_prog_set, reg,
+ 0, &c_p->tracer_proc);
+ ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p);
+ PROCESS_MAIN_CHK_LOCKS(c_p);
+ ASSERT(!ERTS_PROC_IS_EXITING(c_p));
+ SWAPIN;
+
+ if (flags & MATCH_SET_RX_TRACE) {
+ ASSERT(c_p->htop <= E && E <= c_p->hend);
+ if (E - 3 < HTOP) {
+ /* SWAPOUT, SWAPIN was done and r(0) was saved above */
+ PROCESS_MAIN_CHK_LOCKS(c_p);
+ FCALLS -= erts_garbage_collect(c_p, 3, reg, ep->code[2]);
+ PROCESS_MAIN_CHK_LOCKS(c_p);
+ r(0) = reg[0];
+ SWAPIN;
+ }
+ E -= 3;
+ ASSERT(c_p->htop <= E && E <= c_p->hend);
+ ASSERT(is_CP((Eterm)(ep->code)));
+ ASSERT(is_internal_pid(c_p->tracer_proc) ||
+ is_internal_port(c_p->tracer_proc));
+ E[2] = make_cp(c_p->cp);
+ E[1] = am_true; /* Process tracer */
+ E[0] = make_cp(ep->code);
+ c_p->cp = (Eterm*)
+ make_cp(flags & MATCH_SET_EXCEPTION_TRACE
+ ? beam_exception_trace : beam_return_trace);
+ erts_smp_proc_lock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
+ c_p->trace_flags |= F_EXCEPTION_TRACE;
+ erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
+ }
+ }
+ SET_I((Uint *) Arg(0));
+ Dispatch();
+ }
+
+ OpCase(return_trace): {
+ Uint* code = (Uint *) E[0];
+
+ SWAPOUT; /* Needed for shared heap */
+ ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p);
+ erts_trace_return(c_p, code, r(0), E+1/*Process tracer*/);
+ ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p);
+ SWAPIN;
+ c_p->cp = NULL;
+ SET_I((Eterm *) E[2]);
+ E += 3;
+ Goto(*I);
+ }
+
+ OpCase(i_count_breakpoint): {
+ Uint real_I;
+
+ ErtsCountBreak((Uint *) I, &real_I);
+ ASSERT(VALID_INSTR(real_I));
+ Goto(real_I);
+ }
+
+ OpCase(i_trace_breakpoint):
+ if (! IS_TRACED_FL(c_p, F_TRACE_CALLS)) {
+ Uint real_I;
+
+ ErtsBreakSkip((Uint *) I, &real_I);
+ Goto(real_I);
+ }
+ /* Fall through to next case */
+ OpCase(i_mtrace_breakpoint): {
+ Uint real_I;
+ Uint32 flags;
+ Eterm tracer_pid;
+ Uint *cpp;
+ int return_to_trace = 0, need = 0;
+ flags = 0;
+ SWAPOUT;
+ reg[0] = r(0);
+
+ if (*cp_val((Eterm)c_p->cp)
+ == (Uint) OpCode(return_trace)) {
+ cpp = (Uint*)&E[2];
+ } else if (*cp_val((Eterm)c_p->cp)
+ == (Uint) OpCode(i_return_to_trace)) {
+ return_to_trace = !0;
+ cpp = (Uint*)&E[0];
+ } else {
+ cpp = NULL;
+ }
+ if (cpp) {
+ /* This _IS_ a tail recursive call: if there are
+ * return_trace and/or i_return_to_trace stack frames
+ * on the stack, they are not intermixed with y registers.
+ */
+ Eterm *cp_save = c_p->cp;
+ for (;;) {
+ ASSERT(is_CP(*cpp));
+ if (*cp_val(*cpp) == (Uint) OpCode(return_trace)) {
+ cpp += 3;
+ } else if (*cp_val(*cpp) == (Uint) OpCode(i_return_to_trace)) {
+ return_to_trace = !0;
+ cpp += 1;
+ } else
+ break;
+ }
+ c_p->cp = (Eterm *) *cpp;
+ ASSERT(is_CP((Eterm)c_p->cp));
+ ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p);
+ real_I = erts_trace_break(c_p, I, reg, &flags, &tracer_pid);
+ ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p);
+ SWAPIN; /* Needed by shared heap. */
+ c_p->cp = cp_save;
+ } else {
+ ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p);
+ real_I = erts_trace_break(c_p, I, reg, &flags, &tracer_pid);
+ ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p);
+ SWAPIN; /* Needed by shared heap. */
+ }
+
+ ASSERT(!ERTS_PROC_IS_EXITING(c_p));
+
+ if ((flags & MATCH_SET_RETURN_TO_TRACE) && !return_to_trace) {
+ need += 1;
+ }
+ if (flags & MATCH_SET_RX_TRACE) {
+ need += 3;
+ }
+ if (need) {
+ ASSERT(c_p->htop <= E && E <= c_p->hend);
+ if (E - need < HTOP) {
+ /* SWAPOUT was done and r(0) was saved above */
+ PROCESS_MAIN_CHK_LOCKS(c_p);
+ FCALLS -= erts_garbage_collect(c_p, need, reg, I[-1]);
+ PROCESS_MAIN_CHK_LOCKS(c_p);
+ r(0) = reg[0];
+ SWAPIN;
+ }
+ }
+ if ((flags & MATCH_SET_RETURN_TO_TRACE) && !return_to_trace) {
+ E -= 1;
+ ASSERT(c_p->htop <= E && E <= c_p->hend);
+ E[0] = make_cp(c_p->cp);
+ c_p->cp = (Eterm *) make_cp(beam_return_to_trace);
+ }
+ if (flags & MATCH_SET_RX_TRACE) {
+ E -= 3;
+ ASSERT(c_p->htop <= E && E <= c_p->hend);
+ ASSERT(is_CP((Eterm) (I - 3)));
+ ASSERT(am_true == tracer_pid ||
+ is_internal_pid(tracer_pid) || is_internal_port(tracer_pid));
+ E[2] = make_cp(c_p->cp);
+ E[1] = tracer_pid;
+ E[0] = make_cp(I - 3); /* We ARE at the beginning of an
+ instruction; the funcinfo is above I. */
+ c_p->cp = (Eterm*)
+ make_cp(flags & MATCH_SET_EXCEPTION_TRACE
+ ? beam_exception_trace : beam_return_trace);
+ erts_smp_proc_lock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
+ c_p->trace_flags |= F_EXCEPTION_TRACE;
+ erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
+ }
+ Goto(real_I);
+ }
+
+ OpCase(i_return_to_trace): {
+ if (IS_TRACED_FL(c_p, F_TRACE_RETURN_TO)) {
+ Uint *cpp = (Uint*) E;
+ for(;;) {
+ ASSERT(is_CP(*cpp));
+ if (*cp_val(*cpp) == (Uint) OpCode(return_trace)) {
+ do ++cpp; while(is_not_CP(*cpp));
+ cpp += 2;
+ } else if (*cp_val(*cpp) == (Uint) OpCode(i_return_to_trace)) {
+ do ++cpp; while(is_not_CP(*cpp));
+ } else break;
+ }
+ SWAPOUT; /* Needed for shared heap */
+ ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p);
+ erts_trace_return_to(c_p, cp_val(*cpp));
+ ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p);
+ SWAPIN;
+ }
+ c_p->cp = NULL;
+ SET_I((Eterm *) E[0]);
+ E += 1;
+ Goto(*I);
+ }
+
+ /*
+ * Instructions for allocating on the message area.
+ */
+
+ OpCase(i_global_cons):
+ {
+ Eterm *next;
+#ifdef HYBRID
+ Eterm *hp;
+
+ PreFetch(0,next);
+ TestGlobalHeap(2,2,hp);
+ hp[0] = r(0);
+ hp[1] = x(1);
+ r(0) = make_list(hp);
+#ifndef INCREMENTAL
+ global_htop += 2;
+#endif
+ NextPF(0,next);
+#else
+ PreFetch(0,next);
+ c_p->freason = EXC_INTERNAL_ERROR;
+ goto find_func_info;
+#endif
+ }
+
+ OpCase(i_global_tuple):
+ {
+ Eterm *next;
+ int len;
+#ifdef HYBRID
+ Eterm list;
+ Eterm *hp;
+#endif
+
+ if ((len = list_length(r(0))) < 0) {
+ goto badarg;
+ }
+
+ PreFetch(0,next);
+#ifdef HYBRID
+ TestGlobalHeap(len + 1,1,hp);
+ list = r(0);
+ r(0) = make_tuple(hp);
+ *hp++ = make_arityval(len);
+ while(is_list(list))
+ {
+ Eterm* cons = list_val(list);
+ *hp++ = CAR(cons);
+ list = CDR(cons);
+ }
+#ifndef INCREMENTAL
+ global_htop += len + 1;
+#endif
+ NextPF(0,next);
+#else
+ c_p->freason = EXC_INTERNAL_ERROR;
+ goto find_func_info;
+#endif
+ }
+
+ OpCase(i_global_copy):
+ {
+ Eterm *next;
+ PreFetch(0,next);
+#ifdef HYBRID
+ if (!IS_CONST(r(0)))
+ {
+ BM_SWAP_TIMER(system,copy);
+ SWAPOUT;
+ reg[0] = r(0);
+ reg[1] = NIL;
+ r(0) = copy_struct_lazy(c_p,r(0),0);
+ ASSERT(ma_src_top == 0);
+ ASSERT(ma_dst_top == 0);
+ ASSERT(ma_offset_top == 0);
+ SWAPIN;
+ BM_SWAP_TIMER(copy,system);
+ }
+ NextPF(0,next);
+#else
+ c_p->freason = EXC_INTERNAL_ERROR;
+ goto find_func_info;
+#endif
+ }
+
+ /*
+ * New floating point instructions.
+ */
+
+ OpCase(fmove_ql): {
+ Eterm fr = Arg(1);
+ Eterm* next;
+
+ PreFetch(2, next);
+ GET_DOUBLE(Arg(0), *(FloatDef*)ADD_BYTE_OFFSET(freg, fr));
+ NextPF(2, next);
+ }
+
+ OpCase(fmove_dl): {
+ Eterm targ1;
+ Eterm fr = Arg(1);
+ Eterm* next;
+
+ PreFetch(2, next);
+ GetR(0, targ1);
+ /* Arg(0) == HEADER_FLONUM */
+ GET_DOUBLE(targ1, *(FloatDef*)ADD_BYTE_OFFSET(freg, fr));
+ NextPF(2, next);
+ }
+
+ OpCase(fmove_new_ld): {
+ Eterm fr = Arg(0);
+ Eterm dest = make_float(HTOP);
+
+ PUT_DOUBLE(*(FloatDef*)ADD_BYTE_OFFSET(freg, fr), HTOP);
+ HTOP += FLOAT_SIZE_OBJECT;
+ StoreBifResult(1, dest);
+ }
+
+ OpCase(fconv_dl): {
+ Eterm targ1;
+ Eterm fr = Arg(1);
+ Eterm* next;
+
+ GetR(0, targ1);
+ PreFetch(2, next);
+ if (is_small(targ1)) {
+ fb(fr) = (double) signed_val(targ1);
+ } else if (is_big(targ1)) {
+ if (big_to_double(targ1, &fb(fr)) < 0) {
+ goto fbadarith;
+ }
+ } else if (is_float(targ1)) {
+ GET_DOUBLE(targ1, *(FloatDef*)ADD_BYTE_OFFSET(freg, fr));
+ } else {
+ goto fbadarith;
+ }
+ NextPF(2, next);
+ }
+
+ /*
+ * Old allocating fmove.
+ */
+
+
+#ifdef NO_FPE_SIGNALS
+ OpCase(fclearerror):
+ OpCase(i_fcheckerror):
+ erl_exit(1, "fclearerror/i_fcheckerror without fpe signals (beam_emu)");
+#else
+ OpCase(fclearerror): {
+ Eterm* next;
+
+ PreFetch(0, next);
+ ERTS_FP_CHECK_INIT(c_p);
+ NextPF(0, next);
+ }
+
+ OpCase(i_fcheckerror): {
+ Eterm* next;
+
+ PreFetch(0, next);
+ ERTS_FP_ERROR(c_p, freg[0].fd, goto fbadarith);
+ NextPF(0, next);
+ }
+# undef ERTS_FP_CHECK_INIT
+# undef ERTS_FP_ERROR
+# define ERTS_FP_CHECK_INIT(p)
+# define ERTS_FP_ERROR(p, a, b)
+#endif
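+
+ /*
+ * When real FPE signals are available, checking is done only by
+ * the explicit fclearerror/i_fcheckerror instructions above, so
+ * ERTS_FP_CHECK_INIT and ERTS_FP_ERROR are redefined to no-ops
+ * here and the arithmetic instructions below compile to plain
+ * floating-point operations.
+ */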
+
+
+ OpCase(i_fadd_lll): {
+ Eterm* next;
+
+ PreFetch(3, next);
+ ERTS_FP_CHECK_INIT(c_p);
+ fb(Arg(2)) = fb(Arg(0)) + fb(Arg(1));
+ ERTS_FP_ERROR(c_p, fb(Arg(2)), goto fbadarith);
+ NextPF(3, next);
+ }
+ OpCase(i_fsub_lll): {
+ Eterm* next;
+
+ PreFetch(3, next);
+ ERTS_FP_CHECK_INIT(c_p);
+ fb(Arg(2)) = fb(Arg(0)) - fb(Arg(1));
+ ERTS_FP_ERROR(c_p, fb(Arg(2)), goto fbadarith);
+ NextPF(3, next);
+ }
+ OpCase(i_fmul_lll): {
+ Eterm* next;
+
+ PreFetch(3, next);
+ ERTS_FP_CHECK_INIT(c_p);
+ fb(Arg(2)) = fb(Arg(0)) * fb(Arg(1));
+ ERTS_FP_ERROR(c_p, fb(Arg(2)), goto fbadarith);
+ NextPF(3, next);
+ }
+ OpCase(i_fdiv_lll): {
+ Eterm* next;
+
+ PreFetch(3, next);
+ ERTS_FP_CHECK_INIT(c_p);
+ fb(Arg(2)) = fb(Arg(0)) / fb(Arg(1));
+ ERTS_FP_ERROR(c_p, fb(Arg(2)), goto fbadarith);
+ NextPF(3, next);
+ }
+ OpCase(i_fnegate_ll): {
+ Eterm* next;
+
+ PreFetch(2, next);
+ ERTS_FP_CHECK_INIT(c_p);
+ fb(Arg(1)) = -fb(Arg(0));
+ ERTS_FP_ERROR(c_p, fb(Arg(1)), goto fbadarith);
+ NextPF(2, next);
+
+ fbadarith:
+ c_p->freason = BADARITH;
+ goto find_func_info;
+ }
+
+#ifdef HIPE
+ {
+ unsigned cmd;
+
+ OpCase(hipe_trap_call): {
+ /*
+ * I[-5]: &&lb_i_func_info_IaaI
+ * I[-4]: Native code callee (inserted by HiPE)
+ * I[-3]: Module (tagged atom)
+ * I[-2]: Function (tagged atom)
+ * I[-1]: Arity (untagged integer)
+ * I[ 0]: &&lb_hipe_trap_call
+ * ... remainder of original BEAM code
+ */
+ ASSERT(I[-5] == (Uint) OpCode(i_func_info_IaaI));
+ c_p->hipe.ncallee = (void(*)(void)) I[-4];
+ cmd = HIPE_MODE_SWITCH_CMD_CALL | (I[-1] << 8);
+ ++hipe_trap_count;
+ goto L_hipe_mode_switch;
+ }
+ OpCase(hipe_trap_call_closure): {
+ ASSERT(I[-5] == (Uint) OpCode(i_func_info_IaaI));
+ c_p->hipe.ncallee = (void(*)(void)) I[-4];
+ cmd = HIPE_MODE_SWITCH_CMD_CALL_CLOSURE | (I[-1] << 8);
+ ++hipe_trap_count;
+ goto L_hipe_mode_switch;
+ }
+ OpCase(hipe_trap_return): {
+ cmd = HIPE_MODE_SWITCH_CMD_RETURN;
+ goto L_hipe_mode_switch;
+ }
+ OpCase(hipe_trap_throw): {
+ cmd = HIPE_MODE_SWITCH_CMD_THROW;
+ goto L_hipe_mode_switch;
+ }
+ OpCase(hipe_trap_resume): {
+ cmd = HIPE_MODE_SWITCH_CMD_RESUME;
+ goto L_hipe_mode_switch;
+ }
+ L_hipe_mode_switch:
+ /* XXX: this abuse of def_arg_reg[] is horrid! */
+ SWAPOUT;
+ c_p->fcalls = FCALLS;
+ c_p->def_arg_reg[4] = -neg_o_reds;
+ reg[0] = r(0);
+ c_p = hipe_mode_switch(c_p, cmd, reg);
+#ifdef ERTS_SMP
+ reg = c_p->scheduler_data->save_reg;
+ freg = c_p->scheduler_data->freg;
+#endif
+ ERL_BITS_RELOAD_STATEP(c_p);
+ neg_o_reds = -c_p->def_arg_reg[4];
+ FCALLS = c_p->fcalls;
+ SWAPIN;
+ switch( c_p->def_arg_reg[3] ) {
+ case HIPE_MODE_SWITCH_RES_RETURN:
+ ASSERT(is_value(reg[0]));
+ MoveReturn(reg[0], r(0));
+ case HIPE_MODE_SWITCH_RES_CALL:
+ SET_I(c_p->i);
+ r(0) = reg[0];
+ Dispatch();
+ case HIPE_MODE_SWITCH_RES_CALL_CLOSURE:
+ /* This can be used to call any function value, but currently it's
+ only used to call closures referring to unloaded modules. */
+ {
+ Eterm *next;
+
+ next = call_fun(c_p, c_p->arity - 1, reg, THE_NON_VALUE);
+ SWAPIN;
+ if (next != NULL) {
+ r(0) = reg[0];
+ SET_I(next);
+ Dispatchfun();
+ }
+ goto find_func_info;
+ }
+ case HIPE_MODE_SWITCH_RES_THROW:
+ c_p->cp = NULL;
+ I = handle_error(c_p, I, reg, NULL);
+ goto post_error_handling;
+ default:
+ erl_exit(1, "hipe_mode_switch: result %u\n", c_p->def_arg_reg[3]);
+ }
+ }
+ OpCase(hipe_call_count): {
+ /*
+ * I[-5]: &&lb_i_func_info_IaaI
+ * I[-4]: pointer to struct hipe_call_count (inserted by HiPE)
+ * I[-3]: Module (tagged atom)
+ * I[-2]: Function (tagged atom)
+ * I[-1]: Arity (untagged integer)
+ * I[ 0]: &&lb_hipe_call_count
+ * ... remainder of original BEAM code
+ */
+ struct hipe_call_count *hcc = (struct hipe_call_count*)I[-4];
+ ASSERT(I[-5] == (Uint) OpCode(i_func_info_IaaI));
+ ASSERT(hcc != NULL);
+ ASSERT(VALID_INSTR(hcc->opcode));
+ ++(hcc->count);
+ Goto(hcc->opcode);
+ }
+#endif /* HIPE */
+
+ OpCase(i_yield):
+ {
+ /* This is safe as long as REDS_IN(c_p) is never stored
+ * in c_p->arg_reg[0]. It is currently stored in c_p->def_arg_reg[5],
+ * which may be c_p->arg_reg[5], which is close, but no banana.
+ */
+ c_p->arg_reg[0] = am_true;
+ c_p->arity = 1; /* One living register (the 'true' return value) */
+ SWAPOUT;
+ c_p->i = I + 1; /* Next instruction */
+ erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_STATUS);
+ erts_add_to_runq(c_p);
+ erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_STATUS);
+ c_p->current = NULL;
+ goto do_schedule;
+ }
+
+ OpCase(i_hibernate): {
+ SWAPOUT;
+ if (hibernate(c_p, r(0), x(1), x(2), reg)) {
+ goto do_schedule;
+ } else {
+ I = handle_error(c_p, I, reg, hibernate_3);
+ goto post_error_handling;
+ }
+ }
+
+ OpCase(i_debug_breakpoint): {
+ SWAPOUT;
+ reg[0] = r(0);
+ tmp_arg1 = call_breakpoint_handler(c_p, I-3, reg);
+ r(0) = reg[0];
+ SWAPIN;
+ if (tmp_arg1) {
+ SET_I(c_p->i);
+ Dispatch();
+ }
+ goto no_error_handler;
+ }
+
+
+ OpCase(system_limit_j):
+ system_limit:
+ c_p->freason = SYSTEM_LIMIT;
+ goto lb_Cl_error;
+
+
+#ifdef ERTS_OPCODE_COUNTER_SUPPORT
+ DEFINE_COUNTING_LABELS;
+#endif
+
+#ifndef NO_JUMP_TABLE
+#ifdef DEBUG
+ end_emulator_loop:
+#endif
+#endif
+
+ OpCase(int_code_end):
+ OpCase(label_L):
+ OpCase(too_old_compiler):
+ OpCase(on_load):
+ erl_exit(1, "meta op\n");
+
+ /*
+ * One-time initialization of Beam emulator.
+ */
+
+ init_emulator:
+ {
+ int i;
+ Export* ep;
+
+#ifndef NO_JUMP_TABLE
+#ifdef ERTS_OPCODE_COUNTER_SUPPORT
+
+ /* Are tables correctly generated by beam_makeops? */
+ ASSERT(sizeof(counting_opcodes) == sizeof(opcodes));
+
+ if (count_instructions) {
+#ifdef DEBUG
+ counting_opcodes[op_catch_end_y] = LabelAddr(lb_catch_end_y);
+#endif
+ counting_opcodes[op_i_func_info_IaaI] = LabelAddr(lb_i_func_info_IaaI);
+ beam_ops = counting_opcodes;
+ }
+ else
+#endif /* ERTS_OPCODE_COUNTER_SUPPORT */
+ {
+ beam_ops = opcodes;
+ }
+#endif /* NO_JUMP_TABLE */
+
+ em_call_error_handler = OpCode(call_error_handler);
+ em_call_traced_function = OpCode(call_traced_function);
+ em_apply_bif = OpCode(apply_bif);
+ beam_apply[0] = (Eterm) OpCode(i_apply);
+ beam_apply[1] = (Eterm) OpCode(normal_exit);
+ beam_exit[0] = (Eterm) OpCode(error_action_code);
+ beam_continue_exit[0] = (Eterm) OpCode(continue_exit);
+ beam_return_to_trace[0] = (Eterm) OpCode(i_return_to_trace);
+ beam_return_trace[0] = (Eterm) OpCode(return_trace);
+ beam_exception_trace[0] = (Eterm) OpCode(return_trace); /* UGLY */
+
+ /*
+ * Enter all BIFs into the export table.
+ */
+ for (i = 0; i < BIF_SIZE; i++) {
+ ep = erts_export_put(bif_table[i].module,
+ bif_table[i].name,
+ bif_table[i].arity);
+ bif_export[i] = ep;
+ ep->code[3] = (Eterm) OpCode(apply_bif);
+ ep->code[4] = (Eterm) bif_table[i].f;
+ }
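+
+ /*
+ * A sketch of the resulting export entry layout (matching the
+ * trace comment earlier in this file):
+ *
+ * code[0] = Module code[3] = &&apply_bif
+ * code[1] = Function code[4] = C function pointer
+ * code[2] = Arity
+ *
+ * erts_is_builtin() below recognizes BIFs by exactly this shape.
+ */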
+
+ return;
+ }
+#ifdef NO_JUMP_TABLE
+ default:
+ erl_exit(1, "unexpected op code %d\n",Go);
+ }
+#endif
+ return; /* Never executed */
+
+ save_calls1:
+ {
+ Eterm* dis_next;
+
+ save_calls(c_p, (Export *) Arg(0));
+
+ SET_I(((Export *) Arg(0))->address);
+
+ dis_next = (Eterm *) *I;
+ FCALLS--;
+ Goto(dis_next);
+ }
+}
+
+static BifFunction
+translate_gc_bif(void* gcf)
+{
+ if (gcf == erts_gc_length_1) {
+ return length_1;
+ } else if (gcf == erts_gc_size_1) {
+ return size_1;
+ } else if (gcf == erts_gc_bit_size_1) {
+ return bit_size_1;
+ } else if (gcf == erts_gc_byte_size_1) {
+ return byte_size_1;
+ } else if (gcf == erts_gc_abs_1) {
+ return abs_1;
+ } else if (gcf == erts_gc_float_1) {
+ return float_1;
+ } else if (gcf == erts_gc_round_1) {
+ return round_1;
+ } else if (gcf == erts_gc_trunc_1) {
+ return trunc_1;
+ } else {
+ erl_exit(1, "bad gc bif");
+ }
+}
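+
+/*
+ * The "gc" variants of these BIFs take extra arguments so that
+ * they can garbage collect from within the emulator loop, and
+ * therefore cannot be presented to the user directly. This
+ * function maps each gc entry point back to the plain one-argument
+ * BIF the user actually called, so that error reporting can show
+ * the familiar name.
+ */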
+
+/*
+ * Mapping from the error code 'class tag' to atoms.
+ */
+Eterm exception_tag[NUMBER_EXC_TAGS] = {
+ am_error, /* 0 */
+ am_exit, /* 1 */
+ am_throw, /* 2 */
+};
+
+/*
+ * Mapping from error code 'index' to atoms.
+ */
+Eterm error_atom[NUMBER_EXIT_CODES] = {
+ am_internal_error, /* 0 */
+ am_normal, /* 1 */
+ am_internal_error, /* 2 */
+ am_badarg, /* 3 */
+ am_badarith, /* 4 */
+ am_badmatch, /* 5 */
+ am_function_clause, /* 6 */
+ am_case_clause, /* 7 */
+ am_if_clause, /* 8 */
+ am_undef, /* 9 */
+ am_badfun, /* 10 */
+ am_badarity, /* 11 */
+ am_timeout_value, /* 12 */
+ am_noproc, /* 13 */
+ am_notalive, /* 14 */
+ am_system_limit, /* 15 */
+ am_try_clause, /* 16 */
+ am_notsup /* 17 */
+};
+
+/*
+ * To fully understand the error handling, one must keep in mind that
+ * when an exception is thrown, the search for a handler can jump back
+ * and forth between Beam and native code. Upon each mode switch, a
+ * dummy handler is inserted so that if an exception reaches that point,
+ * the handler is invoked (like any handler) and transfers control so
+ * that the search for a real handler is continued in the other mode.
+ * Therefore, c_p->freason and c_p->fvalue must still hold the exception
+ * info when the handler is executed, but normalized so that creation of
+ * error terms and saving of the stack trace is only done once, even if
+ * we pass through the error handling code several times.
+ *
+ * When a new exception is raised, the current stack trace information
+ * is quick-saved in a small structure allocated on the heap. Depending
+ * on how the exception is eventually caught (perhaps by causing the
+ * current process to terminate), the saved information may be used to
+ * create a symbolic (human-readable) representation of the stack trace
+ * at the point of the original exception.
+ */
+
+static Eterm*
+handle_error(Process* c_p, Eterm* pc, Eterm* reg, BifFunction bf)
+{
+ Eterm* hp;
+ Eterm Value = c_p->fvalue;
+ Eterm Args = am_true;
+ c_p->i = pc; /* In case we call erl_exit(). */
+
+ ASSERT(c_p->freason != TRAP); /* Should have been handled earlier. */
+
+ /*
+ * Check if we have an arglist for the top level call. If so, this
+ * is encoded in Value, so we have to dig out the real Value as well
+ * as the Arglist.
+ */
+ if (c_p->freason & EXF_ARGLIST) {
+ Eterm* tp;
+ ASSERT(is_tuple(Value));
+ tp = tuple_val(Value);
+ Value = tp[1];
+ Args = tp[2];
+ }
+
+ /*
+ * Save the stack trace info if the EXF_SAVETRACE flag is set. The
+ * main reason for doing this separately is to allow throws to later
+ * become promoted to errors without losing the original stack
+ * trace, even if they have passed through one or more catch and
+ * rethrow. It also makes the creation of symbolic stack traces much
+ * more modular.
+ */
+ if (c_p->freason & EXF_SAVETRACE) {
+ save_stacktrace(c_p, pc, reg, bf, Args);
+ }
+
+ /*
+ * Throws that are not caught are turned into 'nocatch' errors
+ */
+ if ((c_p->freason & EXF_THROWN) && (c_p->catches <= 0) ) {
+ hp = HAlloc(c_p, 3);
+ Value = TUPLE2(hp, am_nocatch, Value);
+ c_p->freason = EXC_ERROR;
+ }
+
+ /* Get the fully expanded error term */
+ Value = expand_error_value(c_p, c_p->freason, Value);
+
+ /* Save final error term and stabilize the exception flags so no
+ further expansion is done. */
+ c_p->fvalue = Value;
+ c_p->freason = PRIMARY_EXCEPTION(c_p->freason);
+
+ /* Find a handler or die */
+ if ((c_p->catches > 0 || IS_TRACED_FL(c_p, F_EXCEPTION_TRACE))
+ && !(c_p->freason & EXF_PANIC)) {
+ Eterm *new_pc;
+ /* The Beam handler code (catch_end or try_end) checks reg[0]
+ for THE_NON_VALUE to see if the previous code finished
+ abnormally. If so, reg[1], reg[2] and reg[3] should hold the
+ exception class, term and trace, respectively. (If the
+ handler is just a trap to native code, these registers will
+ be ignored.) */
+ reg[0] = THE_NON_VALUE;
+ reg[1] = exception_tag[GET_EXC_CLASS(c_p->freason)];
+ reg[2] = Value;
+ reg[3] = c_p->ftrace;
+ if ((new_pc = next_catch(c_p, reg))) {
+ c_p->cp = 0; /* To avoid keeping stale references. */
+ return new_pc;
+ }
+ if (c_p->catches > 0) erl_exit(1, "Catch not found");
+ }
+ ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p);
+ terminate_proc(c_p, Value);
+ ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p);
+ return NULL;
+}
+
+/*
+ * Find the nearest catch handler
+ */
+static Eterm*
+next_catch(Process* c_p, Eterm *reg) {
+ int active_catches = c_p->catches > 0;
+ int have_return_to_trace = 0;
+ Eterm *ptr, *prev, *return_to_trace_ptr = NULL;
+ Uint i_return_trace = beam_return_trace[0];
+ Uint i_return_to_trace = beam_return_to_trace[0];
+ ptr = prev = c_p->stop;
+ ASSERT(is_CP(*ptr));
+ ASSERT(ptr <= STACK_START(c_p));
+ if (ptr == STACK_START(c_p)) return NULL;
+ if ((is_not_CP(*ptr) || (*cp_val(*ptr) != i_return_trace &&
+ *cp_val(*ptr) != i_return_to_trace))
+ && c_p->cp) {
+ /* Can not follow cp here - code may be unloaded */
+ Uint *cpp = cp_val((Eterm) c_p->cp);
+ if (cpp == beam_exception_trace) {
+ erts_trace_exception(c_p, (Eterm*) ptr[0],
+ reg[1], reg[2], ptr+1);
+ /* Skip return_trace parameters */
+ ptr += 2;
+ } else if (cpp == beam_return_trace) {
+ /* Skip return_trace parameters */
+ ptr += 2;
+ } else if (cpp == beam_return_to_trace) {
+ have_return_to_trace = !0; /* Record next cp */
+ }
+ }
+ while (ptr < STACK_START(c_p)) {
+ if (is_catch(*ptr)) {
+ if (active_catches) goto found_catch;
+ ptr++;
+ }
+ else if (is_CP(*ptr)) {
+ prev = ptr;
+ if (*cp_val(*prev) == i_return_trace) {
+ /* Skip stack frame variables */
+ while (++ptr, ptr < STACK_START(c_p) && is_not_CP(*ptr)) {
+ if (is_catch(*ptr) && active_catches) goto found_catch;
+ }
+ if (cp_val(*prev) == beam_exception_trace) {
+ erts_trace_exception(c_p, (Eterm*) ptr[0],
+ reg[1], reg[2], ptr+1);
+ }
+ /* Skip return_trace parameters */
+ ptr += 2;
+ } else if (*cp_val(*prev) == i_return_to_trace) {
+ /* Skip stack frame variables */
+ while (++ptr, ptr < STACK_START(c_p) && is_not_CP(*ptr)) {
+ if (is_catch(*ptr) && active_catches) goto found_catch;
+ }
+ have_return_to_trace = !0; /* Record next cp */
+ return_to_trace_ptr = NULL;
+ } else {
+ if (have_return_to_trace) {
+ /* Record this cp as possible return_to trace cp */
+ have_return_to_trace = 0;
+ return_to_trace_ptr = ptr;
+ } else return_to_trace_ptr = NULL;
+ ptr++;
+ }
+ } else ptr++;
+ }
+ return NULL;
+
+ found_catch:
+ ASSERT(ptr < STACK_START(c_p));
+ c_p->stop = prev;
+ if (IS_TRACED_FL(c_p, F_TRACE_RETURN_TO) && return_to_trace_ptr) {
+ /* The stack frame closest to the catch contained a
+ * return_to_trace entry, so since the execution now
+ * continues after the catch, a return_to trace message
+ * would be appropriate.
+ */
+ erts_trace_return_to(c_p, cp_val(*return_to_trace_ptr));
+ }
+ return catch_pc(*ptr);
+}
+
+/*
+ * Terminating the process when an exception is not caught
+ */
+static void
+terminate_proc(Process* c_p, Eterm Value)
+{
+ /* Add a stacktrace if this is an error. */
+ if (GET_EXC_CLASS(c_p->freason) == EXTAG_ERROR) {
+ Value = add_stacktrace(c_p, Value, c_p->ftrace);
+ }
+ /* EXF_LOG is a primary exception flag */
+ if (c_p->freason & EXF_LOG) {
+ erts_dsprintf_buf_t *dsbufp = erts_create_logger_dsbuf();
+ erts_dsprintf(dsbufp, "Error in process %T ", c_p->id);
+ if (erts_is_alive)
+ erts_dsprintf(dsbufp, "on node %T ", erts_this_node->sysname);
+ erts_dsprintf(dsbufp,"with exit value: %0.*T\n", display_items, Value);
+ erts_send_error_to_logger(c_p->group_leader, dsbufp);
+ }
+ /*
+ * If we use a shared heap, the process will be garbage-collected.
+ * Must zero c_p->arity to indicate that there are no live registers.
+ */
+ c_p->arity = 0;
+ erts_do_exit_process(c_p, Value);
+}
+
+/*
+ * Build and add a symbolic stack trace to the error value.
+ */
+static Eterm
+add_stacktrace(Process* c_p, Eterm Value, Eterm exc) {
+ Eterm Where = build_stacktrace(c_p, exc);
+ Eterm* hp = HAlloc(c_p, 3);
+ return TUPLE2(hp, Value, Where);
+}
+
+/*
+ * Forming the correct error value from the internal error code.
+ * This does not update c_p->fvalue or c_p->freason.
+ */
+Eterm
+expand_error_value(Process* c_p, Uint freason, Eterm Value) {
+ Eterm* hp;
+ Uint r;
+
+ r = GET_EXC_INDEX(freason);
+ ASSERT(r < NUMBER_EXIT_CODES); /* range check */
+ ASSERT(is_value(Value));
+
+ switch (r) {
+ case (GET_EXC_INDEX(EXC_PRIMARY)):
+ /* Primary exceptions use fvalue as it is */
+ break;
+ case (GET_EXC_INDEX(EXC_BADMATCH)):
+ case (GET_EXC_INDEX(EXC_CASE_CLAUSE)):
+ case (GET_EXC_INDEX(EXC_TRY_CLAUSE)):
+ case (GET_EXC_INDEX(EXC_BADFUN)):
+ case (GET_EXC_INDEX(EXC_BADARITY)):
+ /* Some common exceptions: value -> {atom, value} */
+ ASSERT(is_value(Value));
+ hp = HAlloc(c_p, 3);
+ Value = TUPLE2(hp, error_atom[r], Value);
+ break;
+ default:
+ /* Other exceptions just use an atom as descriptor */
+ Value = error_atom[r];
+ break;
+ }
+#ifdef DEBUG
+ ASSERT(Value != am_internal_error);
+#endif
+ return Value;
+}
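+
+/*
+ * Example of the mapping above: a failed match raises EXC_BADMATCH
+ * with fvalue set to the offending term, and this function turns
+ * that into the term users see:
+ *
+ * Value -> {badmatch, Value}
+ *
+ * while e.g. a bad argument becomes just the atom 'badarg'.
+ */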
+
+/*
+ * Quick-saving the stack trace in an internal form on the heap. Note
+ * that c_p->ftrace will point to a cons cell which holds the given args
+ * and the saved data (encoded as a bignum).
+ *
+ * (It would be much better to put the arglist - when it exists - in the
+ * error value instead of in the actual trace; e.g. '{badarg, Args}'
+ * instead of using 'badarg' with Args in the trace. The arglist may
+ * contain very large values, and right now they will be kept alive as
+ * long as the stack trace is live. Preferably, the stack trace should
+ * always be small, so that it does not matter if it is long-lived.
+ * However, it is probably not possible to ever change the format of
+ * error terms.)
+ */
+
+static void
+save_stacktrace(Process* c_p, Eterm* pc, Eterm* reg, BifFunction bf,
+ Eterm args) {
+ struct StackTrace* s;
+ int sz;
+ int depth = erts_backtrace_depth; /* max depth (never negative) */
+ if (depth > 0) {
+ /* There will always be a current function */
+ depth--;
+ }
+
+ /* Create a container for the exception data */
+ sz = (offsetof(struct StackTrace, trace) + sizeof(Eterm)*depth
+ + sizeof(Eterm) - 1) / sizeof(Eterm);
+ s = (struct StackTrace *) HAlloc(c_p, 1 + sz);
+ /* The following fields are inside the bignum */
+ s->header = make_pos_bignum_header(sz);
+ s->freason = c_p->freason;
+ s->depth = 0;
+
+ /*
+ * If the failure was in a BIF other than 'error', 'exit' or
+ * 'throw', find the bif-table index and save the argument
+ * registers by consing up an arglist.
+ */
+ if (bf != NULL && bf != error_1 && bf != error_2 &&
+ bf != exit_1 && bf != throw_1) {
+ int i;
+ int a = 0;
+ for (i = 0; i < BIF_SIZE; i++) {
+ if (bf == bif_table[i].f || bf == bif_table[i].traced) {
+ Export *ep = bif_export[i];
+ s->current = ep->code;
+ a = bif_table[i].arity;
+ break;
+ }
+ }
+ if (i >= BIF_SIZE) {
+ /*
+ * The BIF does not really exist (no BIF entry). It is a
+ * TRAP and traps are called through apply_bif, which also
+ * sets c_p->current (luckily).
+ */
+ ASSERT(c_p->current);
+ s->current = c_p->current;
+ a = s->current[2];
+ ASSERT(s->current[2] <= 3);
+ }
+ /* Save first stack entry */
+ ASSERT(pc);
+ if (depth > 0) {
+ s->trace[s->depth++] = pc;
+ depth--;
+ }
+ /* Save second stack entry if CP is valid and different from pc */
+ if (depth > 0 && c_p->cp != 0 && c_p->cp != pc) {
+ s->trace[s->depth++] = c_p->cp;
+ depth--;
+ }
+ s->pc = NULL;
+ args = make_arglist(c_p, reg, a); /* Overwrite CAR(c_p->ftrace) */
+ } else {
+ s->current = c_p->current;
+ /*
+ * For a function_clause error, the arguments are in the beam
+ * registers, c_p->cp is valid, and c_p->current is set.
+ */
+ if ( (GET_EXC_INDEX(s->freason)) ==
+ (GET_EXC_INDEX(EXC_FUNCTION_CLAUSE)) ) {
+ int a;
+ ASSERT(s->current);
+ a = s->current[2];
+ args = make_arglist(c_p, reg, a); /* Overwrite CAR(c_p->ftrace) */
+ /* Save first stack entry */
+ ASSERT(c_p->cp);
+ if (depth > 0) {
+ s->trace[s->depth++] = c_p->cp;
+ depth--;
+ }
+ s->pc = NULL; /* Ignore pc */
+ } else {
+ if (depth > 0 && c_p->cp != 0 && c_p->cp != pc) {
+ s->trace[s->depth++] = c_p->cp;
+ depth--;
+ }
+ s->pc = pc;
+ }
+ }
+
+ /* Package args and stack trace */
+ {
+ Eterm *hp;
+ hp = HAlloc(c_p, 2);
+ c_p->ftrace = CONS(hp, args, make_big((Eterm *) s));
+ }
+
+ /* Save the actual stack trace */
+ if (depth > 0) {
+ Eterm *ptr, *prev = s->depth ? s->trace[s->depth-1] : NULL;
+ Uint i_return_trace = beam_return_trace[0];
+ Uint i_return_to_trace = beam_return_to_trace[0];
+ /*
+ * Traverse the stack backwards and add all unique continuation
+ * pointers to the buffer, up to the maximum stack trace size.
+ *
+ * Skip trace stack frames.
+ */
+ ptr = c_p->stop;
+ if (ptr < STACK_START(c_p)
+ && (is_not_CP(*ptr) || (*cp_val(*ptr) != i_return_trace &&
+ *cp_val(*ptr) != i_return_to_trace))
+ && c_p->cp) {
+ /* Can not follow cp here - code may be unloaded */
+ Uint *cpp = cp_val((Eterm) c_p->cp);
+ if (cpp == beam_exception_trace || cpp == beam_return_trace) {
+ /* Skip return_trace parameters */
+ ptr += 2;
+ } else if (cpp == beam_return_to_trace) {
+ /* Skip return_to_trace parameters */
+ ptr += 1;
+ }
+ }
+ while (ptr < STACK_START(c_p) && depth > 0) {
+ if (is_CP(*ptr)) {
+ if (*cp_val(*ptr) == i_return_trace) {
+ /* Skip stack frame variables */
+ do ++ptr; while (is_not_CP(*ptr));
+ /* Skip return_trace parameters */
+ ptr += 2;
+ } else if (*cp_val(*ptr) == i_return_to_trace) {
+ /* Skip stack frame variables */
+ do ++ptr; while (is_not_CP(*ptr));
+ } else {
+ Eterm *cp = (Eterm *)(*ptr);
+ if (cp != prev) {
+ /* Record non-duplicates only */
+ prev = cp;
+ s->trace[s->depth++] = cp;
+ depth--;
+ }
+ ptr++;
+ }
+ } else ptr++;
+ }
+ }
+}
+
+/*
+ * Getting the relevant fields from the term pointed to by ftrace
+ */
+
+static struct StackTrace *get_trace_from_exc(Eterm exc) {
+ if (exc == NIL) {
+ return NULL;
+ } else {
+ ASSERT(is_list(exc));
+ return (struct StackTrace *) big_val(CDR(list_val(exc)));
+ }
+}
+
+static Eterm get_args_from_exc(Eterm exc) {
+ if (exc == NIL) {
+ return NIL;
+ } else {
+ ASSERT(is_list(exc));
+ return CAR(list_val(exc));
+ }
+}
+
+static int is_raised_exc(Eterm exc) {
+ if (exc == NIL) {
+ return 0;
+ } else {
+ ASSERT(is_list(exc));
+ return bignum_header_is_neg(*big_val(CDR(list_val(exc))));
+ }
+}
+
+/*
+ * Creating a list with the argument registers
+ */
+static Eterm
+make_arglist(Process* c_p, Eterm* reg, int a) {
+ Eterm args = NIL;
+ Eterm* hp = HAlloc(c_p, 2*a);
+ while (a > 0) {
+ args = CONS(hp, reg[a-1], args);
+ hp += 2;
+ a--;
+ }
+ return args;
+}
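+
+/*
+ * The loop above conses from the highest register downwards, so
+ * the list comes out in argument order; e.g. for a == 3 the
+ * result is [reg[0], reg[1], reg[2]].
+ */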
+
+/*
+ * Building a symbolic representation of a saved stack trace. Note that
+ * the exception object 'exc', unless NIL, points to a cons cell which
+ * holds the given args and the quick-saved data (encoded as a bignum).
+ *
+ * If the bignum is negative, the given args are already a complete stacktrace.
+ */
+Eterm
+build_stacktrace(Process* c_p, Eterm exc) {
+ struct StackTrace* s;
+ Eterm args;
+ int depth;
+ Eterm* current;
+ Eterm Where = NIL;
+ Eterm* next_p = &Where;
+
+ if (! (s = get_trace_from_exc(exc))) {
+ return NIL;
+ }
+#ifdef HIPE
+ if (s->freason & EXF_NATIVE) {
+ return hipe_build_stacktrace(c_p, s);
+ }
+#endif
+ if (is_raised_exc(exc)) {
+ return get_args_from_exc(exc);
+ }
+
+ /*
+ * Find the current function. If the saved s->pc is null, then the
+ * saved s->current should already contain the proper value.
+ */
+ if (s->pc != NULL) {
+ current = find_function_from_pc(s->pc);
+ } else {
+ current = s->current;
+ }
+ /*
+ * If current is still NULL, default to the initial function
+ * (e.g. spawn_link(erlang, abs, [1])).
+ */
+ if (current == NULL) {
+ current = c_p->initial;
+ args = am_true; /* Just in case */
+ } else {
+ args = get_args_from_exc(exc);
+ }
+
+ depth = s->depth;
+
+ /*
+ * Add the {M,F,A} for the current function
+ * (where A is arity or [Argument]).
+ */
+ {
+ int i;
+ Eterm mfa;
+ Uint heap_size = 6*(depth+1);
+ Eterm* hp = HAlloc(c_p, heap_size);
+ Eterm* hp_end = hp + heap_size;
+
+ if (args != am_true) {
+ /* We have an arglist - use it */
+ mfa = TUPLE3(hp, current[0], current[1], args);
+ } else {
+ Eterm arity = make_small(current[2]);
+ mfa = TUPLE3(hp, current[0], current[1], arity);
+ }
+ hp += 4;
+ ASSERT(*next_p == NIL);
+ *next_p = CONS(hp, mfa, NIL);
+ next_p = &CDR(list_val(*next_p));
+ hp += 2;
+
+ /*
+ * Finally, we go through the saved continuation pointers.
+ */
+ for (i = 0; i < depth; i++) {
+ Eterm *fi = find_function_from_pc((Eterm *) s->trace[i]);
+ if (fi == NULL) continue;
+ mfa = TUPLE3(hp, fi[0], fi[1], make_small(fi[2]));
+ hp += 4;
+ ASSERT(*next_p == NIL);
+ *next_p = CONS(hp, mfa, NIL);
+ next_p = &CDR(list_val(*next_p));
+ hp += 2;
+ }
+ ASSERT(hp <= hp_end);
+ HRelease(c_p, hp_end, hp);
+ }
+ return Where;
+}
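+
+/*
+ * The resulting term has the classic shape
+ *
+ * [{M,F,A-or-Args}, {M1,F1,A1}, ...]
+ *
+ * where the first tuple describes the function in which the
+ * exception occurred (with the actual argument list when one was
+ * saved) and the rest come from the saved continuation pointers.
+ */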
+
+
+static Eterm
+call_error_handler(Process* p, Eterm* fi, Eterm* reg)
+{
+ Eterm* hp;
+ Export* ep;
+ int arity;
+ Eterm args;
+ Uint sz;
+ int i;
+
+ /*
+ * Search for the error_handler module.
+ */
+ ep = erts_find_function(erts_proc_get_error_handler(p),
+ am_undefined_function, 3);
+ if (ep == NULL) { /* No error handler */
+ p->current = fi;
+ p->freason = EXC_UNDEF;
+ return 0;
+ }
+ p->i = ep->address;
+
+ /*
+ * Create a list with all arguments in the x registers.
+ */
+
+ arity = fi[2];
+ sz = 2 * arity;
+ if (HeapWordsLeft(p) < sz) {
+ erts_garbage_collect(p, sz, reg, arity);
+ }
+ hp = HEAP_TOP(p);
+ HEAP_TOP(p) += sz;
+ args = NIL;
+ for (i = arity-1; i >= 0; i--) {
+ args = CONS(hp, reg[i], args);
+ hp += 2;
+ }
+
+ /*
+ * Set up registers for call to error_handler:undefined_function/3.
+ */
+ reg[0] = fi[0];
+ reg[1] = fi[1];
+ reg[2] = args;
+ return 1;
+}
+
+static Eterm
+call_breakpoint_handler(Process* p, Eterm* fi, Eterm* reg)
+{
+ Eterm* hp;
+ Export* ep;
+ int arity;
+ Eterm args;
+ Uint sz;
+ int i;
+
+ /*
+ * Search for error handler module.
+ */
+ ep = erts_find_function(erts_proc_get_error_handler(p),
+ am_breakpoint, 3);
+ if (ep == NULL) { /* No error handler */
+ p->current = fi;
+ p->freason = EXC_UNDEF;
+ return 0;
+ }
+ p->i = ep->address;
+
+ /*
+ * Create a list with all arguments in the x registers.
+ */
+
+ arity = fi[2];
+ sz = 2 * arity;
+ if (HeapWordsLeft(p) < sz) {
+ erts_garbage_collect(p, sz, reg, arity);
+ }
+ hp = HEAP_TOP(p);
+ HEAP_TOP(p) += sz;
+ args = NIL;
+ for (i = arity-1; i >= 0; i--) {
+ args = CONS(hp, reg[i], args);
+ hp += 2;
+ }
+
+ /*
+ * Set up registers for call to error_handler:breakpoint/3.
+ */
+ reg[0] = fi[0];
+ reg[1] = fi[1];
+ reg[2] = args;
+ return 1;
+}
+
+
+
+static Export*
+apply_setup_error_handler(Process* p, Eterm module, Eterm function, Uint arity, Eterm* reg)
+{
+ Export* ep;
+
+ /*
+ * Find the export table index for the error handler. Return NULL if
+ * there is no error handler module.
+ */
+
+ if ((ep = erts_find_export_entry(erts_proc_get_error_handler(p),
+ am_undefined_function, 3)) == NULL) {
+ return NULL;
+ } else {
+ int i;
+ Uint sz = 2*arity;
+ Eterm* hp;
+ Eterm args = NIL;
+
+ /*
+ * Always copy args from registers to a new list; this ensures
+ * that we have the same behaviour whether or not this was
+ * called from apply or fixed_apply (any additional last
+ * THIS-argument will be included, assuming that arity has been
+ * properly adjusted).
+ */
+
+ if (HeapWordsLeft(p) < sz) {
+ erts_garbage_collect(p, sz, reg, arity);
+ }
+ hp = HEAP_TOP(p);
+ HEAP_TOP(p) += sz;
+ for (i = arity-1; i >= 0; i--) {
+ args = CONS(hp, reg[i], args);
+ hp += 2;
+ }
+ reg[0] = module;
+ reg[1] = function;
+ reg[2] = args;
+ }
+
+ return ep;
+}
+
+static Uint*
+apply(Process* p, Eterm module, Eterm function, Eterm args, Eterm* reg)
+{
+ int arity;
+ Export* ep;
+ Eterm tmp, this;
+
+ /*
+ * Check the arguments which should be of the form apply(Module,
+ * Function, Arguments) where Function is an atom and
+ * Arguments is an arity long list of terms.
+ */
+ if (is_not_atom(function)) {
+ /*
+ * No need to test args here -- done below.
+ */
+ error:
+ p->freason = BADARG;
+
+ error2:
+ reg[0] = module;
+ reg[1] = function;
+ reg[2] = args;
+ return 0;
+ }
+
+ /* The module argument may be either an atom or an abstract module
+ * (currently implemented using tuples, but this might change).
+ */
+ this = THE_NON_VALUE;
+ if (is_not_atom(module)) {
+ Eterm* tp;
+
+ if (is_not_tuple(module)) goto error;
+ tp = tuple_val(module);
+ if (arityval(tp[0]) < 1) goto error;
+ this = module;
+ module = tp[1];
+ if (is_not_atom(module)) goto error;
+ }
+
+ /*
+ * Walk down the 3rd parameter of apply (the argument list) and copy
+ * the parameters to the x registers (reg[]). If the module argument
+ * was an abstract module, add 1 to the function arity and put the
+ * module argument in the n+1st x register as a THIS reference.
+ */
+
+ tmp = args;
+ arity = 0;
+ while (is_list(tmp)) {
+ if (arity < (MAX_REG - 1)) {
+ reg[arity++] = CAR(list_val(tmp));
+ tmp = CDR(list_val(tmp));
+ } else {
+ p->freason = SYSTEM_LIMIT;
+ goto error2;
+ }
+ }
+ if (is_not_nil(tmp)) { /* Must be well-formed list */
+ goto error;
+ }
+ if (this != THE_NON_VALUE) {
+ reg[arity++] = this;
+ }
+
+ /*
+ * Get the index into the export table, or failing that the export
+ * entry for the error handler.
+ *
+ * Note: All BIFs have export entries; thus, no special case is needed.
+ */
+
+ if ((ep = erts_find_export_entry(module, function, arity)) == NULL) {
+ if ((ep = apply_setup_error_handler(p, module, function, arity, reg)) == NULL) goto error;
+ } else if (ERTS_PROC_GET_SAVED_CALLS_BUF(p)) {
+ save_calls(p, ep);
+ }
+
+ return ep->address;
+}
+
+static Uint*
+fixed_apply(Process* p, Eterm* reg, Uint arity)
+{
+ Export* ep;
+ Eterm module;
+ Eterm function;
+
+ module = reg[arity]; /* The THIS pointer already in place */
+ function = reg[arity+1];
+
+ if (is_not_atom(function)) {
+ error:
+ p->freason = BADARG;
+ reg[0] = module;
+ reg[1] = function;
+ reg[2] = NIL;
+ return 0;
+ }
+
+ /* The module argument may be either an atom or an abstract module
+ * (currently implemented using tuples, but this might change).
+ */
+ if (is_not_atom(module)) {
+ Eterm* tp;
+ if (is_not_tuple(module)) goto error;
+ tp = tuple_val(module);
+ if (arityval(tp[0]) < 1) goto error;
+ module = tp[1];
+ if (is_not_atom(module)) goto error;
+ ++arity;
+ }
+
+ /*
+ * Get the index into the export table, or failing that the export
+ * entry for the error handler module.
+ *
+ * Note: All BIFs have export entries; thus, no special case is needed.
+ */
+
+ if ((ep = erts_find_export_entry(module, function, arity)) == NULL) {
+ if ((ep = apply_setup_error_handler(p, module, function, arity, reg)) == NULL)
+ goto error;
+ } else if (ERTS_PROC_GET_SAVED_CALLS_BUF(p)) {
+ save_calls(p, ep);
+ }
+
+ return ep->address;
+}
+
+static int
+hibernate(Process* c_p, Eterm module, Eterm function, Eterm args, Eterm* reg)
+{
+ int arity;
+ Eterm tmp;
+
+ if (is_not_atom(module) || is_not_atom(function)) {
+ /*
+ * No need to test args here -- done below.
+ */
+ error:
+ c_p->freason = BADARG;
+
+ error2:
+ reg[0] = module;
+ reg[1] = function;
+ reg[2] = args;
+ return 0;
+ }
+
+ arity = 0;
+ tmp = args;
+ while (is_list(tmp)) {
+ if (arity < MAX_REG) {
+ tmp = CDR(list_val(tmp));
+ arity++;
+ } else {
+ c_p->freason = SYSTEM_LIMIT;
+ goto error2;
+ }
+ }
+ if (is_not_nil(tmp)) { /* Must be well-formed list */
+ goto error;
+ }
+
+ /*
+ * At this point, arguments are known to be good.
+ */
+
+ if (c_p->arg_reg != c_p->def_arg_reg) {
+ /* Save some memory */
+ erts_free(ERTS_ALC_T_ARG_REG, c_p->arg_reg);
+ c_p->arg_reg = c_p->def_arg_reg;
+ c_p->max_arg_reg = sizeof(c_p->def_arg_reg)/sizeof(c_p->def_arg_reg[0]);
+ }
+
+ /*
+ * Arrange for the process to be resumed at the given MFA with
+ * the stack cleared.
+ */
+ c_p->arity = 3;
+ c_p->arg_reg[0] = module;
+ c_p->arg_reg[1] = function;
+ c_p->arg_reg[2] = args;
+ c_p->stop = STACK_START(c_p);
+ c_p->catches = 0;
+ c_p->i = beam_apply;
+ c_p->cp = (Eterm *) beam_apply+1;
+
+ /*
+ * If there are no waiting messages, garbage collect and
+ * shrink the heap.
+ */
+ erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MSGQ|ERTS_PROC_LOCK_STATUS);
+ ERTS_SMP_MSGQ_MV_INQ2PRIVQ(c_p);
+ if (c_p->msg.len > 0) {
+ erts_add_to_runq(c_p);
+ } else {
+ erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_MSGQ|ERTS_PROC_LOCK_STATUS);
+ c_p->fvalue = NIL;
+ PROCESS_MAIN_CHK_LOCKS(c_p);
+ erts_garbage_collect_hibernate(c_p);
+ PROCESS_MAIN_CHK_LOCKS(c_p);
+ erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MSGQ|ERTS_PROC_LOCK_STATUS);
+ ASSERT(!ERTS_PROC_IS_EXITING(c_p));
+ c_p->status = P_WAITING;
+#ifdef ERTS_SMP
+ ERTS_SMP_MSGQ_MV_INQ2PRIVQ(c_p);
+ if (c_p->msg.len > 0)
+ erts_add_to_runq(c_p);
+#endif
+ }
+ erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_MSGQ|ERTS_PROC_LOCK_STATUS);
+ c_p->current = bif_export[BIF_hibernate_3]->code;
+ return 1;
+}
+
+static Uint*
+call_fun(Process* p, /* Current process. */
+ int arity, /* Number of arguments for Fun. */
+ Eterm* reg, /* Contents of registers. */
+ Eterm args) /* THE_NON_VALUE or pre-built list of arguments. */
+{
+ Eterm fun = reg[arity];
+ Eterm hdr;
+ int i;
+ Eterm function;
+ Eterm* hp;
+
+ if (!is_boxed(fun)) {
+ goto badfun;
+ }
+ hdr = *boxed_val(fun);
+
+ if (is_fun_header(hdr)) {
+ ErlFunThing* funp = (ErlFunThing *) fun_val(fun);
+ ErlFunEntry* fe;
+ Eterm* code_ptr;
+ Eterm* var_ptr;
+ int actual_arity;
+ unsigned num_free;
+
+ fe = funp->fe;
+ num_free = funp->num_free;
+ code_ptr = fe->address;
+ actual_arity = (int) code_ptr[-1];
+
+ if (actual_arity == arity+num_free) {
+ if (num_free == 0) {
+ return code_ptr;
+ } else {
+ var_ptr = funp->env;
+ reg += arity;
+ i = 0;
+ do {
+ reg[i] = var_ptr[i];
+ i++;
+ } while (i < num_free);
+ reg[i] = fun;
+ return code_ptr;
+ }
+ } else {
+ /*
+ * Something wrong here. First build a list of the arguments.
+ */
+
+ if (is_non_value(args)) {
+ Uint sz = 2 * arity;
+ args = NIL;
+ if (HeapWordsLeft(p) < sz) {
+ erts_garbage_collect(p, sz, reg, arity+1);
+ fun = reg[arity];
+ }
+ hp = HEAP_TOP(p);
+ HEAP_TOP(p) += sz;
+ for (i = arity-1; i >= 0; i--) {
+ args = CONS(hp, reg[i], args);
+ hp += 2;
+ }
+ }
+
+ if (actual_arity >= 0) {
+ /*
+ * There is a fun defined, but the call has the wrong arity.
+ */
+ hp = HAlloc(p, 3);
+ p->freason = EXC_BADARITY;
+ p->fvalue = TUPLE2(hp, fun, args);
+ return NULL;
+ } else {
+ Export* ep;
+ Module* modp;
+ Eterm module;
+
+
+ /*
+ * No arity. There is no module loaded that defines the fun,
+ * either because the fun is newly created from the external
+ * representation (the module has never been loaded),
+ * or the module defining the fun has been unloaded.
+ */
+
+ module = fe->module;
+ if ((modp = erts_get_module(module)) != NULL && modp->code != NULL) {
+ /*
+ * There is a module loaded, but obviously the fun is not
+ * defined in it. We must not call the error_handler
+ * (or we will get into an infinite loop).
+ */
+ goto badfun;
+ }
+
+ /*
+ * No current code for this module. Call the error_handler module
+ * to attempt loading the module.
+ */
+
+ ep = erts_find_function(erts_proc_get_error_handler(p),
+ am_undefined_lambda, 3);
+ if (ep == NULL) { /* No error handler */
+ p->current = NULL;
+ p->freason = EXC_UNDEF;
+ return NULL;
+ }
+ reg[0] = module;
+ reg[1] = fun;
+ reg[2] = args;
+ return ep->address;
+ }
+ }
+ } else if (is_export_header(hdr)) {
+ Export* ep = (Export *) (export_val(fun))[1];
+ int actual_arity = (int) ep->code[2];
+ if (arity == actual_arity) {
+ return ep->address;
+ } else {
+ /*
+ * Wrong arity. First build a list of the arguments.
+ */
+
+ if (is_non_value(args)) {
+ args = NIL;
+ hp = HAlloc(p, arity*2);
+ for (i = arity-1; i >= 0; i--) {
+ args = CONS(hp, reg[i], args);
+ hp += 2;
+ }
+ }
+
+ hp = HAlloc(p, 3);
+ p->freason = EXC_BADARITY;
+ p->fvalue = TUPLE2(hp, fun, args);
+ return NULL;
+ }
+ } else if (hdr == make_arityval(2)) {
+ Eterm* tp;
+ Export* ep;
+ Eterm module;
+
+ tp = tuple_val(fun);
+ module = tp[1];
+ function = tp[2];
+ if (!is_atom(module) || !is_atom(function)) {
+ goto badfun;
+ }
+ if ((ep = erts_find_export_entry(module, function, arity)) == NULL) {
+ ep = erts_find_export_entry(erts_proc_get_error_handler(p),
+ am_undefined_function, 3);
+ if (ep == NULL) {
+ p->freason = EXC_UNDEF;
+		return NULL;
+ }
+ if (is_non_value(args)) {
+ Uint sz = 2 * arity;
+ if (HeapWordsLeft(p) < sz) {
+ erts_garbage_collect(p, sz, reg, arity);
+ }
+ hp = HEAP_TOP(p);
+ HEAP_TOP(p) += sz;
+ args = NIL;
+ while (arity-- > 0) {
+ args = CONS(hp, reg[arity], args);
+ hp += 2;
+ }
+ }
+ reg[0] = module;
+ reg[1] = function;
+ reg[2] = args;
+ }
+ return ep->address;
+ } else {
+ badfun:
+ p->current = NULL;
+ p->freason = EXC_BADFUN;
+ p->fvalue = fun;
+ return NULL;
+ }
+}
+
+static Eterm*
+apply_fun(Process* p, Eterm fun, Eterm args, Eterm* reg)
+{
+ int arity;
+ Eterm tmp;
+
+ /*
+ * Walk down the 3rd parameter of apply (the argument list) and copy
+ * the parameters to the x registers (reg[]).
+ */
+
+ tmp = args;
+ arity = 0;
+ while (is_list(tmp)) {
+ if (arity < MAX_REG-1) {
+ reg[arity++] = CAR(list_val(tmp));
+ tmp = CDR(list_val(tmp));
+ } else {
+ p->freason = SYSTEM_LIMIT;
+ return NULL;
+ }
+ }
+
+    if (is_not_nil(tmp)) {	/* The argument list must be a proper list. */
+ p->freason = EXC_UNDEF;
+ return NULL;
+ }
+ reg[arity] = fun;
+ return call_fun(p, arity, reg, args);
+}
+
+
+static Eterm
+new_fun(Process* p, Eterm* reg, ErlFunEntry* fe, int num_free)
+{
+ unsigned needed = ERL_FUN_SIZE + num_free;
+ ErlFunThing* funp;
+ Eterm* hp;
+ int i;
+
+ if (HEAP_LIMIT(p) - HEAP_TOP(p) <= needed) {
+ PROCESS_MAIN_CHK_LOCKS(p);
+ erts_garbage_collect(p, needed, reg, num_free);
+ PROCESS_MAIN_CHK_LOCKS(p);
+ }
+ hp = p->htop;
+ p->htop = hp + needed;
+ funp = (ErlFunThing *) hp;
+ hp = funp->env;
+ erts_refc_inc(&fe->refc, 2);
+ funp->thing_word = HEADER_FUN;
+#ifndef HYBRID /* FIND ME! */
+ funp->next = MSO(p).funs;
+ MSO(p).funs = funp;
+#endif
+ funp->fe = fe;
+ funp->num_free = num_free;
+ funp->creator = p->id;
+#ifdef HIPE
+ funp->native_address = fe->native_address;
+#endif
+ funp->arity = (int)fe->address[-1] - num_free;
+ for (i = 0; i < num_free; i++) {
+ *hp++ = reg[i];
+ }
+ return make_fun(funp);
+}
+
+
+
+int catchlevel(Process *p)
+{
+ return p->catches;
+}
+
+/*
+ * Check if the given function is built-in (i.e. a BIF implemented in C).
+ *
+ * Returns 0 if not built-in, and a non-zero value if built-in.
+ */
+
+int
+erts_is_builtin(Eterm Mod, Eterm Name, int arity)
+{
+ Export e;
+ Export* ep;
+
+ e.code[0] = Mod;
+ e.code[1] = Name;
+ e.code[2] = arity;
+
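+    /*
+     * A BIF's export entry points its address at the entry's own
+     * code[3] trampoline, which holds the em_apply_bif instruction
+     * (the C function pointer itself lives in code[4]); the test
+     * below checks exactly that.
+     */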
+ if ((ep = export_get(&e)) == NULL) {
+ return 0;
+ }
+ return ep->address == ep->code+3 && (ep->code[3] == (Uint) em_apply_bif);
+}
+
+
+/*
+ * Return the current number of reductions for the given process.
+ * To get the total number of reductions, p->reds must be added.
+ */
+
+Uint
+erts_current_reductions(Process *current, Process *p)
+{
+ if (current != p) {
+ return 0;
+ } else if (current->fcalls < 0 && ERTS_PROC_GET_SAVED_CALLS_BUF(current)) {
+ return -current->fcalls;
+ } else {
+ return REDS_IN(current) - current->fcalls;
+ }
+}
+
+static BIF_RETTYPE nif_dispatcher_0(Process* p, Uint* I)
+{
+ typedef Eterm NifF(struct enif_environment_t*);
+ NifF* fp = (NifF*) I[1];
+ struct enif_environment_t env;
+ Eterm ret;
+ erts_pre_nif(&env, p, (void*)I[2]);
+ ret = (*fp)(&env);
+ erts_post_nif(&env);
+ return ret;
+}
+
+static BIF_RETTYPE nif_dispatcher_1(Process* p, Eterm arg1, Uint* I)
+{
+ typedef Eterm NifF(struct enif_environment_t*, Eterm);
+ NifF* fp = (NifF*) I[1];
+ struct enif_environment_t env;
+ Eterm ret;
+ erts_pre_nif(&env, p, (void*)I[2]);
+ ret = (*fp)(&env, arg1);
+ erts_post_nif(&env);
+ return ret;
+}
+
+static BIF_RETTYPE nif_dispatcher_2(Process* p, Eterm arg1, Eterm arg2, Uint* I)
+{
+ typedef Eterm NifF(struct enif_environment_t*, Eterm, Eterm);
+ NifF* fp = (NifF*) I[1];
+ struct enif_environment_t env;
+ Eterm ret;
+ erts_pre_nif(&env, p, (void*)I[2]);
+ ret = (*fp)(&env, arg1, arg2);
+ erts_post_nif(&env);
+ return ret;
+}
+
+static BIF_RETTYPE nif_dispatcher_3(Process* p, Eterm arg1, Eterm arg2, Eterm arg3, Uint* I)
+{
+ typedef Eterm NifF(struct enif_environment_t*, Eterm, Eterm, Eterm);
+ NifF* fp = (NifF*) I[1];
+ struct enif_environment_t env;
+ Eterm ret;
+ erts_pre_nif(&env, p, (void*)I[2]);
+ ret = (*fp)(&env, arg1, arg2, arg3);
+ erts_post_nif(&env);
+ return ret;
+}
+
diff --git a/erts/emulator/beam/beam_load.c b/erts/emulator/beam/beam_load.c
new file mode 100644
index 0000000000..47dd98117d
--- /dev/null
+++ b/erts/emulator/beam/beam_load.c
@@ -0,0 +1,5234 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 1996-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+
+#include "sys.h"
+#include "erl_vm.h"
+#include "global.h"
+#include "erl_version.h"
+#include "erl_process.h"
+#include "error.h"
+#include "erl_driver.h"
+#include "bif.h"
+#include "external.h"
+#include "beam_load.h"
+#include "big.h"
+#include "erl_bits.h"
+#include "beam_catches.h"
+#include "erl_binary.h"
+#include "erl_zlib.h"
+
+#ifdef HIPE
+#include "hipe_bif0.h"
+#include "hipe_mode_switch.h"
+#include "hipe_arch.h"
+#endif
+
+ErlDrvBinary* erts_gzinflate_buffer(char*, int);
+
+#define MAX_OPARGS 8
+#define CALLED 0
+#define DEFINED 1
+#define EXPORTED 2
+
+#ifdef NO_JUMP_TABLE
+# define BeamOpCode(Op) ((Uint)(Op))
+#else
+# define BeamOpCode(Op) ((Eterm)beam_ops[Op])
+#endif
+
+#if defined(WORDS_BIGENDIAN)
+# define NATIVE_ENDIAN(F) \
+ if ((F).val & BSF_NATIVE) { \
+ (F).val &= ~(BSF_LITTLE|BSF_NATIVE); \
+ } else {}
+#else
+# define NATIVE_ENDIAN(F) \
+ if ((F).val & BSF_NATIVE) { \
+ (F).val &= ~BSF_NATIVE; \
+ (F).val |= BSF_LITTLE; \
+ } else {}
+#endif
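+
+/*
+ * In other words, a "native" endian flag on a bit syntax segment is
+ * resolved at load time to this emulator's concrete byte order: on a
+ * big-endian host the flag is simply dropped (big-endian is the
+ * default), while on a little-endian host it is rewritten to
+ * BSF_LITTLE.
+ */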
+
+/*
+ * Errors returned from transform_engine().
+ */
+#define TE_OK 0
+#define TE_FAIL (-1)
+#define TE_SHORT_WINDOW (-2)
+
+typedef struct {
+    Uint value;		/* Value of label (zero if not known yet). */
+ Uint patches; /* Index (into code buffer) to first location
+ * which must be patched with the value of this label.
+ */
+#ifdef ERTS_SMP
+ Uint looprec_targeted; /* Non-zero if this label is the target of a loop_rec
+ * instruction.
+ */
+#endif
+} Label;
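+
+/*
+ * Unresolved label references are threaded through the code area
+ * itself: each word that will eventually hold a label's address
+ * temporarily holds the index of the previous such word, and index 0
+ * terminates the chain (see the 'f' operand case in load_code()).
+ * Resolving a label is then roughly (a sketch; the real fix-up is
+ * done when the code is frozen):
+ *
+ *	index = label->patches;
+ *	while (index != 0) {
+ *	    Uint next = code[index];
+ *	    code[index] = (Uint) (code + label->value);
+ *	    index = next;
+ *	}
+ */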
+
+/*
+ * Type for an operand of a generic instruction.
+ */
+
+typedef struct {
+ unsigned type; /* Type of operand. */
+ Uint val; /* Value of operand. */
+ Uint bigarity; /* Arity for bignumbers (only). */
+} GenOpArg;
+
+/*
+ * A generic operation.
+ */
+
+typedef struct genop {
+ int op; /* Opcode. */
+ int arity; /* Number of arguments. */
+ GenOpArg def_args[MAX_OPARGS]; /* Default buffer for arguments. */
+ GenOpArg* a; /* The arguments. */
+ struct genop* next; /* Next genop. */
+} GenOp;
+
+/*
+ * The allocation unit for generic blocks.
+ */
+
+typedef struct genop_block {
+ GenOp genop[32];
+ struct genop_block* next;
+} GenOpBlock;
+
+/*
+ * This structure contains information for an imported function or BIF.
+ */
+typedef struct {
+ Eterm module; /* Tagged atom for module. */
+ Eterm function; /* Tagged atom for function. */
+ int arity; /* Arity. */
+ Uint patches; /* Index to locations in code to
+ * eventually patch with a pointer into
+ * the export entry.
+ */
+ BifFunction bf; /* Pointer to BIF function if BIF;
+ * NULL otherwise.
+ */
+} ImportEntry;
+
+/*
+ * This structure contains information for a function exported from a module.
+ */
+
+typedef struct {
+ Eterm function; /* Tagged atom for function. */
+ int arity; /* Arity. */
+ Eterm* address; /* Address to function in code. */
+} ExportEntry;
+
+#define MakeIffId(a, b, c, d) \
+ (((Uint) (a) << 24) | ((Uint) (b) << 16) | ((Uint) (c) << 8) | (Uint) (d))
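+
+/*
+ * For example, MakeIffId('B', 'E', 'A', 'M') yields 0x4245414D, the
+ * big-endian interpretation of the four ASCII bytes "BEAM".
+ */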
+
+#define ATOM_CHUNK 0
+#define CODE_CHUNK 1
+#define STR_CHUNK 2
+#define IMP_CHUNK 3
+#define EXP_CHUNK 4
+#define NUM_MANDATORY 5
+
+#define LAMBDA_CHUNK 5
+#define LITERAL_CHUNK 6
+#define ATTR_CHUNK 7
+#define COMPILE_CHUNK 8
+
+#define NUM_CHUNK_TYPES (sizeof(chunk_types)/sizeof(chunk_types[0]))
+
+/*
+ * An array with all chunk types recognized by the loader.
+ */
+
+static Uint chunk_types[] = {
+ /*
+ * Mandatory chunk types -- these MUST be present.
+ */
+ MakeIffId('A', 't', 'o', 'm'), /* 0 */
+ MakeIffId('C', 'o', 'd', 'e'), /* 1 */
+ MakeIffId('S', 't', 'r', 'T'), /* 2 */
+ MakeIffId('I', 'm', 'p', 'T'), /* 3 */
+ MakeIffId('E', 'x', 'p', 'T'), /* 4 */
+
+ /*
+ * Optional chunk types -- the loader will use them if present.
+ */
+ MakeIffId('F', 'u', 'n', 'T'), /* 5 */
+ MakeIffId('L', 'i', 't', 'T'), /* 6 */
+ MakeIffId('A', 't', 't', 'r'), /* 7 */
+ MakeIffId('C', 'I', 'n', 'f'), /* 8 */
+};
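+
+/*
+ * Note that the order of the entries above must match the chunk
+ * index #defines (ATOM_CHUNK == 0 ... COMPILE_CHUNK == 8), since the
+ * chunks[] array in the loader state is indexed by them.
+ */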
+
+/*
+ * This structure keeps load-time information about a lambda.
+ */
+
+typedef struct {
+ ErlFunEntry* fe; /* Entry in fun table. */
+ unsigned label; /* Label of function entry. */
+ Uint32 num_free; /* Number of free variables. */
+ Eterm function; /* Name of local function. */
+ int arity; /* Arity (including free variables). */
+} Lambda;
+
+/*
+ * This structure keeps load-time information about a literal.
+ */
+
+typedef struct {
+ Eterm term; /* The tagged term (in the heap). */
+ Uint heap_size; /* (Exact) size on the heap. */
+ Uint offset; /* Offset from temporary location to final. */
+ Eterm* heap; /* Heap for term. */
+} Literal;
+
+/*
+ * This structure keeps information about an operand that needs to be
+ * patched to contain the correct address of a literal when the code is
+ * frozen.
+ */
+
+typedef struct literal_patch LiteralPatch;
+struct literal_patch {
+ int pos; /* Position in code */
+ LiteralPatch* next;
+};
+
+/*
+ * This structure keeps information about an operand that needs to be
+ * patched to contain the correct address for an address into the string table.
+ */
+
+typedef struct string_patch StringPatch;
+struct string_patch {
+ int pos; /* Position in code */
+ StringPatch* next;
+};
+
+/*
+ * This structure contains all information about the module being loaded.
+ */
+
+typedef struct {
+ /*
+ * The current logical file within the binary.
+ */
+
+ char* file_name; /* Name of file we are reading (usually chunk name). */
+ byte* file_p; /* Current pointer within file. */
+ unsigned file_left; /* Number of bytes left in file. */
+
+ /*
+ * The following are used mainly for diagnostics.
+ */
+
+ Eterm group_leader; /* Group leader (for diagnostics). */
+ Eterm module; /* Tagged atom for module name. */
+ Eterm function; /* Tagged atom for current function
+ * (or 0 if none).
+ */
+ unsigned arity; /* Arity for current function. */
+
+ /*
+ * All found chunks.
+ */
+
+ struct {
+ byte* start; /* Start of chunk (in binary). */
+ unsigned size; /* Size of chunk. */
+ } chunks[NUM_CHUNK_TYPES];
+
+ /*
+ * Used for code loading (mainly).
+ */
+
+ byte* code_start; /* Start of code file. */
+ unsigned code_size; /* Size of code file. */
+ int specific_op; /* Specific opcode (-1 if not found). */
+ int num_functions; /* Number of functions in module. */
+ int num_labels; /* Number of labels. */
+ int code_buffer_size; /* Size of code buffer in words. */
+ Eterm* code; /* Loaded code. */
+ int ci; /* Current index into loaded code. */
+ Label* labels;
+ Uint put_strings; /* Linked list of put_string instructions. */
+ Uint new_bs_put_strings; /* Linked list of i_new_bs_put_string instructions. */
+ StringPatch* string_patches; /* Linked list of position into string table to patch. */
+ Uint catches; /* Linked list of catch_yf instructions. */
+ unsigned loaded_size; /* Final size of code when loaded. */
+ byte mod_md5[16]; /* MD5 for module code. */
+ int may_load_nif; /* true if NIFs may later be loaded for this module */
+ int on_load; /* Index in the code for the on_load function
+ * (or 0 if there is no on_load function)
+ */
+
+ /*
+ * Atom table.
+ */
+
+ int num_atoms; /* Number of atoms in atom table. */
+ Eterm* atom; /* Atom table. */
+
+ int num_exps; /* Number of exports. */
+ ExportEntry* export; /* Pointer to export table. */
+
+ int num_imports; /* Number of imports. */
+ ImportEntry* import; /* Import entry (translated information). */
+
+ /*
+ * Generic instructions.
+ */
+ GenOp* genop; /* The last generic instruction seen. */
+ GenOp* free_genop; /* List of free genops. */
+ GenOpBlock* genop_blocks; /* List of all block of allocated genops. */
+
+ /*
+ * Lambda table.
+ */
+
+ int num_lambdas; /* Number of lambdas in table. */
+ int lambdas_allocated; /* Size of allocated lambda table. */
+ Lambda* lambdas; /* Pointer to lambdas. */
+ Lambda def_lambdas[16]; /* Default storage for lambda table. */
+ char* lambda_error; /* Delayed missing 'FunT' error. */
+
+ /*
+ * Literals (constant pool).
+ */
+
+ int num_literals; /* Number of literals in table. */
+ int allocated_literals; /* Number of literal entries allocated. */
+ Literal* literals; /* Array of literals. */
+ LiteralPatch* literal_patches; /* Operands that need to be patched. */
+ Uint total_literal_size; /* Total heap size for all literals. */
+
+ /*
+ * Floating point.
+ */
+ int new_float_instructions; /* New allocation scheme for floating point. */
+} LoaderState;
+
+typedef struct {
+ unsigned num_functions; /* Number of functions. */
+ Eterm* func_tab[1]; /* Pointers to each function. */
+} LoadedCode;
+
+#define GetTagAndValue(Stp, Tag, Val) \
+ do { \
+ Uint __w; \
+ GetByte(Stp, __w); \
+ Tag = __w & 0x07; \
+ if ((__w & 0x08) == 0) { \
+ Val = __w >> 4; \
+ } else if ((__w & 0x10) == 0) { \
+ Val = ((__w >> 5) << 8); \
+ GetByte(Stp, __w); \
+ Val |= __w; \
+ } else { \
+ if (!get_int_val(Stp, __w, &(Val))) goto load_error; \
+ } \
+ } while (0)
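+
+/*
+ * This decodes the BEAM "compact term" encoding. Two worked examples:
+ * a single byte 0x2A gives Tag = 0x2A & 7 = 2 and, since bit 3 is
+ * clear, Val = 0x2A >> 4 = 2; the byte 0x48 (bit 3 set, bit 4 clear)
+ * followed by 0x10 gives Tag = 0 and
+ * Val = ((0x48 >> 5) << 8) | 0x10 = 0x210. Anything larger falls
+ * through to get_int_val().
+ */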
+
+
+#define LoadError0(Stp, Fmt) \
+ do { \
+ load_printf(__LINE__, Stp, Fmt); \
+ goto load_error; \
+ } while (0)
+
+#define LoadError1(Stp, Fmt, Arg1) \
+ do { \
+	load_printf(__LINE__, Stp, Fmt, Arg1); \
+ goto load_error; \
+ } while (0)
+
+#define LoadError2(Stp, Fmt, Arg1, Arg2) \
+ do { \
+ load_printf(__LINE__, Stp, Fmt, Arg1, Arg2); \
+ goto load_error; \
+ } while (0)
+
+#define LoadError3(Stp, Fmt, Arg1, Arg2, Arg3) \
+ do { \
+	load_printf(__LINE__, Stp, Fmt, Arg1, Arg2, Arg3); \
+ goto load_error; \
+ } while (0)
+
+#define EndOfFile(Stp) ((Stp)->file_left == 0)
+
+#define GetInt(Stp, N, Dest) \
+ if (Stp->file_left < (N)) { \
+ short_file(__LINE__, Stp, (N)); \
+ goto load_error; \
+ } else { \
+ int __n = (N); \
+ Uint __result = 0; \
+ Stp->file_left -= (unsigned) __n; \
+ while (__n-- > 0) { \
+ __result = __result << 8 | *Stp->file_p++; \
+ } \
+ Dest = __result; \
+    }
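+
+/*
+ * GetInt reads an N-byte big-endian unsigned integer; for example
+ * the four bytes 00 00 01 2C decode to 300.
+ */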
+
+#define GetByte(Stp, Dest) \
+ if ((Stp)->file_left < 1) { \
+ short_file(__LINE__, (Stp), 1); \
+ goto load_error; \
+ } else { \
+ Dest = *(Stp)->file_p++; \
+ (Stp)->file_left--; \
+ }
+
+#define GetString(Stp, Dest, N) \
+ if (Stp->file_left < (N)) { \
+ short_file(__LINE__, Stp, (N)); \
+ goto load_error; \
+ } else { \
+ Dest = (Stp)->file_p; \
+ (Stp)->file_p += (N); \
+ (Stp)->file_left -= (N); \
+ }
+
+#define GetAtom(Stp, Index, Dest) \
+    if ((Index) == 0) { \
+	LoadError1((Stp), "bad atom index 0 ([]) in %s", (Stp)->file_name); \
+    } else if ((Index) < (Stp)->num_atoms) { \
+	Dest = (Stp)->atom[(Index)]; \
+    } else { \
+	LoadError2((Stp), "bad atom index %d in %s", (Index), (Stp)->file_name); \
+    }
+
+#ifdef DEBUG
+# define GARBAGE 0xCC
+# define DEBUG_INIT_GENOP(Dst) memset(Dst, GARBAGE, sizeof(GenOp))
+#else
+# define DEBUG_INIT_GENOP(Dst)
+#endif
+
+#define NEW_GENOP(Stp, Dst) \
+ do { \
+ if ((Stp)->free_genop == NULL) { \
+ new_genop((Stp)); \
+ } \
+ Dst = (Stp)->free_genop; \
+ (Stp)->free_genop = (Stp)->free_genop->next; \
+ DEBUG_INIT_GENOP(Dst); \
+ (Dst)->a = (Dst)->def_args; \
+ } while (0)
+
+#define FREE_GENOP(Stp, Genop) \
+ do { \
+ if ((Genop)->a != (Genop)->def_args) { \
+ erts_free(ERTS_ALC_T_LOADER_TMP, (Genop)->a); \
+ } \
+ (Genop)->next = (Stp)->free_genop; \
+ (Stp)->free_genop = (Genop); \
+ } while (0)
+
+#define GENOP_ARITY(Genop, Arity) \
+ do { \
+ ASSERT((Genop)->a == (Genop)->def_args); \
+ (Genop)->arity = (Arity); \
+ (Genop)->a = erts_alloc(ERTS_ALC_T_LOADER_TMP, \
+ (Genop)->arity * sizeof(GenOpArg)); \
+ } while (0)
+
+
+static int bin_load(Process *c_p, ErtsProcLocks c_p_locks,
+ Eterm group_leader, Eterm* modp, byte* bytes, int unloaded_size);
+static void init_state(LoaderState* stp);
+static int insert_new_code(Process *c_p, ErtsProcLocks c_p_locks,
+ Eterm group_leader, Eterm module,
+ Eterm* code, Uint size, Uint catches);
+static int scan_iff_file(LoaderState* stp, Uint* chunk_types,
+ Uint num_types, Uint num_mandatory);
+static int load_atom_table(LoaderState* stp);
+static int load_import_table(LoaderState* stp);
+static int read_export_table(LoaderState* stp);
+static int read_lambda_table(LoaderState* stp);
+static int read_literal_table(LoaderState* stp);
+static int read_code_header(LoaderState* stp);
+static int load_code(LoaderState* stp);
+static GenOp* gen_element(LoaderState* stp, GenOpArg Fail, GenOpArg Index,
+ GenOpArg Tuple, GenOpArg Dst);
+static GenOp* gen_split_values(LoaderState* stp, GenOpArg S, GenOpArg Fail,
+ GenOpArg Size, GenOpArg* Rest);
+static GenOp* gen_select_val(LoaderState* stp, GenOpArg S, GenOpArg Fail,
+ GenOpArg Size, GenOpArg* Rest);
+static GenOp* gen_select_big(LoaderState* stp, GenOpArg S, GenOpArg Fail,
+ GenOpArg Size, GenOpArg* Rest);
+static GenOp* const_select_val(LoaderState* stp, GenOpArg S, GenOpArg Fail,
+ GenOpArg Size, GenOpArg* Rest);
+static GenOp* gen_func_info(LoaderState* stp, GenOpArg mod, GenOpArg Func,
+ GenOpArg arity, GenOpArg label);
+static GenOp*
+gen_guard_bif(LoaderState* stp, GenOpArg Fail, GenOpArg Live, GenOpArg Bif,
+ GenOpArg Src, GenOpArg Dst);
+
+static int freeze_code(LoaderState* stp);
+
+static void final_touch(LoaderState* stp);
+static void short_file(int line, LoaderState* stp, unsigned needed);
+static void load_printf(int line, LoaderState* context, char *fmt, ...);
+static int transform_engine(LoaderState* st);
+static void id_to_string(Uint id, char* s);
+static void new_genop(LoaderState* stp);
+static int get_int_val(LoaderState* stp, Uint len_code, Uint* result);
+static int get_erlang_integer(LoaderState* stp, Uint len_code, Uint* result);
+static int new_label(LoaderState* stp);
+static void new_literal_patch(LoaderState* stp, int pos);
+static void new_string_patch(LoaderState* stp, int pos);
+static Uint new_literal(LoaderState* stp, Eterm** hpp, Uint heap_size);
+static int genopargcompare(GenOpArg* a, GenOpArg* b);
+static Eterm exported_from_module(Process* p, Eterm mod);
+static Eterm functions_in_module(Process* p, Eterm mod);
+static Eterm attributes_for_module(Process* p, Eterm mod);
+static Eterm compilation_info_for_module(Process* p, Eterm mod);
+static Eterm native_addresses(Process* p, Eterm mod);
+int patch_funentries(Eterm Patchlist);
+int patch(Eterm Addresses, Uint fe);
+static int safe_mul(Uint a, Uint b, Uint* resp);
+
+
+static int must_swap_floats;
+
+/*
+ * The following variables keep a sorted list of address ranges for
+ * each module. It allows us to quickly find a function given an
+ * instruction pointer.
+ */
+Range* modules = NULL; /* Sorted lists of module addresses. */
+int num_loaded_modules; /* Number of loaded modules. */
+int allocated_modules; /* Number of slots allocated. */
+Range* mid_module = NULL; /* Cached search start point */
+
+Uint erts_total_code_size;
+/**********************************************************************/
+
+
+void init_load(void)
+{
+ FloatDef f;
+
+ erts_total_code_size = 0;
+
+ beam_catches_init();
+
+ f.fd = 1.0;
+ must_swap_floats = (f.fw[0] == 0);
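+    /*
+     * 1.0 is 0x3FF0000000000000 as an IEEE double, so on a machine
+     * that stores the low 32-bit word first, f.fw[0] is zero. Float
+     * literals in a BEAM file are stored high word first (see the
+     * TAG_z handling in load_code()), so on such machines the two
+     * words must be swapped when loaded.
+     */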
+
+ allocated_modules = 128;
+ modules = (Range *) erts_alloc(ERTS_ALC_T_MODULE_REFS,
+ allocated_modules*sizeof(Range));
+ mid_module = modules;
+ num_loaded_modules = 0;
+}
+
+static void
+define_file(LoaderState* stp, char* name, int idx)
+{
+ stp->file_name = name;
+ stp->file_p = stp->chunks[idx].start;
+ stp->file_left = stp->chunks[idx].size;
+}
+
+int
+erts_load_module(Process *c_p,
+ ErtsProcLocks c_p_locks,
+ Eterm group_leader, /* Group leader or NIL if none. */
+ Eterm* modp, /*
+ * Module name as an atom (NIL to not check).
+ * On return, contains the actual module name.
+ */
+ byte* code, /* Points to the code to load */
+ int size) /* Size of code to load. */
+{
+ ErlDrvBinary* bin;
+ int result;
+
+ if (size >= 4 && code[0] == 'F' && code[1] == 'O' &&
+ code[2] == 'R' && code[3] == '1') {
+ /*
+ * The BEAM module is not compressed.
+ */
+ result = bin_load(c_p, c_p_locks, group_leader, modp, code, size);
+ } else {
+ /*
+ * The BEAM module is compressed (or possibly invalid/corrupted).
+ */
+ if ((bin = (ErlDrvBinary *) erts_gzinflate_buffer((char*)code, size)) == NULL) {
+ return -1;
+ }
+ result = bin_load(c_p, c_p_locks, group_leader, modp,
+ (byte*)bin->orig_bytes, bin->orig_size);
+ driver_free_binary(bin);
+ }
+ return result;
+}
+
+
+static int
+bin_load(Process *c_p, ErtsProcLocks c_p_locks,
+ Eterm group_leader, Eterm* modp, byte* bytes, int unloaded_size)
+{
+ LoaderState state;
+ int rval = -1;
+
+ init_state(&state);
+ state.module = *modp;
+ state.group_leader = group_leader;
+
+ /*
+ * Scan the IFF file.
+ */
+
+ state.file_name = "IFF header for Beam file";
+ state.file_p = bytes;
+ state.file_left = unloaded_size;
+ if (!scan_iff_file(&state, chunk_types, NUM_CHUNK_TYPES, NUM_MANDATORY)) {
+ goto load_error;
+ }
+
+ /*
+ * Read the header for the code chunk.
+ */
+
+ define_file(&state, "code chunk header", CODE_CHUNK);
+ if (!read_code_header(&state)) {
+ goto load_error;
+ }
+
+ /*
+ * Read the atom table.
+ */
+
+ define_file(&state, "atom table", ATOM_CHUNK);
+ if (!load_atom_table(&state)) {
+ goto load_error;
+ }
+
+ /*
+ * Read the import table.
+ */
+
+ define_file(&state, "import table", IMP_CHUNK);
+ if (!load_import_table(&state)) {
+ goto load_error;
+ }
+
+ /*
+ * Read the lambda (fun) table.
+ */
+
+ if (state.chunks[LAMBDA_CHUNK].size > 0) {
+ define_file(&state, "lambda (fun) table", LAMBDA_CHUNK);
+ if (!read_lambda_table(&state)) {
+ goto load_error;
+ }
+ }
+
+ /*
+ * Read the literal table.
+ */
+
+ if (state.chunks[LITERAL_CHUNK].size > 0) {
+ define_file(&state, "literals table (constant pool)", LITERAL_CHUNK);
+ if (!read_literal_table(&state)) {
+ goto load_error;
+ }
+ }
+
+ /*
+ * Load the code chunk.
+ */
+
+ state.file_name = "code chunk";
+ state.file_p = state.code_start;
+ state.file_left = state.code_size;
+ if (!load_code(&state) || !freeze_code(&state)) {
+ goto load_error;
+ }
+
+ /*
+ * Read and validate the export table. (This must be done after
+ * loading the code, because it contains labels.)
+ */
+
+ define_file(&state, "export table", EXP_CHUNK);
+ if (!read_export_table(&state)) {
+ goto load_error;
+ }
+
+    /*
+     * Insert the new code and give it the final touch: fixing the
+     * export table entries for exported and imported functions.
+     * (final_touch() itself cannot fail, but insert_new_code() can.)
+     */
+
+ rval = insert_new_code(c_p, c_p_locks, state.group_leader, state.module,
+ state.code, state.loaded_size, state.catches);
+ if (rval < 0) {
+ goto load_error;
+ }
+ final_touch(&state);
+
+ /*
+     * Loading succeeded.
+ */
+ rval = 0;
+ state.code = NULL; /* Prevent code from being freed. */
+ *modp = state.module;
+
+ /*
+ * If there is an on_load function, signal an error to
+ * indicate that the on_load function must be run.
+ */
+ if (state.on_load) {
+ rval = -5;
+ }
+
+ load_error:
+ if (state.code != 0) {
+ erts_free(ERTS_ALC_T_CODE, state.code);
+ }
+ if (state.labels != NULL) {
+ erts_free(ERTS_ALC_T_LOADER_TMP, (void *) state.labels);
+ }
+ if (state.atom != NULL) {
+ erts_free(ERTS_ALC_T_LOADER_TMP, (void *) state.atom);
+ }
+ if (state.import != NULL) {
+ erts_free(ERTS_ALC_T_LOADER_TMP, (void *) state.import);
+ }
+ if (state.export != NULL) {
+ erts_free(ERTS_ALC_T_LOADER_TMP, (void *) state.export);
+ }
+ if (state.lambdas != state.def_lambdas) {
+ erts_free(ERTS_ALC_T_LOADER_TMP, (void *) state.lambdas);
+ }
+ if (state.literals != NULL) {
+ int i;
+ for (i = 0; i < state.num_literals; i++) {
+ if (state.literals[i].heap != NULL) {
+ erts_free(ERTS_ALC_T_LOADER_TMP, (void *) state.literals[i].heap);
+ }
+ }
+ erts_free(ERTS_ALC_T_LOADER_TMP, (void *) state.literals);
+ }
+ while (state.literal_patches != NULL) {
+ LiteralPatch* next = state.literal_patches->next;
+ erts_free(ERTS_ALC_T_LOADER_TMP, (void *) state.literal_patches);
+ state.literal_patches = next;
+ }
+ while (state.string_patches != NULL) {
+ StringPatch* next = state.string_patches->next;
+ erts_free(ERTS_ALC_T_LOADER_TMP, (void *) state.string_patches);
+ state.string_patches = next;
+ }
+ while (state.genop_blocks) {
+ GenOpBlock* next = state.genop_blocks->next;
+ erts_free(ERTS_ALC_T_LOADER_TMP, (void *) state.genop_blocks);
+ state.genop_blocks = next;
+ }
+
+ return rval;
+}
+
+
+static void
+init_state(LoaderState* stp)
+{
+ stp->function = THE_NON_VALUE; /* Function not known yet */
+ stp->arity = 0;
+ stp->specific_op = -1;
+ stp->genop = NULL;
+ stp->atom = NULL;
+ stp->code = NULL;
+ stp->labels = NULL;
+ stp->import = NULL;
+ stp->export = NULL;
+ stp->free_genop = NULL;
+ stp->genop_blocks = NULL;
+ stp->num_lambdas = 0;
+ stp->lambdas_allocated = sizeof(stp->def_lambdas)/sizeof(Lambda);
+ stp->lambdas = stp->def_lambdas;
+ stp->lambda_error = NULL;
+ stp->num_literals = 0;
+ stp->allocated_literals = 0;
+ stp->literals = 0;
+ stp->total_literal_size = 0;
+ stp->literal_patches = 0;
+ stp->string_patches = 0;
+ stp->new_float_instructions = 0;
+ stp->may_load_nif = 0;
+ stp->on_load = 0;
+}
+
+static int
+insert_new_code(Process *c_p, ErtsProcLocks c_p_locks,
+ Eterm group_leader, Eterm module, Eterm* code, Uint size, Uint catches)
+{
+ Module* modp;
+ int rval;
+ int i;
+
+ if ((rval = beam_make_current_old(c_p, c_p_locks, module)) < 0) {
+ erts_dsprintf_buf_t *dsbufp = erts_create_logger_dsbuf();
+ erts_dsprintf(dsbufp,
+ "Module %T must be purged before loading\n",
+ module);
+ erts_send_error_to_logger(group_leader, dsbufp);
+ return rval;
+ }
+
+ /*
+ * Update module table.
+ */
+
+ erts_total_code_size += size;
+ modp = erts_put_module(module);
+ modp->code = code;
+ modp->code_length = size;
+ modp->catches = catches;
+
+ /*
+ * Update address table (used for finding a function from a PC value).
+ */
+
+ if (num_loaded_modules == allocated_modules) {
+ allocated_modules *= 2;
+ modules = (Range *) erts_realloc(ERTS_ALC_T_MODULE_REFS,
+ (void *) modules,
+ allocated_modules * sizeof(Range));
+ }
+ for (i = num_loaded_modules; i > 0; i--) {
+ if (code > modules[i-1].start) {
+ break;
+ }
+ modules[i] = modules[i-1];
+ }
+ modules[i].start = code;
+ modules[i].end = (Eterm *) (((byte *)code) + size);
+ num_loaded_modules++;
+ mid_module = &modules[num_loaded_modules/2];
+ return 0;
+}
+
+static int
+scan_iff_file(LoaderState* stp, Uint* chunk_types, Uint num_types, Uint num_mandatory)
+{
+ MD5_CTX context;
+ Uint id;
+ Uint count;
+ int i;
+
+ /*
+ * The binary must start with an IFF 'FOR1' chunk.
+ */
+
+ GetInt(stp, 4, id);
+ if (id != MakeIffId('F', 'O', 'R', '1')) {
+ LoadError0(stp, "not a BEAM file: no IFF 'FOR1' chunk");
+ }
+
+ /*
+ * Retrieve the chunk size and verify it. If the size is equal to
+ * or less than the size of the binary, it is ok and we will use it
+ * as the limit for the logical file size.
+ */
+
+ GetInt(stp, 4, count);
+ if (count > stp->file_left) {
+ LoadError2(stp, "form size %ld greater than size %ld of binary",
+ count, stp->file_left);
+ }
+ stp->file_left = count;
+
+ /*
+ * Verify that this is a BEAM file.
+ */
+
+ GetInt(stp, 4, id);
+ if (id != MakeIffId('B', 'E', 'A', 'M')) {
+ LoadError0(stp, "not a BEAM file: IFF form type is not 'BEAM'");
+ }
+
+ /*
+ * Initialize the chunks[] array in the state.
+ */
+
+ for (i = 0; i < num_types; i++) {
+ stp->chunks[i].start = NULL;
+ stp->chunks[i].size = 0;
+ }
+
+ /*
+ * Now we can go ahead and read all chunks in the BEAM form.
+ */
+
+ while (!EndOfFile(stp)) {
+
+ /*
+ * Read the chunk id and verify that it contains ASCII characters.
+ */
+ GetInt(stp, 4, id);
+ for (i = 0; i < 4; i++) {
+ unsigned c = (id >> i*8) & 0xff;
+ if (c < ' ' || c > 0x7E) {
+ LoadError1(stp, "non-ascii garbage '%lx' instead of chunk type id",
+ id);
+ }
+ }
+
+ /*
+ * Read the count and verify it.
+ */
+
+ GetInt(stp, 4, count);
+ if (count > stp->file_left) {
+	    LoadError3(stp, "chunk size %ld for '%lx' greater than size %ld of binary",
+		       count, id, stp->file_left);
+ }
+
+ /*
+ * See if the chunk is useful for the loader.
+ */
+ for (i = 0; i < num_types; i++) {
+ if (chunk_types[i] == id) {
+ stp->chunks[i].start = stp->file_p;
+ stp->chunks[i].size = count;
+ break;
+ }
+ }
+
+ /*
+ * Go on to the next chunk.
+ */
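+	/* IFF chunk data is padded to a 4-byte boundary: a 10-byte
+	 * chunk, for example, occupies 12 bytes, so round up. */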
+ count = 4*((count+3)/4);
+ stp->file_p += count;
+ stp->file_left -= count;
+ }
+
+ /*
+ * At this point, we have read the entire IFF file, and we
+ * know that it is syntactically correct.
+ *
+ * Now check that it contains all mandatory chunks. At the
+ * same time calculate the MD5 for the module.
+ */
+
+ MD5Init(&context);
+ for (i = 0; i < num_mandatory; i++) {
+ if (stp->chunks[i].start != NULL) {
+ MD5Update(&context, stp->chunks[i].start, stp->chunks[i].size);
+ } else {
+ char sbuf[5];
+
+ id_to_string(chunk_types[i], sbuf);
+ LoadError1(stp, "mandatory chunk of type '%s' not found\n", sbuf);
+ }
+ }
+ if (LITERAL_CHUNK < num_types) {
+ if (stp->chunks[LAMBDA_CHUNK].start != 0) {
+ byte* start = stp->chunks[LAMBDA_CHUNK].start;
+ Uint left = stp->chunks[LAMBDA_CHUNK].size;
+
+ /*
+ * The idea here is to ignore the OldUniq field for the fun; it is
+ * based on the old broken hash function, which can be different
+ * on little endian and big endian machines.
+ */
+ if (left >= 4) {
+ static byte zero[4];
+ MD5Update(&context, start, 4);
+ start += 4;
+ left -= 4;
+
+ while (left >= 24) {
+ /* Include: Function Arity Index NumFree */
+ MD5Update(&context, start, 20);
+ /* Set to zero: OldUniq */
+ MD5Update(&context, zero, 4);
+ start += 24;
+ left -= 24;
+ }
+ }
+ /* Can't happen for a correct 'FunT' chunk */
+ if (left > 0) {
+ MD5Update(&context, start, left);
+ }
+ }
+ if (stp->chunks[LITERAL_CHUNK].start != 0) {
+ MD5Update(&context, stp->chunks[LITERAL_CHUNK].start,
+ stp->chunks[LITERAL_CHUNK].size);
+ }
+ }
+ MD5Final(stp->mod_md5, &context);
+ return 1;
+
+ load_error:
+ return 0;
+}
+
+
+static int
+load_atom_table(LoaderState* stp)
+{
+ int i;
+
+ GetInt(stp, 4, stp->num_atoms);
+ stp->num_atoms++;
+ stp->atom = erts_alloc(ERTS_ALC_T_LOADER_TMP,
+ erts_next_heap_size((stp->num_atoms*sizeof(Eterm)),
+ 0));
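+
+    /*
+     * Atom indices in the BEAM format are 1-based (GetAtom() rejects
+     * index 0), so entry 0 of the table is left unused; that is why
+     * num_atoms was incremented above.
+     */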
+
+ /*
+ * Read all atoms.
+ */
+
+ for (i = 1; i < stp->num_atoms; i++) {
+ byte* atom;
+ Uint n;
+
+ GetByte(stp, n);
+ GetString(stp, atom, n);
+ stp->atom[i] = am_atom_put((char*)atom, n);
+ }
+
+ /*
+ * Check the module name if a module name was given.
+ */
+
+ if (is_nil(stp->module)) {
+ stp->module = stp->atom[1];
+ } else if (stp->atom[1] != stp->module) {
+ char sbuf[256];
+ Atom* ap;
+
+ ap = atom_tab(atom_val(stp->atom[1]));
+ memcpy(sbuf, ap->name, ap->len);
+ sbuf[ap->len] = '\0';
+ LoadError1(stp, "module name in object code is %s", sbuf);
+ }
+
+ return 1;
+
+ load_error:
+ return 0;
+}
+
+
+static int
+load_import_table(LoaderState* stp)
+{
+ int i;
+
+ GetInt(stp, 4, stp->num_imports);
+ stp->import = erts_alloc(ERTS_ALC_T_LOADER_TMP,
+ erts_next_heap_size((stp->num_imports *
+ sizeof(ImportEntry)),
+ 0));
+ for (i = 0; i < stp->num_imports; i++) {
+ int n;
+ Eterm mod;
+ Eterm func;
+ Uint arity;
+ Export* e;
+
+ GetInt(stp, 4, n);
+ if (n >= stp->num_atoms) {
+ LoadError2(stp, "import entry %d: invalid atom number %d", i, n);
+ }
+ mod = stp->import[i].module = stp->atom[n];
+ GetInt(stp, 4, n);
+ if (n >= stp->num_atoms) {
+ LoadError2(stp, "import entry %d: invalid atom number %d", i, n);
+ }
+ func = stp->import[i].function = stp->atom[n];
+ GetInt(stp, 4, arity);
+ if (arity > MAX_REG) {
+ LoadError2(stp, "import entry %d: invalid arity %d", i, arity);
+ }
+ stp->import[i].arity = arity;
+ stp->import[i].patches = 0;
+ stp->import[i].bf = NULL;
+
+ /*
+ * If the export entry refers to a BIF, get the pointer to
+ * the BIF function.
+ */
+ if ((e = erts_find_export_entry(mod, func, arity)) != NULL) {
+ if (e->code[3] == (Uint) em_apply_bif) {
+ stp->import[i].bf = (BifFunction) e->code[4];
+ if (func == am_load_nif && mod == am_erlang && arity == 2) {
+ stp->may_load_nif = 1;
+ }
+ }
+ }
+ }
+ return 1;
+
+ load_error:
+ return 0;
+}
+
+
+static int
+read_export_table(LoaderState* stp)
+{
+ static struct {
+ Eterm mod;
+ Eterm func;
+ int arity;
+ } allow_redef[] = {
+ /* The BIFs that are allowed to be redefined by Erlang code */
+ {am_erlang,am_apply,2},
+ {am_erlang,am_apply,3},
+ };
+ int i;
+
+ GetInt(stp, 4, stp->num_exps);
+ if (stp->num_exps > stp->num_functions) {
+ LoadError2(stp, "%d functions exported; only %d functions defined",
+ stp->num_exps, stp->num_functions);
+ }
+ stp->export
+ = (ExportEntry *) erts_alloc(ERTS_ALC_T_LOADER_TMP,
+ (stp->num_exps * sizeof(ExportEntry)));
+
+ for (i = 0; i < stp->num_exps; i++) {
+ Uint n;
+ Uint value;
+ Eterm func;
+ Uint arity;
+ Export* e;
+
+ GetInt(stp, 4, n);
+ GetAtom(stp, n, func);
+ stp->export[i].function = func;
+ GetInt(stp, 4, arity);
+ if (arity > MAX_REG) {
+ LoadError2(stp, "export table entry %d: absurdly high arity %d", i, arity);
+ }
+ stp->export[i].arity = arity;
+ GetInt(stp, 4, n);
+ if (n >= stp->num_labels) {
+ LoadError3(stp, "export table entry %d: invalid label %d (highest defined label is %d)", i, n, stp->num_labels);
+ }
+ value = stp->labels[n].value;
+ if (value == 0) {
+ LoadError2(stp, "export table entry %d: label %d not resolved", i, n);
+ }
+ stp->export[i].address = stp->code + value;
+
+ /*
+ * Check that we are not redefining a BIF (except the ones allowed to
+ * redefine).
+ */
+ if ((e = erts_find_export_entry(stp->module, func, arity)) != NULL) {
+ if (e->code[3] == (Uint) em_apply_bif) {
+ int j;
+
+ for (j = 0; j < sizeof(allow_redef)/sizeof(allow_redef[0]); j++) {
+ if (stp->module == allow_redef[j].mod &&
+ func == allow_redef[j].func &&
+ arity == allow_redef[j].arity) {
+ break;
+ }
+ }
+ if (j == sizeof(allow_redef)/sizeof(allow_redef[0])) {
+ LoadError2(stp, "exported function %T/%d redefines BIF",
+ func, arity);
+ }
+ }
+ }
+ }
+ return 1;
+
+ load_error:
+ return 0;
+}
+
+static int
+read_lambda_table(LoaderState* stp)
+{
+ int i;
+
+ GetInt(stp, 4, stp->num_lambdas);
+ stp->lambdas_allocated = stp->num_lambdas;
+ stp->lambdas = (Lambda *) erts_alloc(ERTS_ALC_T_LOADER_TMP,
+ stp->num_lambdas * sizeof(Lambda));
+ for (i = 0; i < stp->num_lambdas; i++) {
+ Uint n;
+ Uint32 Index;
+ Uint32 OldUniq;
+ ErlFunEntry* fe;
+ Uint arity;
+
+ GetInt(stp, 4, n); /* Function. */
+ GetAtom(stp, n, stp->lambdas[i].function);
+ GetInt(stp, 4, arity);
+ if (arity > MAX_REG) {
+ LoadError2(stp, "lambda entry %d: absurdly high arity %d", i, arity);
+ }
+ stp->lambdas[i].arity = arity;
+ GetInt(stp, 4, n);
+ if (n >= stp->num_labels) {
+ LoadError3(stp, "lambda entry %d: invalid label %d (highest defined label is %d)",
+ i, n, stp->num_labels);
+ }
+ stp->lambdas[i].label = n;
+ GetInt(stp, 4, Index);
+ GetInt(stp, 4, stp->lambdas[i].num_free);
+ GetInt(stp, 4, OldUniq);
+ fe = erts_put_fun_entry2(stp->module, OldUniq, i, stp->mod_md5,
+ Index, arity-stp->lambdas[i].num_free);
+ stp->lambdas[i].fe = fe;
+ }
+ return 1;
+
+ load_error:
+ return 0;
+}
+
+static int
+read_literal_table(LoaderState* stp)
+{
+ int i;
+ Uint uncompressed_sz;
+ byte* uncompressed = 0;
+
+ GetInt(stp, 4, uncompressed_sz);
+ uncompressed = erts_alloc(ERTS_ALC_T_TMP, uncompressed_sz);
+ if (erl_zlib_uncompress(uncompressed, &uncompressed_sz,
+ stp->file_p, stp->file_left) != Z_OK) {
+ LoadError0(stp, "failed to uncompress literal table (constant pool)");
+ }
+ stp->file_p = uncompressed;
+ stp->file_left = uncompressed_sz;
+ GetInt(stp, 4, stp->num_literals);
+ stp->literals = (Literal *) erts_alloc(ERTS_ALC_T_LOADER_TMP,
+ stp->num_literals * sizeof(Literal));
+ stp->allocated_literals = stp->num_literals;
+
+ for (i = 0; i < stp->num_literals; i++) {
+ stp->literals[i].heap = 0;
+ }
+
+ for (i = 0; i < stp->num_literals; i++) {
+ int sz;
+ Sint heap_size;
+ byte* p;
+ Eterm val;
+ Eterm* hp;
+
+ GetInt(stp, 4, sz); /* Size of external term format. */
+ GetString(stp, p, sz);
+ if ((heap_size = erts_decode_ext_size(p, sz, 1)) < 0) {
+ LoadError1(stp, "literal %d: bad external format", i);
+ }
+ hp = stp->literals[i].heap = erts_alloc(ERTS_ALC_T_LOADER_TMP,
+ heap_size*sizeof(Eterm));
+ val = erts_decode_ext(&hp, NULL, &p);
+ stp->literals[i].heap_size = hp - stp->literals[i].heap;
+ if (stp->literals[i].heap_size > heap_size) {
+ erl_exit(1, "overrun by %d word(s) for literal heap, term %d",
+ stp->literals[i].heap_size - heap_size, i);
+ }
+ if (is_non_value(val)) {
+ LoadError1(stp, "literal %d: bad external format", i);
+ }
+ stp->literals[i].term = val;
+ stp->total_literal_size += stp->literals[i].heap_size;
+ }
+ erts_free(ERTS_ALC_T_TMP, uncompressed);
+ return 1;
+
+ load_error:
+ if (uncompressed) {
+ erts_free(ERTS_ALC_T_TMP, uncompressed);
+ }
+ return 0;
+}
+
+
+static int
+read_code_header(LoaderState* stp)
+{
+ unsigned head_size;
+ unsigned version;
+ unsigned opcode_max;
+ int i;
+
+ /*
+ * Read size of sub-header for code information and from it calculate
+ * where the code begins. Also, use the size to limit the file size
+ * for header reading, so that we automatically get an error if the
+ * size is set too small.
+ */
+
+ GetInt(stp, 4, head_size);
+ stp->code_start = stp->file_p + head_size;
+ stp->code_size = stp->file_left - head_size;
+ stp->file_left = head_size;
+
+ /*
+ * Get and verify version of instruction set.
+ */
+
+ GetInt(stp, 4, version);
+ if (version != BEAM_FORMAT_NUMBER) {
+ LoadError2(stp, "wrong instruction set %d; expected %d",
+ version, BEAM_FORMAT_NUMBER);
+ }
+
+ /*
+ * Verify the number of the highest opcode used.
+ */
+
+ GetInt(stp, 4, opcode_max);
+ if (opcode_max > MAX_GENERIC_OPCODE) {
+ LoadError2(stp, "use of opcode %d; this emulator supports only up to %d",
+ opcode_max, MAX_GENERIC_OPCODE);
+ }
+
+ GetInt(stp, 4, stp->num_labels);
+ GetInt(stp, 4, stp->num_functions);
+
+ /*
+ * Initialize label table.
+ */
+
+ stp->labels = (Label *) erts_alloc(ERTS_ALC_T_LOADER_TMP,
+ stp->num_labels * sizeof(Label));
+ for (i = 0; i < stp->num_labels; i++) {
+ stp->labels[i].value = 0;
+ stp->labels[i].patches = 0;
+#ifdef ERTS_SMP
+ stp->labels[i].looprec_targeted = 0;
+#endif
+ }
+
+ /*
+ * Initialize code area.
+ */
+ stp->code_buffer_size = erts_next_heap_size(2048 + stp->num_functions, 0);
+ stp->code = (Eterm*) erts_alloc(ERTS_ALC_T_CODE,
+ sizeof(Eterm) * stp->code_buffer_size);
+
+ stp->code[MI_NUM_FUNCTIONS] = stp->num_functions;
+ stp->ci = MI_FUNCTIONS + stp->num_functions + 1;
+
+ stp->code[MI_ATTR_PTR] = 0;
+ stp->code[MI_ATTR_SIZE_ON_HEAP] = 0;
+ stp->code[MI_COMPILE_PTR] = 0;
+ stp->code[MI_COMPILE_SIZE_ON_HEAP] = 0;
+ stp->code[MI_NUM_BREAKPOINTS] = 0;
+
+ stp->put_strings = 0;
+ stp->new_bs_put_strings = 0;
+ stp->catches = 0;
+ return 1;
+
+ load_error:
+ return 0;
+}
+
+
+#define VerifyTag(Stp, Actual, Expected) \
+ if (Actual != Expected) { \
+ LoadError2(Stp, "bad tag %d; expected %d", Actual, Expected); \
+ } else {}
+
+#define Need(w) \
+ ASSERT(ci <= code_buffer_size); \
+ if (code_buffer_size < ci+(w)) { \
+ code_buffer_size = erts_next_heap_size(ci+(w), 0); \
+ stp->code = code \
+ = (Eterm *) erts_realloc(ERTS_ALC_T_CODE, \
+ (void *) code, \
+ code_buffer_size * sizeof(Eterm)); \
+ }
+
+
+
+static int
+load_code(LoaderState* stp)
+{
+ int i;
+ int tmp;
+ int ci;
+ int last_func_start = 0;
+ char* sign;
+ int arg; /* Number of current argument. */
+ int num_specific; /* Number of specific ops for current. */
+ Eterm* code;
+ int code_buffer_size;
+ int specific;
+ Uint last_label = 0; /* Number of last label. */
+ Uint function_number = 0;
+ GenOp* last_op = NULL;
+ GenOp** last_op_next = NULL;
+ int arity;
+
+ code = stp->code;
+ code_buffer_size = stp->code_buffer_size;
+ ci = stp->ci;
+
+ for (;;) {
+ int new_op;
+ GenOp* tmp_op;
+
+ ASSERT(ci <= code_buffer_size);
+
+ get_next_instr:
+ GetByte(stp, new_op);
+ if (new_op >= NUM_GENERIC_OPS) {
+ LoadError1(stp, "invalid opcode %d", new_op);
+ }
+ if (gen_opc[new_op].name[0] == '\0') {
+ LoadError1(stp, "invalid opcode %d", new_op);
+ }
+
+
+ /*
+ * Create a new generic operation and put it last in the chain.
+ */
+ if (last_op_next == NULL) {
+ last_op_next = &(stp->genop);
+ while (*last_op_next != NULL) {
+ last_op_next = &(*last_op_next)->next;
+ }
+ }
+
+ NEW_GENOP(stp, last_op);
+ last_op->next = NULL;
+ last_op->op = new_op;
+ *last_op_next = last_op;
+ last_op_next = &(last_op->next);
+ stp->specific_op = -1;
+
+ /*
+ * Read all arguments for the current operation.
+ */
+
+ arity = gen_opc[last_op->op].arity;
+ last_op->arity = 0;
+ ASSERT(arity <= MAX_OPARGS);
+
+#define GetValue(Stp, First, Val) \
+ do { \
+ if (((First) & 0x08) == 0) { \
+ Val = (First) >> 4; \
+ } else if (((First) & 0x10) == 0) { \
+ Uint __w; \
+ GetByte(Stp, __w); \
+ Val = (((First) >> 5) << 8) | __w; \
+ } else { \
+ if (!get_int_val(Stp, (First), &(Val))) goto load_error; \
+ } \
+ } while (0)
+
+ for (arg = 0; arg < arity; arg++) {
+ Uint first;
+
+ GetByte(stp, first);
+ last_op->a[arg].type = first & 0x07;
+ switch (last_op->a[arg].type) {
+ case TAG_i:
+ if ((first & 0x08) == 0) {
+ last_op->a[arg].val = first >> 4;
+ } else if ((first & 0x10) == 0) {
+ Uint w;
+ GetByte(stp, w);
+ ASSERT(first < 0x800);
+ last_op->a[arg].val = ((first >> 5) << 8) | w;
+ } else {
+ int i = get_erlang_integer(stp, first, &(last_op->a[arg].val));
+ if (i < 0) {
+ goto load_error;
+ }
+ last_op->a[arg].type = i;
+ }
+ break;
+ case TAG_u:
+ GetValue(stp, first, last_op->a[arg].val);
+ break;
+ case TAG_x:
+ GetValue(stp, first, last_op->a[arg].val);
+ if (last_op->a[arg].val == 0) {
+ last_op->a[arg].type = TAG_r;
+ } else if (last_op->a[arg].val >= MAX_REG) {
+ LoadError1(stp, "invalid x register number: %u",
+ last_op->a[arg].val);
+ }
+ break;
+ case TAG_y:
+ GetValue(stp, first, last_op->a[arg].val);
+ if (last_op->a[arg].val >= MAX_REG) {
+ LoadError1(stp, "invalid y register number: %u",
+ last_op->a[arg].val);
+ }
+ last_op->a[arg].val += CP_SIZE;
+ break;
+ case TAG_a:
+ GetValue(stp, first, last_op->a[arg].val);
+ if (last_op->a[arg].val == 0) {
+ last_op->a[arg].type = TAG_n;
+ } else if (last_op->a[arg].val >= stp->num_atoms) {
+ LoadError1(stp, "bad atom index: %d", last_op->a[arg].val);
+ } else {
+ last_op->a[arg].val = stp->atom[last_op->a[arg].val];
+ }
+ break;
+ case TAG_f:
+ GetValue(stp, first, last_op->a[arg].val);
+ if (last_op->a[arg].val == 0) {
+ last_op->a[arg].type = TAG_p;
+ } else if (last_op->a[arg].val >= stp->num_labels) {
+ LoadError1(stp, "bad label: %d", last_op->a[arg].val);
+ }
+ break;
+ case TAG_h:
+ GetValue(stp, first, last_op->a[arg].val);
+ if (last_op->a[arg].val > 65535) {
+ LoadError1(stp, "invalid range for character data type: %u",
+ last_op->a[arg].val);
+ }
+ break;
+ case TAG_z:
+ {
+ Uint ext_tag;
+ unsigned tag;
+
+ GetValue(stp, first, ext_tag);
+ switch (ext_tag) {
+ case 0: /* Floating point number */
+ {
+ Eterm* hp;
+# ifndef ARCH_64
+ Uint high, low;
+# endif
+ last_op->a[arg].val = new_literal(stp, &hp,
+ FLOAT_SIZE_OBJECT);
+ hp[0] = HEADER_FLONUM;
+ last_op->a[arg].type = TAG_q;
+# ifdef ARCH_64
+ GetInt(stp, 8, hp[1]);
+# else
+ GetInt(stp, 4, high);
+ GetInt(stp, 4, low);
+ if (must_swap_floats) {
+ Uint t = high;
+ high = low;
+ low = t;
+ }
+ hp[1] = high;
+ hp[2] = low;
+# endif
+ }
+ break;
+ case 1: /* List. */
+ if (arg+1 != arity) {
+ LoadError0(stp, "list argument must be the last argument");
+ }
+ GetTagAndValue(stp, tag, last_op->a[arg].val);
+ VerifyTag(stp, tag, TAG_u);
+ last_op->a[arg].type = TAG_u;
+ last_op->a =
+ erts_alloc(ERTS_ALC_T_LOADER_TMP,
+ (arity+last_op->a[arg].val)
+ *sizeof(GenOpArg));
+ memcpy(last_op->a, last_op->def_args,
+ arity*sizeof(GenOpArg));
+ arity += last_op->a[arg].val;
+ break;
+ case 2: /* Float register. */
+ GetTagAndValue(stp, tag, last_op->a[arg].val);
+ VerifyTag(stp, tag, TAG_u);
+ last_op->a[arg].type = TAG_l;
+ break;
+ case 3: /* Allocation list. */
+ {
+ Uint n;
+ Uint type;
+ Uint val;
+ Uint words = 0;
+
+ stp->new_float_instructions = 1;
+ GetTagAndValue(stp, tag, n);
+ VerifyTag(stp, tag, TAG_u);
+ while (n-- > 0) {
+ GetTagAndValue(stp, tag, type);
+ VerifyTag(stp, tag, TAG_u);
+ GetTagAndValue(stp, tag, val);
+ VerifyTag(stp, tag, TAG_u);
+ switch (type) {
+ case 0: /* Heap words */
+ words += val;
+ break;
+ case 1:
+ words += FLOAT_SIZE_OBJECT*val;
+ break;
+ default:
+ LoadError1(stp, "alloc list: bad allocation "
+ "descriptor %d", type);
+ break;
+ }
+ }
+ last_op->a[arg].type = TAG_u;
+ last_op->a[arg].val = words;
+ break;
+ }
+ case 4: /* Literal. */
+ {
+ Uint val;
+
+ GetTagAndValue(stp, tag, val);
+ VerifyTag(stp, tag, TAG_u);
+ if (val >= stp->num_literals) {
+ LoadError1(stp, "bad literal index %d", val);
+ }
+ last_op->a[arg].type = TAG_q;
+ last_op->a[arg].val = val;
+ break;
+ }
+ default:
+ LoadError1(stp, "invalid extended tag %d", ext_tag);
+ break;
+ }
+ }
+ break;
+ default:
+ LoadError1(stp, "bad tag %d", last_op->a[arg].type);
+ break;
+ }
+ last_op->arity++;
+ }
+#undef GetValue
+
+ ASSERT(arity == last_op->arity);
+
+ do_transform:
+ if (stp->genop == NULL) {
+ last_op_next = NULL;
+ goto get_next_instr;
+ }
+
+ if (gen_opc[stp->genop->op].transform != -1) {
+ int need;
+ tmp_op = stp->genop;
+
+ for (need = gen_opc[stp->genop->op].min_window-1; need > 0; need--) {
+ if (tmp_op == NULL) {
+ goto get_next_instr;
+ }
+ tmp_op = tmp_op->next;
+ }
+ switch (transform_engine(stp)) {
+ case TE_FAIL:
+ last_op_next = NULL;
+ last_op = NULL;
+ break;
+ case TE_OK:
+ last_op_next = NULL;
+ last_op = NULL;
+ goto do_transform;
+ case TE_SHORT_WINDOW:
+ last_op_next = NULL;
+ last_op = NULL;
+ goto get_next_instr;
+ }
+ }
+
+ if (stp->genop == NULL) {
+ last_op_next = NULL;
+ goto get_next_instr;
+ }
+
+ /*
+ * Special error message instruction.
+ */
+ if (stp->genop->op == genop_too_old_compiler_0) {
+ LoadError0(stp, "please re-compile this module with an "
+ ERLANG_OTP_RELEASE " compiler");
+ }
+
+ /*
+ * From the collected generic instruction, find the specific
+ * instruction.
+ */
+
+ {
+ Uint32 mask[3] = {0, 0, 0};
+
+ tmp_op = stp->genop;
+ arity = gen_opc[tmp_op->op].arity;
+ if (arity > 6) {
+ LoadError0(stp, "no specific operation found (arity > 6)");
+ }
+ for (arg = 0; arg < arity; arg++) {
+ mask[arg/2] |= ((Uint32)1 << (tmp_op->a[arg].type)) << ((arg%2)*16);
+ }
+ specific = gen_opc[tmp_op->op].specific;
+ num_specific = gen_opc[tmp_op->op].num_specific;
+ for (i = 0; i < num_specific; i++) {
+ if (((opc[specific].mask[0] & mask[0]) == mask[0]) &&
+ ((opc[specific].mask[1] & mask[1]) == mask[1]) &&
+ ((opc[specific].mask[2] & mask[2]) == mask[2])) {
+ break;
+ }
+ specific++;
+ }
+
+ /*
+ * No specific operation found.
+ */
+ if (i == num_specific) {
+ stp->specific_op = -1;
+ for (arg = 0; arg < tmp_op->arity; arg++) {
+ /*
+ * We'll give the error message here (instead of earlier)
+ * to get a printout of the offending operation.
+ */
+ if (tmp_op->a[arg].type == TAG_h) {
+		    LoadError0(stp, "the character data type is not supported");
+ }
+ }
+
+ /*
+ * No specific operations and no transformations means that
+ * the instruction is obsolete.
+ */
+ if (num_specific == 0 && gen_opc[tmp_op->op].transform == -1) {
+ LoadError0(stp, "please re-compile this module with an "
+			   ERLANG_OTP_RELEASE " compiler");
+ }
+
+ LoadError0(stp, "no specific operation found");
+ }
+
+ stp->specific_op = specific;
+ Need(opc[stp->specific_op].sz+2); /* Extra margin for packing */
+ code[ci++] = BeamOpCode(stp->specific_op);
+ }
+
+ /*
+ * Load the found specific operation.
+ */
+
+ sign = opc[stp->specific_op].sign;
+ ASSERT(sign != NULL);
+ arg = 0;
+ while (*sign) {
+ Uint tag;
+
+ ASSERT(arg < stp->genop->arity);
+ tag = stp->genop->a[arg].type;
+ switch (*sign) {
+ case 'r': /* x(0) */
+ case 'n': /* Nil */
+ VerifyTag(stp, tag_to_letter[tag], *sign);
+ break;
+ case 'x': /* x(N) */
+ case 'y': /* y(N) */
+ VerifyTag(stp, tag_to_letter[tag], *sign);
+ code[ci++] = tmp_op->a[arg].val * sizeof(Eterm);
+ break;
+ case 'a': /* Tagged atom */
+ VerifyTag(stp, tag_to_letter[tag], *sign);
+ code[ci++] = tmp_op->a[arg].val;
+ break;
+ case 'i': /* Tagged integer */
+ ASSERT(is_small(tmp_op->a[arg].val));
+ VerifyTag(stp, tag_to_letter[tag], *sign);
+ code[ci++] = tmp_op->a[arg].val;
+ break;
+ case 'c': /* Tagged constant */
+ switch (tag) {
+ case TAG_i:
+ code[ci++] = make_small(tmp_op->a[arg].val);
+ break;
+ case TAG_a:
+ code[ci++] = tmp_op->a[arg].val;
+ break;
+ case TAG_n:
+ code[ci++] = NIL;
+ break;
+ case TAG_q:
+ new_literal_patch(stp, ci);
+ code[ci++] = tmp_op->a[arg].val;
+ break;
+ default:
+ LoadError1(stp, "bad tag %d for tagged constant",
+ tmp_op->a[arg].type);
+ break;
+ }
+ break;
+ case 's': /* Any source (tagged constant or register) */
+ switch (tag) {
+ case TAG_r:
+ code[ci++] = make_rreg();
+ break;
+ case TAG_x:
+ code[ci++] = make_xreg(tmp_op->a[arg].val);
+ break;
+ case TAG_y:
+ code[ci++] = make_yreg(tmp_op->a[arg].val);
+ break;
+ case TAG_i:
+ code[ci++] = make_small(tmp_op->a[arg].val);
+ break;
+ case TAG_a:
+ code[ci++] = tmp_op->a[arg].val;
+ break;
+ case TAG_n:
+ code[ci++] = NIL;
+ break;
+ default:
+ LoadError1(stp, "bad tag %d for general source",
+ tmp_op->a[arg].type);
+ break;
+ }
+ break;
+ case 'd': /* Destination (x(0), x(N), y(N) */
+ switch (tag) {
+ case TAG_r:
+ code[ci++] = make_rreg();
+ break;
+ case TAG_x:
+ code[ci++] = make_xreg(tmp_op->a[arg].val);
+ break;
+ case TAG_y:
+ code[ci++] = make_yreg(tmp_op->a[arg].val);
+ break;
+ default:
+ LoadError1(stp, "bad tag %d for destination",
+ tmp_op->a[arg].type);
+ break;
+ }
+ break;
+ case 'I': /* Untagged integer (or pointer). */
+ VerifyTag(stp, tag, TAG_u);
+ code[ci++] = tmp_op->a[arg].val;
+ break;
+ case 't': /* Small untagged integer -- can be packed. */
+ VerifyTag(stp, tag, TAG_u);
+ code[ci++] = tmp_op->a[arg].val;
+ break;
+ case 'A': /* Arity value. */
+ VerifyTag(stp, tag, TAG_u);
+ code[ci++] = make_arityval(tmp_op->a[arg].val);
+ break;
+ case 'f': /* Destination label */
+ VerifyTag(stp, tag_to_letter[tag], *sign);
+ code[ci] = stp->labels[tmp_op->a[arg].val].patches;
+ stp->labels[tmp_op->a[arg].val].patches = ci;
+ ci++;
+ break;
+ case 'j': /* 'f' or 'p' */
+ if (tag == TAG_p) {
+ code[ci] = 0;
+ } else if (tag == TAG_f) {
+ code[ci] = stp->labels[tmp_op->a[arg].val].patches;
+ stp->labels[tmp_op->a[arg].val].patches = ci;
+ } else {
+ LoadError3(stp, "bad tag %d; expected %d or %d",
+ tag, TAG_f, TAG_p);
+ }
+ ci++;
+ break;
+ case 'L': /* Define label */
+ ci--; /* Remove label from loaded code */
+ ASSERT(stp->specific_op == op_label_L);
+ VerifyTag(stp, tag, TAG_u);
+ last_label = tmp_op->a[arg].val;
+ if (!(0 < last_label && last_label < stp->num_labels)) {
+ LoadError2(stp, "invalid label num %d (0 < label < %d)",
+ tmp_op->a[arg].val, stp->num_labels);
+ }
+ if (stp->labels[last_label].value != 0) {
+ LoadError1(stp, "label %d defined more than once", last_label);
+ }
+ stp->labels[last_label].value = ci;
+ ASSERT(stp->labels[last_label].patches < ci);
+ break;
+ case 'e': /* Export entry */
+ VerifyTag(stp, tag, TAG_u);
+ if (tmp_op->a[arg].val >= stp->num_imports) {
+ LoadError1(stp, "invalid import table index %d", tmp_op->a[arg].val);
+ }
+ code[ci] = stp->import[tmp_op->a[arg].val].patches;
+ stp->import[tmp_op->a[arg].val].patches = ci;
+ ci++;
+ break;
+ case 'b':
+ VerifyTag(stp, tag, TAG_u);
+ i = tmp_op->a[arg].val;
+ if (i >= stp->num_imports) {
+ LoadError1(stp, "invalid import table index %d", i);
+ }
+ if (stp->import[i].bf == NULL) {
+ LoadError1(stp, "not a BIF: import table index %d", i);
+ }
+ code[ci++] = (Eterm) stp->import[i].bf;
+ break;
+ case 'P': /* Byte offset into tuple */
+ VerifyTag(stp, tag, TAG_u);
+		tmp = tmp_op->a[arg].val;
+		code[ci++] = (Eterm) ((tmp+1) * sizeof(Eterm *));
+ break;
+ case 'l': /* Floating point register. */
+ VerifyTag(stp, tag_to_letter[tag], *sign);
+ code[ci++] = tmp_op->a[arg].val * sizeof(FloatDef);
+ break;
+ case 'q': /* Literal */
+ new_literal_patch(stp, ci);
+ code[ci++] = tmp_op->a[arg].val;
+ break;
+ default:
+ LoadError1(stp, "bad argument tag: %d", *sign);
+ }
+ sign++;
+ arg++;
+ }
+
+ /*
+ * Load any list arguments using the primitive tags.
+ */
+
+ for ( ; arg < tmp_op->arity; arg++) {
+ switch (tmp_op->a[arg].type) {
+ case TAG_i:
+ Need(1);
+ code[ci++] = make_small(tmp_op->a[arg].val);
+ break;
+ case TAG_u:
+ case TAG_a:
+ case TAG_v:
+ Need(1);
+ code[ci++] = tmp_op->a[arg].val;
+ break;
+ case TAG_f:
+ Need(1);
+ code[ci] = stp->labels[tmp_op->a[arg].val].patches;
+ stp->labels[tmp_op->a[arg].val].patches = ci;
+ ci++;
+ break;
+ case TAG_q:
+ {
+ Eterm lit;
+
+ lit = stp->literals[tmp_op->a[arg].val].term;
+ if (is_big(lit)) {
+ Eterm* bigp;
+ Uint size;
+
+ bigp = big_val(lit);
+ size = bignum_header_arity(*bigp);
+ Need(size+1);
+ code[ci++] = *bigp++;
+ while (size-- > 0) {
+ code[ci++] = *bigp++;
+ }
+ } else if (is_float(lit)) {
+#ifdef ARCH_64
+ Need(1);
+ code[ci++] = float_val(stp->literals[tmp_op->a[arg].val].term)[1];
+#else
+ Eterm* fptr;
+
+ fptr = float_val(stp->literals[tmp_op->a[arg].val].term)+1;
+ Need(2);
+ code[ci++] = *fptr++;
+ code[ci++] = *fptr;
+#endif
+ } else {
+ LoadError0(stp, "literal is neither float nor big");
+ }
+ }
+ break;
+ default:
+ LoadError1(stp, "unsupported primitive type '%c'",
+ tag_to_letter[tmp_op->a[arg].type]);
+ }
+ }
+
+ /*
+ * The packing engine.
+ */
+ if (opc[stp->specific_op].pack[0]) {
+ char* prog; /* Program for packing engine. */
+ Uint stack[8]; /* Stack. */
+ Uint* sp = stack; /* Points to next free position. */
+ Uint packed = 0; /* Accumulator for packed operations. */
+
+ for (prog = opc[stp->specific_op].pack; *prog; prog++) {
+ switch (*prog) {
+ case 'g': /* Get instruction; push on stack. */
+ *sp++ = code[--ci];
+ break;
+ case 'i': /* Initialize packing accumulator. */
+ packed = code[--ci];
+ break;
+ case '0': /* Tight shift */
+ packed = (packed << BEAM_TIGHT_SHIFT) | code[--ci];
+ break;
+ case '6': /* Shift 16 steps */
+ packed = (packed << 16) | code[--ci];
+ break;
+ case 'p': /* Put instruction (from stack). */
+ code[ci++] = *--sp;
+ break;
+ case 'P': /* Put packed operands. */
+ *sp++ = packed;
+ packed = 0;
+ break;
+ default:
+ ASSERT(0);
+ }
+ }
+ ASSERT(sp == stack); /* Incorrect program? */
+ }
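+	/*
+	 * As an illustration of the engine above (a sketch, not a
+	 * pack program taken from the opcode tables): with two
+	 * operand words A and B last in the code area, the program
+	 * "i0Pp" would load B into the accumulator ('i'), shift it
+	 * and OR in A ('0'), push the packed word on the stack ('P')
+	 * and write it back ('p'), replacing the two words with one.
+	 */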
+
+ /*
+ * Handle a few special cases.
+ */
+ switch (stp->specific_op) {
+ case op_i_func_info_IaaI:
+ {
+ Uint offset;
+ enum { FINFO_SZ = 5 };
+
+ if (function_number >= stp->num_functions) {
+ LoadError1(stp, "too many functions in module (header said %d)",
+ stp->num_functions);
+ }
+
+ if (stp->may_load_nif) {
+ const int finfo_ix = ci - FINFO_SZ;
+ enum { MIN_FUNC_SZ = 3 };
+ if (finfo_ix - last_func_start < MIN_FUNC_SZ && last_func_start) {
+ /* Must make room for call_nif op */
+ int pad = MIN_FUNC_SZ - (finfo_ix - last_func_start);
+ ASSERT(pad > 0 && pad < MIN_FUNC_SZ);
+ Need(pad);
+ sys_memmove(&code[finfo_ix+pad], &code[finfo_ix], FINFO_SZ*sizeof(Eterm));
+ sys_memset(&code[finfo_ix], 0, pad*sizeof(Eterm));
+ ci += pad;
+ stp->labels[last_label].value += pad;
+ }
+ }
+ last_func_start = ci;
+ /*
+ * Save context for error messages.
+ */
+ stp->function = code[ci-2];
+ stp->arity = code[ci-1];
+ ASSERT(stp->labels[last_label].value == ci - FINFO_SZ);
+ offset = MI_FUNCTIONS + function_number;
+ code[offset] = stp->labels[last_label].patches;
+ stp->labels[last_label].patches = offset;
+ function_number++;
+ if (stp->arity > MAX_ARG) {
+ LoadError1(stp, "too many arguments: %d", stp->arity);
+ }
+#ifdef DEBUG
+ ASSERT(stp->labels[0].patches == 0); /* Should not be referenced. */
+ for (i = 1; i < stp->num_labels; i++) {
+ ASSERT(stp->labels[i].patches < ci);
+ }
+#endif
+ }
+ break;
+ case op_on_load:
+ ci--; /* Get rid of the instruction */
+
+ /* Remember offset for the on_load function. */
+ stp->on_load = ci;
+ break;
+ case op_put_string_IId:
+ {
+ /*
+ * At entry:
+ *
+ * code[ci-4] &&lb_put_string_IId
+ * code[ci-3] length of string
+ * code[ci-2] offset into string table
+ * code[ci-1] destination register
+ *
+ * Since we don't know the address of the string table yet,
+ * just check the offset and length for validity, and use
+ * the instruction field as a link field to link all put_string
+ * instructions into a single linked list. At exit:
+ *
+ * code[ci-4] pointer to next put_string instruction (or 0
+ * if this is the last)
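+	     *
+	     * (A standalone sketch of this linked-list threading trick
+	     * follows the end of this function.)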
+ */
+ Uint offset = code[ci-2];
+ Uint len = code[ci-3];
+ unsigned strtab_size = stp->chunks[STR_CHUNK].size;
+ if (offset > strtab_size || offset + len > strtab_size) {
+ LoadError2(stp, "invalid string reference %d, size %d", offset, len);
+ }
+ code[ci-4] = stp->put_strings;
+ stp->put_strings = ci - 4;
+ }
+ break;
+ case op_bs_put_string_II:
+ {
+ /*
+ * At entry:
+ *
+ * code[ci-3] &&lb_i_new_bs_put_string_II
+ * code[ci-2] length of string
+ * code[ci-1] offset into string table
+ *
+ * Since we don't know the address of the string table yet,
+ * just check the offset and length for validity, and use
+ * the instruction field as a link field to link all put_string
+ * instructions into a single linked list. At exit:
+ *
+ * code[ci-3] pointer to next i_new_bs_put_string instruction (or 0
+ * if this is the last)
+ */
+ Uint offset = code[ci-1];
+ Uint len = code[ci-2];
+ unsigned strtab_size = stp->chunks[STR_CHUNK].size;
+ if (offset > strtab_size || offset + len > strtab_size) {
+ LoadError2(stp, "invalid string reference %d, size %d", offset, len);
+ }
+ code[ci-3] = stp->new_bs_put_strings;
+ stp->new_bs_put_strings = ci - 3;
+ }
+ break;
+ case op_i_bs_match_string_rfII:
+ case op_i_bs_match_string_xfII:
+ new_string_patch(stp, ci-1);
+ break;
+
+ case op_catch_yf:
+ /* code[ci-3] &&lb_catch_yf
+ * code[ci-2] y-register offset in E
+ * code[ci-1] label; index tagged as CATCH at runtime
+ */
+ code[ci-3] = stp->catches;
+ stp->catches = ci-3;
+ break;
+
+ /*
+ * End of code found.
+ */
+ case op_int_code_end:
+ stp->code_buffer_size = code_buffer_size;
+ stp->ci = ci;
+ return 1;
+ }
+
+ /*
+ * Delete the generic instruction just loaded.
+ */
+ {
+ GenOp* next = stp->genop->next;
+ FREE_GENOP(stp, stp->genop);
+ stp->genop = next;
+ goto do_transform;
+ }
+ }
+
+#undef Need
+
+ load_error:
+ return 0;
+}
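+
+/*
+ * Illustrative sketch of the linked-list threading used by the
+ * put_string, bs_put_string and catch_yf cases above: while the final
+ * address of the string table (or the catch index) is still unknown,
+ * the instruction word itself serves as the "next" link of a chain
+ * that freeze_code() later unthreads and patches.  Offset 0 can act
+ * as the end-of-chain sentinel because the module header occupies it.
+ * Standalone demo (assumed names, not loader code):
+ */
+#if 0
+static void threading_demo(void)
+{
+    Uint code[8] = {0};
+    Uint head = 0;
+    Uint real_value = 0xBEEF;	/* stands in for an opcode or pointer */
+    int offs[3] = {1, 4, 6};	/* three instruction slots */
+    int i;
+
+    for (i = 0; i < 3; i++) {	/* "load": thread each slot onto the chain */
+	code[offs[i]] = head;
+	head = offs[i];
+    }
+    while (head != 0) {		/* "freeze": unthread and patch each slot */
+	Uint next = code[head];
+	code[head] = real_value;
+	head = next;
+    }
+}
+#endif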
+
+
+#define succ(St, X, Y) ((X).type == (Y).type && (X).val + 1 == (Y).val)
+#define succ2(St, X, Y) ((X).type == (Y).type && (X).val + 2 == (Y).val)
+#define succ3(St, X, Y) ((X).type == (Y).type && (X).val + 3 == (Y).val)
+
+#ifdef NO_FPE_SIGNALS
+#define no_fpe_signals(St) 1
+#else
+#define no_fpe_signals(St) 0
+#endif
+
+/*
+ * Predicate that tests whether a jump table can be used.
+ */
+
+static int
+use_jump_tab(LoaderState* stp, GenOpArg Size, GenOpArg* Rest)
+{
+ Sint min, max;
+ Sint i;
+
+ if (Size.val < 2 || Size.val % 2 != 0) {
+ return 0;
+ }
+
+    /* We may be called with sequences of tagged fixnums or atoms;
+       return early in the latter case, before we access the values. */
+ if (Rest[0].type != TAG_i || Rest[1].type != TAG_f)
+ return 0;
+ min = max = Rest[0].val;
+ for (i = 2; i < Size.val; i += 2) {
+ if (Rest[i].type != TAG_i || Rest[i+1].type != TAG_f) {
+ return 0;
+ }
+ if (Rest[i].val < min) {
+ min = Rest[i].val;
+ } else if (max < Rest[i].val) {
+ max = Rest[i].val;
+ }
+ }
+
+ return max - min <= Size.val;
+}
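+
+/*
+ * Note on the density test above: Size.val counts both values and
+ * labels, so with N values the jump table is used only when
+ * max - min + 1 <= 2*N + 1, i.e. when about half or more of the
+ * table slots would be occupied.
+ */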
+
+/*
+ * Predicate to test whether all values in a table are big numbers.
+ */
+
+static int
+all_values_are_big(LoaderState* stp, GenOpArg Size, GenOpArg* Rest)
+{
+ int i;
+
+ if (Size.val < 2 || Size.val % 2 != 0) {
+ return 0;
+ }
+
+ for (i = 0; i < Size.val; i += 2) {
+ if (Rest[i].type != TAG_q) {
+ return 0;
+ }
+ if (is_not_big(stp->literals[Rest[i].val].term)) {
+ return 0;
+ }
+ if (Rest[i+1].type != TAG_f) {
+ return 0;
+ }
+ }
+
+ return 1;
+}
+
+
+/*
+ * Predicate to test whether all values in a table have a fixed size.
+ */
+
+static int
+fixed_size_values(LoaderState* stp, GenOpArg Size, GenOpArg* Rest)
+{
+ int i;
+
+ if (Size.val < 2 || Size.val % 2 != 0) {
+ return 0;
+ }
+
+ for (i = 0; i < Size.val; i += 2) {
+ if (Rest[i+1].type != TAG_f)
+ return 0;
+ switch (Rest[i].type) {
+ case TAG_a:
+ case TAG_i:
+ case TAG_v:
+ break;
+ case TAG_q:
+ return is_float(stp->literals[Rest[i].val].term);
+ default:
+ return 0;
+ }
+ }
+
+ return 1;
+}
+
+static int
+mixed_types(LoaderState* stp, GenOpArg Size, GenOpArg* Rest)
+{
+ int i;
+ Uint type;
+
+ if (Size.val < 2 || Size.val % 2 != 0) {
+ return 0;
+ }
+
+ type = Rest[0].type;
+ for (i = 0; i < Size.val; i += 2) {
+ if (Rest[i].type != type)
+ return 1;
+ }
+
+ return 0;
+}
+
+/*
+ * Generate an instruction for element/2.
+ */
+
+static GenOp*
+gen_element(LoaderState* stp, GenOpArg Fail, GenOpArg Index,
+ GenOpArg Tuple, GenOpArg Dst)
+{
+ GenOp* op;
+
+ NEW_GENOP(stp, op);
+ op->op = genop_i_element_4;
+ op->arity = 4;
+ op->a[0] = Fail;
+ op->a[1] = Index;
+ op->a[2] = Tuple;
+ op->a[3] = Dst;
+ op->next = NULL;
+
+ /*
+ * If safe, generate a faster instruction.
+ */
+
+ if (Index.type == TAG_i && Index.val > 0 &&
+ (Tuple.type == TAG_r || Tuple.type == TAG_x || Tuple.type == TAG_y)) {
+ op->op = genop_i_fast_element_4;
+ op->a[1].type = TAG_u;
+ op->a[1].val = Index.val;
+ }
+
+ return op;
+}
+
+static GenOp*
+gen_bs_save(LoaderState* stp, GenOpArg Reg, GenOpArg Index)
+{
+ GenOp* op;
+
+ NEW_GENOP(stp, op);
+ op->op = genop_i_bs_save2_2;
+ op->arity = 2;
+ op->a[0] = Reg;
+ op->a[1] = Index;
+ if (Index.type == TAG_u) {
+ op->a[1].val = Index.val+1;
+ } else if (Index.type == TAG_a && Index.val == am_start) {
+ op->a[1].type = TAG_u;
+ op->a[1].val = 0;
+ }
+ op->next = NULL;
+ return op;
+}
+
+static GenOp*
+gen_bs_restore(LoaderState* stp, GenOpArg Reg, GenOpArg Index)
+{
+ GenOp* op;
+
+ NEW_GENOP(stp, op);
+ op->op = genop_i_bs_restore2_2;
+ op->arity = 2;
+ op->a[0] = Reg;
+ op->a[1] = Index;
+ if (Index.type == TAG_u) {
+ op->a[1].val = Index.val+1;
+ } else if (Index.type == TAG_a && Index.val == am_start) {
+ op->a[1].type = TAG_u;
+ op->a[1].val = 0;
+ }
+ op->next = NULL;
+ return op;
+}
+
+/*
+ * Generate the fastest instruction to fetch an integer from a binary.
+ */
+
+static GenOp*
+gen_get_integer2(LoaderState* stp, GenOpArg Fail, GenOpArg Ms, GenOpArg Live,
+ GenOpArg Size, GenOpArg Unit,
+ GenOpArg Flags, GenOpArg Dst)
+{
+ GenOp* op;
+ Uint bits;
+
+ NEW_GENOP(stp, op);
+
+ NATIVE_ENDIAN(Flags);
+ if (Size.type == TAG_i) {
+ if (!safe_mul(Size.val, Unit.val, &bits)) {
+ goto error;
+ } else if ((Flags.val & BSF_SIGNED) != 0) {
+ goto generic;
+ } else if (bits == 8) {
+ op->op = genop_i_bs_get_integer_8_3;
+ op->arity = 3;
+ op->a[0] = Ms;
+ op->a[1] = Fail;
+ op->a[2] = Dst;
+ } else if (bits == 16 && (Flags.val & BSF_LITTLE) == 0) {
+ op->op = genop_i_bs_get_integer_16_3;
+ op->arity = 3;
+ op->a[0] = Ms;
+ op->a[1] = Fail;
+ op->a[2] = Dst;
+ } else if (bits == 32 && (Flags.val & BSF_LITTLE) == 0) {
+ op->op = genop_i_bs_get_integer_32_4;
+ op->arity = 4;
+ op->a[0] = Ms;
+ op->a[1] = Fail;
+ op->a[2] = Live;
+ op->a[3] = Dst;
+ } else {
+ generic:
+ if (bits < SMALL_BITS) {
+ op->op = genop_i_bs_get_integer_small_imm_5;
+ op->arity = 5;
+ op->a[0] = Ms;
+ op->a[1].type = TAG_u;
+ op->a[1].val = bits;
+ op->a[2] = Fail;
+ op->a[3] = Flags;
+ op->a[4] = Dst;
+ } else {
+ op->op = genop_i_bs_get_integer_imm_6;
+ op->arity = 6;
+ op->a[0] = Ms;
+ op->a[1].type = TAG_u;
+ op->a[1].val = bits;
+ op->a[2] = Live;
+ op->a[3] = Fail;
+ op->a[4] = Flags;
+ op->a[5] = Dst;
+ }
+ }
+ } else if (Size.type == TAG_q) {
+ Eterm big = stp->literals[Size.val].term;
+ Uint bigval;
+
+ if (!term_to_Uint(big, &bigval)) {
+ error:
+ op->op = genop_jump_1;
+ op->arity = 1;
+ op->a[0] = Fail;
+ } else {
+ if (!safe_mul(bigval, Unit.val, &bits)) {
+ goto error;
+ }
+ goto generic;
+ }
+ } else {
+ GenOp* op2;
+ NEW_GENOP(stp, op2);
+
+ op->op = genop_i_fetch_2;
+ op->arity = 2;
+ op->a[0] = Ms;
+ op->a[1] = Size;
+ op->next = op2;
+
+ op2->op = genop_i_bs_get_integer_4;
+ op2->arity = 4;
+ op2->a[0] = Fail;
+ op2->a[1] = Live;
+ op2->a[2].type = TAG_u;
+ op2->a[2].val = (Unit.val << 3) | Flags.val;
+ op2->a[3] = Dst;
+ op2->next = NULL;
+ return op;
+ }
+ op->next = NULL;
+ return op;
+}
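+
+/*
+ * For reference, the overflow-checked multiply that the generators in
+ * this file rely on behaves like this minimal sketch (the real
+ * safe_mul is defined elsewhere in the loader):
+ */
+#if 0
+static int safe_mul_sketch(Uint a, Uint b, Uint* resp)
+{
+    Uint res = a * b;		/* may wrap around on overflow */
+
+    *resp = res;
+    if (b == 0) {
+	return 1;
+    }
+    return (res / b) == a;	/* 1 = ok, 0 = overflow */
+}
+#endif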
+
+/*
+ * Generate the fastest instruction to fetch a binary from a binary.
+ */
+
+static GenOp*
+gen_get_binary2(LoaderState* stp, GenOpArg Fail, GenOpArg Ms, GenOpArg Live,
+ GenOpArg Size, GenOpArg Unit,
+ GenOpArg Flags, GenOpArg Dst)
+{
+ GenOp* op;
+ NEW_GENOP(stp, op);
+
+ NATIVE_ENDIAN(Flags);
+ if (Size.type == TAG_a && Size.val == am_all) {
+ if (Ms.type == Dst.type && Ms.val == Dst.val) {
+ op->op = genop_i_bs_get_binary_all_reuse_3;
+ op->arity = 3;
+ op->a[0] = Ms;
+ op->a[1] = Fail;
+ op->a[2] = Unit;
+ } else {
+ op->op = genop_i_bs_get_binary_all2_5;
+ op->arity = 5;
+ op->a[0] = Fail;
+ op->a[1] = Ms;
+ op->a[2] = Live;
+ op->a[3] = Unit;
+ op->a[4] = Dst;
+ }
+ } else if (Size.type == TAG_i) {
+ op->op = genop_i_bs_get_binary_imm2_6;
+ op->arity = 6;
+ op->a[0] = Fail;
+ op->a[1] = Ms;
+ op->a[2] = Live;
+ op->a[3].type = TAG_u;
+ if (!safe_mul(Size.val, Unit.val, &op->a[3].val)) {
+ goto error;
+ }
+ op->a[4] = Flags;
+ op->a[5] = Dst;
+ } else if (Size.type == TAG_q) {
+ Eterm big = stp->literals[Size.val].term;
+ Uint bigval;
+
+ if (!term_to_Uint(big, &bigval)) {
+ error:
+ op->op = genop_jump_1;
+ op->arity = 1;
+ op->a[0] = Fail;
+ } else {
+ op->op = genop_i_bs_get_binary_imm2_6;
+ op->arity = 6;
+ op->a[0] = Fail;
+ op->a[1] = Ms;
+ op->a[2] = Live;
+ op->a[3].type = TAG_u;
+ if (!safe_mul(bigval, Unit.val, &op->a[3].val)) {
+ goto error;
+ }
+ op->a[4] = Flags;
+ op->a[5] = Dst;
+ }
+ } else {
+ op->op = genop_i_bs_get_binary2_6;
+ op->arity = 6;
+ op->a[0] = Fail;
+ op->a[1] = Ms;
+ op->a[2] = Live;
+ op->a[3] = Size;
+ op->a[4].type = TAG_u;
+ op->a[4].val = (Unit.val << 3) | Flags.val;
+ op->a[5] = Dst;
+ }
+ op->next = NULL;
+ return op;
+}
+
+/*
+ * Predicate to test whether a heap binary should be generated.
+ */
+
+static int
+should_gen_heap_bin(LoaderState* stp, GenOpArg Src)
+{
+ return Src.val <= ERL_ONHEAP_BIN_LIMIT;
+}
+
+/*
+ * Predicate to test whether a binary construction is too big.
+ */
+
+static int
+binary_too_big(LoaderState* stp, GenOpArg Size)
+{
+ return Size.type == TAG_u && ((Size.val >> (8*sizeof(Uint)-3)) != 0);
+}
+
+static int
+binary_too_big_bits(LoaderState* stp, GenOpArg Size)
+{
+ return Size.type == TAG_u && (((Size.val+7)/8) >> (8*sizeof(Uint)-3) != 0);
+}
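+
+/*
+ * The shift by 8*sizeof(Uint)-3 above checks that the byte count,
+ * once converted to a bit count (multiplied by 8, i.e. shifted left
+ * by 3 bits), still fits in an untagged Uint.  On a 32-bit word this
+ * limits a binary to 2^29 - 1 bytes.
+ */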
+
+#define new_float_allocation(Stp) ((Stp)->new_float_instructions)
+
+static GenOp*
+gen_put_binary(LoaderState* stp, GenOpArg Fail,GenOpArg Size,
+ GenOpArg Unit, GenOpArg Flags, GenOpArg Src)
+{
+ GenOp* op;
+ NEW_GENOP(stp, op);
+
+ NATIVE_ENDIAN(Flags);
+ if (Size.type == TAG_a && Size.val == am_all) {
+ op->op = genop_i_new_bs_put_binary_all_3;
+ op->arity = 3;
+ op->a[0] = Fail;
+ op->a[1] = Src;
+ op->a[2] = Unit;
+ } else if (Size.type == TAG_i) {
+ op->op = genop_i_new_bs_put_binary_imm_3;
+ op->arity = 3;
+ op->a[0] = Fail;
+ op->a[1].type = TAG_u;
+ if (safe_mul(Size.val, Unit.val, &op->a[1].val)) {
+ op->a[2] = Src;
+ } else {
+ op->op = genop_badarg_1;
+ op->arity = 1;
+ op->a[0] = Fail;
+ }
+ } else {
+ op->op = genop_i_new_bs_put_binary_4;
+ op->arity = 4;
+ op->a[0] = Fail;
+ op->a[1] = Size;
+ op->a[2].type = TAG_u;
+ op->a[2].val = (Unit.val << 3) | (Flags.val & 7);
+ op->a[3] = Src;
+ }
+
+ op->next = NULL;
+ return op;
+}
+
+static GenOp*
+gen_put_integer(LoaderState* stp, GenOpArg Fail, GenOpArg Size,
+ GenOpArg Unit, GenOpArg Flags, GenOpArg Src)
+{
+ GenOp* op;
+ NEW_GENOP(stp, op);
+
+ NATIVE_ENDIAN(Flags);
+ if (Size.type == TAG_i && Size.val < 0) {
+ error:
+ /* Negative size must fail */
+ op->op = genop_badarg_1;
+ op->arity = 1;
+ op->a[0] = Fail;
+ } else if (Size.type == TAG_i) {
+ op->op = genop_i_new_bs_put_integer_imm_4;
+ op->arity = 4;
+ op->a[0] = Fail;
+ op->a[1].type = TAG_u;
+ if (!safe_mul(Size.val, Unit.val, &op->a[1].val)) {
+ goto error;
+ }
+ op->a[2].type = Flags.type;
+ op->a[2].val = (Flags.val & 7);
+ op->a[3] = Src;
+ } else if (Size.type == TAG_q) {
+ Eterm big = stp->literals[Size.val].term;
+ Uint bigval;
+
+ if (!term_to_Uint(big, &bigval)) {
+ goto error;
+ } else {
+ op->op = genop_i_new_bs_put_integer_imm_4;
+ op->arity = 4;
+ op->a[0] = Fail;
+ op->a[1].type = TAG_u;
+ op->a[1].val = bigval * Unit.val;
+ op->a[2].type = Flags.type;
+ op->a[2].val = (Flags.val & 7);
+ op->a[3] = Src;
+ }
+ } else {
+ op->op = genop_i_new_bs_put_integer_4;
+ op->arity = 4;
+ op->a[0] = Fail;
+ op->a[1] = Size;
+ op->a[2].type = TAG_u;
+ op->a[2].val = (Unit.val << 3) | (Flags.val & 7);
+ op->a[3] = Src;
+ }
+ op->next = NULL;
+ return op;
+}
+
+static GenOp*
+gen_put_float(LoaderState* stp, GenOpArg Fail, GenOpArg Size,
+ GenOpArg Unit, GenOpArg Flags, GenOpArg Src)
+{
+ GenOp* op;
+ NEW_GENOP(stp, op);
+
+ NATIVE_ENDIAN(Flags);
+ if (Size.type == TAG_i) {
+ op->op = genop_i_new_bs_put_float_imm_4;
+ op->arity = 4;
+ op->a[0] = Fail;
+ op->a[1].type = TAG_u;
+ if (!safe_mul(Size.val, Unit.val, &op->a[1].val)) {
+ op->op = genop_badarg_1;
+ op->arity = 1;
+ op->a[0] = Fail;
+ } else {
+ op->a[2] = Flags;
+ op->a[3] = Src;
+ }
+ } else {
+ op->op = genop_i_new_bs_put_float_4;
+ op->arity = 4;
+ op->a[0] = Fail;
+ op->a[1] = Size;
+ op->a[2].type = TAG_u;
+ op->a[2].val = (Unit.val << 3) | (Flags.val & 7);
+ op->a[3] = Src;
+ }
+ op->next = NULL;
+ return op;
+}
+
+/*
+ * Generate an instruction to fetch a float from a binary.
+ */
+
+static GenOp*
+gen_get_float2(LoaderState* stp, GenOpArg Fail, GenOpArg Ms, GenOpArg Live,
+ GenOpArg Size, GenOpArg Unit, GenOpArg Flags, GenOpArg Dst)
+{
+ GenOp* op;
+ NEW_GENOP(stp, op);
+
+ NATIVE_ENDIAN(Flags);
+ op->op = genop_i_bs_get_float2_6;
+ op->arity = 6;
+ op->a[0] = Fail;
+ op->a[1] = Ms;
+ op->a[2] = Live;
+ op->a[3] = Size;
+ op->a[4].type = TAG_u;
+ op->a[4].val = (Unit.val << 3) | Flags.val;
+ op->a[5] = Dst;
+ op->next = NULL;
+ return op;
+}
+
+/*
+ * Generate the fastest instruction for bs_skip_bits.
+ */
+
+static GenOp*
+gen_skip_bits2(LoaderState* stp, GenOpArg Fail, GenOpArg Ms,
+ GenOpArg Size, GenOpArg Unit, GenOpArg Flags)
+{
+ GenOp* op;
+
+ NATIVE_ENDIAN(Flags);
+ NEW_GENOP(stp, op);
+ if (Size.type == TAG_a && Size.val == am_all) {
+ op->op = genop_i_bs_skip_bits_all2_3;
+ op->arity = 3;
+ op->a[0] = Fail;
+ op->a[1] = Ms;
+ op->a[2] = Unit;
+ } else if (Size.type == TAG_i) {
+ op->op = genop_i_bs_skip_bits_imm2_3;
+ op->arity = 3;
+ op->a[0] = Fail;
+ op->a[1] = Ms;
+ op->a[2].type = TAG_u;
+ if (!safe_mul(Size.val, Unit.val, &op->a[2].val)) {
+ goto error;
+ }
+ } else if (Size.type == TAG_q) {
+ Eterm big = stp->literals[Size.val].term;
+ Uint bigval;
+
+ if (!term_to_Uint(big, &bigval)) {
+ error:
+ op->op = genop_jump_1;
+ op->arity = 1;
+ op->a[0] = Fail;
+ } else {
+ op->op = genop_i_bs_skip_bits_imm2_3;
+ op->arity = 3;
+ op->a[0] = Fail;
+ op->a[1] = Ms;
+ op->a[2].type = TAG_u;
+ if (!safe_mul(bigval, Unit.val, &op->a[2].val)) {
+ goto error;
+ }
+ }
+ } else {
+ op->op = genop_i_bs_skip_bits2_4;
+ op->arity = 4;
+ op->a[0] = Fail;
+ op->a[1] = Ms;
+ op->a[2] = Size;
+ op->a[3] = Unit;
+ }
+ op->next = NULL;
+ return op;
+}
+
+static int
+smp(LoaderState* stp)
+{
+#ifdef ERTS_SMP
+ return 1;
+#else
+ return 0;
+#endif
+}
+
+/*
+ * Mark this label as the target of a loop_rec instruction.
+ */
+static int
+smp_mark_target_label(LoaderState* stp, GenOpArg L)
+{
+#ifdef ERTS_SMP
+ ASSERT(L.type == TAG_f);
+ stp->labels[L.val].looprec_targeted = 1;
+#endif
+ return 1;
+}
+
+/*
+ * Test whether this label was targeted by a loop_rec/2 instruction.
+ */
+
+static int
+smp_already_locked(LoaderState* stp, GenOpArg L)
+{
+#ifdef ERTS_SMP
+ ASSERT(L.type == TAG_u);
+ return stp->labels[L.val].looprec_targeted;
+#else
+ return 0;
+#endif
+}
+
+/*
+ * Generate a timeout instruction for a literal timeout.
+ */
+
+static GenOp*
+gen_literal_timeout(LoaderState* stp, GenOpArg Fail, GenOpArg Time)
+{
+ GenOp* op;
+ Sint timeout;
+
+ NEW_GENOP(stp, op);
+ op->op = genop_i_wait_timeout_2;
+ op->next = NULL;
+ op->arity = 2;
+ op->a[0] = Fail;
+ op->a[1].type = TAG_u;
+
+ if (Time.type == TAG_i && (timeout = Time.val) >= 0 &&
+#ifdef ARCH_64
+ (timeout >> 32) == 0
+#else
+ 1
+#endif
+ ) {
+ op->a[1].val = timeout;
+#if !defined(ARCH_64)
+ } else if (Time.type == TAG_q) {
+ Eterm big;
+
+ big = stp->literals[Time.val].term;
+ if (is_not_big(big)) {
+ goto error;
+ }
+ if (big_arity(big) > 1 || big_sign(big)) {
+ goto error;
+ } else {
+ (void) term_to_Uint(big, &op->a[1].val);
+ }
+#endif
+ } else {
+#if !defined(ARCH_64)
+ error:
+#endif
+ op->op = genop_i_wait_error_0;
+ op->arity = 0;
+ }
+ return op;
+}
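+
+/*
+ * (An Erlang receive timeout must fit in 32 bits, which is why the
+ * 64-bit build checks (timeout >> 32) == 0 above and the 32-bit
+ * build additionally accepts a single-digit bignum.)
+ */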
+
+static GenOp*
+gen_literal_timeout_locked(LoaderState* stp, GenOpArg Fail, GenOpArg Time)
+{
+ GenOp* op;
+ Sint timeout;
+
+ NEW_GENOP(stp, op);
+ op->op = genop_i_wait_timeout_locked_2;
+ op->next = NULL;
+ op->arity = 2;
+ op->a[0] = Fail;
+ op->a[1].type = TAG_u;
+
+ if (Time.type == TAG_i && (timeout = Time.val) >= 0 &&
+#ifdef ARCH_64
+ (timeout >> 32) == 0
+#else
+ 1
+#endif
+ ) {
+ op->a[1].val = timeout;
+#ifndef ARCH_64
+ } else if (Time.type == TAG_q) {
+ Eterm big;
+
+ big = stp->literals[Time.val].term;
+ if (is_not_big(big)) {
+ goto error;
+ }
+ if (big_arity(big) > 1 || big_sign(big)) {
+ goto error;
+ } else {
+ (void) term_to_Uint(big, &op->a[1].val);
+ }
+#endif
+ } else {
+#ifndef ARCH_64
+ error:
+#endif
+ op->op = genop_i_wait_error_locked_0;
+ op->arity = 0;
+ }
+ return op;
+}
+
+/*
+ * Tag the list of values with tuple arity tags.
+ */
+
+static GenOp*
+gen_select_tuple_arity(LoaderState* stp, GenOpArg S, GenOpArg Fail,
+ GenOpArg Size, GenOpArg* Rest)
+
+{
+ GenOp* op;
+ int arity = Size.val + 3;
+ int size = Size.val / 2;
+ int i;
+
+ /*
+ * Verify the validity of the list.
+ */
+
+ if (Size.val % 2 != 0)
+ return NULL;
+ for (i = 0; i < Size.val; i += 2) {
+ if (Rest[i].type != TAG_u || Rest[i+1].type != TAG_f) {
+ return NULL;
+ }
+ }
+
+ /*
+ * Generate the generic instruction.
+ */
+
+ NEW_GENOP(stp, op);
+ op->next = NULL;
+ op->op = genop_i_select_tuple_arity_3;
+ GENOP_ARITY(op, arity);
+ op->a[0] = S;
+ op->a[1] = Fail;
+ op->a[2].type = TAG_u;
+ op->a[2].val = Size.val / 2;
+ for (i = 0; i < Size.val; i += 2) {
+ op->a[i+3].type = TAG_v;
+ op->a[i+3].val = make_arityval(Rest[i].val);
+ op->a[i+4] = Rest[i+1];
+ }
+
+ /*
+ * Sort the values to make them useful for a binary search.
+ */
+
+ qsort(op->a+3, size, 2*sizeof(GenOpArg),
+ (int (*)(const void *, const void *)) genopargcompare);
+#ifdef DEBUG
+ for (i = 3; i < arity-2; i += 2) {
+ ASSERT(op->a[i].val < op->a[i+2].val);
+ }
+#endif
+ return op;
+}
+
+/*
+ * Split a list consisting of both small integers and bignums into
+ * two select_val instructions.
+ */
+
+static GenOp*
+gen_split_values(LoaderState* stp, GenOpArg S, GenOpArg Fail,
+ GenOpArg Size, GenOpArg* Rest)
+
+{
+ GenOp* op1;
+ GenOp* op2;
+ GenOp* label;
+ Uint type;
+ int i;
+
+ ASSERT(Size.val >= 2 && Size.val % 2 == 0);
+
+ NEW_GENOP(stp, label);
+ label->op = genop_label_1;
+ label->arity = 1;
+ label->a[0].type = TAG_u;
+ label->a[0].val = new_label(stp);
+
+ NEW_GENOP(stp, op1);
+ op1->op = genop_select_val_3;
+ GENOP_ARITY(op1, 3 + Size.val);
+ op1->arity = 3;
+ op1->a[0] = S;
+ op1->a[1].type = TAG_f;
+ op1->a[1].val = label->a[0].val;
+ op1->a[2].type = TAG_u;
+ op1->a[2].val = 0;
+
+ NEW_GENOP(stp, op2);
+ op2->op = genop_select_val_3;
+ GENOP_ARITY(op2, 3 + Size.val);
+ op2->arity = 3;
+ op2->a[0] = S;
+ op2->a[1] = Fail;
+ op2->a[2].type = TAG_u;
+ op2->a[2].val = 0;
+
+ op1->next = label;
+ label->next = op2;
+ op2->next = NULL;
+
+ type = Rest[0].type;
+
+ ASSERT(Size.type == TAG_u);
+ for (i = 0; i < Size.val; i += 2) {
+ GenOp* op = (Rest[i].type == type) ? op1 : op2;
+ int dst = 3 + op->a[2].val;
+
+ ASSERT(Rest[i+1].type == TAG_f);
+ op->a[dst] = Rest[i];
+ op->a[dst+1] = Rest[i+1];
+ op->arity += 2;
+ op->a[2].val += 2;
+ }
+
+ /*
+     * Neither instruction should end up with an empty list.
+ */
+
+ ASSERT(op1->a[2].val > 0);
+ ASSERT(op2->a[2].val > 0);
+
+ return op1;
+}
+
+/*
+ * Generate a jump table.
+ */
+
+static GenOp*
+gen_jump_tab(LoaderState* stp, GenOpArg S, GenOpArg Fail, GenOpArg Size, GenOpArg* Rest)
+{
+ Sint min, max;
+ Sint i;
+ Sint size;
+ Sint arity;
+ int fixed_args;
+ GenOp* op;
+
+ ASSERT(Size.val >= 2 && Size.val % 2 == 0);
+
+ /*
+ * Calculate the minimum and maximum values and size of jump table.
+ */
+
+ ASSERT(Rest[0].type == TAG_i);
+ min = max = Rest[0].val;
+ for (i = 2; i < Size.val; i += 2) {
+ ASSERT(Rest[i].type == TAG_i && Rest[i+1].type == TAG_f);
+ if (Rest[i].val < min) {
+ min = Rest[i].val;
+ } else if (max < Rest[i].val) {
+ max = Rest[i].val;
+ }
+ }
+ size = max - min + 1;
+
+
+ /*
+ * Allocate structure and fill in the fixed fields.
+ */
+
+ NEW_GENOP(stp, op);
+ op->next = NULL;
+ if (min == 0) {
+ op->op = genop_i_jump_on_val_zero_3;
+ fixed_args = 3;
+ } else {
+ op->op = genop_i_jump_on_val_4;
+ fixed_args = 4;
+ }
+ arity = fixed_args + size;
+ GENOP_ARITY(op, arity);
+ op->a[0] = S;
+ op->a[1] = Fail;
+ op->a[2].type = TAG_u;
+ op->a[2].val = size;
+ op->a[3].type = TAG_u;
+ op->a[3].val = min;
+
+
+ /*
+ * Fill in the jump table.
+ */
+
+ for (i = fixed_args; i < arity; i++) {
+ op->a[i] = Fail;
+ }
+ for (i = 0; i < Size.val; i += 2) {
+ int index;
+ index = fixed_args+Rest[i].val-min;
+ ASSERT(fixed_args <= index && index < arity);
+ op->a[index] = Rest[i+1];
+ }
+ return op;
+}
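+
+/*
+ * Worked example (hypothetical labels): for the pairs {1 -> L1, 3 -> L2}
+ * with failure label LF, min = 1 and max = 3, so the table gets
+ * size = 3 slots initialized to LF and then patched:
+ *
+ *	slot 0 (value 1): L1
+ *	slot 1 (value 2): LF	(hole, falls through to failure)
+ *	slot 2 (value 3): L2
+ *
+ * Dispatch in the emulator is then a bounds check followed by a
+ * single indexed jump.
+ */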
+
+/*
+ * Compare function for qsort().
+ */
+
+static int
+genopargcompare(GenOpArg* a, GenOpArg* b)
+{
+ if (a->val < b->val)
+ return -1;
+ else if (a->val == b->val)
+ return 0;
+ else
+ return 1;
+}
+
+/*
+ * Generate a select_val instruction.  We know that a jump table is not
+ * suitable, and that all values are of the same type (integers, atoms,
+ * or floats; never bignums).
+ */
+
+static GenOp*
+gen_select_val(LoaderState* stp, GenOpArg S, GenOpArg Fail,
+ GenOpArg Size, GenOpArg* Rest)
+{
+ GenOp* op;
+ int arity = Size.val + 3;
+ int size = Size.val / 2;
+ int i;
+
+ NEW_GENOP(stp, op);
+ op->next = NULL;
+ if (Rest[0].type != TAG_q) {
+ op->op = genop_i_select_val_3;
+ } else {
+ ASSERT(is_float(stp->literals[Rest[0].val].term));
+ op->op = genop_i_select_float_3;
+ }
+ GENOP_ARITY(op, arity);
+ op->a[0] = S;
+ op->a[1] = Fail;
+ op->a[2].type = TAG_u;
+ op->a[2].val = size;
+ for (i = 3; i < arity; i++) {
+ op->a[i] = Rest[i-3];
+ }
+
+ /*
+ * Sort the values to make them useful for a binary search.
+ */
+
+ qsort(op->a+3, size, 2*sizeof(GenOpArg),
+ (int (*)(const void *, const void *)) genopargcompare);
+#ifdef DEBUG
+ for (i = 3; i < arity-2; i += 2) {
+ ASSERT(op->a[i].val < op->a[i+2].val);
+ }
+#endif
+
+ return op;
+}
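+
+/*
+ * The qsort() calls above and in gen_select_tuple_arity() treat each
+ * {value, label} pair as one element of size 2*sizeof(GenOpArg), so
+ * every label travels with its key.  A minimal standalone equivalent
+ * (assumed names):
+ */
+#if 0
+typedef struct { Uint val; Uint label; } Pair;
+
+static int pair_compare(const void* pa, const void* pb)
+{
+    const Pair* a = (const Pair *) pa;
+    const Pair* b = (const Pair *) pb;
+
+    return a->val < b->val ? -1 : (a->val == b->val ? 0 : 1);
+}
+
+/* With Pair tab[] = {{7,L3}, {1,L1}, {4,L2}},
+ * qsort(tab, 3, sizeof(Pair), pair_compare) yields keys 1, 4, 7 with
+ * their labels still attached, ready for binary search at runtime.
+ */
+#endif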
+
+/*
+ * Compare function for qsort().
+ */
+
+static int
+genbigcompare(GenOpArg* a, GenOpArg* b)
+{
+ int val = (int)(b->bigarity - a->bigarity);
+
+ return val != 0 ? val : ((int) (a->val - b->val));
+}
+
+/*
+ * Generate a select_val instruction for big numbers.
+ */
+
+static GenOp*
+gen_select_big(LoaderState* stp, GenOpArg S, GenOpArg Fail,
+ GenOpArg Size, GenOpArg* Rest)
+{
+ GenOp* op;
+ int arity = Size.val + 2 + 1;
+ int size = Size.val / 2;
+ int i;
+
+ NEW_GENOP(stp, op);
+ op->next = NULL;
+ op->op = genop_i_select_big_2;
+ GENOP_ARITY(op, arity);
+ op->a[0] = S;
+ op->a[1] = Fail;
+ for (i = 0; i < Size.val; i += 2) {
+ ASSERT(Rest[i].type == TAG_q);
+ op->a[i+2] = Rest[i];
+ op->a[i+2].bigarity = *big_val(stp->literals[op->a[i+2].val].term);
+ op->a[i+3] = Rest[i+1];
+ }
+ ASSERT(i+2 == arity-1);
+ op->a[arity-1].type = TAG_u;
+ op->a[arity-1].val = 0;
+
+ /*
+ * Sort the values in descending arity order.
+ */
+
+ qsort(op->a+2, size, 2*sizeof(GenOpArg),
+ (int (*)(const void *, const void *)) genbigcompare);
+
+ return op;
+}
+
+
+/*
+ * Replace a select_val instruction that has a constant controlling
+ * expression with a jump instruction.
+ */
+
+static GenOp*
+const_select_val(LoaderState* stp, GenOpArg S, GenOpArg Fail,
+ GenOpArg Size, GenOpArg* Rest)
+{
+ GenOp* op;
+ int i;
+
+ ASSERT(Size.type == TAG_u);
+ ASSERT(S.type == TAG_q);
+
+ NEW_GENOP(stp, op);
+ op->next = NULL;
+ op->op = genop_jump_1;
+ op->arity = 1;
+
+ /*
+ * Search for a literal matching the controlling expression.
+ */
+
+ if (S.type == TAG_q) {
+ Eterm expr = stp->literals[S.val].term;
+ for (i = 0; i < Size.val; i += 2) {
+ if (Rest[i].type == TAG_q) {
+ Eterm term = stp->literals[Rest[i].val].term;
+ if (eq(term, expr)) {
+ ASSERT(Rest[i+1].type == TAG_f);
+ op->a[0] = Rest[i+1];
+ return op;
+ }
+ }
+ }
+ }
+
+ /*
+ * No match. Use the failure label.
+ */
+
+ op->a[0] = Fail;
+ return op;
+}
+
+
+static GenOp*
+gen_func_info(LoaderState* stp, GenOpArg mod, GenOpArg func,
+ GenOpArg arity, GenOpArg label)
+{
+ GenOp* fi;
+ GenOp* op;
+
+ NEW_GENOP(stp, fi);
+ fi->op = genop_i_func_info_4;
+ fi->arity = 4;
+ fi->a[0].type = TAG_u; /* untagged Zero */
+ fi->a[0].val = 0;
+ fi->a[1] = mod;
+ fi->a[2] = func;
+ fi->a[3] = arity;
+
+ NEW_GENOP(stp, op);
+ op->op = genop_label_1;
+ op->arity = 1;
+ op->a[0] = label;
+
+ fi->next = op;
+ op->next = NULL;
+
+ return fi;
+}
+
+
+
+static GenOp*
+gen_make_fun2(LoaderState* stp, GenOpArg idx)
+{
+ ErlFunEntry* fe;
+ GenOp* op;
+
+ if (idx.val >= stp->num_lambdas) {
+ stp->lambda_error = "missing or short chunk 'FunT'";
+ fe = 0;
+ } else {
+ fe = stp->lambdas[idx.val].fe;
+ }
+
+ NEW_GENOP(stp, op);
+ op->op = genop_i_make_fun_2;
+ op->arity = 2;
+ op->a[0].type = TAG_u;
+ op->a[0].val = (Uint) fe;
+ op->a[1].type = TAG_u;
+ op->a[1].val = stp->lambdas[idx.val].num_free;
+ op->next = NULL;
+ return op;
+}
+
+static GenOp*
+gen_guard_bif(LoaderState* stp, GenOpArg Fail, GenOpArg Live, GenOpArg Bif,
+ GenOpArg Src, GenOpArg Dst)
+{
+ GenOp* op;
+ BifFunction bf;
+
+ NEW_GENOP(stp, op);
+ op->op = genop_i_gc_bif1_5;
+ op->arity = 5;
+ op->a[0] = Fail;
+ op->a[1].type = TAG_u;
+ bf = stp->import[Bif.val].bf;
+ if (bf == length_1) {
+ op->a[1].val = (Uint) (void *) erts_gc_length_1;
+ } else if (bf == size_1) {
+ op->a[1].val = (Uint) (void *) erts_gc_size_1;
+ } else if (bf == bit_size_1) {
+ op->a[1].val = (Uint) (void *) erts_gc_bit_size_1;
+ } else if (bf == byte_size_1) {
+ op->a[1].val = (Uint) (void *) erts_gc_byte_size_1;
+ } else if (bf == abs_1) {
+ op->a[1].val = (Uint) (void *) erts_gc_abs_1;
+ } else if (bf == float_1) {
+ op->a[1].val = (Uint) (void *) erts_gc_float_1;
+ } else if (bf == round_1) {
+ op->a[1].val = (Uint) (void *) erts_gc_round_1;
+ } else if (bf == trunc_1) {
+ op->a[1].val = (Uint) (void *) erts_gc_trunc_1;
+ } else {
+ abort();
+ }
+ op->a[2] = Src;
+ op->a[3] = Live;
+ op->a[4] = Dst;
+ op->next = NULL;
+ return op;
+}
+
+
+/*
+ * Freeze the code in memory, move the string table into place,
+ * resolve all labels.
+ */
+
+static int
+freeze_code(LoaderState* stp)
+{
+ Eterm* code = stp->code;
+ Uint index;
+ int i;
+ byte* str_table;
+ unsigned strtab_size = stp->chunks[STR_CHUNK].size;
+ unsigned attr_size = stp->chunks[ATTR_CHUNK].size;
+ unsigned compile_size = stp->chunks[COMPILE_CHUNK].size;
+ Uint size;
+ unsigned catches;
+ Sint decoded_size;
+
+ /*
+ * Verify that there was a correct 'FunT' chunk if there were
+ * make_fun2 instructions in the file.
+ */
+
+ if (stp->lambda_error != NULL) {
+ LoadError0(stp, stp->lambda_error);
+ }
+
+
+ /*
+ * Calculate the final size of the code.
+ */
+
+ size = (stp->ci + stp->total_literal_size) * sizeof(Eterm) +
+ strtab_size + attr_size + compile_size;
+
+ /*
+ * Move the code to its final location.
+ */
+
+ code = (Eterm *) erts_realloc(ERTS_ALC_T_CODE, (void *) code, size);
+
+ /*
+ * Place a pointer to the op_int_code_end instruction in the
+ * function table in the beginning of the file.
+     * function table at the beginning of the code area.
+
+ code[MI_FUNCTIONS+stp->num_functions] = (Eterm) (code + stp->ci - 1);
+
+ /*
+ * Store the pointer to the on_load function.
+ */
+
+ if (stp->on_load) {
+ code[MI_ON_LOAD_FUNCTION_PTR] = (Eterm) (code + stp->on_load);
+ } else {
+ code[MI_ON_LOAD_FUNCTION_PTR] = 0;
+ }
+
+ /*
+ * Place the literal heap directly after the code and fix up all
+ * put_literal instructions that refer to it.
+ */
+ {
+ Eterm* ptr;
+ Eterm* low;
+ Eterm* high;
+ LiteralPatch* lp;
+
+ low = code+stp->ci;
+ high = low + stp->total_literal_size;
+ code[MI_LITERALS_START] = (Eterm) low;
+ code[MI_LITERALS_END] = (Eterm) high;
+ ptr = low;
+ for (i = 0; i < stp->num_literals; i++) {
+ Uint offset;
+
+ sys_memcpy(ptr, stp->literals[i].heap,
+ stp->literals[i].heap_size*sizeof(Eterm));
+ offset = ptr - stp->literals[i].heap;
+ stp->literals[i].offset = offset;
+ high = ptr + stp->literals[i].heap_size;
+ while (ptr < high) {
+ Eterm val = *ptr;
+ switch (primary_tag(val)) {
+ case TAG_PRIMARY_LIST:
+ case TAG_PRIMARY_BOXED:
+ *ptr++ = offset_ptr(val, offset);
+ break;
+ case TAG_PRIMARY_HEADER:
+ ptr++;
+ if (header_is_thing(val)) {
+ ptr += thing_arityval(val);
+ }
+ break;
+ default:
+ ptr++;
+ break;
+ }
+ }
+ ASSERT(ptr == high);
+ }
+ lp = stp->literal_patches;
+ while (lp != 0) {
+ Uint* op_ptr;
+ Uint literal;
+ Literal* lit;
+
+ op_ptr = code + lp->pos;
+ lit = &stp->literals[op_ptr[0]];
+ literal = lit->term;
+ if (is_boxed(literal) || is_list(literal)) {
+ literal = offset_ptr(literal, lit->offset);
+ }
+ op_ptr[0] = literal;
+ lp = lp->next;
+ }
+ stp->ci += stp->total_literal_size;
+ }
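+
+    /*
+     * The relocation loop above is the standard term-copy fix-up:
+     * when a term moves, every internal pointer must be shifted by
+     * the distance the term moved.  Standalone toy (plain word-sized
+     * values instead of tagged Eterms):
+     */
+#if 0
+    {
+	Uint old_area[3], new_area[3];
+	Sint delta;
+
+	old_area[0] = (Uint) &old_area[2];	/* internal pointer */
+	old_area[2] = 42;
+	sys_memcpy(new_area, old_area, sizeof(old_area));
+	delta = (char *) new_area - (char *) old_area;
+	new_area[0] += delta;			/* relocate it */
+	ASSERT(*(Uint *) new_area[0] == 42);
+    }
+#endif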
+
+ /*
+     * Place the string table and, optionally, the attribute and
+     * compile-information chunks after the literal heap.
+ */
+
+ sys_memcpy(code+stp->ci, stp->chunks[STR_CHUNK].start, strtab_size);
+ str_table = (byte *) (code+stp->ci);
+ if (attr_size) {
+ byte* attr = str_table + strtab_size;
+ sys_memcpy(attr, stp->chunks[ATTR_CHUNK].start, stp->chunks[ATTR_CHUNK].size);
+ code[MI_ATTR_PTR] = (Eterm) attr;
+ code[MI_ATTR_SIZE] = (Eterm) stp->chunks[ATTR_CHUNK].size;
+ decoded_size = erts_decode_ext_size(attr, attr_size, 0);
+ if (decoded_size < 0) {
+ LoadError0(stp, "bad external term representation of module attributes");
+ }
+ code[MI_ATTR_SIZE_ON_HEAP] = decoded_size;
+ }
+ if (compile_size) {
+ byte* compile_info = str_table + strtab_size + attr_size;
+ sys_memcpy(compile_info, stp->chunks[COMPILE_CHUNK].start,
+ stp->chunks[COMPILE_CHUNK].size);
+ code[MI_COMPILE_PTR] = (Eterm) compile_info;
+ code[MI_COMPILE_SIZE] = (Eterm) stp->chunks[COMPILE_CHUNK].size;
+ decoded_size = erts_decode_ext_size(compile_info, compile_size, 0);
+ if (decoded_size < 0) {
+ LoadError0(stp, "bad external term representation of compilation information");
+ }
+ code[MI_COMPILE_SIZE_ON_HEAP] = decoded_size;
+ }
+
+
+ /*
+ * Go through all put_strings instructions, restore the pointer to
+ * the instruction and convert string offsets to pointers (to the
+ * LAST character).
+ */
+
+ index = stp->put_strings;
+ while (index != 0) {
+ Uint next = code[index];
+ code[index] = BeamOpCode(op_put_string_IId);
+ code[index+2] = (Uint) (str_table + code[index+2] + code[index+1] - 1);
+ index = next;
+ }
+
+ /*
+ * Go through all i_new_bs_put_strings instructions, restore the pointer to
+ * the instruction and convert string offsets to pointers (to the
+ * FIRST character).
+ */
+
+ index = stp->new_bs_put_strings;
+ while (index != 0) {
+ Uint next = code[index];
+ code[index] = BeamOpCode(op_bs_put_string_II);
+ code[index+2] = (Uint) (str_table + code[index+2]);
+ index = next;
+ }
+
+ {
+ StringPatch* sp = stp->string_patches;
+
+ while (sp != 0) {
+ Uint* op_ptr;
+ byte* strp;
+
+ op_ptr = code + sp->pos;
+ strp = str_table + op_ptr[0];
+ op_ptr[0] = (Eterm) strp;
+ sp = sp->next;
+ }
+ }
+
+ /*
+ * Resolve all labels.
+ */
+
+ for (i = 0; i < stp->num_labels; i++) {
+ Uint this_patch;
+ Uint next_patch;
+ Uint value = stp->labels[i].value;
+
+ if (value == 0 && stp->labels[i].patches != 0) {
+ LoadError1(stp, "label %d not resolved", i);
+ }
+ ASSERT(value < stp->ci);
+ this_patch = stp->labels[i].patches;
+ while (this_patch != 0) {
+ ASSERT(this_patch < stp->ci);
+ next_patch = code[this_patch];
+ ASSERT(next_patch < stp->ci);
+ code[this_patch] = (Uint) (code + value);
+ this_patch = next_patch;
+ }
+ }
+
+ /*
+ * Fix all catch_yf instructions.
+ */
+ index = stp->catches;
+ catches = BEAM_CATCHES_NIL;
+ while (index != 0) {
+ Uint next = code[index];
+ code[index] = BeamOpCode(op_catch_yf);
+ catches = beam_catches_cons((Uint*)code[index+2], catches);
+ code[index+2] = make_catch(catches);
+ index = next;
+ }
+ stp->catches = catches;
+
+ /*
+ * Save the updated code pointer and code size.
+ */
+
+ stp->code = code;
+ stp->loaded_size = size;
+
+ return 1;
+
+ load_error:
+ /*
+ * Make sure that the caller frees the newly reallocated block, and
+ * not the old one (in case it has moved).
+ */
+ stp->code = code;
+ return 0;
+}
+
+
+static void
+final_touch(LoaderState* stp)
+{
+ int i;
+ int on_load = stp->on_load;
+
+ /*
+ * Export functions.
+ */
+
+ for (i = 0; i < stp->num_exps; i++) {
+ Export* ep = erts_export_put(stp->module, stp->export[i].function,
+ stp->export[i].arity);
+ if (!on_load) {
+ ep->address = stp->export[i].address;
+ } else {
+ /*
+ * Don't make any of the exported functions
+ * callable yet.
+ */
+ ep->address = ep->code+3;
+ ep->code[4] = (Eterm) stp->export[i].address;
+ }
+ }
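+
+    /*
+     * Sketch of the export-entry trick above (layout as assumed here;
+     * see the Export definition for the authoritative one):
+     * ep->code[0..2] hold {Module, Function, Arity}; ep->code[3] is an
+     * instruction slot, so pointing ep->address at it makes every call
+     * trap into the error/on_load handling; ep->code[4] holds the real
+     * address until the on_load function has succeeded.
+     */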
+
+ /*
+ * Import functions and patch all callers.
+ */
+
+ for (i = 0; i < stp->num_imports; i++) {
+ Eterm mod;
+ Eterm func;
+ Uint arity;
+ Uint import;
+ Uint current;
+ Uint next;
+
+ mod = stp->import[i].module;
+ func = stp->import[i].function;
+ arity = stp->import[i].arity;
+ import = (Uint) erts_export_put(mod, func, arity);
+ current = stp->import[i].patches;
+ while (current != 0) {
+ ASSERT(current < stp->ci);
+ next = stp->code[current];
+ stp->code[current] = import;
+ current = next;
+ }
+ }
+
+ /*
+ * Fix all funs.
+ */
+
+ if (stp->num_lambdas > 0) {
+ for (i = 0; i < stp->num_lambdas; i++) {
+ unsigned entry_label = stp->lambdas[i].label;
+ ErlFunEntry* fe = stp->lambdas[i].fe;
+ Eterm* code_ptr = (Eterm *) (stp->code + stp->labels[entry_label].value);
+
+ if (fe->address[0] != 0) {
+ /*
+ * We are hiding a pointer into older code.
+ */
+ erts_refc_dec(&fe->refc, 1);
+ }
+ fe->address = code_ptr;
+#ifdef HIPE
+ hipe_set_closure_stub(fe, stp->lambdas[i].num_free);
+#endif
+ }
+ }
+}
+
+
+static int
+transform_engine(LoaderState* st)
+{
+ Uint op;
+ int ap; /* Current argument. */
+ Uint* restart; /* Where to restart if current match fails. */
+ GenOpArg def_vars[TE_MAX_VARS]; /* Default buffer for variables. */
+ GenOpArg* var = def_vars;
+ int i; /* General index. */
+ Uint mask;
+ GenOp* instr;
+ Uint* pc;
+ int rval;
+
+ ASSERT(gen_opc[st->genop->op].transform != -1);
+ pc = op_transform + gen_opc[st->genop->op].transform;
+ restart = pc;
+
+ restart:
+ if (var != def_vars) {
+ erts_free(ERTS_ALC_T_LOADER_TMP, (void *) var);
+ var = def_vars;
+ }
+ ASSERT(restart != NULL);
+ pc = restart;
+ ASSERT(*pc < NUM_TOPS); /* Valid instruction? */
+ ASSERT(*pc == TOP_try_me_else || *pc == TOP_fail);
+ instr = st->genop;
+
+#define RETURN(r) rval = (r); goto do_return;
+
+#ifdef DEBUG
+ restart = NULL;
+#endif
+ ap = 0;
+ for (;;) {
+ op = *pc++;
+
+ switch (op) {
+ case TOP_is_op:
+ if (instr == NULL) {
+ /*
+ * We'll need at least one more instruction to decide whether
+ * this combination matches or not.
+ */
+ RETURN(TE_SHORT_WINDOW);
+ }
+ if (*pc++ != instr->op)
+ goto restart;
+ break;
+ case TOP_is_type:
+ mask = *pc++;
+
+ ASSERT(ap < instr->arity);
+ ASSERT(instr->a[ap].type < BEAM_NUM_TAGS);
+ if (((1 << instr->a[ap].type) & mask) == 0)
+ goto restart;
+ break;
+ case TOP_pred:
+ i = *pc++;
+ switch (i) {
+#define RVAL i
+#include "beam_pred_funcs.h"
+#undef RVAL
+ default:
+ ASSERT(0);
+ }
+ if (i == 0)
+ goto restart;
+ break;
+ case TOP_is_eq:
+ ASSERT(ap < instr->arity);
+ if (*pc++ != instr->a[ap].val)
+ goto restart;
+ break;
+ case TOP_is_same_var:
+ ASSERT(ap < instr->arity);
+ i = *pc++;
+ ASSERT(i < TE_MAX_VARS);
+ if (var[i].type != instr->a[ap].type)
+ goto restart;
+ switch (var[i].type) {
+ case TAG_r: case TAG_n: break;
+ default:
+ if (var[i].val != instr->a[ap].val)
+ goto restart;
+ }
+ break;
+#if defined(TOP_is_bif)
+ case TOP_is_bif:
+ {
+ int bif_number = *pc++;
+
+ /*
+		 * In a debug build, the type must be 'u'.
+		 * In a production build, simply don't match; the original
+		 * call instruction is retained.  It will still work, at a
+		 * slight performance loss.
+ */
+
+ ASSERT(instr->a[ap].type == TAG_u);
+ if (instr->a[ap].type != TAG_u)
+ goto restart;
+
+ /*
+		 * In a debug build, the assertion will catch invalid indexes
+		 * immediately.  In a production build, the loader will issue
+		 * a diagnostic later when the instruction is loaded.
+ */
+
+ i = instr->a[ap].val;
+ ASSERT(i < st->num_imports);
+ if (i >= st->num_imports || st->import[i].bf == NULL)
+ goto restart;
+ if (bif_number != -1 &&
+ bif_export[bif_number]->code[4] != (Uint) st->import[i].bf) {
+ goto restart;
+ }
+ }
+ break;
+
+#endif
+#if defined(TOP_is_not_bif)
+ case TOP_is_not_bif:
+ {
+ pc++;
+
+ /*
+		 * In a debug build, the type must be 'u'.
+ */
+
+ ASSERT(instr->a[ap].type == TAG_u);
+ if (instr->a[ap].type != TAG_u) {
+ goto restart;
+ }
+ i = instr->a[ap].val;
+
+ /*
+		 * erlang:apply/2,3 are strange. They exist as (dummy) BIFs
+		 * so that they are included in the export table before
+		 * the erlang module is loaded. They also exist in the
+		 * erlang module as functions. When they occur in code,
+		 * a special Beam instruction is used.
+		 *
+		 * Below we explicitly recognize erlang:apply/2,3 as
+		 * special. This is necessary because once a trace
+		 * pattern has been set on them, you can no longer see
+		 * from the export entry that they are special.
+ */
+ if (i < st->num_imports) {
+ if (st->import[i].bf != NULL ||
+ (st->import[i].module == am_erlang &&
+ st->import[i].function == am_apply &&
+ (st->import[i].arity == 2 || st->import[i].arity == 3))) {
+ goto restart;
+ }
+ }
+ }
+ break;
+
+#endif
+#if defined(TOP_is_func)
+ case TOP_is_func:
+ {
+ Eterm mod = *pc++;
+ Eterm func = *pc++;
+ int arity = *pc++;
+
+ ASSERT(instr->a[ap].type == TAG_u);
+ if (instr->a[ap].type != TAG_u) {
+ goto restart;
+ }
+ i = instr->a[ap].val;
+ ASSERT(i < st->num_imports);
+ if (i >= st->num_imports || st->import[i].module != mod ||
+ st->import[i].function != func ||
+ (arity < MAX_ARG && st->import[i].arity != arity)) {
+ goto restart;
+ }
+ }
+ break;
+#endif
+ case TOP_set_var_next_arg:
+ ASSERT(ap < instr->arity);
+ i = *pc++;
+ ASSERT(i < TE_MAX_VARS);
+ var[i].type = instr->a[ap].type;
+ var[i].val = instr->a[ap].val;
+ ap++;
+ break;
+
+#if defined(TOP_rest_args)
+ case TOP_rest_args:
+ {
+ int n = *pc++;
+ var = erts_alloc(ERTS_ALC_T_LOADER_TMP,
+ instr->arity * sizeof(GenOpArg));
+ for (i = 0; i < n; i++) {
+ var[i] = def_vars[i];
+ }
+ while (i < instr->arity) {
+ var[i] = instr->a[i];
+ i++;
+ }
+ }
+ break;
+#endif
+
+ case TOP_next_arg:
+ ap++;
+ break;
+ case TOP_next_instr:
+ instr = instr->next;
+ ap = 0;
+ break;
+ case TOP_commit:
+ instr = instr->next; /* The next_instr was optimized away. */
+
+ /*
+ * The left-hand side of this transformation matched.
+ * Delete all matched instructions.
+ */
+ while (st->genop != instr) {
+ GenOp* next = st->genop->next;
+ FREE_GENOP(st, st->genop);
+ st->genop = next;
+ }
+#ifdef DEBUG
+ instr = 0;
+#endif
+ break;
+
+#if defined(TOP_call)
+ case TOP_call:
+ {
+ GenOp** lastp;
+ GenOp* new_instr;
+
+ i = *pc++;
+ switch (i) {
+#define RVAL new_instr
+#include "beam_tr_funcs.h"
+#undef RVAL
+ default:
+ new_instr = NULL; /* Silence compiler warning. */
+ ASSERT(0);
+ }
+ if (new_instr == NULL) {
+ goto restart;
+ }
+
+ lastp = &new_instr;
+ while (*lastp != NULL) {
+ lastp = &((*lastp)->next);
+ }
+
+ instr = instr->next; /* The next_instr was optimized away. */
+
+ /*
+ * The left-hand side of this transformation matched.
+ * Delete all matched instructions.
+ */
+ while (st->genop != instr) {
+ GenOp* next = st->genop->next;
+ FREE_GENOP(st, st->genop);
+ st->genop = next;
+ }
+ *lastp = st->genop;
+ st->genop = new_instr;
+ }
+ break;
+#endif
+ case TOP_new_instr:
+ /*
+ * Note that the instructions are generated in reverse order.
+ */
+ NEW_GENOP(st, instr);
+ instr->next = st->genop;
+ st->genop = instr;
+ ap = 0;
+ break;
+ case TOP_store_op:
+ instr->op = *pc++;
+ instr->arity = *pc++;
+ break;
+ case TOP_store_type:
+ i = *pc++;
+ instr->a[ap].type = i;
+ instr->a[ap].val = 0;
+ break;
+ case TOP_store_val:
+ i = *pc++;
+ instr->a[ap].val = i;
+ break;
+ case TOP_store_var:
+ i = *pc++;
+ ASSERT(i < TE_MAX_VARS);
+ instr->a[ap].type = var[i].type;
+ instr->a[ap].val = var[i].val;
+ break;
+ case TOP_try_me_else:
+ restart = pc + 1;
+ restart += *pc++;
+ ASSERT(*pc < NUM_TOPS); /* Valid instruction? */
+ break;
+ case TOP_end:
+ RETURN(TE_OK);
+ case TOP_fail:
+	    RETURN(TE_FAIL);
+ default:
+ ASSERT(0);
+ }
+ }
+#undef RETURN
+
+ do_return:
+ if (var != def_vars) {
+ erts_free(ERTS_ALC_T_LOADER_TMP, (void *) var);
+ }
+ return rval;
+}
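+
+/*
+ * Illustrative sketch (assumed shape, not taken from the generated
+ * tables): a transformation rule such as
+ *
+ *	some_op A B | other_op B => fused_op A B
+ *
+ * compiles to a program of roughly this form:
+ *
+ *	TOP_try_me_else <fail>
+ *	TOP_is_op some_op
+ *	TOP_set_var_next_arg 0		; bind A to var[0]
+ *	TOP_set_var_next_arg 1		; bind B to var[1]
+ *	TOP_next_instr
+ *	TOP_is_op other_op
+ *	TOP_is_same_var 1		; must be the same B
+ *	TOP_commit			; delete the matched instructions
+ *	TOP_new_instr			; then build fused_op
+ *	TOP_store_op fused_op 2
+ *	TOP_store_var 0
+ *	TOP_next_arg
+ *	TOP_store_var 1
+ *	TOP_end
+ */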
+
+
+static void
+short_file(int line, LoaderState* stp, unsigned needed)
+{
+ load_printf(line, stp, "unexpected end of %s when reading %d byte(s)",
+ stp->file_name, needed);
+}
+
+
+static void
+load_printf(int line, LoaderState* context, char *fmt,...)
+{
+ erts_dsprintf_buf_t *dsbufp;
+ va_list va;
+
+ if (is_non_value(context->module)) {
+ /* Suppressed by code:get_chunk/2 */
+ return;
+ }
+
+ dsbufp = erts_create_logger_dsbuf();
+
+ erts_dsprintf(dsbufp, "%s(%d): Error loading ", __FILE__, line);
+
+ if (is_atom(context->function))
+ erts_dsprintf(dsbufp, "function %T:%T/%d", context->module,
+ context->function, context->arity);
+ else
+ erts_dsprintf(dsbufp, "module %T", context->module);
+
+ if (context->genop)
+ erts_dsprintf(dsbufp, ": op %s", gen_opc[context->genop->op].name);
+
+ if (context->specific_op != -1)
+ erts_dsprintf(dsbufp, ": %s", opc[context->specific_op].sign);
+ else if (context->genop) {
+ int i;
+ for (i = 0; i < context->genop->arity; i++)
+ erts_dsprintf(dsbufp, " %c",
+ tag_to_letter[context->genop->a[i].type]);
+ }
+
+ erts_dsprintf(dsbufp, ":\n ");
+
+ va_start(va, fmt);
+ erts_vdsprintf(dsbufp, fmt, va);
+ va_end(va);
+
+ erts_dsprintf(dsbufp, "\n");
+#ifdef DEBUG
+ erts_fprintf(stderr, "%s", dsbufp->str);
+#endif
+ erts_send_error_to_logger(context->group_leader, dsbufp);
+}
+
+
+static int
+get_int_val(LoaderState* stp, Uint len_code, Uint* result)
+{
+ Uint count;
+ Uint val;
+
+ len_code >>= 5;
+ ASSERT(len_code < 8);
+ if (len_code == 7) {
+ LoadError0(stp, "can't load integers bigger than 8 bytes yet\n");
+ }
+ count = len_code + 2;
+ if (count == 5) {
+ Uint msb;
+ GetByte(stp, msb);
+ if (msb == 0) {
+ count--;
+ }
+ GetInt(stp, 4, *result);
+ } else if (count <= 4) {
+ GetInt(stp, count, val);
+ *result = ((val << 8*(sizeof(val)-count)) >> 8*(sizeof(val)-count));
+ } else {
+ LoadError1(stp, "too big integer; %d bytes\n", count);
+ }
+ return 1;
+
+ load_error:
+ return 0;
+}
+
+
+static int
+get_erlang_integer(LoaderState* stp, Uint len_code, Uint* result)
+{
+ Uint count;
+ Sint val;
+ byte default_buf[128];
+ byte* bigbuf = default_buf;
+ byte* s;
+ int i;
+ int neg = 0;
+ Uint arity;
+ Eterm* hp;
+
+ /*
+ * Retrieve the size of the value in bytes.
+ */
+
+ len_code >>= 5;
+ if (len_code < 7) {
+ count = len_code + 2;
+ } else {
+ Uint tag;
+
+ ASSERT(len_code == 7);
+ GetTagAndValue(stp, tag, len_code);
+ VerifyTag(stp, TAG_u, tag);
+ count = len_code + 9;
+ }
+
+ /*
+     * Handle values that fit in a signed machine word; the result is
+     * either a small or a bignum.
+ */
+
+ if (count <= sizeof(val)) {
+ GetInt(stp, count, val);
+
+ val = ((val << 8*(sizeof(val)-count)) >> 8*(sizeof(val)-count));
+ if (IS_SSMALL(val)) {
+ *result = val;
+ return TAG_i;
+ } else {
+ *result = new_literal(stp, &hp, BIG_UINT_HEAP_SIZE);
+ (void) small_to_big(val, hp);
+ return TAG_q;
+ }
+ }
+
+ /*
+ * Make sure that the number will fit in our temporary buffer
+ * (including margin).
+ */
+
+ if (count+8 > sizeof(default_buf)) {
+ bigbuf = erts_alloc(ERTS_ALC_T_LOADER_TMP, count+8);
+ }
+
+ /*
+ * Copy the number reversed to our temporary buffer.
+ */
+
+ GetString(stp, s, count);
+ for (i = 0; i < count; i++) {
+ bigbuf[count-i-1] = *s++;
+ }
+
+ /*
+ * Check if the number is negative, and negate it if so.
+ */
+
+ if ((bigbuf[count-1] & 0x80) != 0) {
+ unsigned carry = 1;
+
+ neg = 1;
+ for (i = 0; i < count; i++) {
+ bigbuf[i] = ~bigbuf[i] + carry;
+ carry = (bigbuf[i] == 0 && carry == 1);
+ }
+ ASSERT(carry == 0);
+ }
+
+ /*
+ * Align to word boundary.
+ */
+
+ if (bigbuf[count-1] == 0) {
+ count--;
+ }
+ if (bigbuf[count-1] == 0) {
+ LoadError0(stp, "bignum not normalized");
+ }
+ while (count % sizeof(Eterm) != 0) {
+ bigbuf[count++] = 0;
+ }
+
+ /*
+ * Allocate heap space for the bignum and copy it.
+ */
+
+ arity = count/sizeof(Eterm);
+ *result = new_literal(stp, &hp, arity+1);
+ (void) bytes_to_big(bigbuf, count, neg, hp);
+
+ if (bigbuf != default_buf) {
+ erts_free(ERTS_ALC_T_LOADER_TMP, (void *) bigbuf);
+ }
+ return TAG_q;
+
+ load_error:
+ if (bigbuf != default_buf) {
+ erts_free(ERTS_ALC_T_LOADER_TMP, (void *) bigbuf);
+ }
+ return -1;
+}
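+
+/*
+ * A note on the shift pair ((val << k) >> k) used above: val is
+ * signed, so the arithmetic right shift sign-extends the top bit of
+ * the bytes actually read.  For example, with count == 2 on a 64-bit
+ * Sint:
+ *
+ *	val = 0xFFFE;  k = 8*(sizeof(val) - 2) = 48;
+ *	(val << k) >> k  ==  -2
+ *
+ * (Strictly speaking, shifting into and out of the sign bit is not
+ * portable C, but every compiler we build with performs the
+ * arithmetic shifts this code assumes.)  In get_int_val() the same
+ * idiom is applied to an unsigned Uint, where it merely masks off
+ * the unused high bytes.
+ */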
+
+/*
+ * Converts an IFF id to a printable string.
+ */
+
+static void
+id_to_string(Uint id, char* s)
+{
+ int i;
+
+ for (i = 3; i >= 0; i--) {
+ *s++ = (id >> i*8) & 0xff;
+ }
+ *s++ = '\0';
+}
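+
+/*
+ * Example: the "Atom" chunk id 0x41746F6D ('A','t','o','m') becomes
+ * the printable string "Atom"; the buffer must hold at least 5 bytes.
+ */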
+
+static void
+new_genop(LoaderState* stp)
+{
+ GenOpBlock* p = (GenOpBlock *) erts_alloc(ERTS_ALC_T_LOADER_TMP,
+ sizeof(GenOpBlock));
+ int i;
+
+ p->next = stp->genop_blocks;
+ stp->genop_blocks = p;
+ for (i = 0; i < sizeof(p->genop)/sizeof(p->genop[0])-1; i++) {
+ p->genop[i].next = p->genop + i + 1;
+ }
+ p->genop[i].next = NULL;
+ stp->free_genop = p->genop;
+}
+
+static int
+new_label(LoaderState* stp)
+{
+ int num = stp->num_labels;
+
+ stp->num_labels++;
+ stp->labels = (Label *) erts_realloc(ERTS_ALC_T_LOADER_TMP,
+ (void *) stp->labels,
+ stp->num_labels * sizeof(Label));
+ stp->labels[num].value = 0;
+ stp->labels[num].patches = 0;
+ return num;
+}
+
+static void
+new_literal_patch(LoaderState* stp, int pos)
+{
+ LiteralPatch* p = erts_alloc(ERTS_ALC_T_LOADER_TMP, sizeof(LiteralPatch));
+ p->pos = pos;
+ p->next = stp->literal_patches;
+ stp->literal_patches = p;
+}
+
+static void
+new_string_patch(LoaderState* stp, int pos)
+{
+ StringPatch* p = erts_alloc(ERTS_ALC_T_LOADER_TMP, sizeof(StringPatch));
+ p->pos = pos;
+ p->next = stp->string_patches;
+ stp->string_patches = p;
+}
+
+static Uint
+new_literal(LoaderState* stp, Eterm** hpp, Uint heap_size)
+{
+ Literal* lit;
+
+ if (stp->allocated_literals == 0) {
+ Uint need;
+
+ ASSERT(stp->literals == 0);
+ ASSERT(stp->num_literals == 0);
+ stp->allocated_literals = 8;
+ need = stp->allocated_literals * sizeof(Literal);
+ stp->literals = (Literal *) erts_alloc(ERTS_ALC_T_LOADER_TMP,
+ need);
+ } else if (stp->allocated_literals <= stp->num_literals) {
+ Uint need;
+
+ stp->allocated_literals *= 2;
+ need = stp->allocated_literals * sizeof(Literal);
+ stp->literals = (Literal *) erts_realloc(ERTS_ALC_T_LOADER_TMP,
+ (void *) stp->literals,
+ need);
+ }
+
+ stp->total_literal_size += heap_size;
+ lit = stp->literals + stp->num_literals;
+ lit->offset = 0;
+ lit->heap_size = heap_size;
+ lit->heap = erts_alloc(ERTS_ALC_T_LOADER_TMP, heap_size*sizeof(Eterm));
+ lit->term = make_boxed(lit->heap);
+ *hpp = lit->heap;
+ return stp->num_literals++;
+}
+
+Eterm
+erts_module_info_0(Process* p, Eterm module)
+{
+ Eterm *hp;
+ Eterm list = NIL;
+ Eterm tup;
+
+ if (is_not_atom(module)) {
+ return THE_NON_VALUE;
+ }
+
+ if (erts_get_module(module) == NULL) {
+ return THE_NON_VALUE;
+ }
+
+#define BUILD_INFO(What) \
+ tup = erts_module_info_1(p, module, What); \
+ hp = HAlloc(p, 5); \
+ tup = TUPLE2(hp, What, tup); \
+ hp += 3; \
+ list = CONS(hp, tup, list)
+
+ BUILD_INFO(am_compile);
+ BUILD_INFO(am_attributes);
+ BUILD_INFO(am_imports);
+ BUILD_INFO(am_exports);
+#undef BUILD_INFO
+ return list;
+}
+
+Eterm
+erts_module_info_1(Process* p, Eterm module, Eterm what)
+{
+ if (what == am_module) {
+ return module;
+ } else if (what == am_imports) {
+ return NIL;
+ } else if (what == am_exports) {
+ return exported_from_module(p, module);
+ } else if (what == am_functions) {
+ return functions_in_module(p, module);
+ } else if (what == am_attributes) {
+ return attributes_for_module(p, module);
+ } else if (what == am_compile) {
+ return compilation_info_for_module(p, module);
+ } else if (what == am_native_addresses) {
+ return native_addresses(p, module);
+ }
+ return THE_NON_VALUE;
+}
+
+/*
+ * Builds a list of all functions in the given module:
+ * [{Name, Arity},...]
+ *
+ * Returns a tagged term, or THE_NON_VALUE on error.
+ */
+
+Eterm
+functions_in_module(Process* p, /* Process whose heap to use. */
+ Eterm mod) /* Tagged atom for module. */
+{
+ Module* modp;
+ Eterm* code;
+ int i;
+ Uint num_functions;
+ Eterm* hp;
+ Eterm result = NIL;
+
+ if (is_not_atom(mod)) {
+ return THE_NON_VALUE;
+ }
+
+ modp = erts_get_module(mod);
+ if (modp == NULL) {
+ return THE_NON_VALUE;
+ }
+ code = modp->code;
+ num_functions = code[MI_NUM_FUNCTIONS];
+ hp = HAlloc(p, 5*num_functions);
+ for (i = num_functions-1; i >= 0 ; i--) {
+ Eterm* func_info = (Eterm *) code[MI_FUNCTIONS+i];
+ Eterm name = func_info[3];
+ int arity = func_info[4];
+ Eterm tuple;
+
+ ASSERT(is_atom(name));
+ tuple = TUPLE2(hp, name, make_small(arity));
+ hp += 3;
+ result = CONS(hp, tuple, result);
+ hp += 2;
+ }
+ return result;
+}
+
+/*
+ * Builds a list of all functions including native addresses.
+ * [{Name,Arity,NativeAddress},...]
+ *
+ * Returns a tagged term, or THE_NON_VALUE on error.
+ */
+
+static Eterm
+native_addresses(Process* p, Eterm mod)
+{
+ Module* modp;
+ Eterm* code;
+ int i;
+ Eterm* hp;
+ Uint num_functions;
+ Uint need;
+ Eterm* hp_end;
+ Eterm result = NIL;
+
+ if (is_not_atom(mod)) {
+ return THE_NON_VALUE;
+ }
+
+ modp = erts_get_module(mod);
+ if (modp == NULL) {
+ return THE_NON_VALUE;
+ }
+
+ code = modp->code;
+ num_functions = code[MI_NUM_FUNCTIONS];
+ need = (6+BIG_UINT_HEAP_SIZE)*num_functions;
+ hp = HAlloc(p, need);
+ hp_end = hp + need;
+ for (i = num_functions-1; i >= 0 ; i--) {
+ Eterm* func_info = (Eterm *) code[MI_FUNCTIONS+i];
+ Eterm name = func_info[3];
+ int arity = func_info[4];
+ Eterm tuple;
+
+ ASSERT(is_atom(name));
+ if (func_info[1] != 0) {
+ Eterm addr = erts_bld_uint(&hp, NULL, func_info[1]);
+ tuple = erts_bld_tuple(&hp, NULL, 3, name, make_small(arity), addr);
+ result = erts_bld_cons(&hp, NULL, tuple, result);
+ }
+ }
+ HRelease(p, hp_end, hp);
+ return result;
+}
+
+
+/*
+ * Builds a list of all exported functions in the given module:
+ * [{Name, Arity},...]
+ *
+ * Returns a tagged term, or THE_NON_VALUE on error.
+ */
+
+Eterm
+exported_from_module(Process* p, /* Process whose heap to use. */
+ Eterm mod) /* Tagged atom for module. */
+{
+ int i;
+ Eterm* hp = NULL;
+ Eterm* hend = NULL;
+ Eterm result = NIL;
+
+ if (is_not_atom(mod)) {
+ return THE_NON_VALUE;
+ }
+
+ for (i = 0; i < export_list_size(); i++) {
+ Export* ep = export_list(i);
+
+ if (ep->code[0] == mod) {
+ Eterm tuple;
+
+ if (ep->address == ep->code+3 &&
+ ep->code[3] == (Eterm) em_call_error_handler) {
+ /* There is a call to the function, but it does not exist. */
+ continue;
+ }
+
+ if (hp == hend) {
+ int need = 10 * 5;
+ hp = HAlloc(p, need);
+ hend = hp + need;
+ }
+ tuple = TUPLE2(hp, ep->code[1], make_small(ep->code[2]));
+ hp += 3;
+ result = CONS(hp, tuple, result);
+ hp += 2;
+ }
+ }
+ HRelease(p,hend,hp);
+ return result;
+}
+
+
+/*
+ * Returns a list of all attributes for the module.
+ *
+ * Returns a tagged term, or THE_NON_VALUE on error.
+ */
+
+Eterm
+attributes_for_module(Process* p, /* Process whose heap to use. */
+ Eterm mod) /* Tagged atom for module. */
+
+{
+ Module* modp;
+ Eterm* code;
+ Eterm* hp;
+ byte* ext;
+ Eterm result = NIL;
+ Eterm* end;
+
+ if (is_not_atom(mod) || (is_not_list(result) && is_not_nil(result))) {
+ return THE_NON_VALUE;
+ }
+
+ modp = erts_get_module(mod);
+ if (modp == NULL) {
+ return THE_NON_VALUE;
+ }
+ code = modp->code;
+ ext = (byte *) code[MI_ATTR_PTR];
+ if (ext != NULL) {
+ hp = HAlloc(p, code[MI_ATTR_SIZE_ON_HEAP]);
+ end = hp + code[MI_ATTR_SIZE_ON_HEAP];
+ result = erts_decode_ext(&hp, &MSO(p), &ext);
+ if (is_value(result)) {
+ ASSERT(hp <= end);
+ }
+ HRelease(p,end,hp);
+ }
+ return result;
+}
+
+
+/*
+ * Returns a list containing compilation information.
+ *
+ * Returns a tagged term, or THE_NON_VALUE on error.
+ */
+
+Eterm
+compilation_info_for_module(Process* p, /* Process whose heap to use. */
+ Eterm mod) /* Tagged atom for module. */
+{
+ Module* modp;
+ Eterm* code;
+ Eterm* hp;
+ byte* ext;
+ Eterm result = NIL;
+ Eterm* end;
+
+ if (is_not_atom(mod) || (is_not_list(result) && is_not_nil(result))) {
+ return THE_NON_VALUE;
+ }
+
+ modp = erts_get_module(mod);
+ if (modp == NULL) {
+ return THE_NON_VALUE;
+ }
+ code = modp->code;
+ ext = (byte *) code[MI_COMPILE_PTR];
+ if (ext != NULL) {
+ hp = HAlloc(p, code[MI_COMPILE_SIZE_ON_HEAP]);
+ end = hp + code[MI_COMPILE_SIZE_ON_HEAP];
+ result = erts_decode_ext(&hp, &MSO(p), &ext);
+ if (is_value(result)) {
+ ASSERT(hp <= end);
+ }
+ HRelease(p,end,hp);
+ }
+ return result;
+}
+
+
+/*
+ * Returns a pointer to {module, function, arity}, or NULL if not found.
+ */
+Eterm*
+find_function_from_pc(Eterm* pc)
+{
+ Range* low = modules;
+ Range* high = low + num_loaded_modules;
+ Range* mid = mid_module;
+
+ while (low < high) {
+ if (pc < mid->start) {
+ high = mid;
+ } else if (pc > mid->end) {
+ low = mid + 1;
+ } else {
+ Eterm** low1 = (Eterm **) (mid->start + MI_FUNCTIONS);
+ Eterm** high1 = low1 + mid->start[MI_NUM_FUNCTIONS];
+ Eterm** mid1;
+
+ while (low1 < high1) {
+ mid1 = low1 + (high1-low1) / 2;
+ if (pc < mid1[0]) {
+ high1 = mid1;
+ } else if (pc < mid1[1]) {
+ mid_module = mid;
+ return mid1[0]+2;
+ } else {
+ low1 = mid1 + 1;
+ }
+ }
+ return NULL;
+ }
+ mid = low + (high-low) / 2;
+ }
+ return NULL;
+}
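+
+/*
+ * The returned pointer addresses the {Module, Function, Arity} triple
+ * inside the i_func_info header, so a typical caller does:
+ *
+ *	Eterm* mfa = find_function_from_pc(pc);
+ *	if (mfa != NULL) {
+ *	    Eterm module = mfa[0];
+ *	    Eterm function = mfa[1];
+ *	    Uint arity = mfa[2];
+ *	    ...
+ *	}
+ */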
+
+/*
+ * Read a specific chunk from a Beam binary.
+ */
+
+Eterm
+code_get_chunk_2(Process* p, Eterm Bin, Eterm Chunk)
+{
+ LoaderState state;
+ Uint chunk = 0;
+ ErlSubBin* sb;
+ Uint offset;
+ Uint bitoffs;
+ Uint bitsize;
+ byte* start;
+ int i;
+ Eterm res;
+ Eterm real_bin;
+ byte* temp_alloc = NULL;
+
+ if ((start = erts_get_aligned_binary_bytes(Bin, &temp_alloc)) == NULL) {
+ error:
+ erts_free_aligned_binary_bytes(temp_alloc);
+ BIF_ERROR(p, BADARG);
+ }
+    state.module = THE_NON_VALUE; /* Suppress diagnostics */
+ state.file_name = "IFF header for Beam file";
+ state.file_p = start;
+ state.file_left = binary_size(Bin);
+ for (i = 0; i < 4; i++) {
+ Eterm* chunkp;
+ Eterm num;
+ if (is_not_list(Chunk)) {
+ goto error;
+ }
+ chunkp = list_val(Chunk);
+ num = CAR(chunkp);
+ Chunk = CDR(chunkp);
+ if (!is_byte(num)) {
+ goto error;
+ }
+ chunk = chunk << 8 | unsigned_val(num);
+ }
+ if (is_not_nil(Chunk)) {
+ goto error;
+ }
+ if (!scan_iff_file(&state, &chunk, 1, 1)) {
+ erts_free_aligned_binary_bytes(temp_alloc);
+ return am_undefined;
+ }
+ ERTS_GET_REAL_BIN(Bin, real_bin, offset, bitoffs, bitsize);
+ if (bitoffs) {
+ res = new_binary(p, state.chunks[0].start, state.chunks[0].size);
+ } else {
+ sb = (ErlSubBin *) HAlloc(p, ERL_SUB_BIN_SIZE);
+ sb->thing_word = HEADER_SUB_BIN;
+ sb->orig = real_bin;
+ sb->size = state.chunks[0].size;
+ sb->bitsize = 0;
+ sb->bitoffs = 0;
+ sb->offs = offset + (state.chunks[0].start - start);
+ sb->is_writable = 0;
+ res = make_binary(sb);
+ }
+ erts_free_aligned_binary_bytes(temp_alloc);
+ return res;
+}
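+
+/*
+ * Example: code:get_chunk(Beam, "Atom") arrives here with Chunk being
+ * the list [$A,$t,$o,$m], which the loop above folds into the 32-bit
+ * chunk id 0x41746F6D before the IFF file is scanned for it.
+ */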
+
+/*
+ * Calculate the MD5 for a module.
+ */
+
+Eterm
+code_module_md5_1(Process* p, Eterm Bin)
+{
+ LoaderState state;
+ byte* temp_alloc = NULL;
+
+ if ((state.file_p = erts_get_aligned_binary_bytes(Bin, &temp_alloc)) == NULL) {
+ BIF_ERROR(p, BADARG);
+ }
+    state.module = THE_NON_VALUE; /* Suppress diagnostics */
+ state.file_name = "IFF header for Beam file";
+ state.file_left = binary_size(Bin);
+
+ if (!scan_iff_file(&state, chunk_types, NUM_CHUNK_TYPES, NUM_MANDATORY)) {
+	erts_free_aligned_binary_bytes(temp_alloc);
+	return am_undefined;
+ }
+ erts_free_aligned_binary_bytes(temp_alloc);
+ return new_binary(p, state.mod_md5, sizeof(state.mod_md5));
+}
+
+#define WORDS_PER_FUNCTION 6
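+
+/*
+ * Assumed layout of each stub built below: fp[0] holds the
+ * i_func_info header, fp[1] the native address (or 0), fp[2..4] the
+ * {Module, Function, Arity} triple, and fp[5] the trap opcode.
+ */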
+
+static Eterm*
+make_stub(Eterm* fp, Eterm mod, Eterm func, Uint arity, Uint native, Eterm OpCode)
+{
+ fp[0] = (Eterm) BeamOp(op_i_func_info_IaaI);
+ fp[1] = native;
+ fp[2] = mod;
+ fp[3] = func;
+ fp[4] = arity;
+#ifdef HIPE
+ if (native) {
+ fp[5] = BeamOpCode(op_move_return_nr);
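+	/* presumably saves the op just stored at fp[5], which the
+	 * unconditional store below will overwrite */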
+ hipe_mfa_save_orig_beam_op(mod, func, arity, fp+5);
+ }
+#endif
+ fp[5] = OpCode;
+ return fp + WORDS_PER_FUNCTION;
+}
+
+static byte*
+stub_copy_info(LoaderState* stp,
+ int chunk, /* Chunk: ATTR_CHUNK or COMPILE_CHUNK */
+ byte* info, /* Where to store info. */
+ Eterm* ptr_word, /* Where to store pointer into info. */
+ Eterm* size_word) /* Where to store size of info. */
+{
+ Sint decoded_size;
+ Uint size = stp->chunks[chunk].size;
+ if (size != 0) {
+ memcpy(info, stp->chunks[chunk].start, size);
+ *ptr_word = (Eterm) info;
+ decoded_size = erts_decode_ext_size(info, size, 0);
+ if (decoded_size < 0) {
+ return 0;
+ }
+ *size_word = decoded_size;
+ }
+ return info + size;
+}
+
+static int
+stub_read_export_table(LoaderState* stp)
+{
+ int i;
+
+ GetInt(stp, 4, stp->num_exps);
+ if (stp->num_exps > stp->num_functions) {
+ LoadError2(stp, "%d functions exported; only %d functions defined",
+ stp->num_exps, stp->num_functions);
+ }
+ stp->export
+ = (ExportEntry *) erts_alloc(ERTS_ALC_T_LOADER_TMP,
+ stp->num_exps * sizeof(ExportEntry));
+
+ for (i = 0; i < stp->num_exps; i++) {
+ Uint n;
+
+ GetInt(stp, 4, n);
+ GetAtom(stp, n, stp->export[i].function);
+ GetInt(stp, 4, n);
+ if (n > MAX_REG) {
+ LoadError2(stp, "export table entry %d: absurdly high arity %d", i, n);
+ }
+ stp->export[i].arity = n;
+ GetInt(stp, 4, n); /* Ignore label */
+ }
+ return 1;
+
+ load_error:
+ return 0;
+}
+
+static void
+stub_final_touch(LoaderState* stp, Eterm* fp)
+{
+ int i;
+ int n = stp->num_exps;
+ Eterm function = fp[3];
+ int arity = fp[4];
+#ifdef HIPE
+ Lambda* lp;
+#endif
+
+ /*
+ * Test if the function should be exported.
+ */
+
+ for (i = 0; i < n; i++) {
+ if (stp->export[i].function == function && stp->export[i].arity == arity) {
+ Export* ep = erts_export_put(fp[2], function, arity);
+ ep->address = fp+5;
+ return;
+ }
+ }
+
+ /*
+ * Must be a plain local function or a lambda local function.
+ * Search the lambda table to find out which.
+ */
+
+#ifdef HIPE
+ n = stp->num_lambdas;
+ for (i = 0, lp = stp->lambdas; i < n; i++, lp++) {
+	ErlFunEntry* fe = lp->fe;
+ if (lp->function == function && lp->arity == arity) {
+ fp[5] = (Eterm) BeamOpCode(op_hipe_trap_call_closure);
+ fe->address = &(fp[5]);
+ }
+ }
+#endif
+ return;
+}
+
+
+/* Takes an Erlang list of addresses:
+   [{Adr, Patchtype} | Addresses]
+ and the address of a fun_entry.
+*/
+int
+patch(Eterm Addresses, Uint fe)
+{
+#ifdef HIPE
+ Eterm* listp;
+ Eterm tuple;
+ Eterm* tp;
+ Eterm patchtype;
+ Uint AddressToPatch;
+
+ while (!is_nil(Addresses)) {
+ listp = list_val(Addresses);
+
+ tuple = CAR(listp);
+ if (is_not_tuple(tuple)) {
+ return 0; /* Signal error */
+ }
+
+ tp = tuple_val(tuple);
+ if (tp[0] != make_arityval(2)) {
+ return 0; /* Signal error */
+ }
+
+ if(term_to_Uint(tp[1], &AddressToPatch) == 0) {
+ return 0; /* Signal error */
+ }
+
+ patchtype = tp[2];
+ if (is_not_atom(patchtype)) {
+ return 0; /* Signal error */
+ }
+
+ hipe_patch_address((Uint *)AddressToPatch, patchtype, fe);
+
+ Addresses = CDR(listp);
+    }
+
+#endif
+ return 1;
+}
+
+
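+/* Takes a list of tuples {Info, Addresses, NativeAddress}, where Info is
+   {{M,F,A}, Uniq, Index}. For each entry, the corresponding fun entry is
+   looked up, its native address recorded, and the listed addresses
+   patched (see patch() above).
+*/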
+int
+patch_funentries(Eterm Patchlist)
+{
+#ifdef HIPE
+ while (!is_nil(Patchlist)) {
+ Eterm Info;
+ Eterm MFA;
+ Eterm Addresses;
+ Eterm tuple;
+ Eterm Mod;
+ Eterm* listp;
+ Eterm* tp;
+ ErlFunEntry* fe;
+ Uint index;
+ Uint uniq;
+ Uint native_address;
+
+ listp = list_val(Patchlist);
+ tuple = CAR(listp);
+ Patchlist = CDR(listp);
+
+ if (is_not_tuple(tuple)) {
+ return 0; /* Signal error */
+ }
+
+ tp = tuple_val(tuple);
+ if (tp[0] != make_arityval(3)) {
+ return 0; /* Signal error */
+ }
+
+ Info = tp[1];
+ if (is_not_tuple(Info)) {
+ return 0; /* Signal error */
+ }
+ Addresses = tp[2];
+ if (is_not_list(Addresses)) {
+ return 0; /* Signal error */
+ }
+
+ if(term_to_Uint(tp[3], &native_address) == 0) {
+ return 0; /* Signal error */
+ }
+
+ tp = tuple_val(Info);
+ if (tp[0] != make_arityval(3)) {
+ return 0; /* Signal error */
+ }
+ MFA = tp[1];
+ if (is_not_tuple(MFA)) {
+ return 0; /* Signal error */
+ }
+ if(term_to_Uint(tp[2], &uniq) == 0){
+ return 0; /* Signal error */
+ }
+ if(term_to_Uint(tp[3], &index) == 0) {
+ return 0; /* Signal error */
+ }
+
+ tp = tuple_val(MFA);
+ if (tp[0] != make_arityval(3)) {
+ return 0; /* Signal error */
+ }
+ Mod = tp[1];
+ if (is_not_atom(Mod)) {
+ return 0; /* Signal error */
+ }
+
+ fe = erts_get_fun_entry(Mod, uniq, index);
+ fe->native_address = (Uint *)native_address;
+ erts_refc_dec(&fe->refc, 1);
+
+ if (!patch(Addresses, (Uint) fe))
+ return 0;
+
+ }
+#endif
+ return 1; /* Signal that all went well */
+}
+
+
+/*
+ * Do a dummy load of a module. No threaded code will be loaded.
+ * Used for loading native code.
+ * Will also patch all references to fun_entries to point to
+ * the new fun_entries created.
+ */
+
+Eterm
+erts_make_stub_module(Process* p, Eterm Mod, Eterm Beam, Eterm Info)
+{
+ LoaderState state;
+ Eterm Funcs;
+ Eterm Patchlist;
+ Eterm* tp;
+ Eterm* code = NULL;
+ Eterm* ptrs;
+ Eterm* fp;
+ byte* info;
+ Uint ci;
+ int n;
+ int code_size;
+ int rval;
+ int i;
+ ErlDrvBinary* bin = NULL;
+ byte* temp_alloc = NULL;
+ byte* bytes;
+ Uint size;
+
+ /*
+ * Must initialize state.lambdas here because the error handling code
+ * at label 'error' uses it.
+ */
+ init_state(&state);
+
+ if (is_not_atom(Mod)) {
+ goto error;
+ }
+ if (is_not_tuple(Info)) {
+ goto error;
+ }
+ tp = tuple_val(Info);
+ if (tp[0] != make_arityval(2)) {
+ goto error;
+ }
+ Funcs = tp[1];
+ Patchlist = tp[2];
+
+ if ((n = list_length(Funcs)) < 0) {
+ goto error;
+ }
+ if ((bytes = erts_get_aligned_binary_bytes(Beam, &temp_alloc)) == NULL) {
+ goto error;
+ }
+ size = binary_size(Beam);
+
+ /*
+     * Uncompress if needed.
+ */
+ if (!(size >= 4 && bytes[0] == 'F' && bytes[1] == 'O' &&
+ bytes[2] == 'R' && bytes[3] == '1')) {
+ bin = (ErlDrvBinary *) erts_gzinflate_buffer((char*)bytes, size);
+ if (bin == NULL) {
+ goto error;
+ }
+ bytes = (byte*)bin->orig_bytes;
+ size = bin->orig_size;
+ }
+
+ /*
+ * Scan the Beam binary and read the interesting sections.
+ */
+
+ state.file_name = "IFF header for Beam file";
+ state.file_p = bytes;
+ state.file_left = size;
+ state.module = Mod;
+ state.group_leader = p->group_leader;
+ state.num_functions = n;
+ if (!scan_iff_file(&state, chunk_types, NUM_CHUNK_TYPES, NUM_MANDATORY)) {
+ goto error;
+ }
+ define_file(&state, "code chunk header", CODE_CHUNK);
+ if (!read_code_header(&state)) {
+ goto error;
+ }
+ define_file(&state, "atom table", ATOM_CHUNK);
+ if (!load_atom_table(&state)) {
+ goto error;
+ }
+ define_file(&state, "export table", EXP_CHUNK);
+ if (!stub_read_export_table(&state)) {
+ goto error;
+ }
+
+ if (state.chunks[LAMBDA_CHUNK].size > 0) {
+ define_file(&state, "lambda (fun) table", LAMBDA_CHUNK);
+ if (!read_lambda_table(&state)) {
+ goto error;
+ }
+ }
+
+ /*
+ * Allocate memory for the stub module.
+ */
+
+ code_size = ((WORDS_PER_FUNCTION+1)*n + MI_FUNCTIONS + 2) * sizeof(Eterm);
+ code_size += state.chunks[ATTR_CHUNK].size;
+ code_size += state.chunks[COMPILE_CHUNK].size;
+ code = erts_alloc_fnf(ERTS_ALC_T_CODE, code_size);
+ if (!code) {
+ goto error;
+ }
+
+ /*
+ * Initialize code area.
+ */
+
+ code[MI_NUM_FUNCTIONS] = n;
+ code[MI_ATTR_PTR] = 0;
+ code[MI_ATTR_SIZE_ON_HEAP] = 0;
+ code[MI_COMPILE_PTR] = 0;
+ code[MI_COMPILE_SIZE_ON_HEAP] = 0;
+ code[MI_NUM_BREAKPOINTS] = 0;
+ ci = MI_FUNCTIONS + n + 1;
+
+ /*
+ * Make stubs for all functions.
+ */
+
+ ptrs = code + MI_FUNCTIONS;
+ fp = code + ci;
+ for (i = 0; i < n; i++) {
+ Eterm* listp;
+ Eterm tuple;
+ Eterm* tp;
+ Eterm func;
+ Eterm arity_term;
+ Uint arity;
+ Uint native_address;
+ Eterm op;
+
+ if (is_nil(Funcs)) {
+ break;
+ }
+ listp = list_val(Funcs);
+ tuple = CAR(listp);
+ Funcs = CDR(listp);
+
+ /* Error checking */
+ if (is_not_tuple(tuple)) {
+ goto error;
+ }
+ tp = tuple_val(tuple);
+ if (tp[0] != make_arityval(3)) {
+ goto error;
+ }
+ func = tp[1];
+ arity_term = tp[2];
+ if (is_not_atom(func) || is_not_small(arity_term)) {
+ goto error;
+ }
+	if (signed_val(arity_term) < 0) {
+	    goto error;
+	}
+	arity = signed_val(arity_term);
+ if (term_to_Uint(tp[3], &native_address) == 0) {
+ goto error;
+ }
+
+ /*
+ * Set the pointer and make the stub. Put a return instruction
+ * as the body until we know what kind of trap we should put there.
+ */
+ ptrs[i] = (Eterm) fp;
+#ifdef HIPE
+ op = (Eterm) BeamOpCode(op_hipe_trap_call); /* Might be changed later. */
+#else
+ op = (Eterm) BeamOpCode(op_move_return_nr);
+#endif
+ fp = make_stub(fp, Mod, func, arity, (Uint)native_address, op);
+ }
+
+ /*
+ * Insert the last pointer and the int_code_end instruction.
+ */
+
+ ptrs[i] = (Eterm) fp;
+ *fp++ = (Eterm) BeamOp(op_int_code_end);
+
+ /*
+ * Copy attributes and compilation information.
+ */
+
+ info = (byte *) fp;
+ info = stub_copy_info(&state, ATTR_CHUNK, info,
+ code+MI_ATTR_PTR, code+MI_ATTR_SIZE_ON_HEAP);
+ if (info == NULL) {
+ goto error;
+ }
+ info = stub_copy_info(&state, COMPILE_CHUNK, info,
+ code+MI_COMPILE_PTR, code+MI_COMPILE_SIZE_ON_HEAP);
+ if (info == NULL) {
+ goto error;
+ }
+
+ /*
+ * Insert the module in the module table.
+ */
+
+ rval = insert_new_code(p, 0, p->group_leader, Mod, code, code_size,
+ BEAM_CATCHES_NIL);
+ if (rval < 0) {
+ goto error;
+ }
+
+ /*
+ * Export all stub functions and insert the correct type of HiPE trap.
+ */
+
+ fp = code + ci;
+ for (i = 0; i < n; i++) {
+ stub_final_touch(&state, fp);
+ fp += WORDS_PER_FUNCTION;
+ }
+
+ if (patch_funentries(Patchlist)) {
+ erts_free_aligned_binary_bytes(temp_alloc);
+ if (state.lambdas != state.def_lambdas) {
+ erts_free(ERTS_ALC_T_LOADER_TMP, (void *) state.lambdas);
+ }
+ if (bin != NULL) {
+ driver_free_binary(bin);
+ }
+ return Mod;
+ }
+
+ error:
+ erts_free_aligned_binary_bytes(temp_alloc);
+ if (code != NULL) {
+ erts_free(ERTS_ALC_T_CODE, code);
+ }
+ if (state.lambdas != state.def_lambdas) {
+ erts_free(ERTS_ALC_T_LOADER_TMP, (void *) state.lambdas);
+ }
+ if (bin != NULL) {
+ driver_free_binary(bin);
+ }
+
+ BIF_ERROR(p, BADARG);
+}
+
+#undef WORDS_PER_FUNCTION
+
+static int safe_mul(Uint a, Uint b, Uint* resp)
+{
+ Uint res = a * b;
+ *resp = res;
+
+ if (b == 0) {
+ return 1;
+ } else {
+ return (res / b) == a;
+ }
+}
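+
+/*
+ * Usage sketch (not part of the original source): safe_mul() reports
+ * overflow by returning 0, so a caller can validate a size calculation
+ * before allocating, e.g.
+ *
+ *     Uint bytes;
+ *     if (!safe_mul(count, sizeof(Eterm), &bytes)) {
+ *         goto error;     (count * sizeof(Eterm) overflowed)
+ *     }
+ */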
diff --git a/erts/emulator/beam/beam_load.h b/erts/emulator/beam/beam_load.h
new file mode 100644
index 0000000000..c17844a553
--- /dev/null
+++ b/erts/emulator/beam/beam_load.h
@@ -0,0 +1,120 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 1999-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+#ifndef _BEAM_LOAD_H
+# define _BEAM_LOAD_H
+
+#include "beam_opcodes.h"
+#include "erl_process.h"
+
+int beam_make_current_old(Process *c_p, ErtsProcLocks c_p_locks, Eterm module);
+
+typedef struct gen_op_entry {
+ char* name;
+ int arity;
+ int specific;
+ int num_specific;
+ int transform;
+ int min_window;
+} GenOpEntry;
+
+extern GenOpEntry gen_opc[];
+
+#ifdef NO_JUMP_TABLE
+#define BeamOp(Op) (Op)
+#else
+extern void** beam_ops;
+#define BeamOp(Op) beam_ops[(Op)]
+#endif
+
+
+extern Eterm beam_debug_apply[];
+extern Eterm* em_call_error_handler;
+extern Eterm* em_apply_bif;
+extern Eterm* em_call_traced_function;
+typedef struct {
+ Eterm* start; /* Pointer to start of module. */
+ Eterm* end; /* Points one word beyond last function in module. */
+} Range;
+
+/*
+ * The following variables keep a sorted list of address ranges for
+ * each module. It allows us to quickly find a function given an
+ * instruction pointer.
+ */
+
+extern Range* modules;
+extern int num_loaded_modules;
+extern int allocated_modules;
+extern Range* mid_module;
+
+/* Total code size in bytes */
+extern Uint erts_total_code_size;
+/*
+ * Indices into the start of the code chunk, which contains additional
+ * information about the loaded module.
+ *
+ * First, the number of functions.
+ */
+
+#define MI_NUM_FUNCTIONS 0
+
+/*
+ * The attributes retrieved by Mod:module_info(attributes).
+ */
+
+#define MI_ATTR_PTR 1
+#define MI_ATTR_SIZE 2
+#define MI_ATTR_SIZE_ON_HEAP 3
+
+/*
+ * The compilation information retrieved by Mod:module_info(compile).
+ */
+
+#define MI_COMPILE_PTR 4
+#define MI_COMPILE_SIZE 5
+#define MI_COMPILE_SIZE_ON_HEAP 6
+
+/*
+ * Number of breakpoints in module is stored in this word
+ */
+#define MI_NUM_BREAKPOINTS 7
+
+/*
+ * Literal area (constant pool).
+ */
+#define MI_LITERALS_START 8
+#define MI_LITERALS_END 9
+
+/*
+ * Pointer to the on_load function (or NULL if none).
+ */
+#define MI_ON_LOAD_FUNCTION_PTR 10
+
+/*
+ * Start of function pointer table. This table contains pointers to
+ * all functions in the module plus an additional pointer just beyond
+ * the end of the last function.
+ *
+ * The actual loaded code (for the first function) starts just beyond
+ * this table.
+ */
+
+#define MI_FUNCTIONS 11
+#endif /* _BEAM_LOAD_H */
diff --git a/erts/emulator/beam/benchmark.c b/erts/emulator/beam/benchmark.c
new file mode 100644
index 0000000000..7fbf44a03c
--- /dev/null
+++ b/erts/emulator/beam/benchmark.c
@@ -0,0 +1,395 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2002-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+
+#include "sys.h"
+#include "global.h"
+#include "benchmark.h"
+
+#ifdef BM_COUNTERS
+unsigned long long processes_busy;
+unsigned long long processes_spawned;
+unsigned long long messages_sent;
+unsigned long long messages_copied;
+unsigned long long messages_ego;
+unsigned long long minor_gc;
+unsigned long long major_gc;
+#ifdef HYBRID
+unsigned long long minor_global_gc;
+unsigned long long major_global_gc;
+unsigned long long gc_in_copy;
+#ifdef INCREMENTAL
+unsigned long long minor_gc_cycles;
+unsigned long long major_gc_cycles;
+unsigned long long minor_gc_stages;
+unsigned long long major_gc_stages;
+#endif
+#endif
+#endif /* BM_COUNTERS */
+
+#ifdef BM_TIMERS
+
+#if (defined(__i386__) || defined(__x86_64__)) && USE_PERFCTR
+
+#include "libperfctr.h"
+struct vperfctr *system_clock;
+double cpu_khz;
+BM_NEW_TIMER(start);
+
+static double get_hrvtime(void)
+{
+ unsigned long long ticks;
+ double milli_seconds;
+
+ ticks = vperfctr_read_tsc(system_clock);
+ milli_seconds = (double)ticks / cpu_khz;
+ return milli_seconds;
+}
+
+static void stop_hrvtime(void)
+{
+ if(system_clock)
+ {
+ vperfctr_stop(system_clock);
+ vperfctr_close(system_clock);
+ system_clock = NULL;
+ }
+}
+
+#else /* not perfctr, assuming Solaris */
+#include <time.h>
+BM_TIMER_T system_clock;
+#endif
+
+unsigned long local_pause_times[MAX_PAUSE_TIME];
+unsigned long pause_times[MAX_PAUSE_TIME];
+unsigned long pause_times_old[MAX_PAUSE_TIME];
+
+BM_TIMER_T mmu;
+BM_TIMER_T mmu_counter;
+
+BM_NEW_TIMER(timer);
+BM_NEW_TIMER(system);
+BM_NEW_TIMER(gc);
+BM_NEW_TIMER(minor_gc);
+BM_NEW_TIMER(major_gc);
+BM_NEW_TIMER(minor_global_gc);
+BM_NEW_TIMER(major_global_gc);
+BM_NEW_TIMER(send);
+BM_NEW_TIMER(copy);
+BM_NEW_TIMER(size);
+BM_NEW_TIMER(max_minor);
+BM_NEW_TIMER(max_major);
+BM_NEW_TIMER(max_global_minor);
+BM_NEW_TIMER(max_global_major);
+BM_NEW_TIMER(misc0);
+BM_NEW_TIMER(misc1);
+BM_NEW_TIMER(misc2);
+#endif /* BM_TIMERS */
+
+#ifdef BM_HEAP_SIZES
+unsigned long long max_used_heap;
+unsigned long long max_allocated_heap;
+unsigned long long max_used_global_heap;
+unsigned long long max_allocated_global_heap;
+#endif /* BM_HEAP_SIZES */
+
+#ifdef BM_MESSAGE_SIZES
+unsigned long long words_sent;
+unsigned long long words_copied;
+unsigned long long words_prealloc;
+unsigned long long message_sizes[1000];
+#endif /* BM_MESSAGE_SIZES */
+
+/*****
+ * The following functions always have to be defined, but they only have
+ * contents if certain preprocessor symbols are defined.
+ */
+
+void init_benchmarking(void)
+{
+#ifdef BM_TIMERS
+#if (defined(__i386__) || defined(__x86_64__)) && USE_PERFCTR
+ /* pass `--with-perfctr=/path/to/perfctr' when configuring */
+ struct perfctr_info info;
+ struct vperfctr_control control;
+ int i;
+
+ system_clock = vperfctr_open();
+ if (system_clock != NULL)
+ {
+ if (vperfctr_info(system_clock,&info) >= 0)
+ {
+ cpu_khz = (double)info.cpu_khz;
+ if (info.cpu_features & PERFCTR_FEATURE_RDTSC)
+ {
+ memset(&control,0,sizeof control);
+ control.cpu_control.tsc_on = 1;
+ }
+ }
+ if (vperfctr_control(system_clock,&control) < 0)
+ {
+ vperfctr_close(system_clock);
+ system_clock = NULL;
+ }
+ }
+
+ for (i = 0; i < 1000; i++)
+ {
+ BM_START_TIMER(system);
+ BM_STOP_TIMER(system);
+ }
+
+ timer_time = system_time / 1000;
+ start_time = 0;
+#else
+ int i;
+ for (i = 0; i < 1000; i++)
+ {
+ BM_START_TIMER(system);
+ BM_STOP_TIMER(system);
+ }
+ timer_time = system_time / 1000;
+#endif
+
+ for (i = 0; i < MAX_PAUSE_TIME; i++) {
+ local_pause_times[i] = 0;
+ pause_times[i] = 0;
+ pause_times_old[i] = 0;
+ }
+
+ mmu = 0;
+ mmu_counter = 0;
+
+ BM_MMU_INIT();
+#endif /* BM_TIMERS */
+
+#ifdef BM_COUNTERS
+ processes_busy = 0;
+ processes_spawned = 0;
+ messages_sent = 0;
+ messages_copied = 0;
+ messages_ego = 0;
+ minor_gc = 0;
+ major_gc = 0;
+#ifdef HYBRID
+ minor_global_gc = 0;
+ major_global_gc = 0;
+ gc_in_copy = 0;
+#ifdef INCREMENTAL
+ minor_gc_cycles = 0;
+ major_gc_cycles = 0;
+ minor_gc_stages = 0;
+ major_gc_stages = 0;
+#endif
+#endif
+#endif /* BM_COUNTERS */
+
+#ifdef BM_HEAP_SIZES
+ max_used_heap = 0;
+ max_allocated_heap = 0;
+ max_used_global_heap = 0;
+ max_allocated_global_heap = 0;
+#endif /* BM_HEAP_SIZES */
+
+#ifdef BM_MESSAGE_SIZES
+ words_sent = 0;
+ words_copied = 0;
+ words_prealloc = 0;
+ {
+ int i;
+ for (i = 0; i < 1000; i++)
+ message_sizes[i] = 0;
+ }
+#endif /* BM_MESSAGE_SIZES */
+}
+
+void save_statistics(void)
+{
+#ifdef BM_STATISTICS
+ FILE *file = fopen(BM_STATISTICS_FILE,"a");
+ long i = 0;
+
+ if (file)
+ {
+ erts_fprintf(file,"-------------------------------------------------------------------------\n");
+ erts_fprintf(file,"The counters are reset at system start and are sums over the entire node.\n");
+ erts_fprintf(file,"You may reset them manually using the BIFs in the module hipe_bifs.\n");
+ erts_fprintf(file,"All times are given in milliseconds.\n");
+ erts_fprintf(file,"-------------------------------------------------------------------------\n");
+
+ erts_fprintf(file,"Node: %T\n",erts_this_node->sysname);
+
+#ifdef BM_COUNTERS
+ erts_fprintf(file,"Number of processes spawned: %lld\n",processes_spawned);
+ erts_fprintf(file,"Number of local minor GCs: %lld\n",minor_gc);
+ erts_fprintf(file,"Number of local major GCs: %lld\n",major_gc);
+#ifdef HYBRID
+ erts_fprintf(file,"Number of global minor GCs: %lld\n",minor_global_gc);
+ erts_fprintf(file,"Number of global major GCs: %lld\n",major_global_gc);
+#ifdef INCREMENTAL
+ erts_fprintf(file,"Number of minor GC-cycles: %lld\n",minor_gc_cycles);
+ erts_fprintf(file,"Number of major GC-cycles: %lld\n",major_gc_cycles);
+ erts_fprintf(file,"Number of minor GC-stages: %lld\n",minor_gc_stages);
+ erts_fprintf(file,"Number of major GC-stages: %lld\n",major_gc_stages);
+#endif
+#endif
+ erts_fprintf(file,"Number of messages sent: %lld\n",messages_sent);
+ erts_fprintf(file,"Number of messages copied: %lld\n",messages_copied);
+ erts_fprintf(file,"Number of messages sent to self: %lld\n",messages_ego);
+#endif /* BM_COUNTERS */
+
+#ifdef BM_MESSAGE_SIZES
+ erts_fprintf(file,"Number of words sent: %lld\n",words_sent);
+ erts_fprintf(file,"Number of words copied: %lld\n",words_copied);
+ erts_fprintf(file,"Number of words preallocated: %lld\n",words_prealloc);
+#endif /* BM_MESSAGE_SIZES */
+
+#ifdef BM_HEAP_SIZES
+ erts_fprintf(file,"Biggest local heap used (in words): %lld\n",max_used_heap);
+ erts_fprintf(file,"Biggest local heap allocated (in words): %lld\n",max_allocated_heap);
+ erts_fprintf(file,"Biggest global heap used (in words): %lld\n",max_used_global_heap);
+ erts_fprintf(file,"Biggest global heap allocated (in words): %lld\n",max_allocated_global_heap);
+#endif /* BM_HEAP_SIZES */
+
+#ifdef BM_TIMERS
+ erts_fprintf(file,"--- The total active system time is the sum of all times below ---\n");
+ BM_TIME_PRINTER("Mutator time",system_time);
+ BM_TIME_PRINTER("Time spent in send (excluding size & copy)",send_time);
+ BM_TIME_PRINTER("Time spent in size",size_time);
+ BM_TIME_PRINTER("Time spent in copy",copy_time);
+ BM_TIME_PRINTER("Time spent in local minor GC",minor_gc_time);
+ BM_TIME_PRINTER("Time spent in local major GC",major_gc_time);
+ BM_TIME_PRINTER("Time spent in global minor GC",minor_global_gc_time);
+ BM_TIME_PRINTER("Time spent in global major GC",major_global_gc_time);
+ erts_fprintf(file,"---\n");
+ BM_TIME_PRINTER("Maximum time spent in one separate local minor GC",max_minor_time);
+ BM_TIME_PRINTER("Maximum time spent in one separate local major GC",max_major_time);
+ BM_TIME_PRINTER("Maximum time spent in one separate global minor GC",max_global_minor_time);
+ BM_TIME_PRINTER("Maximum time spent in one separate global major GC",max_global_major_time);
+#endif /* BM_TIMERS */
+
+#if 0
+ /* Save a log file for import into excel */
+
+ long long total_time, n;
+ long left, right, mid;
+
+#ifdef BM_COUNTERS
+ erts_fprintf(file,"Spawns\tLocalGC\tMAGC\tMessages\tMutator_t\tLocalGC_t\tMAGC_t\tLocMaxP\tLocMeanP\tLocGeoMP\tMAMaxP\tMAMeanP\tMAGeoMP\t\tCMAGC\tCMAGC_t\n");
+ erts_fprintf(file,"%lld\t%lld\t%lld\t%lld\t",
+ processes_spawned,
+		     minor_gc + major_gc,
+		     minor_global_gc + major_global_gc,
+ messages_sent);
+#endif /* BM_COUNTERS */
+
+#ifdef BM_TIMERS
+ erts_fprintf(file,"%lld\t%lld\t%lld\t",
+ (long long)(system_time + send_time + size_time + copy_time),
+ (long long)(minor_gc_time + major_gc_time),
+ (long long)(minor_global_gc_time + major_global_gc_time));
+
+ total_time = 0; n = 0;
+ left = 0; right = 0; mid = 0;
+ for (i = 0; i < MAX_PAUSE_TIME; i++) {
+ total_time += local_pause_times[i] * i;
+ n += local_pause_times[i];
+ if (i > mid)
+ right += local_pause_times[i];
+ while(right > left) {
+ left += local_pause_times[mid++];
+ right -= local_pause_times[mid];
+ }
+ }
+ erts_fprintf(file,"%lld\t%lld\t%ld\t",
+ (long long)((max_minor_time > max_major_time ?
+ max_minor_time :
+ max_major_time)*1000),
+ total_time / n,
+ mid);
+
+ total_time = 0; n = 0;
+ left = 0; right = 0; mid = 0;
+ for (i = 0; i < MAX_PAUSE_TIME; i++) {
+ if (pause_times[i] > 0) {
+ total_time += pause_times[i] * i;
+ n += pause_times[i];
+ if (i > mid)
+ right += pause_times[i];
+ while(right > left) {
+ left += pause_times[mid++];
+ right -= pause_times[mid];
+ }
+ }
+ }
+ erts_fprintf(file,"%lld\t%lld\t%ld\t",
+ (long long)((max_global_minor_time > max_global_major_time ?
+ max_global_minor_time :
+ max_global_major_time)*1000),
+ (n > 0 ? total_time / n : 0),
+ mid);
+
+ erts_fprintf(file,"\t%lld\t%lld\n",n,total_time);
+
+ erts_fprintf(file,"\nMinor:\n");
+ for (i = 0; i < MAX_PAUSE_TIME; i++) {
+ if (i < 1000 || pause_times[i] > 0) {
+ erts_fprintf(file,"%d\t%ld\n",i,pause_times[i]);
+ }
+ }
+
+ fprintf(file,"Major:\n");
+ for (i = 0; i < MAX_PAUSE_TIME; i++) {
+ if (pause_times_old[i] > 0) {
+ fprintf(file,"%d\t%ld\n",i,pause_times_old[i]);
+ }
+ }
+#endif /* BM_TIMERS */
+
+#ifdef BM_TIMERS
+ total_time = 0; n = 0;
+ left = 0; right = 0; mid = 0;
+ fprintf(file,"\nLocal:\n");
+ for (i = 0; i < MAX_PAUSE_TIME; i++) {
+ if (local_pause_times[i] > 0) {
+ erts_fprintf(file,"%d\t%ld\n",i,local_pause_times[i]);
+ total_time += local_pause_times[i] * i;
+ n += local_pause_times[i];
+ if (i > mid)
+ right += local_pause_times[i];
+ while(right > left) {
+ left += local_pause_times[mid++];
+ right -= local_pause_times[mid];
+ }
+ }
+ }
+ erts_fprintf(file,"Mid: %ld Mean: %ld\n",(long)mid,
+ (long)(n > 0 ? total_time / n : 0));
+#endif
+#endif /* 0 */
+ fclose(file);
+ }
+ else
+ fprintf(stderr,"Sorry... Can not write to %s!\n\r",BM_STATISTICS_FILE);
+#endif /* BM_STATISTICS */
+}
diff --git a/erts/emulator/beam/benchmark.h b/erts/emulator/beam/benchmark.h
new file mode 100644
index 0000000000..eedb06a1b6
--- /dev/null
+++ b/erts/emulator/beam/benchmark.h
@@ -0,0 +1,340 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2002-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+#ifndef __BENCHMARK_H__
+#define __BENCHMARK_H__
+
+/* The define __BENCHMARK__ is the master switch to turn on and off
+ * benchmarking. This will enable the benchmark-BIFs in hipe_bif1.c.
+ * Documentation for the BIFs is in hipe_bif1.c, and that is where you
+ * will find the information about how to actually get some data out
+ * from these timers and counters.
+ */
+/* #define __BENCHMARK__ */
+
+#ifdef __BENCHMARK__
+/*
+ * The defines below enable different parts of the benchmarking.
+ * Counters and timers that are disabled always report zero in
+ * the BIFs.
+ */
+
+/* BM_TIMERS keeps track of the time spent in different parts of the
+ * system. It only measures actual active time, not time spent in idle
+ * mode. These timers require hardware support. For Linux, use the
+ * package perfctr from user.it.uu.se/~mikpe/linux/perfctr. If this
+ * package is not specified when configuring the system
+ * (--with-perfctr=PATH), the Solaris hrtime_t will be used.
+ * To add new timers look below.
+ */
+#define BM_TIMERS
+
+/* BM_COUNTERS count all kinds of events that occur in the system.
+ * Among other things it counts the number of messages, the number of
+ * garbage collections, the number of processes spawned etc.
+ * To add new counters look below.
+ */
+#define BM_COUNTERS
+
+/* BM_MESSAGE_SIZES keeps a log of the size of all messages sent in
+ * the system. This introduces a time overhead for the shared heap
+ * system, since all message sizes have to be calculated at send.
+ */
+/* #define BM_MESSAGE_SIZES */
+
+/* BM_HEAP_SIZES goes through all processes at garbage collection time
+ * to sum their allocated and used heap sizes. In anything other than a
+ * shared heap system, this is costly.
+ */
+/* #define BM_HEAP_SIZES */
+
+/* BM_STATISTICS saves an entry in the file BM_STATISTICS_FILE. This
+ * is done for each erlang node at exit time.
+ */
+/* #define BM_STATISTICS */
+
+#endif /* __BENCHMARK__ */
+
+
+#ifdef BM_STATISTICS
+# define BM_STATISTICS_FILE "/tmp/erlang_statistics.joppe.log"
+#endif /* BM_STATISTICS */
+
+
+/************ There are no more settings below this line *************/
+
+/*
+ * Maintenance and how to add new stuff is documented by the code
+ * below ;-)
+ */
+
+#ifdef BM_COUNTERS
+/*********************************************************************
+ * To add new counters:
+ *
+ * Add the variable here AND in benchmark.c. Use the macro
+ * BM_COUNT(var) in the code where you want to increase it.
+ *
+ */
+extern unsigned long long processes_busy;
+extern unsigned long long processes_spawned;
+extern unsigned long long messages_sent;
+extern unsigned long long messages_copied;
+extern unsigned long long messages_ego;
+extern unsigned long long minor_gc;
+extern unsigned long long major_gc;
+#ifdef HYBRID
+extern unsigned long long minor_global_gc;
+extern unsigned long long major_global_gc;
+extern unsigned long long gc_in_copy;
+#ifdef INCREMENTAL
+extern unsigned long long minor_gc_cycles;
+extern unsigned long long major_gc_cycles;
+extern unsigned long long minor_gc_stages;
+extern unsigned long long major_gc_stages;
+#endif
+#endif
+
+#define BM_COUNT(var) (var)++;
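+
+/* Example: the BM_MESSAGE macro further down does BM_COUNT(messages_sent)
+ * to bump one of the counters declared above. */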
+
+#define BM_EGO_COUNT(send,rec) { \
+ if ((send) == (rec)) \
+ BM_COUNT(messages_ego); }
+
+#define BM_LAZY_COPY_START long long gcs = minor_global_gc + major_global_gc;
+#define BM_LAZY_COPY_STOP { gcs = (minor_global_gc + major_global_gc) - gcs; \
+ if (gcs > gc_in_copy) gc_in_copy = gcs; }
+
+#else /* !BM_COUNTERS */
+# define BM_COUNT(var)
+# define BM_EGO_COUNT(send,rec)
+# define BM_LAZY_COPY_START
+# define BM_LAZY_COPY_STOP
+#endif /* BM_COUNTERS */
+
+
+#ifdef BM_TIMERS
+/*********************************************************************
+ * To add new timers:
+ *
+ * Add the variable below using the form extern BM_TIMER_T blah_time.
+ * Also add them in benchmark.c using the macro NEW_TIMER(blah). Use
+ * the macro BM_SWAP_TIMER(from,blah) ... BM_SWAP_TIMER(blah,to) to
+ * start and stop the new timer. Note, that you have to know what
+ * timer is running at the place where you want to insert your new
+ * timer to be able to stop and start (from,to) it.
+ *
+ * You can use the macros BM_STOP_TIMER(blah) and BM_START_TIMER(blah)
+ * around code that should not be timed at all. As above, you have to
+ * know what timer to start and stop. The system timer is running at
+ * most places in the emulator. Only the garbage collector and the
+ * message sending has its own timers at the moment.
+ *
+ * The timer_time used when stopping timers is the time it takes to
+ * start and stop the timers, calculated in init_benchmarking(). If it
+ * is not compensated for, the time it takes to do this will actually be
+ * substantial compared to some of the small times in the system we want
+ * to measure (send time in the shared heap system, for instance).
+ */
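+
+/*
+ * Illustrative sketch (not part of the original source): adding a
+ * hypothetical timer "blah" following the recipe above. In benchmark.c:
+ *
+ *     BM_NEW_TIMER(blah);
+ *
+ * and in this file:
+ *
+ *     extern BM_TIMER_T blah_time;
+ *
+ * Then, at a point where the system timer is known to be running:
+ *
+ *     BM_SWAP_TIMER(system,blah);
+ *     ... code to be measured ...
+ *     BM_SWAP_TIMER(blah,system);
+ */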
+
+#if (defined(__i386__) || defined(__x86_64__)) && USE_PERFCTR
+#include "libperfctr.h"
+
+#define BM_TIMER_T double
+
+extern struct vperfctr *system_clock;
+extern double cpu_khz;
+extern BM_TIMER_T start_time;
+
+#define BM_START_TIMER(t) start_time = \
+ (BM_TIMER_T)vperfctr_read_tsc(system_clock) / \
+ cpu_khz;
+
+#define BM_STOP_TIMER(t) do { \
+ BM_TIMER_T tmp = ((BM_TIMER_T)vperfctr_read_tsc(system_clock) / cpu_khz); \
+ tmp -= (start_time + timer_time); \
+ t##_time += (tmp > 0 ? tmp : 0); \
+} while(0)
+
+#define BM_TIME_PRINTER(str,time) do { \
+ int min,sec,milli,micro; \
+ BM_TIMER_T tmp = (time) * 1000; \
+ micro = (uint)(tmp - ((int)(tmp / 1000)) * 1000); \
+ tmp /= 1000; \
+ milli = (uint)(tmp - ((int)(tmp / 1000)) * 1000); \
+ tmp /= 1000; \
+ sec = (uint)(tmp - ((int)(tmp / 60)) * 60); \
+ min = (uint)tmp / 60; \
+ erts_fprintf(file,str": %d:%02d.%03d %03d\n",min,sec,milli,micro); \
+} while(0)
+
+#else /* !USE_PERFCTR (Assuming Solaris) */
+
+#define BM_TIMER_T hrtime_t
+#define BM_START_TIMER(t) system_clock = sys_gethrtime()
+#define BM_STOP_TIMER(t) do { \
+ BM_TIMER_T tmp = (sys_gethrtime() - system_clock) - timer_time; \
+ t##_time += (tmp > 0 ? tmp : 0); \
+} while(0)
+
+#define BM_TIME_PRINTER(str,time) do { \
+ int min,sec,milli,micro; \
+ BM_TIMER_T tmp; \
+ tmp = (time) / 1000; \
+ micro = tmp % 1000; \
+ tmp /= 1000; \
+ milli = tmp % 1000; \
+ tmp /= 1000; \
+ sec = tmp % 60; \
+ min = tmp / 60; \
+ erts_fprintf(file,str": %d:%02d.%03d %03d\n",min,sec,milli,micro); \
+} while(0)
+
+extern BM_TIMER_T system_clock;
+#endif /* USE_PERFCTR */
+
+extern BM_TIMER_T timer_time;
+extern BM_TIMER_T system_time;
+extern BM_TIMER_T gc_time;
+extern BM_TIMER_T minor_gc_time;
+extern BM_TIMER_T major_gc_time;
+extern BM_TIMER_T minor_global_gc_time;
+extern BM_TIMER_T major_global_gc_time;
+extern BM_TIMER_T send_time;
+extern BM_TIMER_T copy_time;
+extern BM_TIMER_T size_time;
+extern BM_TIMER_T max_minor_time;
+extern BM_TIMER_T max_major_time;
+extern BM_TIMER_T max_global_minor_time;
+extern BM_TIMER_T max_global_major_time;
+extern BM_TIMER_T misc0_time;
+extern BM_TIMER_T misc1_time;
+extern BM_TIMER_T misc2_time;
+
+#define MAX_PAUSE_TIME 500000
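+/* Histograms of GC pause times: each index is a pause length and the
+ * stored value is the number of pauses of that length (summarized by
+ * save_statistics() in benchmark.c). */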
+extern unsigned long local_pause_times[MAX_PAUSE_TIME];
+extern unsigned long pause_times[MAX_PAUSE_TIME];
+extern unsigned long pause_times_old[MAX_PAUSE_TIME];
+
+#define MMU_INTERVAL 5 /* milliseconds */
+extern BM_TIMER_T mmu_counter;
+extern BM_TIMER_T mmu;
+
+#define BM_NEW_TIMER(t) BM_TIMER_T t##_time = 0;
+#define BM_RESET_TIMER(t) t##_time = 0;
+#define BM_SWAP_TIMER(t1,t2) do { BM_STOP_TIMER(t1); BM_START_TIMER(t2); } while(0)
+#define BM_MMU_INIT() do { \
+ BM_TIMER_T gc = gc_time; \
+ while (gc > 0) { \
+ if (gc > MMU_INTERVAL) { \
+ gc -= MMU_INTERVAL - mmu_counter; \
+ erts_printf("%d\n",(int)((mmu / MMU_INTERVAL) * 100)); \
+ mmu_counter = 0; mmu = 0; \
+ } else { \
+ mmu_counter += gc; \
+ if (mmu_counter >= MMU_INTERVAL) { \
+ mmu_counter -= MMU_INTERVAL; \
+ erts_printf("%d\n",(int)((mmu / MMU_INTERVAL) * 100)); \
+ mmu = 0; \
+ } \
+ gc = 0; \
+ } \
+ } \
+ BM_RESET_TIMER(system); \
+ BM_RESET_TIMER(send); \
+ BM_RESET_TIMER(copy); \
+ BM_RESET_TIMER(size); \
+} while(0)
+
+#define BM_MMU_READ() do { \
+ BM_TIMER_T mut = system_time + send_time + copy_time + size_time; \
+ while (mut > 0) { \
+ if (mut > MMU_INTERVAL) { \
+ BM_TIMER_T tmp = MMU_INTERVAL - mmu_counter; \
+ mmu += tmp; mut -= tmp; \
+ erts_printf("%d\n",(int)((mmu / MMU_INTERVAL) * 100)); \
+ mmu_counter = 0; mmu = 0; \
+ } else { \
+ mmu_counter += mut; mmu += mut; \
+ if (mmu_counter >= MMU_INTERVAL) { \
+ mmu_counter -= MMU_INTERVAL; \
+ mmu -= mmu_counter; \
+ erts_printf("%d\n",(int)((mmu / MMU_INTERVAL) * 100)); \
+ mmu = mmu_counter; \
+ } \
+ mut = 0; \
+ } \
+ } \
+} while(0)
+
+#else /* !BM_TIMERS */
+# define BM_NEW_TIMER(t)
+# define BM_START_TIMER(t)
+# define BM_STOP_TIMER(t)
+# define BM_RESET_TIMER(t)
+# define BM_SWAP_TIMER(t1,t2)
+# define BM_TIME_PRINTER(str,time)
+# define BM_MMU_INIT()
+# define BM_MMU_READ()
+#endif /* BM_TIMERS */
+
+#ifdef BM_HEAP_SIZES
+extern unsigned long long max_used_heap;
+extern unsigned long long max_allocated_heap;
+extern unsigned long long max_used_global_heap;
+extern unsigned long long max_allocated_global_heap;
+#endif /* BM_HEAP_SIZES */
+
+#ifdef BM_MESSAGE_SIZES
+extern unsigned long long words_sent;
+extern unsigned long long words_copied;
+extern unsigned long long words_prealloc;
+extern unsigned long long message_sizes[1000];
+
+#define BM_MESSAGE_COPIED(size) { \
+ words_copied += size; \
+ BM_COUNT(messages_copied); }
+
+#define BM_PREALLOC_DATA(size) { \
+ words_prealloc += size; }
+
+#define BM_MESSAGE(mess,send,rec) { \
+ Uint msize = size_object(mess); \
+ words_sent += msize; \
+ if (msize < 1000) \
+ message_sizes[msize]++; \
+ else \
+ message_sizes[999]++; \
+ BM_EGO_COUNT(send,rec); \
+ BM_COUNT(messages_sent); }
+
+#else /* !BM_MESSAGE_SIZES */
+
+#define BM_MESSAGE_COPIED(size) BM_COUNT(messages_copied);
+#define BM_PREALLOC_DATA(size)
+#define BM_MESSAGE(mess,send,rec) { \
+ BM_EGO_COUNT(send,rec); \
+ BM_COUNT(messages_sent); }
+
+#endif /* BM_MESSAGE_SIZES */
+
+void init_benchmarking(void);
+void save_statistics(void);
+
+#endif /* __BENCHMARK_H__ */
diff --git a/erts/emulator/beam/bif.c b/erts/emulator/beam/bif.c
new file mode 100644
index 0000000000..74b231d56d
--- /dev/null
+++ b/erts/emulator/beam/bif.c
@@ -0,0 +1,4201 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 1996-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+
+#include <stddef.h> /* offsetof() */
+#include "sys.h"
+#include "erl_vm.h"
+#include "erl_sys_driver.h"
+#include "global.h"
+#include "erl_process.h"
+#include "error.h"
+#include "bif.h"
+#include "big.h"
+#include "dist.h"
+#include "erl_version.h"
+#include "erl_binary.h"
+#include "beam_bp.h"
+#include "erl_db_util.h"
+#include "register.h"
+
+static Export* flush_monitor_message_trap = NULL;
+static Export* set_cpu_topology_trap = NULL;
+static Export* await_proc_exit_trap = NULL;
+Export* erts_format_cpu_topology_trap = NULL;
+
+#define DECL_AM(S) Eterm AM_ ## S = am_atom_put(#S, sizeof(#S) - 1)
+
+/*
+ * The BIFs now follow; see the Erlang Manual for a description of what
+ * each individual BIF does.
+ */
+
+BIF_RETTYPE spawn_3(BIF_ALIST_3)
+{
+ ErlSpawnOpts so;
+ Eterm pid;
+
+ so.flags = 0;
+ pid = erl_create_process(BIF_P, BIF_ARG_1, BIF_ARG_2, BIF_ARG_3, &so);
+ if (is_non_value(pid)) {
+ BIF_ERROR(BIF_P, so.error_code);
+ } else {
+ if (ERTS_USE_MODIFIED_TIMING()) {
+ BIF_TRAP2(erts_delay_trap, BIF_P, pid, ERTS_MODIFIED_TIMING_DELAY);
+ }
+ BIF_RET(pid);
+ }
+}
+
+/**********************************************************************/
+
+/* Utility to add a new link between process p and another internal
+ * process (rpid). Process p must be the currently executing process.
+ */
+static int insert_internal_link(Process* p, Eterm rpid)
+{
+ Process *rp;
+ ErtsProcLocks rp_locks = ERTS_PROC_LOCK_LINK;
+
+ ASSERT(is_internal_pid(rpid));
+
+#ifdef ERTS_SMP
+ if (IS_TRACED(p) && (p->trace_flags & (F_TRACE_SOL|F_TRACE_SOL1)))
+ rp_locks = ERTS_PROC_LOCKS_ALL;
+
+ erts_smp_proc_lock(p, ERTS_PROC_LOCK_LINK);
+#endif
+
+ /* get a pointer to the process struct of the linked process */
+ rp = erts_pid2proc_opt(p, ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_LINK,
+ rpid, rp_locks,
+ ERTS_P2P_FLG_ALLOW_OTHER_X);
+
+ if (!rp) {
+ erts_smp_proc_unlock(p, ERTS_PROC_LOCK_LINK);
+ return 0;
+ }
+
+ if (p != rp) {
+ erts_add_link(&(p->nlinks), LINK_PID, rp->id);
+ erts_add_link(&(rp->nlinks), LINK_PID, p->id);
+
+ ASSERT(is_nil(p->tracer_proc)
+ || is_internal_pid(p->tracer_proc)
+ || is_internal_port(p->tracer_proc));
+
+ if (IS_TRACED(p)) {
+ if (p->trace_flags & (F_TRACE_SOL|F_TRACE_SOL1)) {
+ rp->trace_flags |= (p->trace_flags & TRACEE_FLAGS);
+ rp->tracer_proc = p->tracer_proc; /* maybe steal */
+
+ if (p->trace_flags & F_TRACE_SOL1) { /* maybe override */
+ rp->trace_flags &= ~(F_TRACE_SOL1 | F_TRACE_SOL);
+ p->trace_flags &= ~(F_TRACE_SOL1 | F_TRACE_SOL);
+ }
+ }
+ }
+ }
+ if (IS_TRACED_FL(rp, F_TRACE_PROCS))
+ trace_proc(p, rp, am_getting_linked, p->id);
+
+ if (p == rp)
+ erts_smp_proc_unlock(p, rp_locks & ~ERTS_PROC_LOCK_MAIN);
+ else {
+ erts_smp_proc_unlock(p, ERTS_PROC_LOCK_LINK);
+ erts_smp_proc_unlock(rp, rp_locks);
+ }
+
+ return 1;
+}
+
+
+/* create a link to the process */
+BIF_RETTYPE link_1(BIF_ALIST_1)
+{
+ DistEntry *dep;
+
+ if (IS_TRACED_FL(BIF_P, F_TRACE_PROCS)) {
+ trace_proc(BIF_P, BIF_P, am_link, BIF_ARG_1);
+ }
+ /* check that the pid or port which is our argument is OK */
+
+ if (is_internal_pid(BIF_ARG_1)) {
+ if (internal_pid_index(BIF_ARG_1) >= erts_max_processes) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+
+ if (insert_internal_link(BIF_P, BIF_ARG_1)) {
+ BIF_RET(am_true);
+ }
+ else {
+ goto res_no_proc;
+ }
+ }
+
+ if (is_internal_port(BIF_ARG_1)) {
+ Port *pt = erts_id2port(BIF_ARG_1, BIF_P, ERTS_PROC_LOCK_MAIN);
+ if (!pt) {
+ goto res_no_proc;
+ }
+
+ erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_LINK);
+
+ if (erts_add_link(&(BIF_P->nlinks), LINK_PID, BIF_ARG_1) >= 0)
+ erts_add_link(&(pt->nlinks), LINK_PID, BIF_P->id);
+ /* else: already linked */
+
+ erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_LINK);
+ erts_smp_port_unlock(pt);
+ BIF_RET(am_true);
+ }
+ else if (is_external_port(BIF_ARG_1)
+ && external_port_dist_entry(BIF_ARG_1) == erts_this_dist_entry) {
+ goto res_no_proc;
+ }
+
+ if (is_external_pid(BIF_ARG_1)) {
+
+ erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_LINK);
+
+	/* We may save time by first checking that we are not already linked */
+ if (erts_lookup_link(BIF_P->nlinks, BIF_ARG_1) != NULL) {
+ erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_LINK);
+ BIF_RET(am_true);
+ }
+ else {
+ ErtsLink *lnk;
+ int code;
+ ErtsDSigData dsd;
+ dep = external_pid_dist_entry(BIF_ARG_1);
+ if (dep == erts_this_dist_entry) {
+ erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_LINK);
+ goto res_no_proc;
+ }
+
+ code = erts_dsig_prepare(&dsd, dep, BIF_P, ERTS_DSP_RLOCK, 0);
+ switch (code) {
+ case ERTS_DSIG_PREP_NOT_ALIVE:
+ /* Let the dlink trap handle it */
+ case ERTS_DSIG_PREP_NOT_CONNECTED:
+ erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_LINK);
+ BIF_TRAP1(dlink_trap, BIF_P, BIF_ARG_1);
+
+ case ERTS_DSIG_PREP_CONNECTED:
+ /* We are connected. Setup link and send link signal */
+
+ erts_smp_de_links_lock(dep);
+
+ erts_add_link(&(BIF_P->nlinks), LINK_PID, BIF_ARG_1);
+ lnk = erts_add_or_lookup_link(&(dep->nlinks),
+ LINK_PID,
+ BIF_P->id);
+ ASSERT(lnk != NULL);
+ erts_add_link(&ERTS_LINK_ROOT(lnk), LINK_PID, BIF_ARG_1);
+
+ erts_smp_de_links_unlock(dep);
+ erts_smp_de_runlock(dep);
+ erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_LINK);
+
+ code = erts_dsig_send_link(&dsd, BIF_P->id, BIF_ARG_1);
+ if (code == ERTS_DSIG_SEND_YIELD)
+ ERTS_BIF_YIELD_RETURN(BIF_P, am_true);
+ BIF_RET(am_true);
+ default:
+ ASSERT(! "Invalid dsig prepare result");
+ BIF_ERROR(BIF_P, EXC_INTERNAL_ERROR);
+ }
+ }
+ }
+
+ BIF_ERROR(BIF_P, BADARG);
+
+ res_no_proc:
+ if (BIF_P->flags & F_TRAPEXIT) {
+ ErtsProcLocks locks = ERTS_PROC_LOCK_MAIN;
+ erts_deliver_exit_message(BIF_ARG_1, BIF_P, &locks, am_noproc, NIL);
+ erts_smp_proc_unlock(BIF_P, ~ERTS_PROC_LOCK_MAIN & locks);
+ BIF_RET(am_true);
+ }
+ else
+ BIF_ERROR(BIF_P, EXC_NOPROC);
+}
+
+#define ERTS_DEMONITOR_FALSE 2
+#define ERTS_DEMONITOR_TRUE 1
+#define ERTS_DEMONITOR_BADARG 0
+#define ERTS_DEMONITOR_YIELD_TRUE -1
+#define ERTS_DEMONITOR_INTERNAL_ERROR -2
+
+static int
+remote_demonitor(Process *c_p, DistEntry *dep, Eterm ref, Eterm to)
+{
+ ErtsDSigData dsd;
+ ErtsMonitor *dmon;
+ ErtsMonitor *mon;
+ int code;
+ int res;
+#ifndef ERTS_SMP
+ int stale_mon = 0;
+#endif
+
+ ERTS_SMP_LC_ASSERT((ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_LINK)
+ == erts_proc_lc_my_proc_locks(c_p));
+
+ code = erts_dsig_prepare(&dsd, dep, c_p, ERTS_DSP_RLOCK, 0);
+ switch (code) {
+ case ERTS_DSIG_PREP_NOT_ALIVE:
+ case ERTS_DSIG_PREP_NOT_CONNECTED:
+#ifndef ERTS_SMP
+ /* XXX Is this possible? Shouldn't this link
+ previously have been removed if the node
+	   had previously been disconnected? */
+ ASSERT(0);
+ stale_mon = 1;
+#endif
+ /*
+	 * In the SMP case this is possible if the node goes
+ * down just before the call to demonitor.
+ */
+ if (dep) {
+ erts_smp_de_links_lock(dep);
+ dmon = erts_remove_monitor(&dep->monitors, ref);
+ erts_smp_de_links_unlock(dep);
+ if (dmon)
+ erts_destroy_monitor(dmon);
+ }
+ mon = erts_remove_monitor(&c_p->monitors, ref);
+ erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_LINK);
+
+ res = ERTS_DEMONITOR_TRUE;
+ break;
+
+ case ERTS_DSIG_PREP_CONNECTED:
+
+ erts_smp_de_links_lock(dep);
+ mon = erts_remove_monitor(&c_p->monitors, ref);
+ dmon = erts_remove_monitor(&dep->monitors, ref);
+ erts_smp_de_links_unlock(dep);
+ erts_smp_de_runlock(dep);
+ erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_LINK);
+
+ if (!dmon) {
+#ifndef ERTS_SMP
+ /* XXX How is this possible? Shouldn't this link
+ previously have been removed when the distributed
+	       end was removed? */
+ ASSERT(0);
+ stale_mon = 1;
+#endif
+ /*
+	     * This is possible when SMP support is enabled.
+	     * A 'DOWN' message has just arrived.
+ */
+ res = ERTS_DEMONITOR_TRUE;
+ }
+ else {
+ /*
+	     * Soft (no force) send; use ->data in the dist slot's
+	     * monitor list, since for monitors by name the atom is
+	     * stored there. Yield if necessary.
+ */
+ code = erts_dsig_send_demonitor(&dsd,
+ c_p->id,
+ (mon->name != NIL
+ ? mon->name
+ : mon->pid),
+ ref,
+ 0);
+ res = (code == ERTS_DSIG_SEND_YIELD
+ ? ERTS_DEMONITOR_YIELD_TRUE
+ : ERTS_DEMONITOR_TRUE);
+ erts_destroy_monitor(dmon);
+
+ }
+ break;
+ default:
+ ASSERT(! "Invalid dsig prepare result");
+ res = ERTS_DEMONITOR_INTERNAL_ERROR;
+ break;
+ }
+
+#ifndef ERTS_SMP
+ if (stale_mon) {
+ erts_dsprintf_buf_t *dsbufp = erts_create_logger_dsbuf();
+ erts_dsprintf(dsbufp, "Stale process monitor %T to ", ref);
+ if (is_atom(to))
+ erts_dsprintf(dsbufp, "{%T, %T}", to, dep->sysname);
+ else
+ erts_dsprintf(dsbufp, "%T", to);
+ erts_dsprintf(dsbufp, " found\n");
+ erts_send_error_to_logger(c_p->group_leader, dsbufp);
+ }
+#endif
+
+ /*
+ * We aren't allowed to destroy 'mon' until now, since 'to'
+ * may refer into 'mon' (external pid).
+ */
+ ASSERT(mon); /* Since link lock wasn't released between
+ lookup and remove */
+ erts_destroy_monitor(mon);
+
+ ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN == erts_proc_lc_my_proc_locks(c_p));
+ return res;
+}
+
+static int demonitor(Process *c_p, Eterm ref)
+{
+ ErtsMonitor *mon = NULL; /* The monitor entry to delete */
+ Process *rp; /* Local target process */
+    Eterm to = NIL;		   /* Monitor link target */
+ Eterm ref_p; /* Pid of this end */
+ DistEntry *dep = NULL; /* Target's distribution entry */
+ int deref_de = 0;
+ int res;
+ int unlock_link = 1;
+
+
+ erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_LINK);
+
+ if (is_not_internal_ref(ref)) {
+ res = ERTS_DEMONITOR_BADARG;
+ goto done; /* Cannot be this monitor's ref */
+ }
+ ref_p = c_p->id;
+
+ mon = erts_lookup_monitor(c_p->monitors, ref);
+ if (!mon) {
+ res = ERTS_DEMONITOR_FALSE;
+ goto done;
+ }
+
+ if (mon->type != MON_ORIGIN) {
+ res = ERTS_DEMONITOR_BADARG;
+ goto done;
+ }
+ to = mon->pid;
+
+ if (is_atom(to)) {
+ /* Monitoring a name at node to */
+ ASSERT(is_node_name_atom(to));
+ dep = erts_sysname_to_connected_dist_entry(to);
+ ASSERT(dep != erts_this_dist_entry);
+ if (dep)
+ deref_de = 1;
+ } else {
+ ASSERT(is_pid(to));
+ dep = pid_dist_entry(to);
+ }
+ if (dep != erts_this_dist_entry) {
+ res = remote_demonitor(c_p, dep, ref, to);
+ /* remote_demonitor() unlocks link lock on c_p */
+ unlock_link = 0;
+ }
+ else { /* Local monitor */
+ if (deref_de) {
+ deref_de = 0;
+ erts_deref_dist_entry(dep);
+ }
+ dep = NULL;
+ rp = erts_pid2proc_opt(c_p,
+ ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_LINK,
+ to,
+ ERTS_PROC_LOCK_LINK,
+ ERTS_P2P_FLG_ALLOW_OTHER_X);
+ mon = erts_remove_monitor(&c_p->monitors, ref);
+#ifndef ERTS_SMP
+ ASSERT(mon);
+#else
+ if (!mon)
+ res = ERTS_DEMONITOR_FALSE;
+ else
+#endif
+ {
+ res = ERTS_DEMONITOR_TRUE;
+ erts_destroy_monitor(mon);
+ }
+ if (rp) {
+ ErtsMonitor *rmon;
+ rmon = erts_remove_monitor(&(rp->monitors), ref);
+ if (rp != c_p)
+ erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK);
+ if (rmon != NULL)
+ erts_destroy_monitor(rmon);
+ }
+ else {
+ ERTS_SMP_ASSERT_IS_NOT_EXITING(c_p);
+ }
+
+ }
+
+ done:
+
+ if (unlock_link)
+ erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_LINK);
+
+ if (deref_de) {
+ ASSERT(dep);
+ erts_deref_dist_entry(dep);
+ }
+
+ ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN == erts_proc_lc_my_proc_locks(c_p));
+ return res;
+}
+
+BIF_RETTYPE demonitor_1(BIF_ALIST_1)
+{
+ switch (demonitor(BIF_P, BIF_ARG_1)) {
+ case ERTS_DEMONITOR_FALSE:
+ case ERTS_DEMONITOR_TRUE:
+ BIF_RET(am_true);
+ case ERTS_DEMONITOR_YIELD_TRUE:
+ ERTS_BIF_YIELD_RETURN(BIF_P, am_true);
+ case ERTS_DEMONITOR_BADARG:
+ BIF_ERROR(BIF_P, BADARG);
+ case ERTS_DEMONITOR_INTERNAL_ERROR:
+ default:
+ ASSERT(! "demonitor(): internal error");
+ BIF_ERROR(BIF_P, EXC_INTERNAL_ERROR);
+ }
+}
+
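+/*
+ * erlang:demonitor(Ref, Options). The 'info' option makes the BIF
+ * return false when the monitor was not found; the 'flush' option
+ * also removes any {'DOWN', Ref, ...} message from the message queue
+ * (via flush_monitor_message_trap).
+ */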
+BIF_RETTYPE demonitor_2(BIF_ALIST_2)
+{
+ Eterm res = am_true;
+ int info = 0;
+ int flush = 0;
+ Eterm list = BIF_ARG_2;
+
+ while (is_list(list)) {
+ Eterm* consp = list_val(list);
+ switch (CAR(consp)) {
+ case am_flush:
+ flush = 1;
+ break;
+ case am_info:
+ info = 1;
+ break;
+ default:
+ goto badarg;
+ }
+ list = CDR(consp);
+ }
+
+ if (is_not_nil(list))
+ goto badarg;
+
+ switch (demonitor(BIF_P, BIF_ARG_1)) {
+ case ERTS_DEMONITOR_FALSE:
+ if (info)
+ res = am_false;
+ if (flush)
+ BIF_TRAP2(flush_monitor_message_trap, BIF_P, BIF_ARG_1, res);
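+	/* Fall through when no flush was requested. */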
+ case ERTS_DEMONITOR_TRUE:
+ BIF_RET(res);
+ case ERTS_DEMONITOR_YIELD_TRUE:
+ ERTS_BIF_YIELD_RETURN(BIF_P, am_true);
+ case ERTS_DEMONITOR_BADARG:
+ badarg:
+ BIF_ERROR(BIF_P, BADARG);
+ case ERTS_DEMONITOR_INTERNAL_ERROR:
+ default:
+ ASSERT(! "demonitor(): internal error");
+ BIF_ERROR(BIF_P, EXC_INTERNAL_ERROR);
+ }
+}
+
+/* Type must be atomic object! */
+void
+erts_queue_monitor_message(Process *p,
+ ErtsProcLocks *p_locksp,
+ Eterm ref,
+ Eterm type,
+ Eterm item,
+ Eterm reason)
+{
+ Eterm tup;
+ Eterm* hp;
+ Eterm reason_copy, ref_copy, item_copy;
+ Uint reason_size, ref_size, item_size, heap_size;
+ ErlOffHeap *ohp;
+ ErlHeapFragment *bp;
+
+ reason_size = IS_CONST(reason) ? 0 : size_object(reason);
+ item_size = IS_CONST(item) ? 0 : size_object(item);
+ ref_size = size_object(ref);
+
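+    /* 6 words = arity word + the five elements of the 'DOWN' 5-tuple */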
+ heap_size = 6+reason_size+ref_size+item_size;
+
+ hp = erts_alloc_message_heap(heap_size,
+ &bp,
+ &ohp,
+ p,
+ p_locksp);
+
+ reason_copy = (IS_CONST(reason)
+ ? reason
+ : copy_struct(reason, reason_size, &hp, ohp));
+ item_copy = (IS_CONST(item)
+ ? item
+ : copy_struct(item, item_size, &hp, ohp));
+ ref_copy = copy_struct(ref, ref_size, &hp, ohp);
+
+ tup = TUPLE5(hp, am_DOWN, ref_copy, type, item_copy, reason_copy);
+ erts_queue_message(p, p_locksp, bp, tup, NIL);
+}
+
+static BIF_RETTYPE
+local_pid_monitor(Process *p, Eterm target)
+{
+ BIF_RETTYPE ret;
+ Eterm mon_ref;
+ Process *rp;
+ ErtsProcLocks p_locks = ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_LINK;
+
+ mon_ref = erts_make_ref(p);
+ ERTS_BIF_PREP_RET(ret, mon_ref);
+ if (target == p->id) {
+ return ret;
+ }
+
+ erts_smp_proc_lock(p, ERTS_PROC_LOCK_LINK);
+ rp = erts_pid2proc_opt(p, p_locks,
+ target, ERTS_PROC_LOCK_LINK,
+ ERTS_P2P_FLG_ALLOW_OTHER_X);
+ if (!rp) {
+ erts_smp_proc_unlock(p, ERTS_PROC_LOCK_LINK);
+ p_locks &= ~ERTS_PROC_LOCK_LINK;
+ erts_queue_monitor_message(p, &p_locks,
+ mon_ref, am_process, target, am_noproc);
+ }
+ else {
+ ASSERT(rp != p);
+
+ erts_add_monitor(&(p->monitors), MON_ORIGIN, mon_ref, target, NIL);
+ erts_add_monitor(&(rp->monitors), MON_TARGET, mon_ref, p->id, NIL);
+
+ erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK);
+ }
+
+ erts_smp_proc_unlock(p, p_locks & ~ERTS_PROC_LOCK_MAIN);
+
+ return ret;
+}
+
+static BIF_RETTYPE
+local_name_monitor(Process *p, Eterm target_name)
+{
+ BIF_RETTYPE ret;
+ Eterm mon_ref;
+ ErtsProcLocks p_locks = ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_LINK;
+ Process *rp;
+
+ mon_ref = erts_make_ref(p);
+ ERTS_BIF_PREP_RET(ret, mon_ref);
+ erts_smp_proc_lock(p, ERTS_PROC_LOCK_LINK);
+ rp = erts_whereis_process(p, p_locks, target_name, ERTS_PROC_LOCK_LINK,
+ ERTS_P2P_FLG_ALLOW_OTHER_X);
+ if (!rp) {
+ Eterm lhp[3];
+ Eterm item;
+ erts_smp_proc_unlock(p, ERTS_PROC_LOCK_LINK);
+ p_locks &= ~ERTS_PROC_LOCK_LINK;
+ item = TUPLE2(lhp, target_name, erts_this_dist_entry->sysname);
+ erts_queue_monitor_message(p, &p_locks,
+ mon_ref, am_process, item, am_noproc);
+ }
+ else if (rp != p) {
+ erts_add_monitor(&(p->monitors), MON_ORIGIN, mon_ref, rp->id,
+ target_name);
+ erts_add_monitor(&(rp->monitors), MON_TARGET, mon_ref, p->id,
+ target_name);
+ erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK);
+ }
+
+ erts_smp_proc_unlock(p, p_locks & ~ERTS_PROC_LOCK_MAIN);
+
+ return ret;
+}
+
+static BIF_RETTYPE
+remote_monitor(Process *p, Eterm bifarg1, Eterm bifarg2,
+ DistEntry *dep, Eterm target, int byname)
+{
+ ErtsDSigData dsd;
+ BIF_RETTYPE ret;
+ int code;
+
+ erts_smp_proc_lock(p, ERTS_PROC_LOCK_LINK);
+ code = erts_dsig_prepare(&dsd, dep, p, ERTS_DSP_RLOCK, 0);
+ switch (code) {
+ case ERTS_DSIG_PREP_NOT_ALIVE:
+ /* Let the dmonitor_p trap handle it */
+ case ERTS_DSIG_PREP_NOT_CONNECTED:
+ erts_smp_proc_unlock(p, ERTS_PROC_LOCK_LINK);
+ ERTS_BIF_PREP_TRAP2(ret, dmonitor_p_trap, p, bifarg1, bifarg2);
+ break;
+ case ERTS_DSIG_PREP_CONNECTED:
+ if (!(dep->flags & DFLAG_DIST_MONITOR)
+ || (byname && !(dep->flags & DFLAG_DIST_MONITOR_NAME))) {
+ erts_smp_de_runlock(dep);
+ erts_smp_proc_unlock(p, ERTS_PROC_LOCK_LINK);
+ ERTS_BIF_PREP_ERROR(ret, p, BADARG);
+ }
+ else {
+ Eterm p_trgt, p_name, d_name, mon_ref;
+
+ mon_ref = erts_make_ref(p);
+
+ if (byname) {
+ p_trgt = dep->sysname;
+ p_name = target;
+ d_name = target;
+ }
+ else {
+ p_trgt = target;
+ p_name = NIL;
+ d_name = NIL;
+ }
+
+ erts_smp_de_links_lock(dep);
+
+ erts_add_monitor(&(p->monitors), MON_ORIGIN, mon_ref, p_trgt,
+ p_name);
+ erts_add_monitor(&(dep->monitors), MON_TARGET, mon_ref, p->id,
+ d_name);
+
+ erts_smp_de_links_unlock(dep);
+ erts_smp_de_runlock(dep);
+ erts_smp_proc_unlock(p, ERTS_PROC_LOCK_LINK);
+
+ code = erts_dsig_send_monitor(&dsd, p->id, target, mon_ref);
+ if (code == ERTS_DSIG_SEND_YIELD)
+ ERTS_BIF_PREP_YIELD_RETURN(ret, p, mon_ref);
+ else
+ ERTS_BIF_PREP_RET(ret, mon_ref);
+ }
+ break;
+ default:
+ ASSERT(! "Invalid dsig prepare result");
+ ERTS_BIF_PREP_ERROR(ret, p, EXC_INTERNAL_ERROR);
+ break;
+ }
+
+ return ret;
+}
+
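+/*
+ * erlang:monitor/2. The monitored target may be a local or remote pid,
+ * a locally registered name (an atom), or a {RegName, Node} tuple.
+ */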
+BIF_RETTYPE monitor_2(BIF_ALIST_2)
+{
+ Eterm target = BIF_ARG_2;
+ BIF_RETTYPE ret;
+ DistEntry *dep = NULL;
+ int deref_de = 0;
+
+ /* Only process monitors are implemented */
+ if (BIF_ARG_1 != am_process) {
+ goto error;
+ }
+
+ if (is_internal_pid(target)) {
+ local_pid:
+ ret = local_pid_monitor(BIF_P, target);
+ } else if (is_external_pid(target)) {
+ dep = external_pid_dist_entry(target);
+ if (dep == erts_this_dist_entry)
+ goto local_pid;
+ ret = remote_monitor(BIF_P, BIF_ARG_1, BIF_ARG_2, dep, target, 0);
+ } else if (is_atom(target)) {
+ ret = local_name_monitor(BIF_P, target);
+ } else if (is_tuple(target)) {
+ Eterm *tp = tuple_val(target);
+ Eterm remote_node;
+ Eterm name;
+ if (arityval(*tp) != 2)
+ goto error;
+ remote_node = tp[2];
+ name = tp[1];
+ if (!is_atom(remote_node) || !is_atom(name)) {
+ goto error;
+ }
+ if (!erts_is_alive && remote_node != am_Noname) {
+ goto error; /* Remote monitor from (this) undistributed node */
+ }
+ dep = erts_sysname_to_connected_dist_entry(remote_node);
+ if (dep == erts_this_dist_entry) {
+ deref_de = 1;
+ ret = local_name_monitor(BIF_P, name);
+ } else {
+ if (dep)
+ deref_de = 1;
+ ret = remote_monitor(BIF_P, BIF_ARG_1, BIF_ARG_2, dep, name, 1);
+ }
+ } else {
+ error:
+ ERTS_BIF_PREP_ERROR(ret, BIF_P, BADARG);
+ }
+ if (deref_de) {
+ deref_de = 0;
+ erts_deref_dist_entry(dep);
+ }
+
+ return ret;
+}
+
+
+/**********************************************************************/
+/* this is a combination of the spawn and link BIFs */
+
+BIF_RETTYPE spawn_link_3(BIF_ALIST_3)
+{
+ ErlSpawnOpts so;
+ Eterm pid;
+
+ so.flags = SPO_LINK;
+ pid = erl_create_process(BIF_P, BIF_ARG_1, BIF_ARG_2, BIF_ARG_3, &so);
+ if (is_non_value(pid)) {
+ BIF_ERROR(BIF_P, so.error_code);
+ } else {
+ if (ERTS_USE_MODIFIED_TIMING()) {
+ BIF_TRAP2(erts_delay_trap, BIF_P, pid, ERTS_MODIFIED_TIMING_DELAY);
+ }
+ BIF_RET(pid);
+ }
+}
+
+/**********************************************************************/
+
+BIF_RETTYPE spawn_opt_1(BIF_ALIST_1)
+{
+ ErlSpawnOpts so;
+ Eterm pid;
+ Eterm* tp;
+ Eterm ap;
+ Eterm arg;
+ Eterm res;
+
+ /*
+ * Check that the first argument is a tuple of four elements.
+ */
+ if (is_not_tuple(BIF_ARG_1)) {
+ error:
+ BIF_ERROR(BIF_P, BADARG);
+ }
+ tp = tuple_val(BIF_ARG_1);
+ if (*tp != make_arityval(4))
+ goto error;
+
+ /*
+ * Store default values for options.
+ */
+ so.flags = SPO_USE_ARGS;
+ so.min_heap_size = H_MIN_SIZE;
+ so.priority = PRIORITY_NORMAL;
+ so.max_gen_gcs = (Uint16) erts_smp_atomic_read(&erts_max_gen_gcs);
+ so.scheduler = 0;
+
+ /*
+ * Walk through the option list.
+ */
+ ap = tp[4];
+ while (is_list(ap)) {
+ arg = CAR(list_val(ap));
+ if (arg == am_link) {
+ so.flags |= SPO_LINK;
+ } else if (arg == am_monitor) {
+ so.flags |= SPO_MONITOR;
+ } else if (is_tuple(arg)) {
+ Eterm* tp2 = tuple_val(arg);
+ Eterm val;
+ if (*tp2 != make_arityval(2))
+ goto error;
+ arg = tp2[1];
+ val = tp2[2];
+ if (arg == am_priority) {
+ if (val == am_max)
+ so.priority = PRIORITY_MAX;
+ else if (val == am_high)
+ so.priority = PRIORITY_HIGH;
+ else if (val == am_normal)
+ so.priority = PRIORITY_NORMAL;
+ else if (val == am_low)
+ so.priority = PRIORITY_LOW;
+ else
+ goto error;
+ } else if (arg == am_min_heap_size && is_small(val)) {
+ Sint min_heap_size = signed_val(val);
+ if (min_heap_size < 0) {
+ goto error;
+ } else if (min_heap_size < H_MIN_SIZE) {
+ so.min_heap_size = H_MIN_SIZE;
+ } else {
+ so.min_heap_size = erts_next_heap_size(min_heap_size, 0);
+ }
+ } else if (arg == am_fullsweep_after && is_small(val)) {
+ Sint max_gen_gcs = signed_val(val);
+ if (max_gen_gcs < 0) {
+ goto error;
+ } else {
+ so.max_gen_gcs = max_gen_gcs;
+ }
+ } else if (arg == am_scheduler && is_small(val)) {
+ Sint scheduler = signed_val(val);
+ if (erts_common_run_queue && erts_no_schedulers > 1)
+ goto error;
+ if (scheduler < 0 || erts_no_schedulers < scheduler)
+ goto error;
+ so.scheduler = (int) scheduler;
+ } else {
+ goto error;
+ }
+ } else {
+ goto error;
+ }
+ ap = CDR(list_val(ap));
+ }
+ if (is_not_nil(ap)) {
+ goto error;
+ }
+
+ /*
+ * Spawn the process.
+ */
+ pid = erl_create_process(BIF_P, tp[1], tp[2], tp[3], &so);
+ if (is_non_value(pid)) {
+ BIF_ERROR(BIF_P, so.error_code);
+ } else if (so.flags & SPO_MONITOR) {
+ Eterm* hp = HAlloc(BIF_P, 3);
+ res = TUPLE2(hp, pid, so.mref);
+ } else {
+ res = pid;
+ }
+
+ if (ERTS_USE_MODIFIED_TIMING()) {
+ BIF_TRAP2(erts_delay_trap, BIF_P, res, ERTS_MODIFIED_TIMING_DELAY);
+ }
+ else {
+ BIF_RET(res);
+ }
+}
+
+
+/**********************************************************************/
+/* remove a link from a process */
+BIF_RETTYPE unlink_1(BIF_ALIST_1)
+{
+ Process *rp;
+ DistEntry *dep;
+ ErtsLink *l = NULL, *rl = NULL;
+
+ /*
+ * SMP-specific note concerning incoming exit signals:
+ * We must hold at least the status lock while removing the link
+ * half on the current process, and check for and handle a pending
+ * exit while the status lock is held. This is in order to ensure
+ * that we won't be exited by a link after it has been removed.
+ *
+ * (We also have to have the link lock, of course, in order to
+ * be allowed to remove the link...)
+ */
+
+ if (IS_TRACED_FL(BIF_P, F_TRACE_PROCS)) {
+ trace_proc(BIF_P, BIF_P, am_unlink, BIF_ARG_1);
+ }
+
+ if (is_internal_port(BIF_ARG_1)) {
+ Port *pt = erts_id2port_sflgs(BIF_ARG_1,
+ BIF_P,
+ ERTS_PROC_LOCK_MAIN,
+ ERTS_PORT_SFLGS_DEAD);
+
+ erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_LINK|ERTS_PROC_LOCK_STATUS);
+#ifdef ERTS_SMP
+ if (ERTS_PROC_PENDING_EXIT(BIF_P)) {
+ if (pt)
+ erts_smp_port_unlock(pt);
+ goto handle_pending_exit;
+ }
+#endif
+
+ l = erts_remove_link(&BIF_P->nlinks, BIF_ARG_1);
+
+ ASSERT(pt || !l);
+
+ if (pt) {
+ rl = erts_remove_link(&pt->nlinks, BIF_P->id);
+ erts_smp_port_unlock(pt);
+ if (rl)
+ erts_destroy_link(rl);
+ }
+
+ erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_LINK|ERTS_PROC_LOCK_STATUS);
+
+ if (l)
+ erts_destroy_link(l);
+
+ BIF_RET(am_true);
+ }
+ else if (is_external_port(BIF_ARG_1)
+ && external_port_dist_entry(BIF_ARG_1) == erts_this_dist_entry) {
+ BIF_RET(am_true);
+ }
+
+ if (is_not_pid(BIF_ARG_1))
+ BIF_ERROR(BIF_P, BADARG);
+
+ if (is_external_pid(BIF_ARG_1)) {
+ ErtsDistLinkData dld;
+ int code;
+ ErtsDSigData dsd;
+ /* Blind removal: we might have trapped or anything, so this leaves
+ us in a state where monitors might be inconsistent, but the dist
+ code should take care of it. */
+ erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_LINK|ERTS_PROC_LOCK_STATUS);
+#ifdef ERTS_SMP
+ if (ERTS_PROC_PENDING_EXIT(BIF_P))
+ goto handle_pending_exit;
+#endif
+ l = erts_remove_link(&BIF_P->nlinks,BIF_ARG_1);
+
+ erts_smp_proc_unlock(BIF_P,
+ ERTS_PROC_LOCK_LINK|ERTS_PROC_LOCK_STATUS);
+
+ if (l)
+ erts_destroy_link(l);
+
+ dep = external_pid_dist_entry(BIF_ARG_1);
+ if (dep == erts_this_dist_entry) {
+ BIF_RET(am_true);
+ }
+
+ code = erts_dsig_prepare(&dsd, dep, BIF_P, ERTS_DSP_NO_LOCK, 0);
+ switch (code) {
+ case ERTS_DSIG_PREP_NOT_ALIVE:
+ case ERTS_DSIG_PREP_NOT_CONNECTED:
+#if 1
+ BIF_RET(am_true);
+#else
+ /*
+ * This is how we used to do it, but the link is obviously not
+ * active, so I see no point in setting up a connection.
+ * /Rickard
+ */
+ BIF_TRAP1(dunlink_trap, BIF_P, BIF_ARG_1);
+#endif
+
+ case ERTS_DSIG_PREP_CONNECTED:
+ erts_remove_dist_link(&dld, BIF_P->id, BIF_ARG_1, dep);
+ code = erts_dsig_send_unlink(&dsd, BIF_P->id, BIF_ARG_1);
+ erts_destroy_dist_link(&dld);
+ if (code == ERTS_DSIG_SEND_YIELD)
+ ERTS_BIF_YIELD_RETURN(BIF_P, am_true);
+ BIF_RET(am_true);
+
+ default:
+ ASSERT(! "Invalid dsig prepare result");
+ BIF_ERROR(BIF_P, EXC_INTERNAL_ERROR);
+ }
+ }
+
+ /* Internal pid... */
+
+ /* process ok ? */
+ if (internal_pid_index(BIF_ARG_1) >= erts_max_processes)
+ BIF_ERROR(BIF_P, BADARG);
+
+ erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_LINK|ERTS_PROC_LOCK_STATUS);
+
+ /* get process struct */
+ rp = erts_pid2proc_opt(BIF_P, (ERTS_PROC_LOCK_MAIN
+ | ERTS_PROC_LOCK_LINK
+ | ERTS_PROC_LOCK_STATUS),
+ BIF_ARG_1, ERTS_PROC_LOCK_LINK,
+ ERTS_P2P_FLG_ALLOW_OTHER_X);
+
+#ifdef ERTS_SMP
+ if (ERTS_PROC_PENDING_EXIT(BIF_P)) {
+ if (rp && rp != BIF_P)
+ erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK);
+ goto handle_pending_exit;
+ }
+#endif
+
+ /* unlink and ignore errors */
+ l = erts_remove_link(&BIF_P->nlinks,BIF_ARG_1);
+ if (l != NULL)
+ erts_destroy_link(l);
+
+ if (!rp) {
+ ERTS_SMP_ASSERT_IS_NOT_EXITING(BIF_P);
+ }
+ else {
+ rl = erts_remove_link(&(rp->nlinks),BIF_P->id);
+ if (rl != NULL)
+ erts_destroy_link(rl);
+
+ if (IS_TRACED_FL(rp, F_TRACE_PROCS) && rl != NULL) {
+ trace_proc(BIF_P, rp, am_getting_unlinked, BIF_P->id);
+ }
+
+ if (rp != BIF_P)
+ erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK);
+ }
+
+ erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_LINK|ERTS_PROC_LOCK_STATUS);
+
+ BIF_RET(am_true);
+
+#ifdef ERTS_SMP
+ handle_pending_exit:
+ erts_handle_pending_exit(BIF_P, (ERTS_PROC_LOCK_MAIN
+ | ERTS_PROC_LOCK_LINK
+ | ERTS_PROC_LOCK_STATUS));
+ ASSERT(ERTS_PROC_IS_EXITING(BIF_P));
+ erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_LINK|ERTS_PROC_LOCK_STATUS);
+ ERTS_BIF_EXITED(BIF_P);
+#endif
+}
+
+BIF_RETTYPE hibernate_3(BIF_ALIST_3)
+{
+ /*
+ * hibernate/3 is implemented as an instruction; therefore
+ * this function will never be called.
+ */
+ BIF_ERROR(BIF_P, BADARG);
+}
+
+/**********************************************************************/
+
+BIF_RETTYPE get_stacktrace_0(Process* p)
+{
+ Eterm t = build_stacktrace(p, p->ftrace);
+ BIF_RET(t);
+}
+
+/**********************************************************************/
+/*
+ * This is like exit/1, except that errors are logged if they terminate
+ * the process, and the final error value will be {Term,StackTrace}.
+ */
+
+BIF_RETTYPE error_1(Process* p, Eterm term)
+{
+ p->fvalue = term;
+ BIF_ERROR(p, EXC_ERROR);
+}
+
+/**********************************************************************/
+/*
+ * This is like error/1, except that the given 'args' will be included
+ * in the stacktrace.
+ */
+
+BIF_RETTYPE error_2(Process* p, Eterm value, Eterm args)
+{
+ Eterm* hp = HAlloc(p, 3);
+
+ p->fvalue = TUPLE2(hp, value, args);
+ BIF_ERROR(p, EXC_ERROR_2);
+}
+
+/**********************************************************************/
+/* this is like throw/1 except that we set freason to EXC_EXIT */
+
+BIF_RETTYPE exit_1(BIF_ALIST_1)
+{
+ BIF_P->fvalue = BIF_ARG_1; /* exit value */
+ BIF_ERROR(BIF_P, EXC_EXIT);
+}
+
+
+/**********************************************************************/
+/* raise an exception of given class, value and stacktrace.
+ *
+ * If there is an error in the argument format,
+ * return the atom 'badarg' instead.
+ */
+Eterm
+raise_3(Process *c_p, Eterm class, Eterm value, Eterm stacktrace) {
+ Eterm reason;
+ Eterm l, *hp, *hp_end, *tp;
+ int depth, cnt;
+ size_t sz;
+ struct StackTrace *s;
+
+ if (class == am_error) {
+ c_p->fvalue = value;
+ reason = EXC_ERROR;
+ } else if (class == am_exit) {
+ c_p->fvalue = value;
+ reason = EXC_EXIT;
+ } else if (class == am_throw) {
+ c_p->fvalue = value;
+ reason = EXC_THROWN;
+ } else goto error;
+ reason &= ~EXF_SAVETRACE;
+
+ /* Check syntax of stacktrace, and count depth.
+ * Accept anything that can be returned from erlang:get_stacktrace/0,
+ * as well as a 2-tuple with a fun as first element that the
+ * error_handler may need to give us.
+ */
+ for (l = stacktrace, depth = 0;
+ is_list(l);
+ l = CDR(list_val(l)), depth++) {
+ Eterm t = CAR(list_val(l));
+ int arity;
+ if (is_not_tuple(t)) goto error;
+ tp = tuple_val(t);
+ arity = arityval(tp[0]);
+ if ((arity == 3) && is_atom(tp[1]) && is_atom(tp[2])) continue;
+ if ((arity == 2) && is_fun(tp[1])) continue;
+ goto error;
+ }
+ if (is_not_nil(l)) goto error;
+
+ /* Create stacktrace and store */
+ if (depth <= erts_backtrace_depth) {
+ cnt = 0;
+ c_p->ftrace = stacktrace;
+ } else {
+ cnt = depth = erts_backtrace_depth;
+ c_p->ftrace = NIL;
+ }
+ tp = &c_p->ftrace;
+ sz = (offsetof(struct StackTrace, trace) + sizeof(Eterm) - 1)
+ / sizeof(Eterm);
+ hp = HAlloc(c_p, sz + 2*(cnt + 1));
+ hp_end = hp + sz + 2*(cnt + 1);
+ s = (struct StackTrace *) hp;
+ s->header = make_neg_bignum_header(sz - 1);
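+ /* Note: the header word makes the StackTrace struct look like a
+ * bignum on the heap, so the GC treats its body as raw data and
+ * copies it verbatim; make_big() below then tags a pointer to it. */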
+ s->freason = reason;
+ s->pc = NULL;
+ s->current = NULL;
+ s->depth = 0;
+ hp += sz;
+ if (cnt > 0) {
+ /* Copy list up to depth */
+ for (cnt = 0, l = stacktrace;
+ cnt < depth;
+ cnt++, l = CDR(list_val(l))) {
+ ASSERT(*tp == NIL);
+ *tp = CONS(hp, CAR(list_val(l)), *tp);
+ tp = &CDR(list_val(*tp));
+ hp += 2;
+ }
+ }
+ c_p->ftrace = CONS(hp, c_p->ftrace, make_big((Eterm *) s));
+ hp += 2;
+ ASSERT(hp <= hp_end);
+
+ BIF_ERROR(c_p, reason);
+
+ error:
+ return am_badarg;
+}
+
+/**********************************************************************/
+/* send an exit message to another process (if trapping exits) or
+ exit the other process */
+
+BIF_RETTYPE exit_2(BIF_ALIST_2)
+{
+ Process *rp;
+
+ /*
+ * If the first argument is not a pid or a local port, it is an error.
+ */
+
+ if (is_internal_port(BIF_ARG_1)) {
+ Port *prt;
+ erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ prt = erts_id2port(BIF_ARG_1, NULL, 0);
+ if (prt) {
+ erts_do_exit_port(prt, BIF_P->id, BIF_ARG_2);
+ erts_port_release(prt);
+ }
+ erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ ERTS_BIF_CHK_EXITED(BIF_P);
+ BIF_RET(am_true);
+ }
+ else if(is_external_port(BIF_ARG_1)
+ && external_port_dist_entry(BIF_ARG_1) == erts_this_dist_entry)
+ BIF_RET(am_true);
+
+ /*
+ * If it is a remote pid, send a signal to the remote node.
+ */
+
+ if (is_external_pid(BIF_ARG_1)) {
+ int code;
+ ErtsDSigData dsd;
+ DistEntry *dep;
+
+ dep = external_pid_dist_entry(BIF_ARG_1);
+ if(dep == erts_this_dist_entry)
+ BIF_RET(am_true);
+
+ code = erts_dsig_prepare(&dsd, dep, BIF_P, ERTS_DSP_NO_LOCK, 0);
+ switch (code) {
+ case ERTS_DSIG_PREP_NOT_ALIVE:
+ case ERTS_DSIG_PREP_NOT_CONNECTED:
+ BIF_TRAP2(dexit_trap, BIF_P, BIF_ARG_1, BIF_ARG_2);
+ case ERTS_DSIG_PREP_CONNECTED:
+ code = erts_dsig_send_exit2(&dsd, BIF_P->id, BIF_ARG_1, BIF_ARG_2);
+ if (code == ERTS_DSIG_SEND_YIELD)
+ ERTS_BIF_YIELD_RETURN(BIF_P, am_true);
+ BIF_RET(am_true);
+ default:
+ ASSERT(! "Invalid dsig prepare result");
+ BIF_ERROR(BIF_P, EXC_INTERNAL_ERROR);
+ }
+ }
+ else if (is_not_internal_pid(BIF_ARG_1)) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+ else {
+ /*
+ * The pid is internal. Verify that it refers to an existing process.
+ */
+ ErtsProcLocks rp_locks;
+
+ if (internal_pid_index(BIF_ARG_1) >= erts_max_processes)
+ BIF_ERROR(BIF_P, BADARG);
+ if (BIF_ARG_1 == BIF_P->id) {
+ rp_locks = ERTS_PROC_LOCKS_ALL;
+ rp = BIF_P;
+ erts_smp_proc_lock(rp, ERTS_PROC_LOCKS_ALL_MINOR);
+ }
+ else {
+ rp_locks = ERTS_PROC_LOCKS_XSIG_SEND;
+ rp = erts_pid2proc_opt(BIF_P, ERTS_PROC_LOCK_MAIN,
+ BIF_ARG_1, rp_locks,
+ ERTS_P2P_FLG_SMP_INC_REFC);
+ if (!rp) {
+ BIF_RET(am_true);
+ }
+ }
+
+ /*
+ * Send an exit signal.
+ */
+ erts_send_exit_signal(BIF_P,
+ BIF_P->id,
+ rp,
+ &rp_locks,
+ BIF_ARG_2,
+ NIL,
+ NULL,
+ BIF_P == rp ? ERTS_XSIG_FLG_NO_IGN_NORMAL : 0);
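+ /* A 'normal' exit signal is ignored by non-trapping receivers, but
+ * exit(self(), normal) must still terminate the caller; hence the
+ * NO_IGN_NORMAL flag when the target is ourselves. */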
+#ifdef ERTS_SMP
+ if (rp == BIF_P)
+ rp_locks &= ~ERTS_PROC_LOCK_MAIN;
+ else
+ erts_smp_proc_dec_refc(rp);
+ erts_smp_proc_unlock(rp, rp_locks);
+#endif
+ /*
+ * We may have exited ourselves and may have to take action.
+ */
+ ERTS_BIF_CHK_EXITED(BIF_P);
+ BIF_RET(am_true);
+ }
+}
+
+/**********************************************************************/
+/* this sets some process info: trapping exits or the error handler */
+
+
+/* Handle flags common to both process_flag_2 and process_flag_3. */
+static BIF_RETTYPE process_flag_aux(Process *BIF_P,
+ Process *rp,
+ Eterm flag,
+ Eterm val)
+{
+ Eterm old_value = NIL; /* shut up warning about use before set */
+ Sint i;
+ if (flag == am_save_calls) {
+ struct saved_calls *scb;
+ if (!is_small(val))
+ goto error;
+ i = signed_val(val);
+ if (i < 0 || i > 10000)
+ goto error;
+
+ if (i == 0)
+ scb = NULL;
+ else {
+ Uint sz = sizeof(*scb) + (i-1) * sizeof(scb->ct[0]);
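+ /* sizeof(*scb) already includes one ct[] slot (the struct ends in
+ * a one-element array), so only i-1 extra slots are added here. */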
+ scb = erts_alloc(ERTS_ALC_T_CALLS_BUF, sz);
+ scb->len = i;
+ scb->cur = 0;
+ scb->n = 0;
+ }
+
+ scb = ERTS_PROC_SET_SAVED_CALLS_BUF(rp, ERTS_PROC_LOCK_MAIN, scb);
+
+ if (!scb)
+ old_value = make_small(0);
+ else {
+ old_value = make_small(scb->len);
+ erts_free(ERTS_ALC_T_CALLS_BUF, (void *) scb);
+ }
+
+ /* Make sure the process in question is rescheduled
+ immediately, if it's us, so the call saving takes effect. */
+ if (rp == BIF_P)
+ BIF_RET2(old_value, CONTEXT_REDS);
+ else
+ BIF_RET(old_value);
+ }
+
+ error:
+ BIF_ERROR(BIF_P, BADARG);
+}
+
+BIF_RETTYPE process_flag_2(BIF_ALIST_2)
+{
+ Eterm old_value;
+ if (BIF_ARG_1 == am_error_handler) {
+ if (is_not_atom(BIF_ARG_2)) {
+ goto error;
+ }
+ old_value = erts_proc_set_error_handler(BIF_P,
+ ERTS_PROC_LOCK_MAIN,
+ BIF_ARG_2);
+ BIF_RET(old_value);
+ }
+ else if (BIF_ARG_1 == am_priority) {
+ erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_STATUS);
+ old_value = erts_set_process_priority(BIF_P, BIF_ARG_2);
+ erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_STATUS);
+ if (old_value == THE_NON_VALUE)
+ goto error;
+ BIF_RET(old_value);
+ }
+ else if (BIF_ARG_1 == am_trap_exit) {
+ Uint trap_exit;
+ if (BIF_ARG_2 == am_true) {
+ trap_exit = 1;
+ } else if (BIF_ARG_2 == am_false) {
+ trap_exit = 0;
+ } else {
+ goto error;
+ }
+ /*
+ * NOTE: It is important that we check for pending exit signals
+ * and handle them before flag trap_exit is set to true.
+ * For more info, see implementation of erts_send_exit_signal().
+ */
+ erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_STATUS);
+ ERTS_SMP_BIF_CHK_PENDING_EXIT(BIF_P,
+ ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_STATUS);
+ old_value = ERTS_PROC_IS_TRAPPING_EXITS(BIF_P) ? am_true : am_false;
+ if (trap_exit) {
+ ERTS_PROC_SET_TRAP_EXIT(BIF_P);
+ } else {
+ ERTS_PROC_UNSET_TRAP_EXIT(BIF_P);
+ }
+ erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_STATUS);
+ BIF_RET(old_value);
+ }
+ else if (BIF_ARG_1 == am_scheduler) {
+ int yield;
+ ErtsRunQueue *old;
+ ErtsRunQueue *new;
+ Sint sched;
+ if (erts_common_run_queue && erts_no_schedulers > 1)
+ goto error;
+ if (!is_small(BIF_ARG_2))
+ goto error;
+ sched = signed_val(BIF_ARG_2);
+ if (sched < 0 || erts_no_schedulers < sched)
+ goto error;
+ erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_STATUS);
+ old = BIF_P->bound_runq;
+#ifdef ERTS_SMP
+ ASSERT(!old || old == BIF_P->run_queue);
+#endif
+ new = !sched ? NULL : erts_schedid2runq(sched);
+#ifndef ERTS_SMP
+ yield = 0;
+#else
+ if (new == old)
+ yield = 0;
+ else {
+ ErtsRunQueue *curr = BIF_P->run_queue;
+ if (!new)
+ erts_smp_runq_lock(curr);
+ else
+ erts_smp_runqs_lock(curr, new);
+ yield = new && BIF_P->run_queue != new;
+#endif
+ BIF_P->bound_runq = new;
+#ifdef ERTS_SMP
+ if (new)
+ BIF_P->run_queue = new;
+ if (!new)
+ erts_smp_runq_unlock(curr);
+ else
+ erts_smp_runqs_unlock(curr, new);
+ }
+#endif
+ erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_STATUS);
+ old_value = old ? make_small(old->ix+1) : make_small(0);
+ if (yield)
+ ERTS_BIF_YIELD_RETURN_X(BIF_P, old_value, am_scheduler);
+ else
+ BIF_RET(old_value);
+ }
+ else if (BIF_ARG_1 == am_min_heap_size) {
+ Sint i;
+ if (!is_small(BIF_ARG_2)) {
+ goto error;
+ }
+ i = signed_val(BIF_ARG_2);
+ if (i < 0) {
+ goto error;
+ }
+ old_value = make_small(BIF_P->min_heap_size);
+ if (i < H_MIN_SIZE) {
+ BIF_P->min_heap_size = H_MIN_SIZE;
+ } else {
+ BIF_P->min_heap_size = erts_next_heap_size(i, 0);
+ }
+ BIF_RET(old_value);
+ }
+ else if (BIF_ARG_1 == am_sensitive) {
+ Uint is_sensitive;
+ if (BIF_ARG_2 == am_true) {
+ is_sensitive = 1;
+ } else if (BIF_ARG_2 == am_false) {
+ is_sensitive = 0;
+ } else {
+ goto error;
+ }
+ erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCKS_ALL_MINOR);
+ old_value = BIF_P->trace_flags & F_SENSITIVE ? am_true : am_false;
+ if (is_sensitive) {
+ BIF_P->trace_flags |= F_SENSITIVE;
+ } else {
+ BIF_P->trace_flags &= ~F_SENSITIVE;
+ }
+ erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCKS_ALL_MINOR);
+ BIF_RET(old_value);
+ }
+ else if (BIF_ARG_1 == am_monitor_nodes) {
+ /*
+ * This argument is intentionally *not* documented. It is intended
+ * to be used by net_kernel:monitor_nodes/1.
+ */
+ old_value = erts_monitor_nodes(BIF_P, BIF_ARG_2, NIL);
+ if (old_value == THE_NON_VALUE)
+ goto error;
+ BIF_RET(old_value);
+ }
+ else if (is_tuple(BIF_ARG_1)) {
+ /*
+ * This argument is intentionally *not* documented. It is intended
+ * to be used by net_kernel:monitor_nodes/2.
+ */
+ Eterm *tp = tuple_val(BIF_ARG_1);
+ if (arityval(tp[0]) == 2) {
+ if (tp[1] == am_monitor_nodes) {
+ old_value = erts_monitor_nodes(BIF_P, BIF_ARG_2, tp[2]);
+ if (old_value == THE_NON_VALUE)
+ goto error;
+ BIF_RET(old_value);
+ }
+ }
+ /* Fall through and try process_flag_aux() ... */
+ }
+
+ BIF_RET(process_flag_aux(BIF_P, BIF_P, BIF_ARG_1, BIF_ARG_2));
+ error:
+ BIF_ERROR(BIF_P, BADARG);
+}
+
+BIF_RETTYPE process_flag_3(BIF_ALIST_3)
+{
+ Process *rp;
+ Eterm res;
+
+ if ((rp = erts_pid2proc(BIF_P, ERTS_PROC_LOCK_MAIN,
+ BIF_ARG_1, ERTS_PROC_LOCK_MAIN)) == NULL) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+
+ res = process_flag_aux(BIF_P, rp, BIF_ARG_2, BIF_ARG_3);
+
+ if (rp != BIF_P)
+ erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_MAIN);
+
+ return res;
+}
+
+/**********************************************************************/
+
+/* register(atom, Process|Port) registers a global process or port
+ (for this node) */
+
+BIF_RETTYPE register_2(BIF_ALIST_2) /* (Atom, Pid|Port) */
+{
+ if (erts_register_name(BIF_P, BIF_ARG_1, BIF_ARG_2))
+ BIF_RET(am_true);
+ else {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+}
+
+
+/**********************************************************************/
+
+/* removes the registration of a process or port */
+
+BIF_RETTYPE unregister_1(BIF_ALIST_1)
+{
+ int res;
+ if (is_not_atom(BIF_ARG_1)) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+ res = erts_unregister_name(BIF_P, ERTS_PROC_LOCK_MAIN, NULL, BIF_ARG_1);
+ if (res == 0) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+ BIF_RET(am_true);
+}
+
+/**********************************************************************/
+
+/* find out the pid of a registered process */
+/* this is a rather unsafe BIF as it allows users to do nasty things. */
+
+BIF_RETTYPE whereis_1(BIF_ALIST_1)
+{
+ Eterm res;
+
+ if (is_not_atom(BIF_ARG_1)) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+ res = erts_whereis_name_to_id(BIF_P, BIF_ARG_1);
+ BIF_RET(res);
+}
+
+/**********************************************************************/
+
+/*
+ * erlang:'!'/2
+ */
+
+Eterm
+ebif_bang_2(Process* p, Eterm To, Eterm Message)
+{
+ return send_2(p, To, Message);
+}
+
+
+/*
+ * Send a message to Process, Port or Registered Process.
+ * Returns non-negative reduction bump or negative result code.
+ */
+#define SEND_TRAP (-1)
+#define SEND_YIELD (-2)
+#define SEND_YIELD_RETURN (-3)
+#define SEND_BADARG (-4)
+#define SEND_USER_ERROR (-5)
+#define SEND_INTERNAL_ERROR (-6)
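+/* send_2() and send_3() below translate these into BIF_TRAP*,
+   ERTS_BIF_YIELD*, or BIF_ERROR; a result > 0 is a reduction bump. */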
+
+Sint do_send(Process *p, Eterm to, Eterm msg, int suspend);
+
+static Sint remote_send(Process *p, DistEntry *dep,
+ Eterm to, Eterm full_to, Eterm msg, int suspend)
+{
+ Sint res;
+ int code;
+ ErtsDSigData dsd;
+
+ ASSERT(is_atom(to) || is_external_pid(to));
+
+ code = erts_dsig_prepare(&dsd, dep, p, ERTS_DSP_NO_LOCK, !suspend);
+ switch (code) {
+ case ERTS_DSIG_PREP_NOT_ALIVE:
+ case ERTS_DSIG_PREP_NOT_CONNECTED:
+ res = SEND_TRAP;
+ break;
+ case ERTS_DSIG_PREP_WOULD_SUSPEND:
+ ASSERT(!suspend);
+ res = SEND_YIELD;
+ break;
+ case ERTS_DSIG_PREP_CONNECTED: {
+
+ if (is_atom(to))
+ code = erts_dsig_send_reg_msg(&dsd, to, msg);
+ else
+ code = erts_dsig_send_msg(&dsd, to, msg);
+ /*
+ * Note that reductions have been bumped on calling
+ * process by erts_dsig_send_reg_msg() or
+ * erts_dsig_send_msg().
+ */
+ if (code == ERTS_DSIG_SEND_YIELD)
+ res = SEND_YIELD_RETURN;
+ else
+ res = 0;
+ break;
+ }
+ default:
+ ASSERT(! "Invalid dsig prepare result");
+ res = SEND_INTERNAL_ERROR;
+ }
+
+ if (res >= 0) {
+ if (IS_TRACED(p))
+ trace_send(p, full_to, msg);
+ if (ERTS_PROC_GET_SAVED_CALLS_BUF(p))
+ save_calls(p, &exp_send);
+ }
+
+ return res;
+}
+
+Sint
+do_send(Process *p, Eterm to, Eterm msg, int suspend) {
+ Eterm portid;
+ Port *pt;
+ Process* rp;
+ DistEntry *dep;
+ Eterm* tp;
+
+ if (is_internal_pid(to)) {
+ if (IS_TRACED(p))
+ trace_send(p, to, msg);
+ if (ERTS_PROC_GET_SAVED_CALLS_BUF(p))
+ save_calls(p, &exp_send);
+
+ if (internal_pid_index(to) >= erts_max_processes)
+ return SEND_BADARG;
+
+ rp = erts_pid2proc_opt(p, ERTS_PROC_LOCK_MAIN,
+ to, 0, ERTS_P2P_FLG_SMP_INC_REFC);
+
+ if (!rp) {
+ ERTS_SMP_ASSERT_IS_NOT_EXITING(p);
+ return 0;
+ }
+ } else if (is_external_pid(to)) {
+ dep = external_pid_dist_entry(to);
+ if(dep == erts_this_dist_entry) {
+ erts_dsprintf_buf_t *dsbufp = erts_create_logger_dsbuf();
+ erts_dsprintf(dsbufp,
+ "Discarding message %T from %T to %T in an old "
+ "incarnation (%d) of this node (%d)\n",
+ msg,
+ p->id,
+ to,
+ external_pid_creation(to),
+ erts_this_node->creation);
+ erts_send_error_to_logger(p->group_leader, dsbufp);
+ return 0;
+ }
+ return remote_send(p, dep, to, to, msg, suspend);
+ } else if (is_atom(to)) {
+
+ /* Need to virtually schedule out the sending process
+ * because of the lock wait. This is only necessary
+ * for internal port calls, but the lock is bundled
+ * with the name lookup.
+ */
+
+ if (IS_TRACED_FL(p, F_TRACE_SCHED_PROCS)) {
+ trace_virtual_sched(p, am_out);
+ }
+ if (erts_system_profile_flags.runnable_procs && erts_system_profile_flags.exclusive) {
+ profile_runnable_proc(p, am_inactive);
+ }
+ erts_whereis_name(p, ERTS_PROC_LOCK_MAIN,
+ to,
+ &rp, 0, ERTS_P2P_FLG_SMP_INC_REFC,
+ &pt);
+
+ if (pt) {
+ portid = pt->id;
+ goto port_common;
+ }
+
+ /* Not a port; virtually schedule the process back in */
+ if (IS_TRACED_FL(p, F_TRACE_SCHED_PROCS)) {
+ trace_virtual_sched(p, am_in);
+ }
+ if (erts_system_profile_flags.runnable_procs && erts_system_profile_flags.exclusive) {
+ profile_runnable_proc(p, am_active);
+ }
+
+ if (IS_TRACED(p))
+ trace_send(p, to, msg);
+ if (ERTS_PROC_GET_SAVED_CALLS_BUF(p))
+ save_calls(p, &exp_send);
+
+ if (!rp) {
+ return SEND_BADARG;
+ }
+ } else if (is_external_port(to)
+ && (external_port_dist_entry(to)
+ == erts_this_dist_entry)) {
+ erts_dsprintf_buf_t *dsbufp = erts_create_logger_dsbuf();
+ erts_dsprintf(dsbufp,
+ "Discarding message %T from %T to %T in an old "
+ "incarnation (%d) of this node (%d)\n",
+ msg,
+ p->id,
+ to,
+ external_port_creation(to),
+ erts_this_node->creation);
+ erts_send_error_to_logger(p->group_leader, dsbufp);
+ return 0;
+ } else if (is_internal_port(to)) {
+ portid = to;
+ /* schedule out the calling process while waiting for the lock */
+ if (IS_TRACED_FL(p, F_TRACE_SCHED_PROCS)) {
+ trace_virtual_sched(p, am_out);
+ }
+ if (erts_system_profile_flags.runnable_procs && erts_system_profile_flags.exclusive) {
+ profile_runnable_proc(p, am_inactive);
+ }
+ pt = erts_id2port(to, p, ERTS_PROC_LOCK_MAIN);
+ port_common:
+ ERTS_SMP_LC_ASSERT(!pt || erts_lc_is_port_locked(pt));
+
+ /* We have waited for locks, trace schedule ports */
+ if (pt && IS_TRACED_FL(pt, F_TRACE_SCHED_PORTS)) {
+ trace_sched_ports_where(pt, am_in, am_command);
+ }
+ if (pt && erts_system_profile_flags.runnable_ports && !erts_port_is_scheduled(pt)) {
+ profile_runnable_port(pt, am_active);
+ }
+
+ /* XXX let port_command handle the busy stuff !!! */
+ if (pt && (pt->status & ERTS_PORT_SFLG_PORT_BUSY)) {
+ if (suspend) {
+ erts_suspend(p, ERTS_PROC_LOCK_MAIN, pt);
+ if (erts_system_monitor_flags.busy_port) {
+ monitor_generic(p, am_busy_port, portid);
+ }
+ }
+ /* Virtually schedule out the port before releasing */
+ if (IS_TRACED_FL(pt, F_TRACE_SCHED_PORTS)) {
+ trace_sched_ports_where(pt, am_out, am_command);
+ }
+ if (erts_system_profile_flags.runnable_ports && !erts_port_is_scheduled(pt)) {
+ profile_runnable_port(pt, am_inactive);
+ }
+ erts_port_release(pt);
+ return SEND_YIELD;
+ }
+
+ if (IS_TRACED(p)) /* trace once only !! */
+ trace_send(p, portid, msg);
+ if (ERTS_PROC_GET_SAVED_CALLS_BUF(p))
+ save_calls(p, &exp_send);
+
+ if (SEQ_TRACE_TOKEN(p) != NIL) {
+ seq_trace_update_send(p);
+ seq_trace_output(SEQ_TRACE_TOKEN(p), msg,
+ SEQ_TRACE_SEND, portid, p);
+ }
+
+ /* XXX NO GC in port command */
+ erts_port_command(p, p->id, pt, msg);
+ if (pt) {
+ /* Virtually schedule out the port before releasing */
+ if (IS_TRACED_FL(pt, F_TRACE_SCHED_PORTS)) {
+ trace_sched_ports_where(pt, am_out, am_command);
+ }
+ if (erts_system_profile_flags.runnable_ports && !erts_port_is_scheduled(pt)) {
+ profile_runnable_port(pt, am_inactive);
+ }
+ erts_port_release(pt);
+ }
+ /* Virtually schedule in process */
+ if (IS_TRACED_FL(p, F_TRACE_SCHED_PROCS)) {
+ trace_virtual_sched(p, am_in);
+ }
+ if (erts_system_profile_flags.runnable_procs && erts_system_profile_flags.exclusive) {
+ profile_runnable_proc(p, am_active);
+ }
+ if (ERTS_PROC_IS_EXITING(p)) {
+ KILL_CATCHES(p); /* Must exit */
+ return SEND_USER_ERROR;
+ }
+ return 0;
+ } else if (is_tuple(to)) { /* Remote send */
+ int ret;
+ tp = tuple_val(to);
+ if (*tp != make_arityval(2))
+ return SEND_BADARG;
+ if (is_not_atom(tp[1]) || is_not_atom(tp[2]))
+ return SEND_BADARG;
+
+ /* sysname_to_connected_dist_entry will return NULL if there
+ is no dist_entry or the dist_entry has no port,
+ but remote_send() will handle that. */
+
+ dep = erts_sysname_to_connected_dist_entry(tp[2]);
+
+ if (dep == erts_this_dist_entry) {
+ erts_deref_dist_entry(dep);
+ if (IS_TRACED(p))
+ trace_send(p, to, msg);
+ if (ERTS_PROC_GET_SAVED_CALLS_BUF(p))
+ save_calls(p, &exp_send);
+
+ /* Need to virtually schedule out the sending process
+ * because of the lock wait. This is only necessary
+ * for internal port calls, but the lock is bundled.
+ */
+
+ if (IS_TRACED_FL(p, F_TRACE_SCHED_PROCS)) {
+ trace_virtual_sched(p, am_out);
+ }
+ if (erts_system_profile_flags.runnable_procs && erts_system_profile_flags.exclusive) {
+ profile_runnable_proc(p, am_inactive);
+ }
+
+ erts_whereis_name(p, ERTS_PROC_LOCK_MAIN,
+ tp[1],
+ &rp, 0, ERTS_P2P_FLG_SMP_INC_REFC,
+ &pt);
+ if (pt) {
+ portid = pt->id;
+ goto port_common;
+ }
+ /* Port lookup failed, virtually schedule the process
+ * back in.
+ */
+
+ if (IS_TRACED_FL(p, F_TRACE_SCHED_PROCS)) {
+ trace_virtual_sched(p, am_in);
+ }
+ if (erts_system_profile_flags.runnable_procs && erts_system_profile_flags.exclusive) {
+ profile_runnable_proc(p, am_active);
+ }
+
+ if (!rp) {
+ return 0;
+ }
+ goto send_message;
+ }
+
+ ret = remote_send(p, dep, tp[1], to, msg, suspend);
+ if (dep)
+ erts_deref_dist_entry(dep);
+ return ret;
+ } else {
+ if (IS_TRACED(p)) /* XXX Is this really necessary ??? */
+ trace_send(p, to, msg);
+ if (ERTS_PROC_GET_SAVED_CALLS_BUF(p))
+ save_calls(p, &exp_send);
+ return SEND_BADARG;
+ }
+
+ send_message: {
+ ErtsProcLocks rp_locks = 0;
+ Sint res;
+#ifdef ERTS_SMP
+ if (p == rp)
+ rp_locks |= ERTS_PROC_LOCK_MAIN;
+#endif
+ /* send to local process */
+ erts_send_message(p, rp, &rp_locks, msg, 0);
+ if (!erts_use_sender_punish)
+ res = 0;
+ else {
+#ifdef ERTS_SMP
+ res = rp->msg_inq.len*4;
+ if (ERTS_PROC_LOCK_MAIN & rp_locks)
+ res += rp->msg.len*4;
+#else
+ res = rp->msg.len*4;
+#endif
+ }
+ erts_smp_proc_unlock(rp,
+ p == rp
+ ? (rp_locks & ~ERTS_PROC_LOCK_MAIN)
+ : rp_locks);
+ erts_smp_proc_dec_refc(rp);
+ return res;
+ }
+}
+
+
+Eterm
+send_3(Process *p, Eterm to, Eterm msg, Eterm opts) {
+ int connect = !0;
+ int suspend = !0;
+ Eterm l = opts;
+ Sint result;
+
+ while (is_list(l)) {
+ if (CAR(list_val(l)) == am_noconnect) {
+ connect = 0;
+ } else if (CAR(list_val(l)) == am_nosuspend) {
+ suspend = 0;
+ } else {
+ BIF_ERROR(p, BADARG);
+ }
+ l = CDR(list_val(l));
+ }
+ if(!is_nil(l)) {
+ BIF_ERROR(p, BADARG);
+ }
+
+ result = do_send(p, to, msg, suspend);
+ if (result > 0) {
+ ERTS_VBUMP_REDS(p, result);
+ BIF_RET(am_ok);
+ } else switch (result) {
+ case 0:
+ BIF_RET(am_ok);
+ break;
+ case SEND_TRAP:
+ if (connect) {
+ BIF_TRAP3(dsend3_trap, p, to, msg, opts);
+ } else {
+ BIF_RET(am_noconnect);
+ }
+ break;
+ case SEND_YIELD:
+ if (suspend) {
+ ERTS_BIF_YIELD3(bif_export[BIF_send_3], p, to, msg, opts);
+ } else {
+ BIF_RET(am_nosuspend);
+ }
+ break;
+ case SEND_YIELD_RETURN:
+ if (suspend)
+ ERTS_BIF_YIELD_RETURN(p, am_ok);
+ else
+ BIF_RET(am_nosuspend);
+ case SEND_BADARG:
+ BIF_ERROR(p, BADARG);
+ break;
+ case SEND_USER_ERROR:
+ BIF_ERROR(p, EXC_ERROR);
+ break;
+ case SEND_INTERNAL_ERROR:
+ BIF_ERROR(p, EXC_INTERNAL_ERROR);
+ break;
+ default:
+ ASSERT(! "Illegal send result");
+ break;
+ }
+ ASSERT(! "Can not arrive here");
+ BIF_ERROR(p, BADARG);
+}
+
+Eterm
+send_2(Process *p, Eterm to, Eterm msg) {
+ Sint result = do_send(p, to, msg, !0);
+
+ if (result > 0) {
+ ERTS_VBUMP_REDS(p, result);
+ BIF_RET(msg);
+ } else switch (result) {
+ case 0:
+ BIF_RET(msg);
+ break;
+ case SEND_TRAP:
+ BIF_TRAP2(dsend2_trap, p, to, msg);
+ break;
+ case SEND_YIELD:
+ ERTS_BIF_YIELD2(bif_export[BIF_send_2], p, to, msg);
+ break;
+ case SEND_YIELD_RETURN:
+ ERTS_BIF_YIELD_RETURN(p, msg);
+ case SEND_BADARG:
+ BIF_ERROR(p, BADARG);
+ break;
+ case SEND_USER_ERROR:
+ BIF_ERROR(p, EXC_ERROR);
+ break;
+ case SEND_INTERNAL_ERROR:
+ BIF_ERROR(p, EXC_INTERNAL_ERROR);
+ break;
+ default:
+ ASSERT(! "Illegal send result");
+ break;
+ }
+ ASSERT(! "Can not arrive here");
+ BIF_ERROR(p, BADARG);
+}
+
+/**********************************************************************/
+/*
+ * apply/3 is implemented as an instruction and as erlang code in the
+ * erlang module.
+ *
+ * There is only one reason that apply/3 is included in the BIF table:
+ * The error handling code in the beam emulator passes the pointer to
+ * this function to the error handling code if the apply instruction
+ * fails. The error handling code uses the function pointer to look up
+ * erlang:apply/3 in the BIF table.
+ *
+ * This function will never be called. (It could be if init did something
+ * like this: apply(erlang, apply, [M, F, A]). Not recommended.)
+ */
+
+BIF_RETTYPE apply_3(BIF_ALIST_3)
+{
+ BIF_ERROR(BIF_P, BADARG);
+}
+
+
+/**********************************************************************/
+
+/* integer to float */
+
+/**********************************************************************/
+
+/* returns the head of a list - this function is unnecessary
+ and is only here to keep Robert happy (even more so, since it's an op as well) */
+BIF_RETTYPE hd_1(BIF_ALIST_1)
+{
+ if (is_not_list(BIF_ARG_1)) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+ BIF_RET(CAR(list_val(BIF_ARG_1)));
+}
+
+/**********************************************************************/
+
+/* returns the tail of a list - same comment as above */
+
+BIF_RETTYPE tl_1(BIF_ALIST_1)
+{
+ if (is_not_list(BIF_ARG_1)) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+ BIF_RET(CDR(list_val(BIF_ARG_1)));
+}
+
+
+/**********************************************************************/
+/* return the size of an I/O list */
+
+BIF_RETTYPE iolist_size_1(BIF_ALIST_1)
+{
+ Sint size = io_list_len(BIF_ARG_1);
+
+ if (size == -1) {
+ BIF_ERROR(BIF_P, BADARG);
+ } else if (IS_USMALL(0, (Uint) size)) {
+ BIF_RET(make_small(size));
+ } else {
+ Eterm* hp = HAlloc(BIF_P, BIG_UINT_HEAP_SIZE);
+ BIF_RET(uint_to_big(size, hp));
+ }
+}
+
+
+/**********************************************************************/
+
+/* return the N'th element of a tuple */
+
+BIF_RETTYPE element_2(BIF_ALIST_2)
+{
+ if (is_not_small(BIF_ARG_1)) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+ if (is_tuple(BIF_ARG_2)) {
+ Eterm* tuple_ptr = tuple_val(BIF_ARG_2);
+ Sint ix = signed_val(BIF_ARG_1);
+
+ if ((ix >= 1) && (ix <= arityval(*tuple_ptr)))
+ BIF_RET(tuple_ptr[ix]);
+ }
+ BIF_ERROR(BIF_P, BADARG);
+}
+
+/**********************************************************************/
+
+/* return the arity of a tuple */
+
+BIF_RETTYPE tuple_size_1(BIF_ALIST_1)
+{
+ if (is_tuple(BIF_ARG_1)) {
+ return make_small(arityval(*tuple_val(BIF_ARG_1)));
+ }
+ BIF_ERROR(BIF_P, BADARG);
+}
+
+/**********************************************************************/
+
+/* set the n'th element in a tuple */
+
+BIF_RETTYPE setelement_3(BIF_ALIST_3)
+{
+ Eterm* ptr;
+ Eterm* hp;
+ Eterm* resp;
+ Uint ix;
+ Uint size;
+
+ if (is_not_small(BIF_ARG_1) || is_not_tuple(BIF_ARG_2)) {
+ error:
+ BIF_ERROR(BIF_P, BADARG);
+ }
+ ptr = tuple_val(BIF_ARG_2);
+ ix = signed_val(BIF_ARG_1);
+ size = arityval(*ptr) + 1; /* include arity */
+ if ((ix < 1) || (ix >= size)) {
+ goto error;
+ }
+
+ hp = HAlloc(BIF_P, size);
+
+ /* copy the tuple */
+ resp = hp;
+ while (size--) { /* XXX use memcpy? */
+ *hp++ = *ptr++;
+ }
+ resp[ix] = BIF_ARG_3;
+ BIF_RET(make_tuple(resp));
+}
+
+/**********************************************************************/
+
+BIF_RETTYPE make_tuple_2(BIF_ALIST_2)
+{
+ Sint n;
+ Eterm* hp;
+ Eterm res;
+
+ if (is_not_small(BIF_ARG_1) || (n = signed_val(BIF_ARG_1)) < 0) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+ hp = HAlloc(BIF_P, n+1);
+ res = make_tuple(hp);
+ *hp++ = make_arityval(n);
+ while (n--) {
+ *hp++ = BIF_ARG_2;
+ }
+ BIF_RET(res);
+}
+
+BIF_RETTYPE make_tuple_3(BIF_ALIST_3)
+{
+ Sint n;
+ Uint limit;
+ Eterm* hp;
+ Eterm res;
+ Eterm list = BIF_ARG_3;
+ Eterm* tup;
+
+ if (is_not_small(BIF_ARG_1) || (n = signed_val(BIF_ARG_1)) < 0) {
+ error:
+ BIF_ERROR(BIF_P, BADARG);
+ }
+ limit = (Uint) n;
+ hp = HAlloc(BIF_P, n+1);
+ res = make_tuple(hp);
+ *hp++ = make_arityval(n);
+ tup = hp;
+ while (n--) {
+ *hp++ = BIF_ARG_2;
+ }
+ while(is_list(list)) {
+ Eterm* cons;
+ Eterm hd;
+ Eterm* tp;
+ Eterm index;
+ Uint index_val;
+
+ cons = list_val(list);
+ hd = CAR(cons);
+ list = CDR(cons);
+ if (is_not_tuple_arity(hd, 2)) {
+ goto error;
+ }
+ tp = tuple_val(hd);
+ if (is_not_small(index = tp[1])) {
+ goto error;
+ }
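+ /* index is 1-based; for index 0 (or a negative small) the unsigned
+ * subtraction below wraps around, so the single bound check suffices. */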
+ if ((index_val = unsigned_val(index) - 1) < limit) {
+ tup[index_val] = tp[2];
+ } else {
+ goto error;
+ }
+ }
+ if (is_not_nil(list)) {
+ goto error;
+ }
+ BIF_RET(res);
+}
+
+
+/**********************************************************************/
+
+BIF_RETTYPE append_element_2(BIF_ALIST_2)
+{
+ Eterm* ptr;
+ Eterm* hp;
+ Uint arity;
+ Eterm res;
+
+ if (is_not_tuple(BIF_ARG_1)) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+ ptr = tuple_val(BIF_ARG_1);
+ arity = arityval(*ptr);
+ hp = HAlloc(BIF_P, arity + 2);
+ res = make_tuple(hp);
+ *hp = make_arityval(arity+1);
+ while (arity--) {
+ *++hp = *++ptr;
+ }
+ *++hp = BIF_ARG_2;
+ BIF_RET(res);
+}
+
+/**********************************************************************/
+
+/* convert an atom to a list of ascii integers */
+
+BIF_RETTYPE atom_to_list_1(BIF_ALIST_1)
+{
+ Uint need;
+ Eterm* hp;
+ Atom* ap;
+
+ if (is_not_atom(BIF_ARG_1))
+ BIF_ERROR(BIF_P, BADARG);
+
+ /* read data from atom table */
+ ap = atom_tab(atom_val(BIF_ARG_1));
+ if (ap->len == 0)
+ BIF_RET(NIL); /* the empty atom */
+ need = ap->len*2;
+ hp = HAlloc(BIF_P, need);
+ BIF_RET(buf_to_intlist(&hp,(char*)ap->name,ap->len, NIL));
+}
+
+/**********************************************************************/
+
+/* convert a list of ascii integers to an atom */
+
+BIF_RETTYPE list_to_atom_1(BIF_ALIST_1)
+{
+ Eterm res;
+ char *buf = (char *) erts_alloc(ERTS_ALC_T_TMP, MAX_ATOM_LENGTH);
+ int i = intlist_to_buf(BIF_ARG_1, buf, MAX_ATOM_LENGTH);
+
+ if (i < 0) {
+ erts_free(ERTS_ALC_T_TMP, (void *) buf);
+ i = list_length(BIF_ARG_1);
+ if (i > MAX_ATOM_LENGTH) {
+ BIF_ERROR(BIF_P, SYSTEM_LIMIT);
+ }
+ BIF_ERROR(BIF_P, BADARG);
+ }
+ res = am_atom_put(buf, i);
+ erts_free(ERTS_ALC_T_TMP, (void *) buf);
+ BIF_RET(res);
+}
+
+/* conditionally convert a list of ascii integers to an atom */
+
+BIF_RETTYPE list_to_existing_atom_1(BIF_ALIST_1)
+{
+ int i;
+ char *buf = (char *) erts_alloc(ERTS_ALC_T_TMP, MAX_ATOM_LENGTH);
+
+ if ((i = intlist_to_buf(BIF_ARG_1, buf, MAX_ATOM_LENGTH)) < 0) {
+ error:
+ erts_free(ERTS_ALC_T_TMP, (void *) buf);
+ BIF_ERROR(BIF_P, BADARG);
+ } else {
+ Eterm a;
+
+ if (erts_atom_get(buf, i, &a)) {
+ erts_free(ERTS_ALC_T_TMP, (void *) buf);
+ BIF_RET(a);
+ } else {
+ goto error;
+ }
+ }
+}
+
+
+/**********************************************************************/
+
+/* convert an integer to a list of ascii integers */
+
+BIF_RETTYPE integer_to_list_1(BIF_ALIST_1)
+{
+ Eterm* hp;
+ Uint need;
+
+ if (is_not_integer(BIF_ARG_1)) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+
+ if (is_small(BIF_ARG_1)) {
+ char *c;
+ int n;
+ struct Sint_buf ibuf;
+
+ c = Sint_to_buf(signed_val(BIF_ARG_1), &ibuf);
+ n = sys_strlen(c);
+ need = 2*n;
+ hp = HAlloc(BIF_P, need);
+ BIF_RET(buf_to_intlist(&hp, c, n, NIL));
+ }
+ else {
+ int n = big_decimal_estimate(BIF_ARG_1);
+ Eterm res;
+ Eterm* hp_end;
+
+ need = 2*n;
+ hp = HAlloc(BIF_P, need);
+ hp_end = hp + need;
+ res = erts_big_to_list(BIF_ARG_1, &hp);
+ HRelease(BIF_P,hp_end,hp);
+ BIF_RET(res);
+ }
+}
+
+/**********************************************************************/
+
+/* convert a list of ascii integer values to an integer */
+
+
+#define LTI_BAD_STRUCTURE 0
+#define LTI_NO_INTEGER 1
+#define LTI_SOME_INTEGER 2
+#define LTI_ALL_INTEGER 3
+
+static int do_list_to_integer(Process *p, Eterm orig_list,
+ Eterm *integer, Eterm *rest)
+{
+ Sint i = 0;
+ int skip = 0;
+ int neg = 0;
+ int n = 0;
+ int m;
+ int lg2;
+ Eterm res;
+ Eterm* hp;
+ Eterm *hp_end;
+ Eterm lst = orig_list;
+ Eterm tail = lst;
+ int error_res = LTI_BAD_STRUCTURE;
+
+ if (is_nil(lst)) {
+ error_res = LTI_NO_INTEGER;
+ error:
+ *rest = tail;
+ *integer = make_small(0);
+ return error_res;
+ }
+ if (is_not_list(lst))
+ goto error;
+
+ /* if first char is a '-' then it is a negative integer */
+ if (CAR(list_val(lst)) == make_small('-')) {
+ neg = 1;
+ skip = 1;
+ lst = CDR(list_val(lst));
+ if (is_not_list(lst)) {
+ tail = lst;
+ error_res = LTI_NO_INTEGER;
+ goto error;
+ }
+ } else if (CAR(list_val(lst)) == make_small('+')) {
+ /* ignore plus */
+ skip = 1;
+ lst = CDR(list_val(lst));
+ if (is_not_list(lst)) {
+ tail = lst;
+ error_res = LTI_NO_INTEGER;
+ goto error;
+ }
+ }
+
+ /* Calculate size and do type check */
+
+ while(1) {
+ if (is_not_small(CAR(list_val(lst)))) {
+ break;
+ }
+ if (unsigned_val(CAR(list_val(lst))) < '0' ||
+ unsigned_val(CAR(list_val(lst))) > '9') {
+ break;
+ }
+ i = i * 10;
+ i = i + unsigned_val(CAR(list_val(lst))) - '0';
+ n++;
+ lst = CDR(list_val(lst));
+ if (is_nil(lst)) {
+ break;
+ }
+ if (is_not_list(lst)) {
+ break;
+ }
+ }
+
+ tail = lst;
+ if (!n) {
+ error_res = LTI_NO_INTEGER;
+ goto error;
+ }
+
+
+ /* If n <= 8 then we know it's a small int
+ ** since 2^27 = 134217728. If n > 8 then we must
+ ** construct a bignum and let that routine do the checking
+ */
+
+ if (n <= SMALL_DIGITS) { /* It must be small */
+ if (neg) i = -i;
+ res = make_small(i);
+ } else {
+ lg2 = (n+1)*230/69+1;
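+ /* 230/69 ~= 3.334 slightly over-estimates log2(10) ~= 3.322, so lg2
+ * is a safe upper bound on the bits needed for n decimal digits. */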
+ m = (lg2+D_EXP-1)/D_EXP; /* number of digits */
+ m = BIG_NEED_SIZE(m); /* number of words + thing */
+
+ hp = HAlloc(p, m);
+ hp_end = hp + m;
+
+ lst = orig_list;
+ if (skip)
+ lst = CDR(list_val(lst));
+
+ /* load first digits (at least one digit) */
+ if ((i = (n % D_DECIMAL_EXP)) == 0)
+ i = D_DECIMAL_EXP;
+ n -= i;
+ m = 0;
+ while(i--) {
+ m = 10*m + (unsigned_val(CAR(list_val(lst))) - '0');
+ lst = CDR(list_val(lst));
+ }
+ res = small_to_big(m, hp); /* load first digits */
+
+ while(n) {
+ i = D_DECIMAL_EXP;
+ n -= D_DECIMAL_EXP;
+ m = 0;
+ while(i--) {
+ m = 10*m + (unsigned_val(CAR(list_val(lst))) - '0');
+ lst = CDR(list_val(lst));
+ }
+ if (is_small(res))
+ res = small_to_big(signed_val(res), hp);
+ res = big_times_small(res, D_DECIMAL_BASE, hp);
+ if (is_small(res))
+ res = small_to_big(signed_val(res), hp);
+ res = big_plus_small(res, m, hp);
+ }
+
+ if (is_big(res)) /* check if small */
+ res = big_plus_small(res, 0, hp); /* includes conversion to small */
+
+ if (neg) {
+ if (is_small(res))
+ res = make_small(-signed_val(res));
+ else {
+ Uint *big = big_val(res); /* point to thing */
+ *big = bignum_header_neg(*big);
+ }
+ }
+
+ if (is_big(res)) {
+ hp += (big_arity(res)+1);
+ }
+ HRelease(p,hp_end,hp);
+ }
+ *integer = res;
+ *rest = tail;
+ if (tail != NIL) {
+ return LTI_SOME_INTEGER;
+ }
+ return LTI_ALL_INTEGER;
+}
+
+BIF_RETTYPE string_to_integer_1(BIF_ALIST_1)
+{
+ Eterm res;
+ Eterm tail;
+ Eterm *hp;
+ /* must be a list */
+ switch (do_list_to_integer(BIF_P,BIF_ARG_1,&res,&tail)) {
+ /* HAlloc only after do_list_to_integer, since it
+ may HAlloc itself (bignum) */
+ case LTI_BAD_STRUCTURE:
+ hp = HAlloc(BIF_P,3);
+ BIF_RET(TUPLE2(hp, am_error, am_not_a_list));
+ case LTI_NO_INTEGER:
+ hp = HAlloc(BIF_P,3);
+ BIF_RET(TUPLE2(hp, am_error, am_no_integer));
+ default:
+ hp = HAlloc(BIF_P,3);
+ BIF_RET(TUPLE2(hp, res, tail));
+ }
+}
+
+
+BIF_RETTYPE list_to_integer_1(BIF_ALIST_1)
+{
+ Eterm res;
+ Eterm dummy;
+ /* must be a list */
+
+ if (do_list_to_integer(BIF_P,BIF_ARG_1,&res,&dummy) != LTI_ALL_INTEGER) {
+ BIF_ERROR(BIF_P,BADARG);
+ }
+ BIF_RET(res);
+}
+
+/**********************************************************************/
+
+/* convert a float to a list of ascii characters */
+
+BIF_RETTYPE float_to_list_1(BIF_ALIST_1)
+{
+ int i;
+ Uint need;
+ Eterm* hp;
+ FloatDef f;
+ char fbuf[30];
+
+ /* check the arguments */
+ if (is_not_float(BIF_ARG_1))
+ BIF_ERROR(BIF_P, BADARG);
+ GET_DOUBLE(BIF_ARG_1, f);
+ if ((i = sys_double_to_chars(f.fd, fbuf)) <= 0)
+ BIF_ERROR(BIF_P, EXC_INTERNAL_ERROR);
+ need = i*2;
+ hp = HAlloc(BIF_P, need);
+ BIF_RET(buf_to_intlist(&hp, fbuf, i, NIL));
+}
+
+/**********************************************************************/
+
+/* convert a list of ascii integer values, e's, +'s and -'s to a float */
+
+
+#define SIGN 0
+#define INT 1
+#define FRAC 2
+#define EXP_SIGN 3
+#define EXP0 4
+#define EXP1 5
+#define END 6
+
+#define IS_DOT(x) (unsigned_val((x)) == '.' || unsigned_val((x)) == ',')
+#define IS_E(x) (unsigned_val((x)) == 'e' || unsigned_val((x)) == 'E')
+#define IS_DIGIT(x) (unsigned_val((x)) >= '0' && unsigned_val((x)) <= '9')
+#define SAVE_E(xi,xim,xl,xlm) ((xim)=(xi), (xlm)=(xl))
+#define LOAD_E(xi,xim,xl,xlm) ((xi)=(xim), (xl)=(xlm))
+
+#define STRING_TO_FLOAT_BUF_INC_SZ (128)
+BIF_RETTYPE string_to_float_1(BIF_ALIST_1)
+{
+ Eterm orig = BIF_ARG_1;
+ Eterm list = orig;
+ Eterm list_mem = list;
+ int i = 0;
+ int i_mem = 0;
+ Eterm* hp;
+ Eterm error_res = NIL;
+ int part = SIGN; /* expect a + or - (or a digit) first */
+ FloatDef f;
+ Eterm tup;
+ byte *buf = NULL;
+ Uint bufsz = STRING_TO_FLOAT_BUF_INC_SZ;
+
+ /* check it's a valid list to start with */
+ if (is_nil(list)) {
+ error_res = am_no_float;
+ error:
+ if (buf)
+ erts_free(ERTS_ALC_T_TMP, (void *) buf);
+ hp = HAlloc(BIF_P, 3);
+ BIF_RET(TUPLE2(hp, am_error, error_res));
+ }
+ if (is_not_list(list)) {
+ error_res = am_not_a_list;
+ goto error;
+ }
+
+ buf = (byte *) erts_alloc(ERTS_ALC_T_TMP, bufsz);
+
+ /*
+ The float might start with a SIGN (+ | -). It must contain an integer
+ part, INT, followed by a delimiter (. | ,) and a fractional, FRAC,
+ part. The float might also contain an exponent. If e or E indicates
+ this we will look for a possible EXP_SIGN (+ | -) followed by the
+ exponential number, EXP. (EXP0 is the first digit and EXP1 the rest).
+
+ When we encounter an expected e or E, we can't tell if it's part of
+ the float or the rest of the string. We save the current position
+ with SAVE_E. If we later find out it was not part of the float, we
+ restore the position (end of the float) with LOAD_E.
+ */
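+ /*
+ Example: for "2.3e5x" we go SIGN->INT at '2', INT->FRAC at '.',
+ FRAC->EXP_SIGN at 'e' (position saved with SAVE_E), EXP_SIGN->EXP1
+ at '5', and stop at 'x', returning "x" as the rest of the string.
+ */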
+ while(1) {
+ if (is_not_small(CAR(list_val(list))))
+ goto back_to_e;
+ if (CAR(list_val(list)) == make_small('-')) {
+ switch (part) {
+ case SIGN: /* expect integer part next */
+ part = INT;
+ break;
+ case EXP_SIGN: /* expect first digit in exp */
+ part = EXP0;
+ break;
+ case EXP0: /* example: "2.3e--" */
+ LOAD_E(i, i_mem, list, list_mem);
+ default: /* unexpected - done */
+ part = END;
+ }
+ } else if (CAR(list_val(list)) == make_small('+')) {
+ switch (part) {
+ case SIGN: /* expect integer part next */
+ part = INT;
+ goto skip;
+ case EXP_SIGN: /* expect first digit in exp */
+ part = EXP0;
+ break;
+ case EXP0: /* example: "2.3e++" */
+ LOAD_E(i, i_mem, list, list_mem);
+ default: /* unexpected - done */
+ part = END;
+ }
+ } else if (IS_DOT(CAR(list_val(list)))) { /* . or , */
+ switch (part) {
+ case INT: /* expect fractional part next */
+ part = FRAC;
+ break;
+ case EXP_SIGN: /* example: "2.3e." */
+ LOAD_E(i, i_mem, list, list_mem);
+ case EXP0: /* example: "2.3e+." */
+ LOAD_E(i, i_mem, list, list_mem);
+ default: /* unexpected - done */
+ part = END;
+ }
+ } else if (IS_E(CAR(list_val(list)))) { /* e or E */
+ switch (part) {
+ case FRAC: /* expect a + or - (or a digit) next */
+ /*
+ remember the position of e in case we find out later
+ that it was not part of the float, e.g. "2.3eh?"
+ */
+ SAVE_E(i, i_mem, list, list_mem);
+ part = EXP_SIGN;
+ break;
+ case EXP0: /* example: "2.3e+e" */
+ case EXP_SIGN: /* example: "2.3ee" */
+ LOAD_E(i, i_mem, list, list_mem);
+ case INT: /* would like this to be ok, example "2e2",
+ but it's not compatible with list_to_float */
+ default: /* unexpected - done */
+ part = END;
+ }
+ } else if (IS_DIGIT(CAR(list_val(list)))) { /* digit */
+ switch (part) {
+ case SIGN: /* got initial digit in integer part */
+ part = INT; /* expect more digits to follow */
+ break;
+ case EXP_SIGN: /* expect exponential part */
+ case EXP0: /* expect rest of exponential */
+ part = EXP1;
+ break;
+ }
+ } else /* character not part of float - done */
+ goto back_to_e;
+
+ if (part == END) {
+ if (i < 3) { /* we require a fractional part */
+ error_res = am_no_float;
+ goto error;
+ }
+ break;
+ }
+
+ buf[i++] = unsigned_val(CAR(list_val(list)));
+
+ if (i == bufsz - 1)
+ buf = (byte *) erts_realloc(ERTS_ALC_T_TMP,
+ (void *) buf,
+ bufsz += STRING_TO_FLOAT_BUF_INC_SZ);
+ skip:
+ list = CDR(list_val(list)); /* next element */
+
+ if (is_nil(list))
+ goto back_to_e;
+
+ if (is_not_list(list)) {
+ back_to_e:
+ if (part == EXP_SIGN || part == EXP0) {
+ LOAD_E(i, i_mem, list, list_mem);
+ }
+ break;
+ }
+ }
+
+ if (i == 0) { /* no float first in list */
+ error_res = am_no_float;
+ goto error;
+ }
+
+ buf[i] = '\0'; /* null terminator */
+ ASSERT(bufsz >= i + 1);
+ if (sys_chars_to_double((char*) buf, &f.fd) != 0) {
+ error_res = am_no_float;
+ goto error;
+ }
+ hp = HAlloc(BIF_P, FLOAT_SIZE_OBJECT + 3);
+ tup = TUPLE2(hp+FLOAT_SIZE_OBJECT, make_float(hp), list);
+ PUT_DOUBLE(f, hp);
+ erts_free(ERTS_ALC_T_TMP, (void *) buf);
+ BIF_RET(tup);
+}
+
+
+BIF_RETTYPE list_to_float_1(BIF_ALIST_1)
+{
+ int i;
+ FloatDef f;
+ Eterm res;
+ Eterm* hp;
+ char *buf = NULL;
+
+ i = list_length(BIF_ARG_1);
+ if (i < 0) {
+ badarg:
+ if (buf)
+ erts_free(ERTS_ALC_T_TMP, (void *) buf);
+ BIF_ERROR(BIF_P, BADARG);
+ }
+
+ buf = (char *) erts_alloc(ERTS_ALC_T_TMP, i + 1);
+
+ if (intlist_to_buf(BIF_ARG_1, buf, i) < 0)
+ goto badarg;
+ buf[i] = '\0'; /* null terminator */
+
+ if (sys_chars_to_double(buf, &f.fd) != 0)
+ goto badarg;
+ hp = HAlloc(BIF_P, FLOAT_SIZE_OBJECT);
+ res = make_float(hp);
+ PUT_DOUBLE(f, hp);
+ erts_free(ERTS_ALC_T_TMP, (void *) buf);
+ BIF_RET(res);
+}
+
+/**********************************************************************/
+
+/* convert a tuple to a list */
+
+BIF_RETTYPE tuple_to_list_1(BIF_ALIST_1)
+{
+ Uint n;
+ Eterm *tupleptr;
+ Eterm list = NIL;
+ Eterm* hp;
+
+ if (is_not_tuple(BIF_ARG_1)) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+
+ tupleptr = tuple_val(BIF_ARG_1);
+ n = arityval(*tupleptr);
+ hp = HAlloc(BIF_P, 2 * n);
+ tupleptr++;
+
+ while(n--) {
+ list = CONS(hp, tupleptr[n], list);
+ hp += 2;
+ }
+ BIF_RET(list);
+}
+
+/**********************************************************************/
+
+/* convert a list to a tuple */
+
+BIF_RETTYPE list_to_tuple_1(BIF_ALIST_1)
+{
+ Eterm list = BIF_ARG_1;
+ Eterm* cons;
+ Eterm res;
+ Eterm* hp;
+ int len;
+
+ if ((len = list_length(list)) < 0) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+
+ hp = HAlloc(BIF_P, len+1);
+ res = make_tuple(hp);
+ *hp++ = make_arityval(len);
+ while(is_list(list)) {
+ cons = list_val(list);
+ *hp++ = CAR(cons);
+ list = CDR(cons);
+ }
+ BIF_RET(res);
+}
+
+/**********************************************************************/
+
+/* return the pid of our own process; in most cases this has been replaced by
+ a machine instruction */
+
+BIF_RETTYPE self_0(BIF_ALIST_0)
+{
+ BIF_RET(BIF_P->id);
+}
+
+/**********************************************************************/
+
+/*
+ New representation of refs in R9, see erl_term.h
+
+ In the first data word, only the usual 18 bits are used. Ordinarily,
+ in "long refs" all words are used (in other words, practically never
+ wrap around), but for compatibility with older nodes, "short refs"
+ exist. Short refs come into being by being converted from the old
+ external format for refs (tag REFERENCE_EXT). Short refs are
+ converted back to the old external format.
+
+ When converting a long ref to the external format in the case of
+ preparing for sending to an older node, the ref is truncated by only
+ using the first word (with 18 significant bits), and using the old tag
+ REFERENCE_EXT.
+
+ When comparing refs of different size, only the parts up to the length
+ of the shorter operand are used. This has the desirable effect that a
+ long ref sent to an old node and back will be treated as equal to
+ the original, although some of the bits have been lost.
+
+ The hash value for a ref always considers only the first word, since
+ in the above scenario, the original and the copy should have the same
+ hash value.
+*/
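+/*
+  Consequence of the above: a long ref that round-trips through an old
+  node (losing all but its first word) still compares and hashes equal
+  to the original.
+*/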
+
+static Uint32 reference0; /* Initialized in erts_init_bif */
+static Uint32 reference1;
+static Uint32 reference2;
+static erts_smp_spinlock_t make_ref_lock;
+static erts_smp_mtx_t ports_snapshot_mtx;
+erts_smp_atomic_t erts_dead_ports_ptr; /* To store dying ports during snapshot */
+
+Eterm erts_make_ref_in_buffer(Eterm buffer[REF_THING_SIZE])
+{
+ Eterm* hp = buffer;
+ Uint32 ref0, ref1, ref2;
+
+ erts_smp_spin_lock(&make_ref_lock);
+
+ reference0++;
+ if (reference0 >= MAX_REFERENCE) {
+ reference0 = 0;
+ reference1++;
+ if (reference1 == 0) {
+ reference2++;
+ }
+ }
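+    /* reference0 wraps at MAX_REFERENCE (the 18 bits noted above), while
+     * reference1 and reference2 are full words bumped only on overflow,
+     * so together they act as one monotonically increasing counter. */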
+
+ ref0 = reference0;
+ ref1 = reference1;
+ ref2 = reference2;
+
+ erts_smp_spin_unlock(&make_ref_lock);
+
+ write_ref_thing(hp, ref0, ref1, ref2);
+ return make_internal_ref(hp);
+}
+
+Eterm erts_make_ref(Process *p)
+{
+ Eterm* hp;
+
+ ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN & erts_proc_lc_my_proc_locks(p));
+
+ hp = HAlloc(p, REF_THING_SIZE);
+ return erts_make_ref_in_buffer(hp);
+}
+
+BIF_RETTYPE make_ref_0(BIF_ALIST_0)
+{
+ return erts_make_ref(BIF_P);
+}
+
+/**********************************************************************/
+
+/* return the time of day */
+
+BIF_RETTYPE time_0(BIF_ALIST_0)
+{
+ int hour, minute, second;
+ Eterm* hp;
+
+ get_time(&hour, &minute, &second);
+ hp = HAlloc(BIF_P, 4); /* {hour, minute, second} + arity */
+ BIF_RET(TUPLE3(hp, make_small(hour), make_small(minute),
+ make_small(second)));
+}
+/**********************************************************************/
+
+/* return the date */
+
+BIF_RETTYPE date_0(BIF_ALIST_0)
+{
+ int year, month, day;
+ Eterm* hp;
+
+ get_date(&year, &month, &day);
+ hp = HAlloc(BIF_P, 4); /* {year, month, day} + arity */
+ BIF_RET(TUPLE3(hp, make_small(year), make_small(month), make_small(day)));
+}
+
+/**********************************************************************/
+
+/* return the universal time */
+
+BIF_RETTYPE universaltime_0(BIF_ALIST_0)
+{
+ int year, month, day;
+ int hour, minute, second;
+ Eterm res1, res2;
+ Eterm* hp;
+
+ /* read the clock */
+ get_universaltime(&year, &month, &day, &hour, &minute, &second);
+
+ hp = HAlloc(BIF_P, 4+4+3);
+
+ /* and return the tuple */
+ res1 = TUPLE3(hp,make_small(year),make_small(month),make_small(day));
+ hp += 4;
+ res2 = TUPLE3(hp,make_small(hour),make_small(minute),make_small(second));
+ hp += 4;
+ BIF_RET(TUPLE2(hp, res1, res2));
+}
+
+/**********************************************************************/
+
+/* return the local time */
+
+BIF_RETTYPE localtime_0(BIF_ALIST_0)
+{
+ int year, month, day;
+ int hour, minute, second;
+ Eterm res1, res2;
+ Eterm* hp;
+
+ /* read the clock */
+ get_localtime(&year, &month, &day, &hour, &minute, &second);
+
+ hp = HAlloc(BIF_P, 4+4+3);
+
+ /* and return the tuple */
+ res1 = TUPLE3(hp,make_small(year),make_small(month),make_small(day));
+ hp += 4;
+ res2 = TUPLE3(hp,make_small(hour),make_small(minute),make_small(second));
+ hp += 4;
+ BIF_RET(TUPLE2(hp, res1, res2));
+}
+/**********************************************************************/
+
+/* type check and extract components from a tuple of the form {{Y,M,D},{H,M,S}} */
+static int
+time_to_parts(Eterm date, Sint* year, Sint* month, Sint* day,
+ Sint* hour, Sint* minute, Sint* second)
+{
+ Eterm* t1;
+ Eterm* t2;
+
+ if (is_not_tuple(date)) {
+ return 0;
+ }
+ t1 = tuple_val(date);
+ if (arityval(t1[0]) !=2 ||
+ is_not_tuple(t1[1]) || is_not_tuple(t1[2]))
+ return 0;
+ t2 = tuple_val(t1[1]);
+ t1 = tuple_val(t1[2]);
+ if (arityval(t2[0]) != 3 ||
+ is_not_small(t2[1]) || is_not_small(t2[2]) || is_not_small(t2[3]))
+ return 0;
+ *year = signed_val(t2[1]);
+ *month = signed_val(t2[2]);
+ *day = signed_val(t2[3]);
+ if (arityval(t1[0]) != 3 ||
+ is_not_small(t1[1]) || is_not_small(t1[2]) || is_not_small(t1[3]))
+ return 0;
+ *hour = signed_val(t1[1]);
+ *minute = signed_val(t1[2]);
+ *second = signed_val(t1[3]);
+ return 1;
+}
+
+
+/* convert local time to universal time */
+
+BIF_RETTYPE
+localtime_to_universaltime_2(Process *p, Eterm localtime, Eterm dst)
+{
+ Sint year, month, day;
+ Sint hour, minute, second;
+ int isdst;
+ Eterm res1, res2;
+ Eterm* hp;
+
+ if (dst == am_true) isdst = 1;
+ else if (dst == am_false) isdst = 0;
+ else if (dst == am_undefined) isdst = -1;
+ else goto error;
+
+ if (!time_to_parts(localtime, &year, &month, &day,
+ &hour, &minute, &second)) goto error;
+ if (!local_to_univ(&year, &month, &day,
+ &hour, &minute, &second, isdst)) goto error;
+
+ hp = HAlloc(p, 4+4+3);
+ res1 = TUPLE3(hp,make_small(year),make_small(month),
+ make_small(day));
+ hp += 4;
+ res2 = TUPLE3(hp,make_small(hour),make_small(minute),
+ make_small(second));
+ hp += 4;
+ BIF_RET(TUPLE2(hp, res1, res2));
+ error:
+ BIF_ERROR(p, BADARG);
+}
+
+
+/**********************************************************************/
+
+/* convert universal time to local time */
+
+BIF_RETTYPE universaltime_to_localtime_1(BIF_ALIST_1)
+{
+ Sint year, month, day;
+ Sint hour, minute, second;
+ Eterm res1, res2;
+ Eterm* hp;
+
+ if (!time_to_parts(BIF_ARG_1, &year, &month, &day,
+ &hour, &minute, &second))
+ BIF_ERROR(BIF_P, BADARG);
+ if (!univ_to_local(&year, &month, &day,
+ &hour, &minute, &second))
+ BIF_ERROR(BIF_P, BADARG);
+
+ hp = HAlloc(BIF_P, 4+4+3);
+ res1 = TUPLE3(hp,make_small(year),make_small(month),
+ make_small(day));
+ hp += 4;
+ res2 = TUPLE3(hp,make_small(hour),make_small(minute),
+ make_small(second));
+ hp += 4;
+ BIF_RET(TUPLE2(hp, res1, res2));
+}
+
+/**********************************************************************/
+
+
+/* return a timestamp */
+BIF_RETTYPE now_0(BIF_ALIST_0)
+{
+ Uint megasec, sec, microsec;
+ Eterm* hp;
+
+ get_now(&megasec, &sec, &microsec);
+ hp = HAlloc(BIF_P, 4);
+ BIF_RET(TUPLE3(hp, make_small(megasec), make_small(sec),
+ make_small(microsec)));
+}
+
+/**********************************************************************/
+
+BIF_RETTYPE garbage_collect_1(BIF_ALIST_1)
+{
+ int reds;
+ Process *rp;
+
+ if (is_not_pid(BIF_ARG_1)) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+
+ rp = erts_pid2proc_not_running(BIF_P, ERTS_PROC_LOCK_MAIN,
+ BIF_ARG_1, ERTS_PROC_LOCK_MAIN);
+ if (!rp)
+ BIF_RET(am_false);
+ if (rp == ERTS_PROC_LOCK_BUSY)
+ ERTS_BIF_YIELD1(bif_export[BIF_garbage_collect_1], BIF_P, BIF_ARG_1);
+
+ /* The GC cost is taken for the process executing this BIF. */
+
+ FLAGS(rp) |= F_NEED_FULLSWEEP;
+ reds = erts_garbage_collect(rp, 0, rp->arg_reg, rp->arity);
+
+ if (BIF_P != rp)
+ erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_MAIN);
+
+ BIF_RET2(am_true, reds);
+}
+
+BIF_RETTYPE garbage_collect_0(BIF_ALIST_0)
+{
+ int reds;
+
+ FLAGS(BIF_P) |= F_NEED_FULLSWEEP;
+ reds = erts_garbage_collect(BIF_P, 0, NULL, 0);
+ BIF_RET2(am_true, reds);
+}
+
+/**********************************************************************/
+/* Perform garbage collection of the message area */
+
+BIF_RETTYPE garbage_collect_message_area_0(BIF_ALIST_0)
+{
+#if defined(HYBRID) && !defined(INCREMENTAL)
+ int reds = 0;
+
+ FLAGS(BIF_P) |= F_NEED_FULLSWEEP;
+ reds = erts_global_garbage_collect(BIF_P, 0, NULL, 0);
+ BIF_RET2(am_true, reds);
+#else
+ BIF_RET(am_false);
+#endif
+}
+
+/**********************************************************************/
+/* Return a list of active ports */
+
+BIF_RETTYPE ports_0(BIF_ALIST_0)
+{
+ Eterm res = NIL;
+ Eterm* port_buf = erts_alloc(ERTS_ALC_T_TMP,
+ sizeof(Eterm)*erts_max_ports);
+ Eterm* pp = port_buf;
+ Eterm* dead_ports;
+ int alive, dead;
+ Uint32 next_ss;
+
+ /* To get a consistent snapshot...
+ * We add alive ports from start of the buffer
+ * while dying ports are added from the other end by the killing threads.
+ */
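+    /* Alive ports are thus appended from the front of the buffer (pp
+     * grows upwards) while dead ports are appended from the back
+     * (erts_dead_ports_ptr grows downwards), so the two regions never
+     * overlap; hence the ASSERT(pp <= dead_ports) below. */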
+
+ erts_smp_mtx_lock(&ports_snapshot_mtx); /* One snapshot at a time */
+
+ erts_smp_atomic_set(&erts_dead_ports_ptr, (long) (port_buf + erts_max_ports));
+
+ next_ss = erts_smp_atomic_inctest(&erts_ports_snapshot);
+
+ if (erts_smp_atomic_read(&erts_ports_alive) > 0) {
+ long i;
+ for (i = erts_max_ports-1; i >= 0; i--) {
+ Port* prt = &erts_port[i];
+ erts_smp_port_state_lock(prt);
+ if (!(prt->status & ERTS_PORT_SFLGS_DEAD)
+ && prt->snapshot != next_ss) {
+ ASSERT(prt->snapshot == next_ss - 1);
+ *pp++ = prt->id;
+ prt->snapshot = next_ss; /* Consumed by this snapshot */
+ }
+ erts_smp_port_state_unlock(prt);
+ }
+ }
+
+ dead_ports = (Eterm*)erts_smp_atomic_xchg(&erts_dead_ports_ptr,
+ (long)NULL);
+ erts_smp_mtx_unlock(&ports_snapshot_mtx);
+
+ ASSERT(pp <= dead_ports);
+
+ alive = pp - port_buf;
+ dead = port_buf + erts_max_ports - dead_ports;
+
+ ASSERT((alive+dead) <= erts_max_ports);
+
+ if (alive+dead > 0) {
+ long i;
+ Eterm *hp = HAlloc(BIF_P, (alive+dead)*2);
+
+ for (i = 0; i < alive; i++) {
+ res = CONS(hp, port_buf[i], res);
+ hp += 2;
+ }
+ for (i = 0; i < dead; i++) {
+ res = CONS(hp, dead_ports[i], res);
+ hp += 2;
+ }
+ }
+
+ erts_free(ERTS_ALC_T_TMP, port_buf);
+
+ BIF_RET(res);
+}
+
+/**********************************************************************/
+
+BIF_RETTYPE throw_1(BIF_ALIST_1)
+{
+ BIF_P->fvalue = BIF_ARG_1;
+ BIF_ERROR(BIF_P, EXC_THROWN);
+}
+
+/**********************************************************************/
+
+
+/*
+ * Non-standard, undocumented, dirty BIF, meant for debugging.
+ *
+ */
+BIF_RETTYPE display_1(BIF_ALIST_1)
+{
+ erts_printf("%.*T\n", INT_MAX, BIF_ARG_1);
+ BIF_RET(am_true);
+}
+
+/*
+ * erts_debug:display/1 is for debugging erlang:display/1
+ */
+BIF_RETTYPE erts_debug_display_1(BIF_ALIST_1)
+{
+ int pres;
+ Eterm res;
+ Eterm *hp;
+ erts_dsprintf_buf_t *dsbufp = erts_create_tmp_dsbuf(64);
+ pres = erts_dsprintf(dsbufp, "%.*T\n", INT_MAX, BIF_ARG_1);
+ if (pres < 0)
+	erl_exit(1, "Failed to convert term to string: %d (%s)\n",
+		 -pres, erl_errno_id(-pres));
+ hp = HAlloc(BIF_P, 2*dsbufp->str_len); /* we need length * 2 heap words */
+ res = buf_to_intlist(&hp, dsbufp->str, dsbufp->str_len, NIL);
+ erts_printf("%s", dsbufp->str);
+ erts_destroy_tmp_dsbuf(dsbufp);
+ BIF_RET(res);
+}
+
+
+Eterm
+display_string_1(Process* p, Eterm string)
+{
+ int len = is_string(string);
+ char *str;
+
+ if (len <= 0) {
+ BIF_ERROR(p, BADARG);
+ }
+ str = (char *) erts_alloc(ERTS_ALC_T_TMP, sizeof(char)*(len + 1));
+ if (intlist_to_buf(string, str, len) != len)
+ erl_exit(1, "%s:%d: Internal error\n", __FILE__, __LINE__);
+ str[len] = '\0';
+ erts_fprintf(stderr, "%s", str);
+ erts_free(ERTS_ALC_T_TMP, (void *) str);
+ BIF_RET(am_true);
+}
+
+Eterm
+display_nl_0(Process* p)
+{
+ erts_fprintf(stderr, "\n");
+ BIF_RET(am_true);
+}
+
+/**********************************************************************/
+
+/* stop the system */
+/* ARGSUSED */
+BIF_RETTYPE halt_0(BIF_ALIST_0)
+{
+ VERBOSE(DEBUG_SYSTEM,("System halted by BIF halt/0\n"));
+ erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ erl_exit(0, "");
+ return NIL; /* Pedantic (lint does not know about erl_exit) */
+}
+
+/**********************************************************************/
+
+#define MSG_SIZE 200
+
+/* stop the system with exit code */
+/* ARGSUSED */
+BIF_RETTYPE halt_1(BIF_ALIST_1)
+{
+ Sint code;
+ static char msg[MSG_SIZE];
+ int i;
+
+ if (is_small(BIF_ARG_1) && (code = signed_val(BIF_ARG_1)) >= 0) {
+ VERBOSE(DEBUG_SYSTEM,("System halted by BIF halt(%d)\n", code));
+ erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ erl_exit(-code, "");
+ } else if (is_string(BIF_ARG_1) || BIF_ARG_1 == NIL) {
+ if ((i = intlist_to_buf(BIF_ARG_1, msg, MSG_SIZE-1)) < 0) {
+ goto error;
+ }
+ msg[i] = '\0';
+ VERBOSE(DEBUG_SYSTEM,("System halted by BIF halt(%s)\n", msg));
+ erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ erl_exit(ERTS_DUMP_EXIT, "%s\n", msg);
+ } else {
+ error:
+ BIF_ERROR(BIF_P, BADARG);
+ }
+ return NIL; /* Pedantic (lint does not know about erl_exit) */
+}
+
+BIF_RETTYPE function_exported_3(BIF_ALIST_3)
+{
+ if (is_not_atom(BIF_ARG_1) ||
+ is_not_atom(BIF_ARG_2) ||
+ is_not_small(BIF_ARG_3)) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+ if (erts_find_function(BIF_ARG_1, BIF_ARG_2, signed_val(BIF_ARG_3)) == NULL) {
+ BIF_RET(am_false);
+ }
+ BIF_RET(am_true);
+}
+
+/**********************************************************************/
+
+BIF_RETTYPE is_builtin_3(Process* p, Eterm Mod, Eterm Name, Eterm Arity)
+{
+ if (is_not_atom(Mod) || is_not_atom(Name) || is_not_small(Arity)) {
+ BIF_ERROR(p, BADARG);
+ }
+ BIF_RET(erts_is_builtin(Mod, Name, signed_val(Arity)) ?
+ am_true : am_false);
+}
+
+/**********************************************************************/
+
+/* NOTE: Cannot be used in all *_to_list() BIFs. erts_dsprintf() prints
+ *       some terms in formats other than what is desired as results
+ *       from *_to_list() BIFs.
+ */
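+/* For example (an illustrative assumption, not part of the original
+ * note): float_to_list/1 must produce "1.00000000000000000000e+00"
+ * for 1.0, whereas the "%T" format would print it as "1.0".
+ */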
+
+static Eterm
+term2list_dsprintf(Process *p, Eterm term)
+{
+ int pres;
+ Eterm res;
+ Eterm *hp;
+ erts_dsprintf_buf_t *dsbufp = erts_create_tmp_dsbuf(64);
+ pres = erts_dsprintf(dsbufp, "%T", term);
+ if (pres < 0)
+	erl_exit(1, "Failed to convert term to list: %d (%s)\n",
+		 -pres, erl_errno_id(-pres));
+ hp = HAlloc(p, 2*dsbufp->str_len); /* we need length * 2 heap words */
+ res = buf_to_intlist(&hp, dsbufp->str, dsbufp->str_len, NIL);
+ erts_destroy_tmp_dsbuf(dsbufp);
+ return res;
+}
+
+BIF_RETTYPE ref_to_list_1(BIF_ALIST_1)
+{
+ if (is_not_ref(BIF_ARG_1))
+ BIF_ERROR(BIF_P, BADARG);
+ BIF_RET(term2list_dsprintf(BIF_P, BIF_ARG_1));
+}
+
+BIF_RETTYPE make_fun_3(BIF_ALIST_3)
+{
+ Eterm* hp;
+ Sint arity;
+
+ if (is_not_atom(BIF_ARG_1) || is_not_atom(BIF_ARG_2) || is_not_small(BIF_ARG_3)) {
+ error:
+ BIF_ERROR(BIF_P, BADARG);
+ }
+ arity = signed_val(BIF_ARG_3);
+ if (arity < 0) {
+ goto error;
+ }
+ hp = HAlloc(BIF_P, 2);
+ hp[0] = HEADER_EXPORT;
+ hp[1] = (Eterm) erts_export_get_or_make_stub(BIF_ARG_1, BIF_ARG_2, (Uint) arity);
+ BIF_RET(make_export(hp));
+}
+
+Eterm
+fun_to_list_1(Process* p, Eterm fun)
+{
+ if (is_not_any_fun(fun))
+ BIF_ERROR(p, BADARG);
+ BIF_RET(term2list_dsprintf(p, fun));
+}
+
+/**********************************************************************/
+
+/* convert a pid to an erlang list (a string built of linked cons cells)
+   of the form <node.number.serial>
+ */
+
+BIF_RETTYPE pid_to_list_1(BIF_ALIST_1)
+{
+ if (is_not_pid(BIF_ARG_1))
+ BIF_ERROR(BIF_P, BADARG);
+ BIF_RET(term2list_dsprintf(BIF_P, BIF_ARG_1));
+}
+
+BIF_RETTYPE port_to_list_1(BIF_ALIST_1)
+{
+ if (is_not_port(BIF_ARG_1))
+ BIF_ERROR(BIF_P, BADARG);
+ BIF_RET(term2list_dsprintf(BIF_P, BIF_ARG_1));
+}
+
+/**********************************************************************/
+
+/* convert a list of ascii characters of the form
+ <node.number.serial> to a PID
+*/
+
+BIF_RETTYPE list_to_pid_1(BIF_ALIST_1)
+{
+ Uint a = 0, b = 0, c = 0;
+ char* cp;
+ int i;
+ DistEntry *dep = NULL;
+ char *buf = (char *) erts_alloc(ERTS_ALC_T_TMP, 65);
+ /*
+     * A maximal 'Uint64' has 20 decimal digits. If X, Y and Z in <X.Y.Z>
+     * are 'Uint64's, the maximum number of characters is
+     * 1 + 20 + 1 + 20 + 1 + 20 + 1 = 64, i.e. if the input list is
+     * longer than 64 characters it cannot represent a pid.
+ */
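+    /* For example, "<0.35.0>" parses as a = 0 (the local channel/node),
+     * b = 35 (process number) and c = 0 (serial). */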
+
+ /* walk down the list and create a C string */
+ if ((i = intlist_to_buf(BIF_ARG_1, buf, 64)) < 0)
+ goto bad;
+
+    buf[i] = '\0';		/* null terminate */
+
+ cp = buf;
+ if (*cp++ != '<') goto bad;
+
+ if (*cp < '0' || *cp > '9') goto bad;
+ while(*cp >= '0' && *cp <= '9') { a = 10*a + (*cp - '0'); cp++; }
+
+ if (*cp++ != '.') goto bad;
+
+ if (*cp < '0' || *cp > '9') goto bad;
+ while(*cp >= '0' && *cp <= '9') { b = 10*b + (*cp - '0'); cp++; }
+
+ if (*cp++ != '.') goto bad;
+
+ if (*cp < '0' || *cp > '9') goto bad;
+ while(*cp >= '0' && *cp <= '9') { c = 10*c + (*cp - '0'); cp++; }
+
+ if (*cp++ != '>') goto bad;
+ if (*cp != '\0') goto bad;
+
+ erts_free(ERTS_ALC_T_TMP, (void *) buf);
+ buf = NULL;
+
+ /* <a.b.c> a = node, b = process number, c = serial */
+
+ dep = erts_channel_no_to_dist_entry(a);
+
+ if (!dep)
+ goto bad;
+
+
+ if (c > ERTS_MAX_PID_SERIAL || b > ERTS_MAX_PID_NUMBER)
+ goto bad;
+
+ if(dep == erts_this_dist_entry) {
+ erts_deref_dist_entry(dep);
+ BIF_RET(make_internal_pid(make_pid_data(c, b)));
+ }
+ else {
+ ExternalThing *etp;
+ ErlNode *enp;
+
+ if (is_nil(dep->cid))
+ goto bad;
+
+ enp = erts_find_or_insert_node(dep->sysname, dep->creation);
+
+ etp = (ExternalThing *) HAlloc(BIF_P, EXTERNAL_THING_HEAD_SIZE + 1);
+ etp->header = make_external_pid_header(1);
+ etp->next = MSO(BIF_P).externals;
+ etp->node = enp;
+ etp->data.ui[0] = make_pid_data(c, b);
+
+ MSO(BIF_P).externals = etp;
+ erts_deref_dist_entry(dep);
+ BIF_RET(make_external_pid(etp));
+ }
+
+ bad:
+ if (dep)
+ erts_deref_dist_entry(dep);
+ if (buf)
+ erts_free(ERTS_ALC_T_TMP, (void *) buf);
+ BIF_ERROR(BIF_P, BADARG);
+}
+
+/**********************************************************************/
+
+BIF_RETTYPE group_leader_0(BIF_ALIST_0)
+{
+ BIF_RET(BIF_P->group_leader);
+}
+
+/**********************************************************************/
+/* arg1 == leader, arg2 == new member */
+
+BIF_RETTYPE group_leader_2(BIF_ALIST_2)
+{
+ Process* new_member;
+
+ if (is_not_pid(BIF_ARG_1)) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+
+ if (is_external_pid(BIF_ARG_2)) {
+ DistEntry *dep;
+ int code;
+ ErtsDSigData dsd;
+ dep = external_pid_dist_entry(BIF_ARG_2);
+ if(dep == erts_this_dist_entry)
+ BIF_ERROR(BIF_P, BADARG);
+
+ code = erts_dsig_prepare(&dsd, dep, BIF_P, ERTS_DSP_NO_LOCK, 0);
+ switch (code) {
+ case ERTS_DSIG_PREP_NOT_ALIVE:
+ BIF_RET(am_true);
+ case ERTS_DSIG_PREP_NOT_CONNECTED:
+ BIF_TRAP2(dgroup_leader_trap, BIF_P, BIF_ARG_1, BIF_ARG_2);
+ case ERTS_DSIG_PREP_CONNECTED:
+ code = erts_dsig_send_group_leader(&dsd, BIF_ARG_1, BIF_ARG_2);
+ if (code == ERTS_DSIG_SEND_YIELD)
+ ERTS_BIF_YIELD_RETURN(BIF_P, am_true);
+ BIF_RET(am_true);
+ default:
+ ASSERT(! "Invalid dsig prepare result");
+ BIF_ERROR(BIF_P, EXC_INTERNAL_ERROR);
+ }
+ }
+ else if (is_internal_pid(BIF_ARG_2)) {
+ int await_x;
+ ErtsProcLocks locks = ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_STATUS;
+ new_member = erts_pid2proc_nropt(BIF_P, ERTS_PROC_LOCK_MAIN,
+ BIF_ARG_2, locks);
+ if (!new_member)
+ BIF_ERROR(BIF_P, BADARG);
+
+ if (new_member == ERTS_PROC_LOCK_BUSY)
+ ERTS_BIF_YIELD2(bif_export[BIF_group_leader_2], BIF_P,
+ BIF_ARG_1, BIF_ARG_2);
+
+ await_x = (new_member != BIF_P
+ && ERTS_PROC_PENDING_EXIT(new_member));
+ if (!await_x) {
+ if (is_immed(BIF_ARG_1))
+ new_member->group_leader = BIF_ARG_1;
+ else {
+ locks &= ~ERTS_PROC_LOCK_STATUS;
+ erts_smp_proc_unlock(new_member, ERTS_PROC_LOCK_STATUS);
+ new_member->group_leader = STORE_NC_IN_PROC(new_member,
+ BIF_ARG_1);
+ }
+ }
+
+ if (new_member == BIF_P)
+ locks &= ~ERTS_PROC_LOCK_MAIN;
+ if (locks)
+ erts_smp_proc_unlock(new_member, locks);
+
+ if (await_x) {
+ /* Wait for new_member to terminate; then badarg */
+ Eterm args[2] = {BIF_ARG_1, BIF_ARG_2};
+ ERTS_BIF_AWAIT_X_APPLY_TRAP(BIF_P,
+ BIF_ARG_2,
+ am_erlang,
+ am_group_leader,
+ args,
+ 2);
+ }
+
+ BIF_RET(am_true);
+ }
+ else {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+}
+
+BIF_RETTYPE system_flag_2(BIF_ALIST_2)
+{
+ Sint n;
+
+ if (BIF_ARG_1 == am_multi_scheduling) {
+ if (BIF_ARG_2 == am_block || BIF_ARG_2 == am_unblock) {
+#ifndef ERTS_SMP
+ BIF_RET(am_disabled);
+#else
+ if (erts_no_schedulers == 1)
+ BIF_RET(am_disabled);
+ else {
+ switch (erts_block_multi_scheduling(BIF_P,
+ ERTS_PROC_LOCK_MAIN,
+ BIF_ARG_2 == am_block,
+ 0)) {
+ case ERTS_SCHDLR_SSPND_DONE_MSCHED_BLOCKED:
+ BIF_RET(am_blocked);
+ case ERTS_SCHDLR_SSPND_YIELD_DONE_MSCHED_BLOCKED:
+ ERTS_BIF_YIELD_RETURN_X(BIF_P, am_blocked,
+ am_multi_scheduling);
+ case ERTS_SCHDLR_SSPND_DONE:
+ BIF_RET(am_enabled);
+ case ERTS_SCHDLR_SSPND_YIELD_RESTART:
+ ERTS_VBUMP_ALL_REDS(BIF_P);
+ BIF_TRAP2(bif_export[BIF_system_flag_2],
+ BIF_P, BIF_ARG_1, BIF_ARG_2);
+ case ERTS_SCHDLR_SSPND_YIELD_DONE:
+ ERTS_BIF_YIELD_RETURN_X(BIF_P, am_enabled,
+ am_multi_scheduling);
+ case ERTS_SCHDLR_SSPND_EINVAL:
+ goto error;
+ default:
+ ASSERT(0);
+ BIF_ERROR(BIF_P, EXC_INTERNAL_ERROR);
+ break;
+ }
+ }
+#endif
+ }
+ } else if (BIF_ARG_1 == am_schedulers_online) {
+#ifndef ERTS_SMP
+ if (BIF_ARG_2 != make_small(1))
+ goto error;
+ else
+ BIF_RET(make_small(1));
+#else
+ Sint old_no;
+ if (!is_small(BIF_ARG_2))
+ goto error;
+ switch (erts_set_schedulers_online(BIF_P,
+ ERTS_PROC_LOCK_MAIN,
+ signed_val(BIF_ARG_2),
+ &old_no)) {
+ case ERTS_SCHDLR_SSPND_DONE:
+ BIF_RET(make_small(old_no));
+ case ERTS_SCHDLR_SSPND_YIELD_RESTART:
+ ERTS_VBUMP_ALL_REDS(BIF_P);
+ BIF_TRAP2(bif_export[BIF_system_flag_2],
+ BIF_P, BIF_ARG_1, BIF_ARG_2);
+ case ERTS_SCHDLR_SSPND_YIELD_DONE:
+ ERTS_BIF_YIELD_RETURN_X(BIF_P, make_small(old_no),
+ am_schedulers_online);
+ case ERTS_SCHDLR_SSPND_EINVAL:
+ goto error;
+ default:
+ ASSERT(0);
+ BIF_ERROR(BIF_P, EXC_INTERNAL_ERROR);
+ break;
+ }
+#endif
+ } else if (BIF_ARG_1 == am_fullsweep_after) {
+ Uint16 nval;
+ Uint oval;
+ if (!is_small(BIF_ARG_2) || (n = signed_val(BIF_ARG_2)) < 0) {
+ goto error;
+ }
+ nval = (n > (Sint) ((Uint16) -1)) ? ((Uint16) -1) : ((Uint16) n);
+ oval = (Uint) erts_smp_atomic_xchg(&erts_max_gen_gcs, (long) nval);
+ BIF_RET(make_small(oval));
+ } else if (BIF_ARG_1 == am_min_heap_size) {
+ int oval = H_MIN_SIZE;
+ if (!is_small(BIF_ARG_2) || (n = signed_val(BIF_ARG_2)) < 0) {
+ goto error;
+ }
+ H_MIN_SIZE = erts_next_heap_size(n, 0);
+ BIF_RET(make_small(oval));
+ } else if (BIF_ARG_1 == am_display_items) {
+ int oval = display_items;
+ if (!is_small(BIF_ARG_2) || (n = signed_val(BIF_ARG_2)) < 0) {
+ goto error;
+ }
+ display_items = n < 32 ? 32 : n;
+ BIF_RET(make_small(oval));
+ } else if (BIF_ARG_1 == am_debug_flags) {
+ BIF_RET(am_true);
+ } else if (BIF_ARG_1 == am_backtrace_depth) {
+ int oval = erts_backtrace_depth;
+ if (!is_small(BIF_ARG_2) || (n = signed_val(BIF_ARG_2)) < 0) {
+ goto error;
+ }
+ if (n > MAX_BACKTRACE_SIZE) n = MAX_BACKTRACE_SIZE;
+ erts_backtrace_depth = n;
+ BIF_RET(make_small(oval));
+ } else if (BIF_ARG_1 == am_trace_control_word) {
+ BIF_RET(db_set_trace_control_word_1(BIF_P, BIF_ARG_2));
+ } else if (BIF_ARG_1 == am_sequential_tracer) {
+ Eterm old_value = erts_set_system_seq_tracer(BIF_P,
+ ERTS_PROC_LOCK_MAIN,
+ BIF_ARG_2);
+ if (old_value != THE_NON_VALUE) {
+ BIF_RET(old_value);
+ }
+ } else if (BIF_ARG_1 == make_small(1)) {
+ Uint i;
+ ErlMessage* mp;
+ erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ erts_smp_block_system(0);
+
+ for (i = 0; i < erts_max_processes; i++) {
+ if (process_tab[i] != (Process*) 0) {
+ Process* p = process_tab[i];
+ p->seq_trace_token = NIL;
+ p->seq_trace_clock = 0;
+ p->seq_trace_lastcnt = 0;
+ ERTS_SMP_MSGQ_MV_INQ2PRIVQ(p);
+ mp = p->msg.first;
+ while(mp != NULL) {
+ ERL_MESSAGE_TOKEN(mp) = NIL;
+ mp = mp->next;
+ }
+ }
+ }
+
+ erts_smp_release_system();
+ erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
+
+ BIF_RET(am_true);
+ } else if (ERTS_IS_ATOM_STR("scheduling_statistics", BIF_ARG_1)) {
+ int what;
+ if (ERTS_IS_ATOM_STR("disable", BIF_ARG_2))
+ what = ERTS_SCHED_STAT_MODIFY_DISABLE;
+ else if (ERTS_IS_ATOM_STR("enable", BIF_ARG_2))
+ what = ERTS_SCHED_STAT_MODIFY_ENABLE;
+ else if (ERTS_IS_ATOM_STR("clear", BIF_ARG_2))
+ what = ERTS_SCHED_STAT_MODIFY_CLEAR;
+ else
+ goto error;
+ erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ erts_sched_stat_modify(what);
+ erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ BIF_RET(am_true);
+ } else if (ERTS_IS_ATOM_STR("internal_cpu_topology", BIF_ARG_1)) {
+ Eterm res = erts_set_cpu_topology(BIF_P, BIF_ARG_2);
+ if (is_value(res))
+ BIF_RET(res);
+ } else if (ERTS_IS_ATOM_STR("cpu_topology", BIF_ARG_1)) {
+ BIF_TRAP1(set_cpu_topology_trap, BIF_P, BIF_ARG_2);
+ } else if (ERTS_IS_ATOM_STR("scheduler_bind_type", BIF_ARG_1)) {
+ return erts_bind_schedulers(BIF_P, BIF_ARG_2);
+ }
+ error:
+ BIF_ERROR(BIF_P, BADARG);
+}
+
+/**********************************************************************/
+
+BIF_RETTYPE hash_2(BIF_ALIST_2)
+{
+ Uint32 hash;
+ Sint range;
+
+ if (is_not_small(BIF_ARG_2)) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+ if ((range = signed_val(BIF_ARG_2)) <= 0) { /* [1..MAX_SMALL] */
+ BIF_ERROR(BIF_P, BADARG);
+ }
+#ifdef ARCH_64
+ if (range > ((1L << 27) - 1))
+ BIF_ERROR(BIF_P, BADARG);
+#endif
+ hash = make_broken_hash(BIF_ARG_1);
+ BIF_RET(make_small(1 + (hash % range))); /* [1..range] */
+}
+
+BIF_RETTYPE phash_2(BIF_ALIST_2)
+{
+ Uint32 hash;
+ Uint32 final_hash;
+ Uint32 range;
+
+    /* Check for the special case 2^32: reducing a 32-bit hash modulo
+       2^32 is the identity, so range == 0 is used to encode 2^32. */
+ if (term_equals_2pow32(BIF_ARG_2)) {
+ range = 0;
+ } else {
+ Uint u;
+ if (!term_to_Uint(BIF_ARG_2, &u) || ((u >> 16) >> 16) != 0 || !u) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+ range = (Uint32) u;
+ }
+ hash = make_hash(BIF_ARG_1);
+ if (range) {
+ final_hash = 1 + (hash % range); /* [1..range] */
+ } else if ((final_hash = hash + 1) == 0) {
+	/*
+	 * XXX In this case, there will still be an ArithAlloc() in erts_mixed_plus().
+	 */
+ BIF_RET(erts_mixed_plus(BIF_P,
+ erts_make_integer(hash, BIF_P),
+ make_small(1)));
+ }
+
+ BIF_RET(erts_make_integer(final_hash, BIF_P));
+}
+
+BIF_RETTYPE phash2_1(BIF_ALIST_1)
+{
+ Uint32 hash;
+
+ hash = make_hash2(BIF_ARG_1);
+ BIF_RET(make_small(hash & ((1L << 27) - 1)));
+}
+
+BIF_RETTYPE phash2_2(BIF_ALIST_2)
+{
+ Uint32 hash;
+ Uint32 final_hash;
+ Uint32 range;
+
+    /* Check for the special case 2^32: reducing a 32-bit hash modulo
+       2^32 is the identity, so range == 0 is used to encode 2^32. */
+ if (term_equals_2pow32(BIF_ARG_2)) {
+ range = 0;
+ } else {
+ Uint u;
+ if (!term_to_Uint(BIF_ARG_2, &u) || ((u >> 16) >> 16) != 0 || !u) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+ range = (Uint32) u;
+ }
+ hash = make_hash2(BIF_ARG_1);
+ if (range) {
+ final_hash = hash % range; /* [0..range-1] */
+ } else {
+ final_hash = hash;
+ }
+ /*
+ * Return either a small or a big. Use the heap for bigs if there is room.
+ */
+#ifdef ARCH_64
+ BIF_RET(make_small(final_hash));
+#else
+ if (IS_USMALL(0, final_hash)) {
+ BIF_RET(make_small(final_hash));
+ } else {
+ Eterm* hp = HAlloc(BIF_P, BIG_UINT_HEAP_SIZE);
+ BIF_RET(uint_to_big(final_hash, hp));
+ }
+#endif
+}
+
+BIF_RETTYPE bump_reductions_1(BIF_ALIST_1)
+{
+ Sint reds;
+
+ if (is_not_small(BIF_ARG_1) || ((reds = signed_val(BIF_ARG_1)) < 0)) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+
+ if (reds > CONTEXT_REDS) {
+ reds = CONTEXT_REDS;
+ }
+ BIF_RET2(am_true, reds);
+}
+
+/*
+ * Processes that yield on return from a BIF end up in bif_return_trap().
+ */
+static BIF_RETTYPE bif_return_trap(
+#ifdef DEBUG
+ BIF_ALIST_2
+#else
+ BIF_ALIST_1
+#endif
+ )
+{
+#ifdef DEBUG
+ switch (BIF_ARG_2) {
+ case am_multi_scheduling:
+#ifdef ERTS_SMP
+ erts_dbg_multi_scheduling_return_trap(BIF_P, BIF_ARG_1);
+#endif
+ break;
+ case am_schedulers_online:
+ break;
+ default:
+ break;
+ }
+#endif
+
+ BIF_RET(BIF_ARG_1);
+}
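+
+/*
+ * A minimal usage sketch (illustrative only; my_bif_1 and do_some_work
+ * are hypothetical): a BIF that has used up its time slice yields via
+ * ERTS_BIF_YIELD_RETURN() (see bif.h), which bumps all remaining
+ * reductions and traps to bif_return_trap/1 with the result as argument:
+ *
+ *     BIF_RETTYPE my_bif_1(BIF_ALIST_1)
+ *     {
+ *         Eterm result = do_some_work(BIF_P, BIF_ARG_1);
+ *         if (ERTS_BIF_REDS_LEFT(BIF_P) == 0)
+ *             ERTS_BIF_YIELD_RETURN(BIF_P, result);
+ *         BIF_RET(result);
+ *     }
+ *
+ * In debug builds the trap carries a second argument identifying the
+ * yielding operation; cf. ERTS_BIF_YIELD_RETURN_X() in bif.h and the
+ * arity switch in erts_init_bif() below.
+ */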
+
+/*
+ * NOTE: The erts_bif_prep_await_proc_exit_*() functions are
+ * tightly coupled with the implementation of erlang:await_proc_exit/3.
+ * They can safely call skip_current_msgq() since they know that
+ * erlang:await_proc_exit/3 unconditionally sets up a monitor and then
+ * unconditionally waits for the corresponding 'DOWN' message in a
+ * receive, with no other receive performed before that one. This
+ * optimization removes an unnecessary scan of the existing message
+ * queue (which can be large). If the erlang:await_proc_exit/3
+ * implementation is changed so that the above no longer holds, nasty
+ * bugs may appear in later receives.
+ */
+
+static ERTS_INLINE int
+skip_current_msgq(Process *c_p)
+{
+ int res;
+#if defined(ERTS_ENABLE_LOCK_CHECK) && defined(ERTS_SMP)
+ erts_proc_lc_chk_only_proc_main(c_p);
+#endif
+
+ erts_smp_proc_lock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE);
+ if (ERTS_PROC_PENDING_EXIT(c_p)) {
+ KILL_CATCHES(c_p);
+ c_p->freason = EXC_EXIT;
+ res = 0;
+ }
+ else {
+ ERTS_SMP_MSGQ_MV_INQ2PRIVQ(c_p);
+ c_p->msg.save = c_p->msg.last;
+ res = 1;
+ }
+ erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE);
+ return res;
+}
+
+void
+erts_bif_prep_await_proc_exit_data_trap(Process *c_p, Eterm pid, Eterm ret)
+{
+ if (skip_current_msgq(c_p)) {
+ Eterm unused;
+ ERTS_BIF_PREP_TRAP3(unused, await_proc_exit_trap, c_p, pid, am_data, ret);
+ }
+}
+
+void
+erts_bif_prep_await_proc_exit_reason_trap(Process *c_p, Eterm pid)
+{
+ if (skip_current_msgq(c_p)) {
+ Eterm unused;
+ ERTS_BIF_PREP_TRAP3(unused, await_proc_exit_trap, c_p,
+ pid, am_reason, am_undefined);
+ }
+}
+
+void
+erts_bif_prep_await_proc_exit_apply_trap(Process *c_p,
+ Eterm pid,
+ Eterm module,
+ Eterm function,
+ Eterm args[],
+ int nargs)
+{
+ ASSERT(is_atom(module) && is_atom(function));
+ if (skip_current_msgq(c_p)) {
+ Eterm unused;
+ Eterm term;
+ Eterm *hp;
+ int i;
+
+ hp = HAlloc(c_p, 4+2*nargs);
+ term = NIL;
+ for (i = nargs-1; i >= 0; i--) {
+ term = CONS(hp, args[i], term);
+ hp += 2;
+ }
+ term = TUPLE3(hp, module, function, term);
+ ERTS_BIF_PREP_TRAP3(unused, await_proc_exit_trap, c_p, pid, am_apply, term);
+ }
+}
+
+Export bif_return_trap_export;
+
+void erts_init_bif(void)
+{
+ reference0 = 0;
+ reference1 = 0;
+ reference2 = 0;
+
+ erts_smp_spinlock_init(&make_ref_lock, "make_ref");
+ erts_smp_mtx_init(&ports_snapshot_mtx, "ports_snapshot");
+ erts_smp_atomic_init(&erts_dead_ports_ptr, (long)NULL);
+
+ /*
+     * bif_return_trap/1 is a hidden BIF that BIFs trap to when they
+     * need to yield the calling process. The only thing it does is
+     * return the value passed as its argument.
+ */
+ sys_memset((void *) &bif_return_trap_export, 0, sizeof(Export));
+ bif_return_trap_export.address = &bif_return_trap_export.code[3];
+ bif_return_trap_export.code[0] = am_erlang;
+ bif_return_trap_export.code[1] = am_bif_return_trap;
+#ifdef DEBUG
+ bif_return_trap_export.code[2] = 2;
+#else
+ bif_return_trap_export.code[2] = 1;
+#endif
+ bif_return_trap_export.code[3] = (Eterm) em_apply_bif;
+ bif_return_trap_export.code[4] = (Eterm) &bif_return_trap;
+
+ flush_monitor_message_trap = erts_export_put(am_erlang,
+ am_flush_monitor_message,
+ 2);
+
+ set_cpu_topology_trap = erts_export_put(am_erlang,
+ am_set_cpu_topology,
+ 1);
+ erts_format_cpu_topology_trap = erts_export_put(am_erlang,
+ am_format_cpu_topology,
+ 1);
+ await_proc_exit_trap = erts_export_put(am_erlang,am_await_proc_exit,3);
+}
+
+BIF_RETTYPE blocking_read_file_1(BIF_ALIST_1)
+{
+ Eterm bin;
+ Eterm* hp;
+ byte *buff;
+ int i, buff_size;
+ FILE *file;
+ struct stat file_info;
+ char *filename = NULL;
+
+ i = list_length(BIF_ARG_1);
+ if (i < 0) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+ filename = erts_alloc(ERTS_ALC_T_TMP, i + 1);
+ if (intlist_to_buf(BIF_ARG_1, filename, i) != i)
+ erl_exit(1, "%s:%d: Internal error\n", __FILE__, __LINE__);
+ filename[i] = '\0';
+
+ hp = HAlloc(BIF_P, 3);
+
+ file = fopen(filename, "r");
+ if(file == NULL){
+ erts_free(ERTS_ALC_T_TMP, (void *) filename);
+ BIF_RET(TUPLE2(hp, am_error, am_nofile));
+ }
+
+ stat(filename, &file_info);
+ erts_free(ERTS_ALC_T_TMP, (void *) filename);
+
+ buff_size = file_info.st_size;
+ buff = (byte *) erts_alloc_fnf(ERTS_ALC_T_TMP, buff_size);
+ if (!buff) {
+ fclose(file);
+ BIF_RET(TUPLE2(hp, am_error, am_allocator));
+ }
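+    /* XXX: the result of fread() is not checked; a short read would
+     * leave the tail of the buffer uninitialized. */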
+ fread(buff, 1, buff_size, file);
+ fclose(file);
+ bin = new_binary(BIF_P, buff, buff_size);
+ erts_free(ERTS_ALC_T_TMP, (void *) buff);
+
+ BIF_RET(TUPLE2(hp, am_ok, bin));
+}
+#ifdef HARDDEBUG
+/*
+You'll need this line in bif.tab to be able to use this debug BIF:
+
+bif erlang:send_to_logger/2
+
+*/
+BIF_RETTYPE send_to_logger_2(BIF_ALIST_2)
+{
+ byte *buf;
+ int len;
+    if (!is_atom(BIF_ARG_1) || !(is_list(BIF_ARG_2) ||
+				 is_nil(BIF_ARG_2))) {
+ BIF_ERROR(BIF_P,BADARG);
+ }
+ len = io_list_len(BIF_ARG_2);
+ if (len < 0)
+ BIF_ERROR(BIF_P,BADARG);
+ else if (len == 0)
+	buf = (byte *) "";
+ else {
+#ifdef DEBUG
+ int len2;
+#endif
+ buf = (byte *) erts_alloc(ERTS_ALC_T_TMP, len+1);
+#ifdef DEBUG
+ len2 =
+#else
+ (void)
+#endif
+ io_list_to_buf(BIF_ARG_2, buf, len);
+ ASSERT(len2 == len);
+ buf[len] = '\0';
+ switch (BIF_ARG_1) {
+ case am_info:
+ erts_send_info_to_logger(BIF_P->group_leader, buf, len);
+ break;
+ case am_warning:
+ erts_send_warning_to_logger(BIF_P->group_leader, buf, len);
+ break;
+ case am_error:
+ erts_send_error_to_logger(BIF_P->group_leader, buf, len);
+ break;
+ default:
+ {
+ BIF_ERROR(BIF_P,BADARG);
+ }
+ }
+ erts_free(ERTS_ALC_T_TMP, (void *) buf);
+ }
+ BIF_RET(am_true);
+}
+#endif /* HARDDEBUG */
+
+BIF_RETTYPE get_module_info_1(BIF_ALIST_1)
+{
+ Eterm ret = erts_module_info_0(BIF_P, BIF_ARG_1);
+
+ if (is_non_value(ret)) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+ BIF_RET(ret);
+}
+
+
+BIF_RETTYPE get_module_info_2(BIF_ALIST_2)
+{
+ Eterm ret = erts_module_info_1(BIF_P, BIF_ARG_1, BIF_ARG_2);
+
+ if (is_non_value(ret)) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+ BIF_RET(ret);
+}
diff --git a/erts/emulator/beam/bif.h b/erts/emulator/beam/bif.h
new file mode 100644
index 0000000000..05e9b78c28
--- /dev/null
+++ b/erts/emulator/beam/bif.h
@@ -0,0 +1,386 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 1996-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+#ifndef __BIF_H__
+#define __BIF_H__
+
+extern Export* erts_format_cpu_topology_trap;
+
+#define BIF_RETTYPE Eterm
+
+#define BIF_P A__p
+
+#define BIF_ALIST_0 Process* A__p
+#define BIF_ALIST_1 Process* A__p, Eterm A_1
+#define BIF_ALIST_2 Process* A__p, Eterm A_1, Eterm A_2
+#define BIF_ALIST_3 Process* A__p, Eterm A_1, Eterm A_2, Eterm A_3
+
+#define BIF_ARG_1 A_1
+#define BIF_ARG_2 A_2
+#define BIF_ARG_3 A_3
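+
+/* Every BIF is implemented as a C function that receives the calling
+ * process plus up to three Eterm arguments; the macros above fix that
+ * calling convention. */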
+
+#define BUMP_ALL_REDS(p) do { \
+ if (!ERTS_PROC_GET_SAVED_CALLS_BUF((p))) \
+ (p)->fcalls = 0; \
+ else \
+ (p)->fcalls = -CONTEXT_REDS; \
+} while(0)
+
+
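+/* Consume all remaining reductions, crediting them to the scheduler's
+ * virtual reduction count (work accounted for without being executed).
+ * Used by the ERTS_BIF_PREP_YIELD*() and ERTS_BIF_YIELD*() macros
+ * below just before trapping out. */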
+#define ERTS_VBUMP_ALL_REDS(p) \
+do { \
+ if (!ERTS_PROC_GET_SAVED_CALLS_BUF((p))) { \
+ if ((p)->fcalls > 0) \
+ ERTS_PROC_GET_SCHDATA((p))->virtual_reds += (p)->fcalls; \
+ (p)->fcalls = 0; \
+ } \
+ else { \
+ if ((p)->fcalls > -CONTEXT_REDS) \
+ ERTS_PROC_GET_SCHDATA((p))->virtual_reds \
+ += ((p)->fcalls - (-CONTEXT_REDS)); \
+ (p)->fcalls = -CONTEXT_REDS; \
+ } \
+} while(0)
+
+#define BUMP_REDS(p, gc) do { \
+ (p)->fcalls -= (gc); \
+ if ((p)->fcalls < 0) { \
+ if (!ERTS_PROC_GET_SAVED_CALLS_BUF((p))) \
+ (p)->fcalls = 0; \
+ else if ((p)->fcalls < -CONTEXT_REDS) \
+ (p)->fcalls = -CONTEXT_REDS; \
+ } \
+} while(0)
+
+
+#define ERTS_VBUMP_REDS(p, reds) \
+do { \
+ if (!ERTS_PROC_GET_SAVED_CALLS_BUF((p))) { \
+ if ((p)->fcalls >= reds) { \
+ (p)->fcalls -= reds; \
+ ERTS_PROC_GET_SCHDATA((p))->virtual_reds += reds; \
+ } \
+ else { \
+ if ((p)->fcalls > 0) \
+ ERTS_PROC_GET_SCHDATA((p))->virtual_reds += (p)->fcalls;\
+ (p)->fcalls = 0; \
+ } \
+ } \
+ else { \
+ if ((p)->fcalls >= reds - CONTEXT_REDS) { \
+ (p)->fcalls -= reds; \
+ ERTS_PROC_GET_SCHDATA((p))->virtual_reds += reds; \
+ } \
+ else { \
+ if ((p)->fcalls > -CONTEXT_REDS) \
+ ERTS_PROC_GET_SCHDATA((p))->virtual_reds \
+ += (p)->fcalls - (-CONTEXT_REDS); \
+ (p)->fcalls = -CONTEXT_REDS; \
+ } \
+ } \
+} while(0)
+
+#define ERTS_BIF_REDS_LEFT(p) \
+ (ERTS_PROC_GET_SAVED_CALLS_BUF((p)) \
+ ? ((p)->fcalls > -CONTEXT_REDS ? ((p)->fcalls - (-CONTEXT_REDS)) : 0)\
+ : ((p)->fcalls > 0 ? (p)->fcalls : 0))
+
+#define BIF_RET2(x, gc) do { \
+ BUMP_REDS(BIF_P, (gc)); \
+ return (x); \
+} while(0)
+
+#define BIF_RET(x) return (x)
+
+#define ERTS_BIF_PREP_RET(Ret, Val) ((Ret) = (Val))
+
+#define BIF_ERROR(p,r) do { \
+ (p)->freason = r; \
+ return THE_NON_VALUE; \
+} while(0)
+
+#define ERTS_BIF_PREP_ERROR(Ret, Proc, Reason) \
+do { \
+ (Proc)->freason = (Reason); \
+ (Ret) = THE_NON_VALUE; \
+} while (0)
+
+
+#define ERTS_BIF_PREP_TRAP0(Ret, Trap, Proc) \
+do { \
+ (Proc)->arity = 0; \
+    (Proc)->def_arg_reg[3] = (Eterm) ((Trap)->address);	\
+ (Proc)->freason = TRAP; \
+ (Ret) = THE_NON_VALUE; \
+} while (0)
+
+#define ERTS_BIF_PREP_TRAP1(Ret, Trap, Proc, A0) \
+do { \
+ (Proc)->arity = 1; \
+ (Proc)->def_arg_reg[0] = (Eterm) (A0); \
+ (Proc)->def_arg_reg[3] = (Eterm) ((Trap)->address); \
+ (Proc)->freason = TRAP; \
+ (Ret) = THE_NON_VALUE; \
+} while (0)
+
+#define ERTS_BIF_PREP_TRAP2(Ret, Trap, Proc, A0, A1) \
+do { \
+ (Proc)->arity = 2; \
+ (Proc)->def_arg_reg[0] = (Eterm) (A0); \
+ (Proc)->def_arg_reg[1] = (Eterm) (A1); \
+ (Proc)->def_arg_reg[3] = (Eterm) ((Trap)->address); \
+ (Proc)->freason = TRAP; \
+ (Ret) = THE_NON_VALUE; \
+} while (0)
+
+#define ERTS_BIF_PREP_TRAP3(Ret, Trap, Proc, A0, A1, A2)\
+do { \
+ (Proc)->arity = 3; \
+ (Proc)->def_arg_reg[0] = (Eterm) (A0); \
+ (Proc)->def_arg_reg[1] = (Eterm) (A1); \
+ (Proc)->def_arg_reg[2] = (Eterm) (A2); \
+ (Proc)->def_arg_reg[3] = (Eterm) ((Trap)->address); \
+ (Proc)->freason = TRAP; \
+ (Ret) = THE_NON_VALUE; \
+} while (0)
+
+#define BIF_TRAP0(p, Trap_) do { \
+ (p)->arity = 0; \
+ (p)->def_arg_reg[3] = (Eterm) ((Trap_)->address); \
+ (p)->freason = TRAP; \
+ return THE_NON_VALUE; \
+ } while(0)
+
+#define BIF_TRAP1(Trap_, p, A0) do { \
+ (p)->arity = 1; \
+ (p)->def_arg_reg[0] = (A0); \
+ (p)->def_arg_reg[3] = (Eterm) ((Trap_)->address); \
+ (p)->freason = TRAP; \
+ return THE_NON_VALUE; \
+ } while(0)
+
+#define BIF_TRAP2(Trap_, p, A0, A1) do { \
+ (p)->arity = 2; \
+ (p)->def_arg_reg[0] = (A0); \
+ (p)->def_arg_reg[1] = (A1); \
+ (p)->def_arg_reg[3] = (Eterm) ((Trap_)->address); \
+ (p)->freason = TRAP; \
+ return THE_NON_VALUE; \
+ } while(0)
+
+#define BIF_TRAP3(Trap_, p, A0, A1, A2) do { \
+ (p)->arity = 3; \
+ (p)->def_arg_reg[0] = (A0); \
+ (p)->def_arg_reg[1] = (A1); \
+ (p)->def_arg_reg[2] = (A2); \
+ (p)->def_arg_reg[3] = (Eterm) ((Trap_)->address); \
+ (p)->freason = TRAP; \
+ return THE_NON_VALUE; \
+ } while(0)
+
+#define BIF_TRAP_CODE_PTR_0(p, Code_) do { \
+ (p)->arity = 0; \
+ (p)->def_arg_reg[3] = (Eterm) (Code_); \
+ (p)->freason = TRAP; \
+ return THE_NON_VALUE; \
+ } while(0)
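+
+/* How the trap macros above work: the trap arguments are stored in the
+ * process' def_arg_reg[] registers, def_arg_reg[3] is set to the code
+ * address of the trap export, and freason is set to TRAP. Returning
+ * THE_NON_VALUE then makes the emulator continue execution of the
+ * process at the trap address. */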
+
+extern Export bif_return_trap_export;
+#ifdef DEBUG
+#define ERTS_BIF_PREP_YIELD_RETURN_X(RET, P, VAL, DEBUG_VAL) \
+do { \
+ ERTS_VBUMP_ALL_REDS(P); \
+ ERTS_BIF_PREP_TRAP2(RET, &bif_return_trap_export, (P), (VAL), \
+ (DEBUG_VAL)); \
+} while (0)
+#else
+#define ERTS_BIF_PREP_YIELD_RETURN_X(RET, P, VAL, DEBUG_VAL) \
+do { \
+ ERTS_VBUMP_ALL_REDS(P); \
+ ERTS_BIF_PREP_TRAP1(RET, &bif_return_trap_export, (P), (VAL)); \
+} while (0)
+#endif
+
+#define ERTS_BIF_PREP_YIELD_RETURN(RET, P, VAL) \
+ ERTS_BIF_PREP_YIELD_RETURN_X(RET, (P), (VAL), am_undefined)
+
+#ifdef DEBUG
+#define ERTS_BIF_YIELD_RETURN_X(P, VAL, DEBUG_VAL) \
+do { \
+ ERTS_VBUMP_ALL_REDS(P); \
+ BIF_TRAP2(&bif_return_trap_export, (P), (VAL), (DEBUG_VAL)); \
+} while (0)
+#else
+#define ERTS_BIF_YIELD_RETURN_X(P, VAL, DEBUG_VAL) \
+do { \
+ ERTS_VBUMP_ALL_REDS(P); \
+ BIF_TRAP1(&bif_return_trap_export, (P), (VAL)); \
+} while (0)
+#endif
+
+#define ERTS_BIF_RETURN_YIELD(P) ERTS_VBUMP_ALL_REDS((P))
+
+#define ERTS_BIF_YIELD_RETURN(P, VAL) \
+ ERTS_BIF_YIELD_RETURN_X((P), (VAL), am_undefined)
+
+#define ERTS_BIF_PREP_YIELD0(RET, TRP, P) \
+do { \
+ ERTS_VBUMP_ALL_REDS((P)); \
+ ERTS_BIF_PREP_TRAP0(RET, (TRP), (P)); \
+} while (0)
+
+#define ERTS_BIF_PREP_YIELD1(RET, TRP, P, A0) \
+do { \
+ ERTS_VBUMP_ALL_REDS((P)); \
+ ERTS_BIF_PREP_TRAP1(RET, (TRP), (P), (A0)); \
+} while (0)
+
+#define ERTS_BIF_PREP_YIELD2(RET, TRP, P, A0, A1) \
+do { \
+ ERTS_VBUMP_ALL_REDS((P)); \
+ ERTS_BIF_PREP_TRAP2(RET, (TRP), (P), (A0), (A1)); \
+} while (0)
+
+#define ERTS_BIF_PREP_YIELD3(RET, TRP, P, A0, A1, A2) \
+do { \
+ ERTS_VBUMP_ALL_REDS((P)); \
+ ERTS_BIF_PREP_TRAP3(RET, (TRP), (P), (A0), (A1), (A2)); \
+} while (0)
+
+#define ERTS_BIF_YIELD0(TRP, P) \
+do { \
+ ERTS_VBUMP_ALL_REDS((P)); \
+ BIF_TRAP0((TRP), (P)); \
+} while (0)
+
+#define ERTS_BIF_YIELD1(TRP, P, A0) \
+do { \
+ ERTS_VBUMP_ALL_REDS((P)); \
+ BIF_TRAP1((TRP), (P), (A0)); \
+} while (0)
+
+#define ERTS_BIF_YIELD2(TRP, P, A0, A1) \
+do { \
+ ERTS_VBUMP_ALL_REDS((P)); \
+ BIF_TRAP2((TRP), (P), (A0), (A1)); \
+} while (0)
+
+#define ERTS_BIF_YIELD3(TRP, P, A0, A1, A2) \
+do { \
+ ERTS_VBUMP_ALL_REDS((P)); \
+ BIF_TRAP3((TRP), (P), (A0), (A1), (A2)); \
+} while (0)
+
+#define ERTS_BIF_EXITED(PROC) \
+do { \
+ KILL_CATCHES((PROC)); \
+ BIF_ERROR((PROC), EXC_EXIT); \
+} while (0)
+
+#define ERTS_BIF_CHK_EXITED(PROC) \
+do { \
+ if (ERTS_PROC_IS_EXITING((PROC))) \
+ ERTS_BIF_EXITED((PROC)); \
+} while (0)
+
+#ifdef ERTS_SMP
+#define ERTS_SMP_BIF_CHK_PENDING_EXIT(P, L) \
+do { \
+ ERTS_SMP_LC_ASSERT((L) == erts_proc_lc_my_proc_locks((P))); \
+ ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN & (L)); \
+ if (!((L) & ERTS_PROC_LOCK_STATUS)) \
+ erts_smp_proc_lock((P), ERTS_PROC_LOCK_STATUS); \
+ if (ERTS_PROC_PENDING_EXIT((P))) { \
+ erts_handle_pending_exit((P), (L)|ERTS_PROC_LOCK_STATUS); \
+ erts_smp_proc_unlock((P), \
+ (((L)|ERTS_PROC_LOCK_STATUS) \
+ & ~ERTS_PROC_LOCK_MAIN)); \
+ ERTS_BIF_EXITED((P)); \
+ } \
+ if (!((L) & ERTS_PROC_LOCK_STATUS)) \
+ erts_smp_proc_unlock((P), ERTS_PROC_LOCK_STATUS); \
+} while (0)
+#else
+#define ERTS_SMP_BIF_CHK_PENDING_EXIT(P, L)
+#endif
+
+/*
+ * The ERTS_BIF_*_AWAIT_X_*_TRAP macros either exit the caller, or
+ * set up a trap to erlang:await_proc_exit/3.
+ *
+ * The caller is required to hold the 'main' lock on C_P. No other locks
+ * are allowed to be held.
+ */
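+
+/* For a usage example, see group_leader/2 in bif.c, which uses
+ * ERTS_BIF_AWAIT_X_APPLY_TRAP() to wait for a process with a pending
+ * exit to terminate before failing with badarg. */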
+
+#define ERTS_BIF_PREP_AWAIT_X_DATA_TRAP(RET, C_P, PID, DATA) \
+do { \
+ erts_bif_prep_await_proc_exit_data_trap((C_P), (PID), (DATA)); \
+ (RET) = THE_NON_VALUE; \
+} while (0)
+
+#define ERTS_BIF_PREP_AWAIT_X_REASON_TRAP(RET, C_P, PID) \
+do { \
+ erts_bif_prep_await_proc_exit_reason_trap((C_P), (PID)); \
+ (RET) = THE_NON_VALUE; \
+} while (0)
+
+#define ERTS_BIF_PREP_AWAIT_X_APPLY_TRAP(RET, C_P, PID, M, F, A, AN) \
+do { \
+ erts_bif_prep_await_proc_exit_apply_trap((C_P), (PID), \
+ (M), (F), (A), (AN)); \
+ (RET) = THE_NON_VALUE; \
+} while (0)
+
+#define ERTS_BIF_AWAIT_X_DATA_TRAP(C_P, PID, DATA) \
+do { \
+ erts_bif_prep_await_proc_exit_data_trap((C_P), (PID), (DATA)); \
+ return THE_NON_VALUE; \
+} while (0)
+
+#define ERTS_BIF_AWAIT_X_REASON_TRAP(C_P, PID) \
+do { \
+ erts_bif_prep_await_proc_exit_reason_trap((C_P), (PID)); \
+ return THE_NON_VALUE; \
+} while (0)
+
+#define ERTS_BIF_AWAIT_X_APPLY_TRAP(C_P, PID, M, F, A, AN) \
+do { \
+ erts_bif_prep_await_proc_exit_apply_trap((C_P), (PID), \
+ (M), (F), (A), (AN)); \
+ return THE_NON_VALUE; \
+} while (0)
+
+void
+erts_bif_prep_await_proc_exit_data_trap(Process *c_p,
+ Eterm pid,
+ Eterm data);
+void
+erts_bif_prep_await_proc_exit_reason_trap(Process *c_p,
+ Eterm pid);
+void
+erts_bif_prep_await_proc_exit_apply_trap(Process *c_p,
+ Eterm pid,
+ Eterm module,
+ Eterm function,
+ Eterm args[],
+ int nargs);
+
+#include "erl_bif_table.h"
+
+#endif
diff --git a/erts/emulator/beam/bif.tab b/erts/emulator/beam/bif.tab
new file mode 100644
index 0000000000..85a729208f
--- /dev/null
+++ b/erts/emulator/beam/bif.tab
@@ -0,0 +1,761 @@
+#
+# %CopyrightBegin%
+#
+# Copyright Ericsson AB 1996-2009. All Rights Reserved.
+#
+# The contents of this file are subject to the Erlang Public License,
+# Version 1.1, (the "License"); you may not use this file except in
+# compliance with the License. You should have received a copy of the
+# Erlang Public License along with this software. If not, it can be
+# retrieved online at http://www.erlang.org/.
+#
+# Software distributed under the License is distributed on an "AS IS"
+# basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+# the License for the specific language governing rights and limitations
+# under the License.
+#
+# %CopyrightEnd%
+#
+
+#
+# File format:
+#
+# Lines starting with '#' are ignored.
+#
+# <bif-decl> ::= "bif" <bif> <C-name>* | "ubif" <bif> <C-name>*
+# <bif> ::= <module> ":" <name> "/" <arity>
+#
+# "ubif" is an unwrapped bif, i.e. a bif without a trace wrapper,
+# or rather: the trace entry point in the export entry is the same
+# as the normal entry point, and no trace wrapper is generated.
+#
+# Important: Use "ubif" for guard BIFs and operators; use "bif" for ordinary BIFs.
+#
+# Add new BIFs to the end of the file. Do not bother adding a "packaged BIF name"
+# (such as 'erl.lang.number'); if/when packages are supported we will add
+# all those names.
+#
+# Note: Guard BIFs require special support in the compiler (to be able to actually
+# call them from within a guard).
+#
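+# For example, the declaration
+#
+#     bif erlang:atom_to_list/1
+#
+# uses the default C name (atom_to_list_1), while
+#
+#     bif 'erl.lang.atom':to_string/1 ebif_atom_to_string_1 atom_to_list_1
+#
+# appears to give the generated entry its own C name (ebif_atom_to_string_1)
+# while reusing the existing C implementation (atom_to_list_1).
+#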
+
+ubif erlang:abs/1
+ubif 'erl.lang.number':abs/1 ebif_abs_1
+bif erlang:adler32/1
+bif 'erl.util.crypt.adler32':sum/1 ebif_adler32_1
+bif erlang:adler32/2
+bif 'erl.util.crypt.adler32':sum/2 ebif_adler32_2
+bif erlang:adler32_combine/3
+bif 'erl.util.crypt.adler32':combine/3 ebif_adler32_combine_3
+bif erlang:apply/3
+bif 'erl.lang':apply/3 ebif_apply_3
+bif erlang:atom_to_list/1
+bif 'erl.lang.atom':to_string/1 ebif_atom_to_string_1 atom_to_list_1
+bif erlang:binary_to_list/1
+bif 'erl.lang.binary':to_list/1 ebif_binary_to_list_1
+bif erlang:binary_to_list/3
+bif 'erl.lang.binary':to_list/3 ebif_binary_to_list_3
+bif erlang:binary_to_term/1
+bif 'erl.lang.binary':to_term/1 ebif_binary_to_term_1
+bif erlang:check_process_code/2
+bif 'erl.system.code':check_process/2 ebif_check_process_code_2
+bif erlang:crc32/1
+bif 'erl.util.crypt.crc32':sum/1 ebif_crc32_1
+bif erlang:crc32/2
+bif 'erl.util.crypt.crc32':sum/2 ebif_crc32_2
+bif erlang:crc32_combine/3
+bif 'erl.util.crypt.crc32':combine/3 ebif_crc32_combine_3
+bif erlang:date/0
+bif 'erl.util.date':today/0 ebif_date_0
+bif erlang:delete_module/1
+bif 'erl.system.code':delete/1 ebif_delete_module_1
+bif erlang:display/1
+bif 'erl.system.debug':display/1 ebif_display_1
+bif erlang:display_string/1
+bif 'erl.system.debug':display_string/1 ebif_display_string_1
+bif erlang:display_nl/0
+bif 'erl.system.debug':display_nl/0 ebif_display_nl_0
+ubif erlang:element/2
+ubif 'erl.lang.tuple':element/2 ebif_element_2
+bif erlang:erase/0
+bif 'erl.lang.proc.pdict':erase/0 ebif_erase_0
+bif erlang:erase/1
+bif 'erl.lang.proc.pdict':erase/1 ebif_erase_1
+bif erlang:exit/1
+bif 'erl.lang':exit/1 ebif_exit_1
+bif erlang:exit/2
+bif 'erl.lang.proc':signal/2 ebif_signal_2 exit_2
+bif erlang:external_size/1
+bif 'erl.lang.term':external_size/1 ebif_external_size_1
+ubif erlang:float/1
+ubif 'erl.lang.number':to_float/1 ebif_to_float_1 float_1
+bif erlang:float_to_list/1
+bif 'erl.lang.float':to_string/1 ebif_float_to_string_1 float_to_list_1
+bif erlang:fun_info/2
+bif 'erl.lang.function':info/2 ebif_fun_info_2
+bif erlang:garbage_collect/0
+bif 'erl.system':garbage_collect/0 ebif_garbage_collect_0
+bif erlang:garbage_collect/1
+bif 'erl.system':garbage_collect/1 ebif_garbage_collect_1
+bif erlang:garbage_collect_message_area/0
+bif 'erl.system':garbage_collect_message_area/0 ebif_garbage_collect_message_area_0
+bif erlang:get/0
+bif 'erl.lang.proc.pdict':get/0 ebif_get_0
+bif erlang:get/1
+bif 'erl.lang.proc.pdict':get/1 ebif_get_1
+bif erlang:get_keys/1
+bif 'erl.lang.proc.pdict':get_keys/1 ebif_get_keys_1
+bif erlang:group_leader/0
+bif 'erl.lang.proc':group_leader/0 ebif_group_leader_0
+bif erlang:group_leader/2
+bif 'erl.lang.proc':set_group_leader/2 ebif_group_leader_2
+bif erlang:halt/0
+bif 'erl.lang.system':halt/0 ebif_halt_0
+bif erlang:halt/1
+bif 'erl.lang.system':halt/1 ebif_halt_1
+bif erlang:phash/2
+bif erlang:phash2/1
+bif erlang:phash2/2
+bif 'erl.lang.term':hash/1 ebif_phash2_1
+bif 'erl.lang.term':hash/2 ebif_phash2_2
+ubif erlang:hd/1
+ubif 'erl.lang.list':hd/1 ebif_hd_1
+bif erlang:integer_to_list/1
+bif 'erl.lang.integer':to_string/1 ebif_integer_to_string_1 integer_to_list_1
+bif erlang:is_alive/0
+bif 'erl.lang.node':is_alive/0 ebif_is_alive_0
+ubif erlang:length/1
+ubif 'erl.lang.list':length/1 ebif_length_1
+bif erlang:link/1
+bif 'erl.lang.proc':link/1 ebif_link_1
+bif erlang:list_to_atom/1
+bif 'erl.lang.atom':from_string/1 ebif_string_to_atom_1 list_to_atom_1
+bif erlang:list_to_binary/1
+bif 'erl.lang.binary':from_list/1 ebif_list_to_binary_1
+bif erlang:list_to_float/1
+bif 'erl.lang.float':from_string/1 ebif_string_to_float_1 list_to_float_1
+bif erlang:list_to_integer/1
+bif 'erl.lang.integer':from_string/1 ebif_string_to_integer_1 list_to_integer_1
+bif erlang:list_to_pid/1
+bif 'erl.lang.proc':string_to_pid/1 ebif_string_to_pid_1 list_to_pid_1
+bif erlang:list_to_tuple/1
+bif 'erl.lang.tuple':from_list/1 ebif_list_to_tuple_1
+bif erlang:load_module/2
+bif 'erl.system.code':load/2 ebif_load_module_2
+bif erlang:loaded/0
+bif 'erl.system.code':loaded/0 ebif_loaded_0
+bif erlang:localtime/0
+bif 'erl.util.date':local/0 ebif_localtime_0
+bif erlang:localtime_to_universaltime/2
+bif 'erl.util.date':local_to_utc/2 ebif_localtime_to_universaltime_2
+bif erlang:make_ref/0
+bif 'erl.lang.ref':new/0 ebif_make_ref_0
+bif erlang:md5/1
+bif 'erl.util.crypt.md5':digest/1 ebif_md5_1
+bif erlang:md5_init/0
+bif 'erl.util.crypt.md5':init/0 ebif_md5_init_0
+bif erlang:md5_update/2
+bif 'erl.util.crypt.md5':update/2 ebif_md5_update_2
+bif erlang:md5_final/1
+bif 'erl.util.crypt.md5':final/1 ebif_md5_final_1
+bif erlang:memory/0
+bif 'erl.lang':memory/0 ebif_memory_0
+bif erlang:memory/1
+bif 'erl.lang':memory/1 ebif_memory_1
+bif erlang:module_loaded/1
+bif 'erl.system.code':is_loaded/1 ebif_is_loaded_1 module_loaded_1
+bif erlang:function_exported/3
+bif 'erl.system.code':is_loaded/3 ebif_is_loaded_3 function_exported_3
+bif erlang:monitor_node/2
+bif 'erl.lang.node':monitor/2 ebif_monitor_node_2
+bif erlang:monitor_node/3
+bif 'erl.lang.node':monitor/3 ebif_monitor_node_3
+ubif erlang:node/1
+ubif 'erl.lang.node':node/1 ebif_node_1
+ubif erlang:node/0
+ubif 'erl.lang.node':node/0 ebif_node_0
+bif erlang:nodes/1
+bif 'erl.lang.node':nodes/1 ebif_nodes_1
+bif erlang:now/0
+bif 'erl.system':now/0 ebif_now_0
+
+bif erlang:open_port/2
+bif 'erl.lang.port':open/2 ebif_open_port_2 open_port_2
+
+bif erlang:pid_to_list/1
+bif 'erl.lang.proc':pid_to_string/1 ebif_pid_to_string_1 pid_to_list_1
+bif erlang:port_info/1
+bif 'erl.lang.port':info/1 ebif_port_info_1
+bif erlang:port_info/2
+bif 'erl.lang.port':info/2 ebif_port_info_2
+bif erlang:ports/0
+bif 'erl.lang.node':ports/0 ebif_ports_0
+bif erlang:pre_loaded/0
+bif 'erl.system.code':preloaded/0 ebif_pre_loaded_0
+bif erlang:process_flag/2
+bif 'erl.lang.proc':set_flag/2 ebif_process_flag_2
+bif erlang:process_flag/3
+bif 'erl.lang.proc':set_flag/3 ebif_process_flag_3
+bif erlang:process_info/1
+bif 'erl.lang.proc':info/1 ebif_process_info_1
+bif erlang:process_info/2
+bif 'erl.lang.proc':info/2 ebif_process_info_2
+bif erlang:processes/0
+bif 'erl.lang.node':processes/0 ebif_processes_0
+bif erlang:purge_module/1
+bif 'erl.system.code':purge/1 ebif_purge_module_1
+bif erlang:put/2
+bif 'erl.lang.proc.pdict':put/2 ebif_put_2
+bif erlang:register/2
+bif 'erl.lang.node':register/2 ebif_register_2
+bif erlang:registered/0
+bif 'erl.lang.node':registered/0 ebif_registered_0
+ubif erlang:round/1
+ubif 'erl.lang.number':round/1 ebif_round_1
+ubif erlang:self/0
+ubif 'erl.lang.proc':self/0 ebif_self_0
+bif erlang:setelement/3
+bif 'erl.lang.tuple':setelement/3 ebif_setelement_3
+ubif erlang:size/1
+ubif 'erl.lang.term':size/1 ebif_size_1
+bif erlang:spawn/3
+bif 'erl.lang.proc':spawn/3 ebif_spawn_3
+bif erlang:spawn_link/3
+bif 'erl.lang.proc':spawn_link/3 ebif_spawn_link_3
+bif erlang:split_binary/2
+bif 'erl.lang.binary':split/2 ebif_split_binary_2
+bif erlang:statistics/1
+bif 'erl.system':statistics/1 ebif_statistics_1
+bif erlang:term_to_binary/1
+bif 'erl.lang.binary':from_term/1 ebif_term_to_binary_1
+bif erlang:term_to_binary/2
+bif 'erl.lang.binary':from_term/2 ebif_term_to_binary_2
+bif erlang:throw/1
+bif 'erl.lang':throw/1 ebif_throw_1
+bif erlang:time/0
+bif 'erl.util.date':time_of_day/0 ebif_time_0
+ubif erlang:tl/1
+ubif 'erl.lang.list':tl/1 ebif_tl_1
+ubif erlang:trunc/1
+ubif 'erl.lang.number':trunc/1 ebif_trunc_1
+bif erlang:tuple_to_list/1
+bif 'erl.lang.tuple':to_list/1 ebif_tuple_to_list_1
+bif erlang:universaltime/0
+bif 'erl.util.date':utc/0 ebif_universaltime_0
+bif erlang:universaltime_to_localtime/1
+bif 'erl.util.date':utc_to_local/1 ebif_universaltime_to_localtime_1
+bif erlang:unlink/1
+bif 'erl.lang.proc':unlink/1 ebif_unlink_1
+bif erlang:unregister/1
+bif 'erl.lang.node':unregister/1 ebif_unregister_1
+bif erlang:whereis/1
+bif 'erl.lang.node':whereis/1 ebif_whereis_1
+bif erlang:spawn_opt/1
+bif 'erl.lang.proc':spawn_opt/1 ebif_spawn_opt_1
+bif erlang:setnode/2
+bif erlang:setnode/3
+bif erlang:dist_exit/3
+
+bif erlang:port_call/2
+bif 'erl.lang.port':call/2 ebif_port_call_2
+bif erlang:port_call/3
+bif 'erl.lang.port':call/3 ebif_port_call_3
+bif erlang:port_command/2
+bif 'erl.lang.port':command/2 ebif_port_command_2
+bif erlang:port_command/3
+bif 'erl.lang.port':command/3 ebif_port_command_3
+bif erlang:port_control/3
+bif 'erl.lang.port':control/3 ebif_port_control_3
+bif erlang:port_close/1
+bif 'erl.lang.port':close/1 ebif_port_close_1
+bif erlang:port_connect/2
+bif 'erl.lang.port':connect/2 ebif_port_connect_2
+bif erlang:port_set_data/2
+bif 'erl.lang.port':set_data/2 ebif_port_set_data_2
+bif erlang:port_get_data/1
+bif 'erl.lang.port':get_data/1 ebif_port_get_data_1
+
+# Tracing & debugging.
+bif erlang:trace_pattern/2
+bif 'erl.system.debug':trace_pattern/2 ebif_trace_pattern_2
+bif erlang:trace_pattern/3
+bif 'erl.system.debug':trace_pattern/3 ebif_trace_pattern_3
+bif erlang:trace/3
+bif 'erl.system.debug':trace/3 ebif_trace_3
+bif erlang:trace_info/2
+bif 'erl.system.debug':trace_info/2 ebif_trace_info_2
+bif erlang:trace_delivered/1
+bif 'erl.system.debug':trace_delivered/1 ebif_trace_delivered_1
+bif erlang:seq_trace/2
+bif 'erl.system.debug':seq_trace/2 ebif_seq_trace_2
+bif erlang:seq_trace_info/1
+bif 'erl.system.debug':seq_trace_info/1 ebif_seq_trace_info_1
+bif erlang:seq_trace_print/1
+bif 'erl.system.debug':seq_trace_print/1 ebif_seq_trace_print_1
+bif erlang:seq_trace_print/2
+bif 'erl.system.debug':seq_trace_print/2 ebif_seq_trace_print_2
+bif erlang:suspend_process/2
+bif 'erl.system.debug':suspend_process/2 ebif_suspend_process_2
+bif erlang:resume_process/1
+bif 'erl.system.debug':resume_process/1 ebif_resume_process_1
+bif erlang:process_display/2
+bif 'erl.system.debug':process_display/2 ebif_process_display_2
+
+bif erlang:bump_reductions/1
+bif 'erl.lang.proc':bump_reductions/1 ebif_bump_reductions_1
+
+bif math:cos/1
+bif 'erl.lang.math':cos/1 ebif_math_cos_1
+bif math:cosh/1
+bif 'erl.lang.math':cosh/1 ebif_math_cosh_1
+bif math:sin/1
+bif 'erl.lang.math':sin/1 ebif_math_sin_1
+bif math:sinh/1
+bif 'erl.lang.math':sinh/1 ebif_math_sinh_1
+bif math:tan/1
+bif 'erl.lang.math':tan/1 ebif_math_tan_1
+bif math:tanh/1
+bif 'erl.lang.math':tanh/1 ebif_math_tanh_1
+bif math:acos/1
+bif 'erl.lang.math':acos/1 ebif_math_acos_1
+bif math:acosh/1
+bif 'erl.lang.math':acosh/1 ebif_math_acosh_1
+bif math:asin/1
+bif 'erl.lang.math':asin/1 ebif_math_asin_1
+bif math:asinh/1
+bif 'erl.lang.math':asinh/1 ebif_math_asinh_1
+bif math:atan/1
+bif 'erl.lang.math':atan/1 ebif_math_atan_1
+bif math:atanh/1
+bif 'erl.lang.math':atanh/1 ebif_math_atanh_1
+bif math:erf/1
+bif 'erl.lang.math':erf/1 ebif_math_erf_1
+bif math:erfc/1
+bif 'erl.lang.math':erfc/1 ebif_math_erfc_1
+bif math:exp/1
+bif 'erl.lang.math':exp/1 ebif_math_exp_1
+bif math:log/1
+bif 'erl.lang.math':log/1 ebif_math_log_1
+bif math:log10/1
+bif 'erl.lang.math':log10/1 ebif_math_log10_1
+bif math:sqrt/1
+bif 'erl.lang.math':sqrt/1 ebif_math_sqrt_1
+bif math:atan2/2
+bif 'erl.lang.math':atan2/2 ebif_math_atan2_2
+bif math:pow/2
+bif 'erl.lang.math':pow/2 ebif_math_pow_2
+
+bif erlang:start_timer/3
+bif 'erl.lang.timer':start/3 ebif_start_timer_3
+bif erlang:send_after/3
+bif 'erl.lang.timer':send_after/3 ebif_send_after_3
+bif erlang:cancel_timer/1
+bif 'erl.lang.timer':cancel/1 ebif_cancel_timer_1
+bif erlang:read_timer/1
+bif 'erl.lang.timer':read/1 ebif_read_timer_1
+
+bif erlang:make_tuple/2
+bif 'erl.lang.tuple':make/2 ebif_make_tuple_2
+bif erlang:append_element/2
+bif 'erl.lang.tuple':append_element/2 ebif_append_element_2
+bif erlang:make_tuple/3
+
+bif erlang:system_flag/2
+bif 'erl.system':set_flag/2 ebif_system_flag_2
+bif erlang:system_info/1
+bif 'erl.system':info/1 ebif_system_info_1
+# New in R9C
+bif erlang:system_monitor/0
+bif 'erl.system':monitor/0 ebif_system_monitor_0
+bif erlang:system_monitor/1
+bif 'erl.system':monitor/1 ebif_system_monitor_1
+bif erlang:system_monitor/2
+bif 'erl.system':monitor/2 ebif_system_monitor_2
+# Added 2006-11-07
+bif erlang:system_profile/2
+bif 'erl.system':profile/2 ebif_system_profile_2
+# End Added 2006-11-07
+# Added 2007-01-17
+bif erlang:system_profile/0
+bif 'erl.system':profile/0 ebif_system_profile_0
+# End Added 2007-01-17
+bif erlang:ref_to_list/1
+bif 'erl.lang.ref':to_string/1 ebif_ref_to_string_1 ref_to_list_1
+bif erlang:port_to_list/1
+bif 'erl.lang.port':to_string/1 ebif_port_to_string_1 port_to_list_1
+bif erlang:fun_to_list/1
+bif 'erl.lang.function':to_string/1 ebif_fun_to_string_1 fun_to_list_1
+
+bif erlang:monitor/2
+bif 'erl.lang.proc':monitor/2 ebif_monitor_2
+bif erlang:demonitor/1
+bif 'erl.lang.proc':demonitor/1 ebif_demonitor_1
+bif erlang:demonitor/2
+bif 'erl.lang.proc':demonitor/2 ebif_demonitor_2
+
+bif erlang:is_process_alive/1
+bif 'erl.lang.proc':is_alive/1 ebif_proc_is_alive_1 is_process_alive_1
+
+bif erlang:error/1 error_1
+bif 'erl.lang':error/1 ebif_error_1 error_1
+bif erlang:error/2 error_2
+bif 'erl.lang':error/2 ebif_error_2 error_2
+bif erlang:raise/3 raise_3
+bif 'erl.lang':raise/3 ebif_raise_3 raise_3
+bif erlang:get_stacktrace/0
+bif 'erl.lang.proc':get_stacktrace/0 ebif_get_stacktrace_0
+
+bif erlang:is_builtin/3
+bif 'erl.system.code':is_builtin/3 ebif_is_builtin_3
+
+ubif erlang:'and'/2
+ubif 'erl.lang.bool':'and'/2 ebif_and_2
+ubif erlang:'or'/2
+ubif 'erl.lang.bool':'or'/2 ebif_or_2
+ubif erlang:'xor'/2
+ubif 'erl.lang.bool':'xor'/2 ebif_xor_2
+ubif erlang:'not'/1
+ubif 'erl.lang.bool':'not'/1 ebif_not_1
+
+ubif erlang:'>'/2 sgt_2
+ubif 'erl.lang.term':greater/2 ebif_gt_2 sgt_2
+ubif erlang:'>='/2 sge_2
+ubif 'erl.lang.term':greater_or_equal/2 ebif_ge_2 sge_2
+ubif erlang:'<'/2 slt_2
+ubif 'erl.lang.term':less/2 ebif_lt_2 slt_2
+ubif erlang:'=<'/2 sle_2
+ubif 'erl.lang.term':less_or_equal/2 ebif_le_2 sle_2
+ubif erlang:'=:='/2 seq_2
+ubif 'erl.lang.term':equal/2 ebif_eq_2 seq_2
+ubif erlang:'=='/2 seqeq_2
+ubif 'erl.lang.term':arith_equal/2 ebif_areq_2 seqeq_2
+ubif erlang:'=/='/2 sneq_2
+ubif 'erl.lang.term':not_equal/2 ebif_neq_2 sneq_2
+ubif erlang:'/='/2 sneqeq_2
+ubif 'erl.lang.term':not_arith_equal/2 ebif_nareq_2 sneqeq_2
+ubif erlang:'+'/2 splus_2
+ubif 'erl.lang.number':plus/2 ebif_plus_2 splus_2
+ubif erlang:'-'/2 sminus_2
+ubif 'erl.lang.number':minus/2 ebif_minus_2 sminus_2
+ubif erlang:'*'/2 stimes_2
+ubif 'erl.lang.number':multiply/2 ebif_multiply_2 stimes_2
+ubif erlang:'/'/2 div_2
+ubif 'erl.lang.number':divide/2 ebif_divide_2 div_2
+ubif erlang:'div'/2 intdiv_2
+ubif 'erl.lang.integer':'div'/2 ebif_intdiv_2
+ubif erlang:'rem'/2
+ubif 'erl.lang.integer':'rem'/2 ebif_rem_2
+ubif erlang:'bor'/2
+ubif 'erl.lang.integer':'bor'/2 ebif_bor_2
+ubif erlang:'band'/2
+ubif 'erl.lang.integer':'band'/2 ebif_band_2
+ubif erlang:'bxor'/2
+ubif 'erl.lang.integer':'bxor'/2 ebif_bxor_2
+ubif erlang:'bsl'/2
+ubif 'erl.lang.integer':'bsl'/2 ebif_bsl_2
+ubif erlang:'bsr'/2
+ubif 'erl.lang.integer':'bsr'/2 ebif_bsr_2
+ubif erlang:'bnot'/1
+ubif 'erl.lang.integer':'bnot'/1 ebif_bnot_1
+ubif erlang:'-'/1 sminus_1
+ubif 'erl.lang.number':minus/1 ebif_minus_1 sminus_1
+ubif erlang:'+'/1 splus_1
+ubif 'erl.lang.number':plus/1 ebif_plus_1 splus_1
+
+# New operators in R8. These were the only operators missing.
+# erlang:send/2, erlang:append/2 and erlang:subtract/2 are now also
+# defined in erlang.erl, and the C names can be removed when all
+# internal references have been updated to the new ebif_... entries.
+
+bif erlang:'!'/2 ebif_bang_2
+bif 'erl.lang.proc':send/2 ebif_send_2 send_2
+bif erlang:send/2
+bif 'erl.lang':send/3 ebif_send_3 send_3
+bif erlang:send/3
+bif erlang:'++'/2 ebif_plusplus_2
+bif 'erl.lang.list':append/2 ebif_append_2 ebif_plusplus_2
+bif erlang:append/2
+bif erlang:'--'/2 ebif_minusminus_2
+bif 'erl.lang.list':subtract/2 ebif_list_subtract_2 ebif_minusminus_2
+bif erlang:subtract/2
+
+ubif erlang:is_atom/1
+ubif 'erl.lang.term':is_atom/1 ebif_is_atom_1
+ubif erlang:is_list/1
+ubif 'erl.lang.term':is_list/1 ebif_is_list_1
+ubif erlang:is_tuple/1
+ubif 'erl.lang.term':is_tuple/1 ebif_is_tuple_1
+ubif erlang:is_float/1
+ubif 'erl.lang.term':is_float/1 ebif_is_float_1
+ubif erlang:is_integer/1
+ubif 'erl.lang.term':is_integer/1 ebif_is_integer_1
+ubif erlang:is_number/1
+ubif 'erl.lang.term':is_number/1 ebif_is_number_1
+ubif erlang:is_pid/1
+ubif 'erl.lang.term':is_pid/1 ebif_is_pid_1
+ubif erlang:is_port/1
+ubif 'erl.lang.term':is_port/1 ebif_is_port_1
+ubif erlang:is_reference/1
+ubif 'erl.lang.term':is_reference/1 ebif_is_reference_1
+ubif erlang:is_binary/1
+ubif 'erl.lang.term':is_binary/1 ebif_is_binary_1
+ubif erlang:is_function/1
+ubif 'erl.lang.term':is_function/1 ebif_is_function_1
+ubif erlang:is_function/2
+ubif 'erl.lang.term':is_function/2 ebif_is_function_2
+ubif erlang:is_record/2
+ubif 'erl.lang.term':is_record/2 ebif_is_record_2
+ubif erlang:is_record/3
+ubif 'erl.lang.term':is_record/3 ebif_is_record_3
+
+bif erlang:match_spec_test/3
+
+#
+# Bifs in ets module.
+#
+
+bif ets:all/0
+bif 'erl.lang.ets':all/0 ebif_ets_all_0
+bif ets:new/2
+bif 'erl.lang.ets':new/2 ebif_ets_new_2
+bif ets:delete/1
+bif 'erl.lang.ets':delete/1 ebif_ets_delete_1
+bif ets:delete/2
+bif 'erl.lang.ets':delete/2 ebif_ets_delete_2
+bif ets:delete_all_objects/1
+bif 'erl.lang.ets':delete_all_objects/1 ebif_ets_delete_all_objects_1
+bif ets:delete_object/2
+bif 'erl.lang.ets':delete_object/2 ebif_ets_delete_object_2
+bif ets:first/1
+bif 'erl.lang.ets':first/1 ebif_ets_first_1
+bif ets:is_compiled_ms/1
+bif 'erl.lang.ets':is_compiled_ms/1 ebif_ets_is_compiled_ms_1
+bif ets:lookup/2
+bif 'erl.lang.ets':lookup/2 ebif_ets_lookup_2
+bif ets:lookup_element/3
+bif 'erl.lang.ets':lookup_element/3 ebif_ets_lookup_element_3
+bif ets:info/1
+bif 'erl.lang.ets':info/1 ebif_ets_info_1
+bif ets:info/2
+bif 'erl.lang.ets':info/2 ebif_ets_info_2
+bif ets:last/1
+bif 'erl.lang.ets':last/1 ebif_ets_last_1
+bif ets:match/1
+bif 'erl.lang.ets':match/1 ebif_ets_match_1
+bif ets:match/2
+bif 'erl.lang.ets':match/2 ebif_ets_match_2
+bif ets:match/3
+bif 'erl.lang.ets':match/3 ebif_ets_match_3
+bif ets:match_object/1
+bif 'erl.lang.ets':match_object/1 ebif_ets_match_object_1
+bif ets:match_object/2
+bif 'erl.lang.ets':match_object/2 ebif_ets_match_object_2
+bif ets:match_object/3
+bif 'erl.lang.ets':match_object/3 ebif_ets_match_object_3
+bif ets:member/2
+bif 'erl.lang.ets':is_key/2 ebif_ets_member_2
+bif ets:next/2
+bif 'erl.lang.ets':next/2 ebif_ets_next_2
+bif ets:prev/2
+bif 'erl.lang.ets':prev/2 ebif_ets_prev_2
+bif ets:insert/2
+bif 'erl.lang.ets':insert/2 ebif_ets_insert_2
+bif ets:insert_new/2
+bif 'erl.lang.ets':insert_new/2 ebif_ets_insert_new_2
+bif ets:rename/2
+bif 'erl.lang.ets':rename/2 ebif_ets_rename_2
+bif ets:safe_fixtable/2
+bif 'erl.lang.ets':fixtable/2 ebif_ets_safe_fixtable_2
+bif ets:slot/2
+bif 'erl.lang.ets':slot/2 ebif_ets_slot_2
+bif ets:update_counter/3
+bif 'erl.lang.ets':update_counter/3 ebif_ets_update_counter_3
+bif ets:select/1
+bif 'erl.lang.ets':select/1 ebif_ets_select_1
+bif ets:select/2
+bif 'erl.lang.ets':select/2 ebif_ets_select_2
+bif ets:select/3
+bif 'erl.lang.ets':select/3 ebif_ets_select_3
+bif ets:select_count/2
+bif 'erl.lang.ets':select/2 ebif_ets_select_count_2
+bif ets:select_reverse/1
+bif 'erl.lang.ets':select_reverse/1 ebif_ets_select_reverse_1
+bif ets:select_reverse/2
+bif 'erl.lang.ets':select_reverse/2 ebif_ets_select_reverse_2
+bif ets:select_reverse/3
+bif 'erl.lang.ets':select_reverse/3 ebif_ets_select_reverse_3
+bif ets:select_delete/2
+bif 'erl.lang.ets':select_delete/2 ebif_ets_select_delete_2
+bif ets:match_spec_compile/1
+bif 'erl.lang.ets':match_spec_compile/1 ebif_ets_match_spec_compile_1
+bif ets:match_spec_run_r/3
+bif 'erl.lang.ets':match_spec_run_r/3 ebif_ets_match_spec_run_r_3
+
+#
+# Bifs in os module.
+#
+
+bif os:putenv/2
+bif 'erl.system.os':setenv/2 ebif_os_setenv_2 os_putenv_2
+bif os:getenv/0
+bif 'erl.system.os':getenv/0 ebif_os_getenv_0
+bif os:getenv/1
+bif 'erl.system.os':getenv/1 ebif_os_getenv_1
+bif os:getpid/0
+bif 'erl.system.os':pid/0 ebif_os_pid_0 os_getpid_0
+bif os:timestamp/0
+bif 'erl.system.os':timestamp/0 ebif_os_timestamp_0 os_timestamp_0
+
+#
+# Bifs in the erl_ddll module (the module actually does not exist)
+#
+
+bif erl_ddll:try_load/3
+bif erl_ddll:try_unload/2
+bif erl_ddll:loaded_drivers/0
+bif erl_ddll:info/2
+bif erl_ddll:format_error_int/1
+bif erl_ddll:monitor/2
+bif erl_ddll:demonitor/1
+
+#
+# Bifs in the re module
+#
+bif re:compile/1
+bif re:compile/2
+bif re:run/2
+bif re:run/3
+
+#
+# Bifs in lists module.
+#
+
+bif lists:member/2
+bif 'erl.lang.list':is_element/2 ebif_list_is_element_2 lists_member_2
+bif lists:reverse/2
+bif 'erl.lang.list':reverse/2 ebif_list_reverse_2 lists_reverse_2
+bif lists:keymember/3
+bif 'erl.lang.list.keylist':is_element/3 ebif_keylist_is_element_3 lists_keymember_3
+bif lists:keysearch/3
+bif 'erl.lang.list.keylist':search/3 ebif_keylist_search_3 lists_keysearch_3
+bif lists:keyfind/3
+
+#
+# Bifs for debugging.
+#
+
+bif erts_debug:disassemble/1
+bif 'erl.system.debug':disassemble/1 ebif_erts_debug_disassemble_1
+bif erts_debug:breakpoint/2
+bif 'erl.system.debug':breakpoint/2 ebif_erts_debug_breakpoint_2
+bif erts_debug:same/2
+bif 'erl.system.debug':same/2 ebif_erts_debug_same_2
+bif erts_debug:flat_size/1
+bif 'erl.system.debug':flat_size/1 ebif_erts_debug_flat_size_1
+bif erts_debug:get_internal_state/1
+bif 'erl.system.debug':get_internal_state/1 ebif_erts_debug_get_internal_state_1
+bif erts_debug:set_internal_state/2
+bif 'erl.system.debug':set_internal_state/2 ebif_erts_debug_set_internal_state_2
+bif erts_debug:display/1
+bif 'erl.system.debug':display/1 ebif_erts_debug_display_1
+bif erts_debug:dist_ext_to_term/2
+bif 'erl.system.debug':dist_ext_to_term/2 ebif_erts_debug_dist_ext_to_term_2
+
+#
+# Monitor testing bifs...
+#
+bif erts_debug:dump_monitors/1
+bif erts_debug:dump_links/1
+
+
+#
+# Lock counter bifs
+#
+bif erts_debug:lock_counters/1
+
+#
+# New Bifs in R8.
+#
+
+bif code:get_chunk/2
+bif 'erl.system.code':get_chunk/2 ebif_code_get_chunk_2
+bif code:module_md5/1
+bif 'erl.system.code':module_md5/1 ebif_code_module_md5_1
+bif code:make_stub_module/3
+bif 'erl.system.code':make_stub_module/3 ebif_code_make_stub_module_3
+bif code:is_module_native/1
+bif 'erl.system.code':is_native/1 ebif_code_is_native_1 code_is_module_native_1
+
+bif erlang:blocking_read_file/1
+
+#
+# New Bifs in R9C.
+#
+
+bif erlang:hibernate/3
+bif error_logger:warning_map/0
+
+#
+# New Bifs in R10B.
+#
+bif erlang:get_module_info/1
+bif erlang:get_module_info/2
+ubif erlang:is_boolean/1
+bif string:to_integer/1
+bif string:to_float/1
+bif erlang:make_fun/3
+bif erlang:iolist_size/1
+bif erlang:iolist_to_binary/1
+bif erlang:list_to_existing_atom/1
+
+#
+# New Bifs in R12B-0
+#
+ubif erlang:is_bitstring/1
+ubif erlang:tuple_size/1
+ubif erlang:byte_size/1
+ubif erlang:bit_size/1
+bif erlang:list_to_bitstring/1
+bif erlang:bitstring_to_list/1
+
+#
+# New Bifs in R12B-2
+#
+bif ets:update_element/3
+
+#
+# New Bifs in R12B-4
+#
+bif erlang:decode_packet/3
+
+#
+# New Bifs in R12B-5
+#
+bif unicode:characters_to_binary/2
+bif unicode:characters_to_list/2
+bif unicode:bin_is_7bit/1
+#
+# New Bifs in R13A.
+#
+bif erlang:atom_to_binary/2
+bif erlang:binary_to_atom/2
+bif erlang:binary_to_existing_atom/2
+bif net_kernel:dflag_unicode_io/1
+#
+# New Bifs in R13B-1
+#
+bif ets:give_away/3
+bif ets:setopts/2
+
+#
+# New Bifs in R13B3
+#
+bif erlang:load_nif/2
+bif erlang:call_on_load_function/1
+bif erlang:finish_after_on_load/2
+
+#
+# Obsolete
+#
+
+bif erlang:hash/2
diff --git a/erts/emulator/beam/big.c b/erts/emulator/beam/big.c
new file mode 100644
index 0000000000..03c88da8c6
--- /dev/null
+++ b/erts/emulator/beam/big.c
@@ -0,0 +1,2241 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 1996-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+
+#include "sys.h"
+#include "erl_vm.h"
+#include "global.h"
+#include "big.h"
+#include "error.h"
+#include "bif.h"
+
+#define ZERO_DIGITS(v, sz) do { \
+ dsize_t _t_sz = sz; \
+ ErtsDigit* _t_v = v; \
+ while(_t_sz--) *_t_v++ = 0; \
+ } while(0)
+
+#define MOVE_DIGITS(dst, src, sz) do { \
+ dsize_t _t_sz = sz; \
+ ErtsDigit* _t_dst; \
+ ErtsDigit* _t_src; \
+ if (dst < src) { \
+ _t_dst = dst; \
+ _t_src = src; \
+ while(_t_sz--) *_t_dst++ = *_t_src++; \
+ } \
+ else if (dst > src) { \
+ _t_dst = (dst)+((sz)-1); \
+ _t_src = (src)+((sz)-1); \
+ while(_t_sz--) *_t_dst-- = *_t_src--; \
+ } \
+ } while(0)
+
+/* add a and b with carry in + out */
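+/*
+ * The carries fall out of unsigned wrap-around: for unsigned digits,
+ * (x + c) < c holds exactly when x + c overflowed. E.g. with 8-bit
+ * digits, a = 0xFF and c = 1 wrap to 0x00 < 1, yielding a carry of 1.
+ */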
+#define DSUMc(a,b,c,s) do { \
+ ErtsDigit ___cr = (c); \
+ ErtsDigit ___xr = (a)+(___cr); \
+ ErtsDigit ___yr = (b); \
+ ___cr = (___xr < ___cr); \
+ ___xr = ___yr + ___xr; \
+ ___cr += (___xr < ___yr); \
+ s = ___xr; \
+ c = ___cr; \
+ } while(0)
+
+/* add a and b with carry out */
+#define DSUM(a,b,c,s) do { \
+ ErtsDigit ___xr = (a); \
+ ErtsDigit ___yr = (b); \
+ ___xr = ___yr + ___xr; \
+ s = ___xr; \
+ c = (___xr < ___yr); \
+ } while(0)
+
+#define DSUBb(a,b,r,d) do { \
+ ErtsDigit ___cr = (r); \
+ ErtsDigit ___xr = (a); \
+ ErtsDigit ___yr = (b)+___cr; \
+ ___cr = (___yr < ___cr); \
+ ___yr = ___xr - ___yr; \
+ ___cr += (___yr > ___xr); \
+ d = ___yr; \
+ r = ___cr; \
+ } while(0)
+
+#define DSUB(a,b,r,d) do { \
+ ErtsDigit ___xr = (a); \
+ ErtsDigit ___yr = (b); \
+ ___yr = ___xr - ___yr; \
+ r = (___yr > ___xr); \
+ d = ___yr; \
+ } while(0)
+
+/* Type a constant as an ErtsDigit, to get shifts correct */
+#define DCONST(n) ((ErtsDigit)(n))
+
+/*
+ * BIG_HAVE_DOUBLE_DIGIT is defined if we have defined the type
+ * ErtsDoubleDigit, which MUST be at least twice as wide as ErtsDigit
+ * (sizeof(ErtsDoubleDigit) >= 2*sizeof(ErtsDigit)), so that it can
+ * hold a full digit-by-digit product
+ */
+#ifdef BIG_HAVE_DOUBLE_DIGIT
+
+/* ErtsDoubleDigit => ErtsDigit */
+#define DLOW(x) ((ErtsDigit)(x))
+#define DHIGH(x) ((ErtsDigit)(((ErtsDoubleDigit)(x)) >> D_EXP))
+
+/* ErtsDigit => ErtsDoubleDigit */
+#define DLOW2HIGH(x) (((ErtsDoubleDigit)(x)) << D_EXP)
+#define DDIGIT(a1,a0) (DLOW2HIGH(a1) + (a0))
+
+#define DMULc(a,b,c,p) do { \
+ ErtsDoubleDigit _t = ((ErtsDoubleDigit)(a))*(b) + (c); \
+ p = DLOW(_t); \
+ c = DHIGH(_t); \
+ } while(0)
+#define DMUL(a,b,c1,c0) do { \
+ ErtsDoubleDigit _t = ((ErtsDoubleDigit)(a))*(b); \
+ c0 = DLOW(_t); \
+ c1 = DHIGH(_t); \
+ } while(0)
+
+#define DDIV(a1,a0,b,q) do { \
+ ErtsDoubleDigit _t = DDIGIT((a1),(a0)); \
+ q = _t / (b); \
+ } while(0)
+
+#define DDIV2(a1,a0,b1,b0,q) do { \
+ ErtsDoubleDigit _t = DDIGIT((a1),(a0)); \
+ q = _t / DDIGIT((b1),(b0)); \
+ } while(0)
+
+#define DREM(a1,a0,b,r) do { \
+ ErtsDoubleDigit _t = DDIGIT((a1),(a0)); \
+ r = _t % (b); \
+ } while(0)
+
+#else
+
+/* If we do not have double digit then we have some more work to do */
+#define H_EXP (D_EXP >> 1)
+#define LO_MASK ((ErtsDigit)((DCONST(1) << H_EXP)-1))
+#define HI_MASK ((ErtsDigit)(LO_MASK << H_EXP))
+
+#define DGT(a,b) ((a)>(b))
+#define DEQ(a,b) ((a)==(b))
+
+#define D2GT(a1,a0,b1,b0) (DGT(a1,b1) || (((a1)==(b1)) && DGT(a0,b0)))
+#define D2EQ(a1,a0,b1,b0) (DEQ(a1,b1) && DEQ(a0,b0))
+#define D2LT(a1,a0,b1,b0) D2GT(b1,b0,a1,a0)
+#define D2GTE(a1,a0,b1,b0) (!D2LT(a1,a0,b1,b0))
+#define D2LTE(a1,a0,b1,b0) (!D2GT(a1,a0,b1,b0))
+
+/* Add (A+B), A=(a1B+a0), B=(b1B+b0) */
+#define D2ADD(a1,a0,b1,b0,c1,c0) do { \
+ ErtsDigit __ci = 0; \
+ DSUM(a0,b0,__ci,c0); \
+ DSUMc(a1,b1,__ci,c1); \
+ } while(0)
+
+/* Subtract (A-B), A=(a1B+a0), B=(b1B+b0) (A>=B) */
+#define D2SUB(a1,a0,b1,b0,c1,c0) do { \
+ ErtsDigit __bi; \
+ DSUB(a0,b0,__bi,c0); \
+ DSUBb(a1,b1,__bi,c1); \
+ } while(0)
+
+
+/* Left shift (multiply by 2) (A <<= 1 where A=a1*B+a0) */
+#define D2LSHIFT1(a1,a0) do { \
+ a1 = ((a0) >> (D_EXP-1)) | ((a1)<<1); \
+ a0 = (a0) << 1; \
+ } while(0)
+
+/* Right shift (divide by 2) (A >>= 1 where A=a1*B+a0) */
+#define D2RSHIFT1(a1,a0) do { \
+ a0 = (((a1) & 1) << (D_EXP-1)) | ((a0)>>1); \
+ a1 = ((a1) >> 1); \
+ } while(0)
+
+/* Calculate a*b + d1 and store double prec result in d1, d0 */
+#define DMULc(a,b,d1,d0) do { \
+ ErtsHalfDigit __a0 = (a); \
+ ErtsHalfDigit __a1 = ((a) >> H_EXP); \
+ ErtsHalfDigit __b0 = (b); \
+ ErtsHalfDigit __b1 = ((b) >> H_EXP); \
+ ErtsDigit __a0b0 = (ErtsDigit)__a0*__b0; \
+ ErtsDigit __a0b1 = (ErtsDigit)__a0*__b1; \
+ ErtsDigit __a1b0 = (ErtsDigit)__a1*__b0; \
+ ErtsDigit __a1b1 = (ErtsDigit)__a1*__b1; \
+ ErtsDigit __p0,__p1,__p2,__c0; \
+ DSUM(__a0b0,d1,__c0,__p0); \
+ DSUM((__c0<<H_EXP),(__p0>>H_EXP),__p2,__p1); \
+ DSUM(__p1,__a0b1,__c0,__p1); \
+ __p2 += __c0; \
+ DSUM(__p1,__a1b0,__c0,__p1); \
+ __p2 += __c0; \
+ DSUM(__p1,__a1b1<<H_EXP,__c0,__p1); \
+ __p2 += __c0; \
+ DSUM(__a1b1, (__p2<<H_EXP),__c0,__p2); \
+ d1 = (__p2 & HI_MASK) | (__p1 >> H_EXP); \
+ d0 = (__p1 << H_EXP) | (__p0 & LO_MASK); \
+ } while(0)
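+/*
+ * Without a double-width type the product is formed schoolbook style:
+ * a and b are split into half digits, a = a1*2^H_EXP + a0 and
+ * b = b1*2^H_EXP + b0, the four partial products a0b0, a0b1, a1b0 and
+ * a1b1 are computed in single precision, and the carries between the
+ * halves are collected with DSUM.
+ */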
+
+#define DMUL(a,b,d1,d0) do { \
+ ErtsDigit _ds = 0; \
+ DMULc(a,b,_ds,d0); \
+ d1 = _ds; \
+ } while(0)
+
+/* Calculate a*(Bb1 + b0) + d2 = a*b1B + a*b0 + d2 */
+#define D2MULc(a,b1,b0,d2,d1,d0) do { \
+ DMULc(a, b0, d2, d0); \
+ DMULc(a, b1, d2, d1); \
+ } while(0)
+
+/* Calculate the shift s needed to normalize a, i.e. so that (a << s) has its top bit set */
+/* NOTE: since D2PF is used by other macros, its variables are prefixed with __ */
+#if D_EXP == 64
+#define D2PF(a, s) do { \
+ ErtsDigit __x = (a); \
+ int __s = 0; \
+ if (__x <= 0x00000000FFFFFFFF) { __s += 32; __x <<= 32; } \
+ if (__x <= 0x0000FFFFFFFFFFFF) { __s += 16; __x <<= 16; } \
+ if (__x <= 0x00FFFFFFFFFFFFFF) { __s += 8; __x <<= 8; } \
+ if (__x <= 0x0FFFFFFFFFFFFFFF) { __s += 4; __x <<= 4; } \
+ if (__x <= 0x3FFFFFFFFFFFFFFF) { __s += 2; __x <<= 2; } \
+ if (__x <= 0x7FFFFFFFFFFFFFFF) { __s += 1; } \
+ s = __s; \
+ } while(0)
+#elif D_EXP == 32
+#define D2PF(a, s) do { \
+ ErtsDigit __x = (a); \
+ int __s = 0; \
+ if (__x <= 0x0000FFFF) { __s += 16; __x <<= 16; } \
+ if (__x <= 0x00FFFFFF) { __s += 8; __x <<= 8; } \
+ if (__x <= 0x0FFFFFFF) { __s += 4; __x <<= 4; } \
+ if (__x <= 0x3FFFFFFF) { __s += 2; __x <<= 2; } \
+ if (__x <= 0x7FFFFFFF) { __s += 1; } \
+ s = __s; \
+ } while(0)
+#elif D_EXP == 16
+#define D2PF(a, s) do { \
+ ErtsDigit __x = (a); \
+ int __s = 0; \
+ if (__x <= 0x00FF) { __s += 8; __x <<= 8; } \
+ if (__x <= 0x0FFF) { __s += 4; __x <<= 4; } \
+ if (__x <= 0x3FFF) { __s += 2; __x <<= 2; } \
+ if (__x <= 0x7FFF) { __s += 1; } \
+ s = __s; \
+ } while(0)
+#elif D_EXP == 8
+#define D2PF(a, s) do { \
+ ErtsDigit __x = (a); \
+ int __s = 0; \
+ if (__x <= 0x0F) { __s += 4; __x <<= 4; } \
+ if (__x <= 0x3F) { __s += 2; __x <<= 2; } \
+ if (__x <= 0x7F) { __s += 1; } \
+ s = __s; \
+ } while(0)
+#endif
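+/*
+ * Example with D_EXP == 32: D2PF(0x0000F000, s) takes only the first
+ * branch (x <= 0x0000FFFF), giving s = 16, and indeed
+ * 0x0000F000 << 16 == 0xF0000000 has its top bit set.
+ */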
+
+/* Calculate q = (a1B + a0) / b, assume a1 < b */
+#define DDIVREM(a1,a0,b,q,r) do { \
+ ErtsDigit _a1 = (a1); \
+ ErtsDigit _a0 = (a0); \
+ ErtsDigit _b = (b); \
+ ErtsHalfDigit _un1, _un0; \
+ ErtsHalfDigit _vn1, _vn0; \
+ ErtsDigit _q1, _q0; \
+ ErtsDigit _un32, _un21, _un10; \
+ ErtsDigit _rh; \
+ Sint _s; \
+ D2PF(_b, _s); \
+ _b = _b << _s; \
+ _vn1 = _b >> H_EXP; \
+ _vn0 = _b & LO_MASK; \
+ _un32 = (_a1 << _s) | ((_a0>>(D_EXP-_s)) & (-_s >> (D_EXP-1))); \
+ _un10 = _a0 << _s; \
+ _un1 = _un10 >> H_EXP; \
+ _un0 = _un10 & LO_MASK; \
+ _q1 = _un32/_vn1; \
+ _rh = _un32 - _q1*_vn1; \
+ while ((_q1 >= (DCONST(1)<<H_EXP))||(_q1*_vn0 > (_rh<<H_EXP)+_un1)) { \
+ _q1--; \
+ _rh += _vn1; \
+ if (_rh >= (DCONST(1)<<H_EXP)) break; \
+ } \
+ _un21 = (_un32<<H_EXP) + _un1 - _q1*_b; \
+ _q0 = _un21/_vn1; \
+ _rh = _un21 - _q0*_vn1; \
+ while ((_q0 >= (DCONST(1)<<H_EXP))||(_q0*_vn0 > ((_rh<<H_EXP)+_un0))) { \
+ _q0--; \
+ _rh += _vn1; \
+ if (_rh >= (DCONST(1)<<H_EXP)) break; \
+ } \
+ r = ((_un21<<H_EXP) + _un0 - _q0*_b) >> _s; \
+ q = (_q1<<H_EXP) + _q0; \
+ } while(0)
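+/*
+ * DDIVREM above is the classic normalize-and-estimate scheme for
+ * dividing a two-digit number by one digit: the divisor is shifted
+ * left until its top bit is set, and each quotient half-digit is
+ * estimated from the high parts and then corrected downwards until
+ * the partial remainder fits.
+ */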
+
+/* Divide any a = (a1*B + a0) by b */
+#define DDIVREM2(a1,a0,b,q1,q0,r) do { \
+ ErtsDigit __a1 = (a1); \
+ ErtsDigit __b = (b); \
+ q1 = __a1 / __b; \
+ DDIVREM(__a1 % __b, (a0), __b, q0, r); \
+ } while(0)
+
+
+/* Calculate r = (a1B + a0) % b */
+#define DREM(a1,a0,b,r) do { \
+ ErtsDigit __a1 = (a1); \
+ ErtsDigit __b = (b); \
+ ErtsDigit __q0; \
+ DDIVREM((__a1 % __b), (a0), __b, __q0, r); \
+ } while(0)
+
+#define DDIV(a1,a0,b,q) do { \
+ ErtsDigit _tmp; \
+ DDIVREM(a1,a0,b,q,_tmp); \
+ } while(0)
+
+
+/* Calculate q, r such that A = B*q + R */
+#if (SIZEOF_VOID_P == 8)
+#define QUOT_LIM 0x7FFFFFFFFFFFFFFF
+#else
+#define QUOT_LIM 0x7FFFFFFF
+#endif
+
+#define D2DIVREM(a1,a0,b1,b0,q0,r1,r0) do { \
+ ErtsDigit _a1 = (a1); \
+ ErtsDigit _a0 = (a0); \
+ ErtsDigit _b1 = (b1); \
+ ErtsDigit _b0 = (b0); \
+ ErtsDigit _q = 0; \
+ int _as = 1; \
+ while(D2GTE(_a1,_a0,_b1,_b0)) { \
+ ErtsDigit _q1; \
+ ErtsDigit _t2=0, _t1, _t0; \
+ if ((_b1 == 1) && (_a1 > 1)) \
+ _q1 = _a1 / 2; \
+ else if ((_a1 > QUOT_LIM) && (_b1 < _a1)) \
+ _q1 = _a1/(_b1+1); \
+ else \
+ _q1 = _a1/_b1; \
+ if (_as<0) \
+ _q -= _q1; \
+ else \
+ _q += _q1; \
+ D2MULc(_q1, _b1, _b0, _t2, _t1, _t0); \
+ ASSERT(_t2 == 0); \
+ if (D2GT(_t1,_t0,_a1,_a0)) { \
+ D2SUB(_t1,_t0,_a1,_a0,_a1,_a0); \
+ _as = -_as; \
+ } \
+ else { \
+ D2SUB(_a1,_a0,_t1,_t0,_a1,_a0); \
+ } \
+ } \
+ if (_as < 0) { \
+ _q--; \
+ D2SUB(_b1,_b0,_a1,_a0,_a1,_a0); \
+ } \
+ q0 = _q; \
+ r1 = _a1; \
+ r0 = _a0; \
+ } while(0)
+
+
+/* Calculate q, r such that A = B*q + R, assuming B > 0 */
+#define D2DIVREM_0(a1,a0,b1,b0,q1,q0,r1,r0) do { \
+ ErtsDigit _a1 = (a1); \
+ ErtsDigit _a0 = (a0); \
+ ErtsDigit _b1 = (b1); \
+ ErtsDigit _b0 = (b0); \
+ if (D2EQ(_a1,_a0,0,0)) { \
+ q1 = q0 = 0; \
+ r1 = r0 = 0; \
+ } \
+ else { \
+ ErtsDigit _res1 = 0; \
+ ErtsDigit _res0 = 0; \
+ ErtsDigit _d1 = 0; \
+ ErtsDigit _d0 = 1; \
+ ErtsDigit _e1 = (1 << (D_EXP-1)); \
+ ErtsDigit _e0 = 0; \
+ while(_e1 && !(_a1 & _e1)) \
+ _e1 >>= 1; \
+ if (_e1 == 0) { \
+ _e0 = (1 << (D_EXP-1)); \
+ while(_e0 && !(_a0 & _e0)) \
+ _e0 >>= 1; \
+ } \
+ if (D2GT(_b1,_b0,0,0)) { \
+ while(D2GT(_e1,_e0,_b1,_b0)) { \
+ D2LSHIFT1(_b1,_b0); \
+ D2LSHIFT1(_d1,_d0); \
+ } \
+ } \
+ do { \
+ if (!D2GT(_b1,_b0,_a1,_a0)) { \
+ D2SUB(_a1,_a0, _b1, _b0, _a1, _a0); \
+ D2ADD(_d1,_d0, _res1,_res0, _res1, _res0); \
+ } \
+ D2RSHIFT1(_b1,_b0); \
+ D2RSHIFT1(_d1,_d0); \
+ } while (!D2EQ(_d1,_d0,0,0)); \
+ r1 = _a1; \
+ r0 = _a0; \
+ q1 = _res1; \
+ q0 = _res0; \
+ } \
+ } while(0)
+
+#define DDIV2(a1,a0,b1,b0,q) do { \
+ ErtsDigit _tmp_r1; \
+ ErtsDigit _tmp_r0; \
+ D2DIVREM(a1,a0,b1,b0,q,_tmp_r1,_tmp_r0); \
+ } while(0)
+
+#endif
+
+/*
+** compare two number vectors
+*/
+static int I_comp(ErtsDigit* x, dsize_t xl, ErtsDigit* y, dsize_t yl)
+{
+ if (xl < yl)
+ return -1;
+ else if (xl > yl)
+ return 1;
+ else {
+ if (x == y)
+ return 0;
+ x += (xl-1);
+ y += (yl-1);
+ while((xl > 0) && (*x == *y)) {
+ x--;
+ y--;
+ xl--;
+ }
+ if (xl == 0)
+ return 0;
+ return (*x < *y) ? -1 : 1;
+ }
+}
+
+/*
+** Add digits in x and y and store them in r
+** assumption: (xl >= yl)
+*/
+static dsize_t I_add(ErtsDigit* x, dsize_t xl, ErtsDigit* y, dsize_t yl, ErtsDigit* r)
+{
+ dsize_t sz = xl;
+ register ErtsDigit yr, xr;
+ register ErtsDigit c = 0;
+
+ ASSERT(xl >= yl);
+
+ xl -= yl;
+ do {
+ xr = *x++ + c;
+ yr = *y++;
+ c = (xr < c);
+ xr = yr + xr;
+ c += (xr < yr);
+ *r++ = xr;
+ } while(--yl);
+
+ while(xl--) {
+ xr = *x++ + c;
+ c = (xr < c);
+ *r++ = xr;
+ }
+ if (c) {
+ *r = 1;
+ return sz+1;
+ }
+ return sz;
+}
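+/*
+ * Example (illustrated with 8-bit digits, least significant first):
+ * x = {0xFF, 0x01} (i.e. 0x01FF) plus y = {0x01} gives
+ * r = {0x00, 0x02} (0x0200) and returns 2; the result only grows to
+ * sz+1 digits when the final carry survives.
+ */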
+/*
+** Add the digit c to the digits in x and store the result in r
+*/
+static dsize_t D_add(ErtsDigit* x, dsize_t xl, ErtsDigit c, ErtsDigit* r)
+{
+ dsize_t sz = xl;
+ register ErtsDigit xr;
+
+ while(xl--) {
+ xr = *x++ + c;
+ c = (xr < c);
+ *r++ = xr;
+ }
+ if (c) {
+ *r = 1;
+ return sz+1;
+ }
+ return sz;
+}
+
+/*
+** Subtract the digits in y from the digits in x and store the result in r
+** Assert I_comp(x, xl, y, yl) >= 0
+**
+*/
+static dsize_t I_sub(ErtsDigit* x, dsize_t xl, ErtsDigit* y, dsize_t yl, ErtsDigit* r)
+{
+ ErtsDigit* r0 = r;
+ register ErtsDigit yr, xr;
+ register ErtsDigit c = 0;
+
+ ASSERT(I_comp(x, xl, y, yl) >= 0);
+
+ xl -= yl;
+ do {
+ yr = *y++ + c;
+ xr = *x++;
+ c = (yr < c);
+ yr = xr - yr;
+ c += (yr > xr);
+ *r++ = yr;
+ } while(--yl);
+
+ while(xl--) {
+ xr = *x++;
+ yr = xr - c;
+ c = (yr > xr);
+ *r++ = yr;
+ }
+ do {
+ r--;
+ } while(*r == 0 && r != r0);
+
+ return (r - r0) + 1;
+}
+
+/*
+** Subtract the digit c from the digits in x and store the result in r
+*/
+static dsize_t D_sub(ErtsDigit* x, dsize_t xl, ErtsDigit c, ErtsDigit* r)
+{
+ ErtsDigit* r0 = r;
+ register ErtsDigit yr, xr;
+
+ ASSERT(I_comp(x, xl, x, 1) >= 0);
+
+ while(xl--) {
+ xr = *x++;
+ yr = xr - c;
+ c = (yr > xr);
+ *r++ = yr;
+ }
+ do {
+ r--;
+ } while(*r == 0 && r != r0);
+
+ return (r - r0) + 1;
+}
+
+/*
+** Compute the digit-base complement of y (B^yl - y, i.e. Z000...0 - y)
+** and store the result in r; return the new size
+*/
+static dsize_t Z_sub(ErtsDigit* y, dsize_t yl, ErtsDigit* r)
+{
+ ErtsDigit* r0 = r;
+ register ErtsDigit yr;
+ register ErtsDigit c = 0;
+
+ while(yl--) {
+ yr = *y++ + c;
+ c = (yr < c);
+ yr = 0 - yr;
+ c += (yr > 0);
+ *r++ = yr;
+ }
+ do {
+ r--;
+ } while(*r == 0 && r != r0);
+ return (r - r0) + 1;
+}
+
+/*
+** Multiply the digits in x by the digits in y and store the result in r
+** Assumption: the digits in r must be 0 (up to the size of x)
+*/
+static dsize_t I_mul(ErtsDigit* x, dsize_t xl, ErtsDigit* y, dsize_t yl, ErtsDigit* r)
+{
+ ErtsDigit* r0 = r;
+ ErtsDigit* rt = r;
+
+ while(xl--) {
+ ErtsDigit cp = 0;
+ ErtsDigit c = 0;
+ dsize_t n = yl;
+ ErtsDigit* yt = y;
+ ErtsDigit d;
+ ErtsDigit p;
+
+ d = *x;
+ x++;
+ rt = r;
+
+ switch(d) {
+ case 0:
+ rt = rt + n;
+ break;
+ case 1:
+ while(n--) {
+ DSUMc(*yt, *rt, c, p);
+ *rt++ = p;
+ yt++;
+ }
+ break;
+ case 2:
+ while(n--) {
+ p = *yt;
+ DSUMc(p, p, cp, p);
+ DSUMc(p, *rt, c, p);
+ *rt++ = p;
+ yt++;
+ }
+ break;
+ default:
+ while(n--) {
+ DMULc(d,*yt, cp, p);
+ DSUMc(p,*rt, c, p);
+ *rt++ = p;
+ yt++;
+ }
+ break;
+ }
+ *rt = c + cp;
+ r++;
+ }
+ if (*rt == 0)
+ return (rt - r0);
+ else
+ return (rt - r0) + 1;
+}
+
+/*
+** Square the digits in x and store the result in r (x and r may point
+** into a common area)
+** Assumption: x is destroyed if the areas overlap, and the digits in r
+** are zero up to the size xl+1
+*/
+
+static dsize_t I_sqr(ErtsDigit* x, dsize_t xl, ErtsDigit* r)
+{
+ ErtsDigit d_next = *x;
+ ErtsDigit d;
+ ErtsDigit* r0 = r;
+ ErtsDigit* s = r;
+
+ if ((r + xl) == x) /* "Inline" operation */
+ *x = 0;
+ x++;
+
+ while(xl--) {
+ ErtsDigit* y = x;
+ ErtsDigit y_0 = 0, y_1 = 0, y_2 = 0, y_3 = 0;
+ ErtsDigit b0, b1;
+ ErtsDigit z0, z1, z2;
+ ErtsDigit t;
+ dsize_t y_l = xl;
+
+ s = r;
+ d = d_next;
+ d_next = *x;
+ x++;
+
+ DMUL(d, d, b1, b0);
+ DSUMc(*s, b0, y_3, t);
+ *s++ = t;
+ z1 = b1;
+ while(y_l--) {
+ DMUL(d, *y, b1, b0);
+ y++;
+ DSUMc(b0, b0, y_0, z0);
+ DSUMc(z0, z1, y_2, z2);
+ DSUMc(*s, z2, y_3, t);
+ *s++ = t;
+ DSUMc(b1, b1, y_1, z1);
+ }
+ z0 = y_0;
+ DSUMc(z0, z1, y_2, z2);
+ DSUMc(*s, z2, y_3, t);
+ *s = t;
+ if (xl != 0) {
+ s++;
+ t = (y_1+y_2+y_3);
+ *s = t;
+ r += 2;
+ }
+ else {
+ ASSERT((y_1+y_2+y_3) == 0);
+ }
+ }
+ if (*s == 0)
+ return (s - r0);
+ else
+ return (s - r0) + 1;
+}
+
+
+/*
+** Multiply the digit d with the digits in x and store the result in r
+*/
+static dsize_t D_mul(ErtsDigit* x, dsize_t xl, ErtsDigit d, ErtsDigit* r)
+{
+ ErtsDigit c = 0;
+ dsize_t rl = xl;
+ ErtsDigit p;
+
+ switch(d) {
+ case 0:
+ ZERO_DIGITS(r, 1);
+ return 1;
+ case 1:
+ if (x != r)
+ MOVE_DIGITS(r, x, xl);
+ return xl;
+ case 2:
+ while(xl--) {
+ p = *x;
+ DSUMc(p, p, c, p);
+ *r++ = p;
+ x++;
+ }
+ break;
+ default:
+ while(xl--) {
+ DMULc(d, *x, c, p);
+ *r++ = p;
+ x++;
+ }
+ break;
+ }
+ if (c == 0)
+ return rl;
+ *r = c;
+ return rl+1;
+}
+
+/*
+** Multiply and subtract
+** calculate r(i) = x(i) - d*y(i)
+** assumption: xl == yl || xl == yl+1
+**
+** Return size of r
+** 0 means borrow
+*/
+static dsize_t D_mulsub(ErtsDigit* x, dsize_t xl, ErtsDigit d,
+ ErtsDigit* y, dsize_t yl, ErtsDigit* r)
+{
+ ErtsDigit c = 0;
+ ErtsDigit b = 0;
+ ErtsDigit c0;
+ ErtsDigit* r0 = r;
+ ErtsDigit s;
+
+ ASSERT(xl == yl || xl == yl+1);
+
+ xl -= yl;
+ while(yl--) {
+ DMULc(d, *y, c, c0);
+ DSUBb(*x, c0, b, s);
+ *r++ = s;
+ x++;
+ y++;
+ }
+ if (xl == 0) {
+ if (c != 0 || b != 0)
+ return 0;
+ }
+ else { /* xl == 1 */
+ DSUBb(*x, c, b, s);
+ *r++ = s;
+ }
+ if (b != 0) return 0;
+
+ do {
+ r--;
+ } while(*r == 0 && r != r0);
+ return (r - r0) + 1;
+}
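+/*
+ * D_mulsub is the trial-quotient step of the long division below: a
+ * return value of 0 signals that the quotient digit was an
+ * overestimate (the subtraction borrowed), which I_div and I_rem
+ * compensate for by negating the partial remainder and flipping
+ * their r_signed flag.
+ */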
+
+/*
+** Divide the digits in x by the digit d;
+** the quotient is returned in q and the remainder digit in r
+** x and q may be equal
+*/
+static dsize_t D_div(ErtsDigit* x, dsize_t xl, ErtsDigit d, ErtsDigit* q, ErtsDigit* r)
+{
+ ErtsDigit* xp = x + (xl-1);
+ ErtsDigit* qp = q + (xl-1);
+ dsize_t qsz = xl;
+ ErtsDigit a1;
+
+ a1 = *xp;
+ xp--;
+
+ if (d > a1) {
+ if (xl == 1) {
+ *r = a1;
+ *qp = 0;
+ return 1;
+ }
+ qsz--;
+ qp--;
+ }
+
+ do {
+ ErtsDigit q0, a0, b1, b0, b;
+
+ if (d > a1) {
+ a0 = *xp;
+ xp--;
+ }
+ else {
+ a0 = a1; a1 = 0;
+ }
+ DDIV(a1, a0, d, q0);
+ DMUL(d, q0, b1, b0);
+ DSUB(a0,b0, b, a1);
+ *qp = q0;
+ qp--;
+ } while (xp >= x);
+
+ *r = a1;
+ return qsz;
+}
+
+/*
+** Divide the digits in x by the digits in y; return the quotient in q
+** and the remainder in r
+** assume that integer(x) > integer(y)
+** The remainder is stored in r and its length in *rlp
+** Return quotient size
+*/
+
+static dsize_t I_div(ErtsDigit* x, dsize_t xl, ErtsDigit* y, dsize_t yl,
+ ErtsDigit* q, ErtsDigit* r, dsize_t* rlp)
+{
+ ErtsDigit* rp;
+ ErtsDigit* qp;
+ ErtsDigit b1 = y[yl-1];
+ ErtsDigit b2 = y[yl-2];
+ ErtsDigit a1;
+ ErtsDigit a2;
+ int r_signed = 0;
+ dsize_t ql;
+ dsize_t rl;
+
+ if (x != r)
+ MOVE_DIGITS(r, x, xl);
+ rp = r + (xl-yl);
+ rl = xl;
+
+ ZERO_DIGITS(q, xl-yl+1);
+ qp = q + (xl-yl);
+ ql = 0;
+
+ /* Adjust length */
+ a1 = rp[yl-1];
+ a2 = rp[yl-2];
+ if (b1 < a1 || (b1 == a1 && b2 <= a2))
+ ql = 1;
+
+ do {
+ ErtsDigit q0;
+ dsize_t nsz = yl;
+ dsize_t nnsz;
+
+ a1 = rp[yl-1];
+ a2 = rp[yl-2];
+
+ if (b1 < a1)
+ DDIV2(a1,a2,b1,b2,q0);
+ else if (b1 > a1) {
+ DDIV(a1,a2,b1,q0);
+ nsz++;
+ rp--;
+ qp--;
+ ql++;
+ }
+ else { /* (b1 == a1) */
+ if (b2 <= a2)
+ q0 = 1;
+ else {
+ q0 = D_MASK;
+ nsz++;
+ rp--;
+ qp--;
+ ql++;
+ }
+ }
+
+ if (r_signed)
+ ql = D_sub(qp, ql, q0, qp);
+ else
+ ql = D_add(qp, ql, q0, qp);
+
+ if ((nnsz = D_mulsub(rp, nsz, q0, y, yl, rp)) == 0) {
+ nnsz = Z_sub(r, rl, r);
+ if (nsz > (rl-nnsz))
+ nnsz = nsz - (rl-nnsz);
+ else
+ nnsz = 1;
+ r_signed = !r_signed;
+ }
+
+ if ((nnsz == 1) && (*rp == 0))
+ nnsz = 0;
+ rp = rp - (yl-nnsz);
+ rl -= (nsz-nnsz);
+ qp = qp - (yl-nnsz);
+ ql += (yl-nnsz);
+ } while (I_comp(r, rl, y, yl) >= 0);
+
+ ql -= (q - qp);
+ qp = q;
+
+ if (rl == 0)
+ rl = 1;
+
+ while(rl > 1 && r[rl-1] == 0) /* Remove "trailing zeroes" */
+ --rl;
+
+ if (r_signed && (rl > 1 || *r != 0)) {
+ rl = I_sub(y, yl, r, rl, r);
+ ql = D_sub(qp, ql, 1, qp);
+ }
+
+ *rlp = rl;
+ return ql;
+}
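+/*
+ * The loop above is schoolbook long division: each round estimates a
+ * quotient digit q0 from the top one or two digits, multiplies and
+ * subtracts, and when the estimate overshoots it lets the remainder
+ * go negative (r_signed) and repairs the sign and quotient at the end.
+ */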
+
+/*
+** Remainder of digits in x and a digit d
+*/
+static ErtsDigit D_rem(ErtsDigit* x, dsize_t xl, ErtsDigit d)
+{
+ ErtsDigit rem = 0;
+
+ x += (xl-1);
+ do {
+ if (rem != 0)
+ DREM(rem, *x, d, rem);
+ else
+ DREM(0, *x, d, rem);
+ x--;
+ xl--;
+ } while(xl > 0);
+ return rem;
+}
+
+/*
+** Remainder of x and y
+**
+** Assumptions: xl >= yl, yl > 1
+** r must have room for at least xl digits
+*/
+static dsize_t I_rem(ErtsDigit* x, dsize_t xl, ErtsDigit* y, dsize_t yl, ErtsDigit* r)
+{
+ ErtsDigit* rp;
+ ErtsDigit b1 = y[yl-1];
+ ErtsDigit b2 = y[yl-2];
+ ErtsDigit a1;
+ ErtsDigit a2;
+ int r_signed = 0;
+ dsize_t rl;
+
+ if (x != r)
+ MOVE_DIGITS(r, x, xl);
+ rp = r + (xl-yl);
+ rl = xl;
+
+ do {
+ ErtsDigit q0;
+ dsize_t nsz = yl;
+ dsize_t nnsz;
+
+ a1 = rp[yl-1];
+ a2 = rp[yl-2];
+
+ if (b1 < a1)
+ DDIV2(a1,a2,b1,b2,q0);
+ else if (b1 > a1) {
+ DDIV(a1,a2,b1,q0);
+ nsz++;
+ rp--;
+ }
+ else { /* (b1 == a1) */
+ if (b2 <= a2)
+ q0 = 1;
+ else {
+ q0 = D_MASK;
+ nsz++;
+ rp--;
+ }
+ }
+
+ if ((nnsz = D_mulsub(rp, nsz, q0, y, yl, rp)) == 0) {
+ nnsz = Z_sub(r, rl, r);
+ if (nsz > (rl-nnsz))
+ nnsz = nsz - (rl-nnsz);
+ else
+ nnsz = 1;
+ r_signed = !r_signed;
+ }
+
+ if (nnsz == 1 && *rp == 0)
+ nnsz = 0;
+
+ rp = rp - (yl-nnsz);
+ rl -= (nsz-nnsz);
+ } while (I_comp(r, rl, y, yl) >= 0);
+
+ if (rl == 0)
+ rl = 1;
+
+ while(rl > 1 && r[rl-1] == 0) /* Remove "trailing zeroes" */
+ --rl;
+
+ if (r_signed && (rl > 1 || *r != 0))
+ rl = I_sub(y, yl, r, rl, r);
+ return rl;
+}
+
+/*
+** Strip trailing (most significant) zero digits after a bitwise operation
+** and convert a negative result from complement form
+*/
+static dsize_t I_btrail(ErtsDigit* r0, ErtsDigit* r, short sign)
+{
+    /* convert a negative result from complement form back to magnitude */
+ if (sign) {
+ dsize_t rl;
+ ErtsDigit d;
+
+ /* 1 remove all 0xffff words */
+ do {
+ r--;
+ } while(((d = *r) == D_MASK) && (r != r0));
+
+ /* 2 complement high digit */
+ if (d == D_MASK)
+ *r = 0;
+ else {
+ ErtsDigit prev_mask = 0;
+ ErtsDigit mask = (DCONST(1) << (D_EXP-1));
+
+ while((d & mask) == mask) {
+ prev_mask = mask;
+ mask = (prev_mask >> 1) | (DCONST(1)<<(D_EXP-1));
+ }
+ *r = ~d & ~prev_mask;
+ }
+ rl = (r - r0) + 1;
+ while(r != r0) {
+ r--;
+ *r = ~*r;
+ }
+ return D_add(r0, rl, 1, r0);
+ }
+
+ do {
+ r--;
+ } while(*r == 0 && r != r0);
+ return (r - r0) + 1;
+}
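+/*
+ * The bitwise operations below work on sign-and-magnitude bignums by
+ * forming the complement of negative operands on the fly: DSUB with an
+ * initial borrow of 1 turns each digit of |y| into the corresponding
+ * digit of |y|-1, and ~ of that is the two's-complement bit pattern
+ * of -|y|. I_btrail then converts a negative result back to magnitude
+ * form.
+ */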
+
+/*
+** Bitwise and
+*/
+static dsize_t I_band(ErtsDigit* x, dsize_t xl, short xsgn,
+ ErtsDigit* y, dsize_t yl, short ysgn, ErtsDigit* r)
+{
+ ErtsDigit* r0 = r;
+ short sign = xsgn && ysgn;
+
+ ASSERT(xl >= yl);
+
+ xl -= yl;
+
+ if (!xsgn) {
+ if (!ysgn) {
+ while(yl--)
+ *r++ = *x++ & *y++;
+ }
+ else {
+ ErtsDigit b;
+ ErtsDigit c;
+
+ DSUB(*y,1,b,c);
+ *r++ = *x++ & ~c;
+ y++;
+ yl--;
+ while(yl--) {
+ DSUBb(*y,0,b,c);
+ *r++ = *x++ & ~c;
+ y++;
+ }
+ while (xl--) {
+ *r++ = *x++;
+ }
+ }
+ }
+ else {
+ if (!ysgn) {
+ ErtsDigit b;
+ ErtsDigit c;
+
+ DSUB(*x,1,b,c);
+ *r = ~c & *y;
+ x++; y++; r++;
+ yl--;
+ while(yl--) {
+ DSUBb(*x,0,b,c);
+ *r++ = ~c & *y++;
+ x++;
+ }
+ }
+ else {
+ ErtsDigit b1, b2;
+ ErtsDigit c1, c2;
+
+ DSUB(*x,1,b1,c1);
+ DSUB(*y,1,b2,c2);
+ *r++ = ~c1 & ~c2;
+ x++; y++;
+ yl--;
+ while(yl--) {
+ DSUBb(*x,0,b1,c1);
+ DSUBb(*y,0,b2,c2);
+ *r++ = ~c1 & ~c2;
+ x++; y++;
+ }
+ while(xl--)
+ *r++ = ~*x++;
+ }
+ }
+ return I_btrail(r0, r, sign);
+}
+
+/*
+ * Bitwise 'or'.
+ */
+static dsize_t
+I_bor(ErtsDigit* x, dsize_t xl, short xsgn, ErtsDigit* y,
+ dsize_t yl, short ysgn, ErtsDigit* r)
+{
+ ErtsDigit* r0 = r;
+ short sign = xsgn || ysgn;
+
+ ASSERT(xl >= yl);
+
+ xl -= yl;
+
+ if (!xsgn) {
+ if (!ysgn) {
+ while(yl--)
+ *r++ = *x++ | *y++;
+ while(xl--)
+ *r++ = *x++;
+ }
+ else {
+ ErtsDigit b;
+ ErtsDigit c;
+
+ DSUB(*y,1,b,c);
+ *r++ = *x++ | ~c;
+ y++;
+ yl--;
+ while(yl--) {
+ DSUBb(*y,0,b,c);
+ *r++ = *x++ | ~c;
+ y++;
+ }
+ }
+ }
+ else {
+ if (!ysgn) {
+ ErtsDigit b;
+ ErtsDigit c;
+
+ DSUB(*x,1,b,c);
+ *r++ = ~c | *y++;
+ x++;
+ yl--;
+ while(yl--) {
+ DSUBb(*x,0,b,c);
+ *r++ = ~c | *y++;
+ x++;
+ }
+ while(xl--) {
+ DSUBb(*x,0,b,c);
+ *r++ = ~c;
+ x++;
+ }
+ }
+ else {
+ ErtsDigit b1, b2;
+ ErtsDigit c1, c2;
+
+ DSUB(*x,1,b1,c1);
+ DSUB(*y,1,b2,c2);
+ *r++ = ~c1 | ~c2;
+ x++; y++;
+ yl--;
+ while(yl--) {
+ DSUBb(*x,0,b1,c1);
+ DSUBb(*y,0,b2,c2);
+ *r++ = ~c1 | ~c2;
+ x++; y++;
+ }
+ }
+ }
+ return I_btrail(r0, r, sign);
+}
+
+/*
+** Bitwise xor
+*/
+static dsize_t I_bxor(ErtsDigit* x, dsize_t xl, short xsgn,
+ ErtsDigit* y, dsize_t yl, short ysgn, ErtsDigit* r)
+{
+ ErtsDigit* r0 = r;
+ short sign = xsgn != ysgn;
+
+ ASSERT(xl >= yl);
+
+ xl -= yl;
+
+ if (!xsgn) {
+ if (!ysgn) {
+ while(yl--)
+ *r++ = *x++ ^ *y++;
+ while(xl--)
+ *r++ = *x++;
+ }
+ else {
+ ErtsDigit b;
+ ErtsDigit c;
+
+ DSUB(*y,1,b,c);
+ *r++ = *x++ ^ ~c;
+ y++;
+ yl--;
+ while(yl--) {
+ DSUBb(*y,0,b,c);
+ *r++ = *x++ ^ ~c;
+ y++;
+ }
+ while(xl--)
+ *r++ = ~*x++;
+ }
+ }
+ else {
+ if (!ysgn) {
+ ErtsDigit b;
+ ErtsDigit c;
+
+ DSUB(*x,1,b,c);
+ *r++ = ~c ^ *y++;
+ x++;
+ yl--;
+ while(yl--) {
+ DSUBb(*x,0,b,c);
+ *r++ = ~c ^ *y++;
+ x++;
+ }
+ while(xl--)
+ *r++ = ~*x++;
+ }
+ else {
+ ErtsDigit b1, b2;
+ ErtsDigit c1, c2;
+
+ DSUB(*x,1,b1,c1);
+ DSUB(*y,1,b2,c2);
+ *r++ = ~c1 ^ ~c2;
+ x++; y++;
+ yl--;
+ while(yl--) {
+ DSUBb(*x,0,b1,c1);
+ DSUBb(*y,0,b2,c2);
+ *r++ = ~c1 ^ ~c2;
+ x++; y++;
+ }
+ while(xl--) {
+ *r++ = *x++;
+ }
+ }
+ }
+ return I_btrail(r0, r, sign);
+}
+
+/*
+** Bitwise not simulated as
+** bnot -X == (X - 1)
+** bnot +X == -(X + 1)
+*/
+static dsize_t I_bnot(ErtsDigit* x, dsize_t xl, short xsgn, ErtsDigit* r)
+{
+ if (xsgn)
+ return D_add(x, xl, 1, r);
+ else
+ return D_sub(x, xl, 1, r);
+}
+
+/*
+** Arithmetic shift: left when y > 0, right when y < 0
+*/
+static dsize_t I_lshift(ErtsDigit* x, dsize_t xl, Sint y,
+ short sign, ErtsDigit* r)
+{
+ if (y == 0) {
+ MOVE_DIGITS(r, x, xl);
+ return xl;
+ }
+ else if (xl == 1 && *x == 0) {
+ *r = 0;
+ return 1;
+ }
+ else {
+ long ay = (y < 0) ? -y : y;
+ int bw = ay / D_EXP;
+ int sw = ay % D_EXP;
+ dsize_t rl;
+ ErtsDigit a1=0;
+ ErtsDigit a0=0;
+
+ if (y > 0) { /* shift left */
+ rl = xl + bw + 1;
+
+ while(bw--)
+ *r++ = 0;
+	    if (sw) { /* NOTE! x >> D_EXP (e.g. x >> 32) is not guaranteed to be 0! */
+ while(xl--) {
+ a0 = (*x << sw) | a1;
+ a1 = (*x >> (D_EXP - sw));
+ *r++ = a0;
+ x++;
+ }
+ }
+ else {
+ while(xl--) {
+ *r++ = *x++;
+ }
+ }
+ if (a1 == 0)
+ return rl-1;
+ *r = a1;
+ return rl;
+ }
+ else { /* shift right */
+ ErtsDigit* r0 = r;
+ int add_one = 0;
+
+ if (xl <= bw) {
+ if (sign)
+ *r = 1;
+ else
+ *r = 0;
+ return 1;
+ }
+
+ if (sign) {
+ int zl = bw;
+ ErtsDigit* z = x;
+
+ while(zl--) {
+ if (*z != 0) {
+ add_one = 1;
+ break;
+ }
+ z++;
+ }
+ }
+
+ rl = xl - bw;
+ x += (xl-1);
+ r += (rl-1);
+ xl -= bw;
+	    if (sw) { /* NOTE! x >> D_EXP (e.g. x >> 32) is not guaranteed to be 0! */
+ while(xl--) {
+ a1 = (*x >> sw) | a0;
+ a0 = (*x << (D_EXP-sw));
+ *r-- = a1;
+ x--;
+ }
+ }
+ else {
+ while(xl--) {
+ *r-- = *x--;
+ }
+ }
+
+ if (sign && (a0 != 0))
+ add_one = 1;
+
+ if (r[rl] == 0) {
+ if (rl == 1) {
+ if (sign)
+ r[1] = 1;
+ return 1;
+ }
+ rl--;
+ }
+ if (add_one)
+ return D_add(r0, rl, 1, r0);
+ return rl;
+ }
+ }
+}
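+/*
+ * Note that for negative numbers the right shift rounds toward minus
+ * infinity: whenever a nonzero bit is shifted out of a negative
+ * operand, add_one bumps the magnitude by one, mimicking an
+ * arithmetic shift of the two's-complement value.
+ */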
+
+/*
+** Return floor(log2(x)), i.e. the index of the most significant set bit
+*/
+static int I_lg(ErtsDigit* x, dsize_t xl)
+{
+ dsize_t sz = xl - 1;
+ ErtsDigit d = x[sz];
+
+ sz *= D_EXP;
+ while(d != 0) {
+ d >>= 1;
+ sz++;
+ }
+ return sz - 1;
+}
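+/*
+ * Example: for x = {0, 1} with D_EXP == 32 (the value 2^32), sz
+ * starts at 32, the top digit 1 bumps it to 33, and 32 is returned;
+ * the result is the index of the most significant set bit.
+ */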
+
+/*
+** Create bigint on heap if necessary. Like the previously existing
+** make_small_or_big(), except for a HAlloc() instead of an
+** ArithAlloc().
+** NOTE: Only use erts_make_integer() when the order of heap fragments is
+** guaranteed to be correct.
+*/
+Eterm
+erts_make_integer(Uint x, Process *p)
+{
+ Eterm* hp;
+ if (IS_USMALL(0,x))
+ return make_small(x);
+ else {
+ hp = HAlloc(p, BIG_UINT_HEAP_SIZE);
+ return uint_to_big(x,hp);
+ }
+}
+
+/*
+** convert Uint to bigint
+** (must only be used if x is too big to be stored as a small)
+*/
+Eterm uint_to_big(Uint x, Eterm *y)
+{
+ *y = make_pos_bignum_header(1);
+ BIG_DIGIT(y, 0) = x;
+ return make_big(y);
+}
+
+
+/*
+** convert signed int to bigint
+*/
+Eterm small_to_big(Sint x, Eterm *y)
+{
+ if (x >= 0) {
+ *y = make_pos_bignum_header(1);
+ } else {
+ x = -x;
+ *y = make_neg_bignum_header(1);
+ }
+ BIG_DIGIT(y, 0) = x;
+ return make_big(y);
+}
+
+
+Eterm erts_uint64_to_big(Uint64 x, Eterm **hpp)
+{
+ Eterm *hp = *hpp;
+#ifdef ARCH_32
+ if (x >= (((Uint64) 1) << 32)) {
+ *hp = make_pos_bignum_header(2);
+ BIG_DIGIT(hp, 0) = (Uint) (x & ((Uint) 0xffffffff));
+ BIG_DIGIT(hp, 1) = (Uint) ((x >> 32) & ((Uint) 0xffffffff));
+ *hpp += 2;
+ }
+ else
+#endif
+ {
+ *hp = make_pos_bignum_header(1);
+ BIG_DIGIT(hp, 0) = (Uint) x;
+ *hpp += 1;
+ }
+ return make_big(hp);
+}
+
+Eterm erts_sint64_to_big(Sint64 x, Eterm **hpp)
+{
+ Eterm *hp = *hpp;
+ int neg;
+ if (x >= 0)
+ neg = 0;
+ else {
+ neg = 1;
+ x = -x;
+ }
+#ifdef ARCH_32
+ if (x >= (((Uint64) 1) << 32)) {
+ if (neg)
+ *hp = make_neg_bignum_header(2);
+ else
+ *hp = make_pos_bignum_header(2);
+ BIG_DIGIT(hp, 0) = (Uint) (x & ((Uint) 0xffffffff));
+ BIG_DIGIT(hp, 1) = (Uint) ((x >> 32) & ((Uint) 0xffffffff));
+ *hpp += 2;
+ }
+ else
+#endif
+ {
+ if (neg)
+ *hp = make_neg_bignum_header(1);
+ else
+ *hp = make_pos_bignum_header(1);
+ BIG_DIGIT(hp, 0) = (Uint) x;
+ *hpp += 1;
+ }
+ return make_big(hp);
+}
+
+/*
+** Convert a bignum to a double float
+*/
+int
+big_to_double(Eterm x, double* resp)
+{
+ double d = 0.0;
+ Eterm* xp = big_val(x);
+ dsize_t xl = BIG_SIZE(xp);
+ ErtsDigit* s = BIG_V(xp) + xl;
+ short xsgn = BIG_SIGN(xp);
+ double dbase = ((double)(D_MASK)+1);
+#ifndef NO_FPE_SIGNALS
+ volatile unsigned long *fpexnp = erts_get_current_fp_exception();
+#endif
+ __ERTS_SAVE_FP_EXCEPTION(fpexnp);
+
+ __ERTS_FP_CHECK_INIT(fpexnp);
+ while (xl--) {
+ d = d * dbase + *--s;
+
+ __ERTS_FP_ERROR(fpexnp, d, __ERTS_RESTORE_FP_EXCEPTION(fpexnp); return -1);
+ }
+
+ *resp = xsgn ? -d : d;
+ __ERTS_FP_ERROR(fpexnp,*resp,;);
+ __ERTS_RESTORE_FP_EXCEPTION(fpexnp);
+ return 0;
+}
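+/*
+ * The digits are folded most significant digit first, Horner style:
+ * d = (..(d_n * B + d_n-1) * B + ..) with B = 2^D_EXP, checking for
+ * a floating point exception after every step.
+ */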
+
+
+/*
+** Estimate the number of decimal digits (including the sign)
+*/
+int big_decimal_estimate(Eterm x)
+{
+ Eterm* xp = big_val(x);
+ int lg = I_lg(BIG_V(xp), BIG_SIZE(xp));
+ int lg10 = ((lg+1)*28/93)+1;
+
+ if (BIG_SIGN(xp)) lg10++; /* add sign */
+ return lg10+1; /* add null */
+}
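+/*
+ * 28/93 (~0.30108) is a slight rational overestimate of log10(2)
+ * (~0.30103), so lg10 never undercounts. Example: for a 32-bit value,
+ * lg = 31 and lg10 = (32*28)/93 + 1 = 10, and 2^32 - 1 does indeed
+ * have 10 decimal digits.
+ */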
+
+/*
+** Convert a bignum into a string of decimal digits
+*/
+
+static void write_big(Eterm x, void (*write_func)(void *, char), void *arg)
+{
+ Eterm* xp = big_val(x);
+ ErtsDigit* dx = BIG_V(xp);
+ dsize_t xl = BIG_SIZE(xp);
+ short sign = BIG_SIGN(xp);
+ ErtsDigit rem;
+
+ if (xl == 1 && *dx < D_DECIMAL_BASE) {
+ rem = *dx;
+ if (rem == 0)
+ (*write_func)(arg, '0');
+ else {
+ while(rem) {
+ (*write_func)(arg, (rem % 10) + '0');
+ rem /= 10;
+ }
+ }
+ }
+ else {
+ ErtsDigit* tmp = (ErtsDigit*) erts_alloc(ERTS_ALC_T_TMP,
+ sizeof(ErtsDigit)*xl);
+ dsize_t tmpl = xl;
+
+ MOVE_DIGITS(tmp, dx, xl);
+
+ while(1) {
+ tmpl = D_div(tmp, tmpl, D_DECIMAL_BASE, tmp, &rem);
+ if (tmpl == 1 && *tmp == 0) {
+ while(rem) {
+ (*write_func)(arg, (rem % 10)+'0');
+ rem /= 10;
+ }
+ break;
+ }
+ else {
+ int i = D_DECIMAL_EXP;
+ while(i--) {
+ (*write_func)(arg, (rem % 10)+'0');
+ rem /= 10;
+ }
+ }
+ }
+ erts_free(ERTS_ALC_T_TMP, (void *) tmp);
+ }
+
+ if (sign)
+ (*write_func)(arg, '-');
+}
+
+struct big_list__ {
+ Eterm *hp;
+ Eterm res;
+};
+
+static void
+write_list(void *arg, char c)
+{
+ struct big_list__ *blp = (struct big_list__ *) arg;
+ blp->res = CONS(blp->hp, make_small(c), blp->res);
+ blp->hp += 2;
+}
+
+Eterm erts_big_to_list(Eterm x, Eterm **hpp)
+{
+ struct big_list__ bl;
+ bl.hp = *hpp;
+ bl.res = NIL;
+ write_big(x, write_list, (void *) &bl);
+ *hpp = bl.hp;
+ return bl.res;
+}
+
+static void
+write_string(void *arg, char c)
+{
+ *(--(*((char **) arg))) = c;
+}
+
+char *erts_big_to_string(Eterm x, char *buf, Uint buf_sz)
+{
+ char *big_str = buf + buf_sz - 1;
+ *big_str = '\0';
+ write_big(x, write_string, (void *) &big_str);
+ ASSERT(buf <= big_str && big_str <= buf + buf_sz - 1);
+ return big_str;
+}
+
+/*
+** Normalize a bignum given a thing pointer, a length in digits and a sign;
+** patch in a zero digit if the length is odd
+*/
+static Eterm big_norm(Eterm *x, dsize_t xl, short sign)
+{
+ Uint arity;
+
+ if (xl == 1) {
+ Uint y = BIG_DIGIT(x, 0);
+
+ if (D_EXP < SMALL_BITS || IS_USMALL(sign, y)) {
+ if (sign)
+ return make_small(-((Sint)y));
+ else
+ return make_small(y);
+ }
+ }
+
+ /* __alpha__: This was fixed */
+ if ((arity = BIG_NEED_SIZE(xl)-1) > BIG_ARITY_MAX)
+ return NIL; /* signal error (too big) */
+
+ if (sign) {
+ *x = make_neg_bignum_header(arity);
+ }
+ else {
+ *x = make_pos_bignum_header(arity);
+ }
+ return make_big(x);
+}
+
+/*
+** Compare bignums
+*/
+int big_comp(Eterm x, Eterm y)
+{
+ Eterm* xp = big_val(x);
+ Eterm* yp = big_val(y);
+
+ if (BIG_SIGN(xp) == BIG_SIGN(yp)) {
+ int c = I_comp(BIG_V(xp), BIG_SIZE(xp), BIG_V(yp), BIG_SIZE(yp));
+ if (BIG_SIGN(xp))
+ return -c;
+ else
+ return c;
+ }
+ else
+ return BIG_SIGN(xp) ? -1 : 1;
+}
+
+/*
+** Unsigned compare
+*/
+int big_ucomp(Eterm x, Eterm y)
+{
+ Eterm* xp = big_val(x);
+ Eterm* yp = big_val(y);
+
+ return I_comp(BIG_V(xp), BIG_SIZE(xp), BIG_V(yp), BIG_SIZE(yp));
+}
+
+/*
+** Return number of bytes in the bignum
+*/
+dsize_t big_bytes(Eterm x)
+{
+ Eterm* xp = big_val(x);
+ dsize_t sz = BIG_SIZE(xp);
+ ErtsDigit d = BIG_DIGIT(xp, sz-1);
+
+ sz = (sz-1) * sizeof(ErtsDigit);
+ while (d != 0) {
+ ++sz;
+ d >>= 8;
+ }
+ return sz;
+}
+
+/*
+** Load a bignum from bytes
+** xsz is the number of bytes in xp
+*/
+Eterm bytes_to_big(byte *xp, dsize_t xsz, int xsgn, Eterm *r)
+{
+ ErtsDigit* rwp = BIG_V(r);
+ dsize_t rsz = 0;
+ ErtsDigit d;
+ int i;
+
+ while(xsz >= sizeof(ErtsDigit)) {
+ d = 0;
+ for(i = sizeof(ErtsDigit); --i >= 0;)
+ d = (d << 8) | xp[i];
+ *rwp = d;
+ rwp++;
+ xsz -= sizeof(ErtsDigit);
+ xp += sizeof(ErtsDigit);
+ rsz++;
+ }
+
+ if (xsz > 0) {
+ d = 0;
+ for(i = xsz; --i >= 0;)
+ d = (d << 8) | xp[i];
+ *rwp = d;
+ rwp++;
+ rsz++;
+ }
+ return big_norm(r, rsz, (short) xsgn);
+}
+
+/*
+** Store digits in the array of bytes pointed to by p
+*/
+byte* big_to_bytes(Eterm x, byte *p)
+{
+ ErtsDigit* xr = big_v(x);
+ dsize_t xl = big_size(x);
+ ErtsDigit d;
+ int i;
+
+ while(xl > 1) {
+ d = *xr;
+ xr++;
+ for(i = 0; i < sizeof(ErtsDigit); ++i) {
+ p[i] = d & 0xff;
+ d >>= 8;
+ }
+ p += sizeof(ErtsDigit);
+ xl--;
+ }
+ d = *xr;
+ do {
+ *p++ = d & 0xff;
+ d >>= 8;
+ } while (d != 0);
+ return p;
+}
+
+/*
+ * Converts a positive term (small or bignum) to an Uint.
+ *
+ * Fails returning 0 if the term is neither a small nor a bignum,
+ * if it's negative, or the big number does not fit in an Uint;
+ * in addition the error reason, BADARG or SYSTEM_LIMIT, will be
+ * stored in *up.
+ *
+ * Otherwise returns a non-zero value and the converted number
+ * in *up.
+ */
+
+int
+term_to_Uint(Eterm term, Uint *up)
+{
+ if (is_small(term)) {
+ Sint i = signed_val(term);
+ if (i < 0) {
+ *up = BADARG;
+ return 0;
+ }
+ *up = (Uint) i;
+ return 1;
+ } else if (is_big(term)) {
+ ErtsDigit* xr = big_v(term);
+ dsize_t xl = big_size(term);
+ Uint uval = 0;
+ int n = 0;
+
+ if (big_sign(term)) {
+ *up = BADARG;
+ return 0;
+ } else if (xl*D_EXP > sizeof(Uint)*8) {
+ *up = SYSTEM_LIMIT;
+ return 0;
+ }
+ while (xl-- > 0) {
+ uval |= ((Uint)(*xr++)) << n;
+ n += D_EXP;
+ }
+ *up = uval;
+ return 1;
+ } else {
+ *up = BADARG;
+ return 0;
+ }
+}
+
+int term_to_Sint(Eterm term, Sint *sp)
+{
+ if (is_small(term)) {
+ *sp = signed_val(term);
+ return 1;
+ } else if (is_big(term)) {
+ ErtsDigit* xr = big_v(term);
+ dsize_t xl = big_size(term);
+ int sign = big_sign(term);
+ Uint uval = 0;
+ int n = 0;
+
+ if (xl*D_EXP > sizeof(Uint)*8) {
+ return 0;
+ }
+ while (xl-- > 0) {
+ uval |= ((Uint)(*xr++)) << n;
+ n += D_EXP;
+ }
+ if (sign) {
+ uval = -uval;
+ if ((Sint)uval > 0)
+ return 0;
+ } else {
+ if ((Sint)uval < 0)
+ return 0;
+ }
+ *sp = uval;
+ return 1;
+ } else {
+ return 0;
+ }
+}
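+/*
+ * The final sign tests catch magnitudes that fit in a Uint but not in
+ * a Sint: e.g. on a 32-bit word the magnitude 0x80000001 negates to
+ * 0x7FFFFFFF, which casts to a positive Sint and is rejected.
+ */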
+
+/*
+** Add and subtract
+*/
+static Eterm B_plus_minus(ErtsDigit *x, dsize_t xl, short xsgn,
+ ErtsDigit *y, dsize_t yl, short ysgn, Eterm *r)
+{
+ if (xsgn == ysgn) {
+ if (xl > yl)
+ return big_norm(r, I_add(x,xl,y,yl,BIG_V(r)), xsgn);
+ else
+ return big_norm(r, I_add(y,yl,x,xl,BIG_V(r)), xsgn);
+ }
+ else {
+ int comp = I_comp(x, xl, y, yl);
+ if (comp == 0)
+ return make_small(0);
+ else if (comp > 0)
+ return big_norm(r, I_sub(x,xl,y,yl,BIG_V(r)), xsgn);
+ else
+ return big_norm(r, I_sub(y,yl,x,xl,BIG_V(r)), ysgn);
+ }
+}
+
+/*
+** Add bignums
+*/
+Eterm big_plus(Eterm x, Eterm y, Eterm *r)
+{
+ Eterm* xp = big_val(x);
+ Eterm* yp = big_val(y);
+
+ return B_plus_minus(BIG_V(xp),BIG_SIZE(xp),(short) BIG_SIGN(xp),
+ BIG_V(yp),BIG_SIZE(yp),(short) BIG_SIGN(yp), r);
+}
+
+/*
+** Subtract bignums
+*/
+
+Eterm big_minus(Eterm x, Eterm y, Eterm *r)
+{
+ Eterm* xp = big_val(x);
+ Eterm* yp = big_val(y);
+
+ return B_plus_minus(BIG_V(xp),BIG_SIZE(xp),(short) BIG_SIGN(xp),
+ BIG_V(yp),BIG_SIZE(yp),(short) !BIG_SIGN(yp), r);
+}
+
+/*
+** Subtract a digit from big number
+*/
+Eterm big_minus_small(Eterm x, Eterm y, Eterm *r)
+{
+ Eterm* xp = big_val(x);
+
+ if (BIG_SIGN(xp))
+ return big_norm(r, D_add(BIG_V(xp),BIG_SIZE(xp), (ErtsDigit) y, BIG_V(r)),
+ (short) BIG_SIGN(xp));
+ else
+ return big_norm(r, D_sub(BIG_V(xp),BIG_SIZE(xp), (ErtsDigit) y, BIG_V(r)),
+ (short) BIG_SIGN(xp));
+}
+
+/*
+** Multiply smallnums
+*/
+
+Eterm small_times(Sint x, Sint y, Eterm *r)
+{
+ short sign = (x<0) != (y<0);
+ ErtsDigit xu = (x > 0) ? x : -x;
+ ErtsDigit yu = (y > 0) ? y : -y;
+ ErtsDigit d1=0;
+ ErtsDigit d0;
+ Uint arity;
+
+ DMULc(xu, yu, d1, d0);
+
+ if (!d1 && ((D_EXP < SMALL_BITS) || IS_USMALL(sign, d0))) {
+ if (sign)
+ return make_small(-((Sint)d0));
+ else
+ return make_small(d0);
+ }
+
+ BIG_DIGIT(r,0) = d0;
+ arity = d1 ? 2 : 1;
+ if (sign)
+ *r = make_neg_bignum_header(arity);
+ else
+ *r = make_pos_bignum_header(arity);
+ if (d1)
+ BIG_DIGIT(r,1) = d1;
+ return make_big(r);
+}
+
+/*
+** Multiply bignums
+*/
+
+Eterm big_times(Eterm x, Eterm y, Eterm *r)
+{
+ Eterm* xp = big_val(x);
+ Eterm* yp = big_val(y);
+
+ short sign = BIG_SIGN(xp) != BIG_SIGN(yp);
+ dsize_t xsz = BIG_SIZE(xp);
+ dsize_t ysz = BIG_SIZE(yp);
+ dsize_t rsz;
+
+ if (ysz == 1)
+ rsz = D_mul(BIG_V(xp), xsz, BIG_DIGIT(yp, 0), BIG_V(r));
+ else if (xsz == 1)
+ rsz = D_mul(BIG_V(yp), ysz, BIG_DIGIT(xp, 0), BIG_V(r));
+ else if (xp == yp) {
+ ZERO_DIGITS(BIG_V(r), xsz+1);
+ rsz = I_sqr(BIG_V(xp), xsz, BIG_V(r));
+ }
+ else if (xsz >= ysz) {
+ ZERO_DIGITS(BIG_V(r), xsz);
+ rsz = I_mul(BIG_V(xp), xsz, BIG_V(yp), ysz, BIG_V(r));
+ }
+ else {
+ ZERO_DIGITS(BIG_V(r), ysz);
+ rsz = I_mul(BIG_V(yp), ysz, BIG_V(xp), xsz, BIG_V(r));
+ }
+ return big_norm(r, rsz, sign);
+}
+
+
+/*
+** Divide bignums
+*/
+
+Eterm big_div(Eterm x, Eterm y, Eterm *q)
+{
+ Eterm* xp = big_val(x);
+ Eterm* yp = big_val(y);
+
+ short sign = BIG_SIGN(xp) != BIG_SIGN(yp);
+ dsize_t xsz = BIG_SIZE(xp);
+ dsize_t ysz = BIG_SIZE(yp);
+ dsize_t qsz;
+
+ if (ysz == 1) {
+ ErtsDigit rem;
+ qsz = D_div(BIG_V(xp), xsz, BIG_DIGIT(yp,0), BIG_V(q), &rem);
+ }
+ else {
+ Eterm* remp;
+ dsize_t rem_sz;
+
+ qsz = xsz - ysz + 1;
+ remp = q + BIG_NEED_SIZE(qsz);
+ qsz = I_div(BIG_V(xp), xsz, BIG_V(yp), ysz, BIG_V(q), BIG_V(remp),
+ &rem_sz);
+ }
+ return big_norm(q, qsz, sign);
+}
+
+/*
+** Remainder
+*/
+Eterm big_rem(Eterm x, Eterm y, Eterm *r)
+{
+ Eterm* xp = big_val(x);
+ Eterm* yp = big_val(y);
+ short sign = BIG_SIGN(xp);
+ dsize_t xsz = BIG_SIZE(xp);
+ dsize_t ysz = BIG_SIZE(yp);
+
+ if (ysz == 1) {
+ ErtsDigit rem;
+ rem = D_rem(BIG_V(xp), xsz, BIG_DIGIT(yp,0));
+ if (IS_USMALL(sign, rem)) {
+ if (sign)
+ return make_small(-(Sint)rem);
+ else
+ return make_small(rem);
+ }
+ else {
+ if (sign)
+ *r = make_neg_bignum_header(1);
+ else
+ *r = make_pos_bignum_header(1);
+ BIG_DIGIT(r, 0) = rem;
+ return make_big(r);
+ }
+ }
+ else {
+ dsize_t rsz = I_rem(BIG_V(xp), xsz, BIG_V(yp), ysz, BIG_V(r));
+ return big_norm(r, rsz, sign);
+ }
+}
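+/*
+ * As with Erlang's rem operator, the sign of the result follows the
+ * sign of the dividend x; the divisor's sign is ignored.
+ */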
+
+Eterm big_neg(Eterm x, Eterm *r)
+{
+ Eterm* xp = big_val(x);
+ dsize_t xsz = BIG_SIZE(xp);
+ short xsgn = BIG_SIGN(xp);
+
+ MOVE_DIGITS(BIG_V(r), BIG_V(xp), xsz);
+ return big_norm(r, xsz, (short) !xsgn);
+}
+
+Eterm big_band(Eterm x, Eterm y, Eterm *r)
+{
+ Eterm* xp = big_val(x);
+ Eterm* yp = big_val(y);
+
+ short xsgn = BIG_SIGN(xp);
+ short ysgn = BIG_SIGN(yp);
+ short sign = xsgn && ysgn;
+ dsize_t xsz = BIG_SIZE(xp);
+ dsize_t ysz = BIG_SIZE(yp);
+
+ if (xsz >= ysz)
+ return big_norm(r,I_band(BIG_V(xp),xsz,xsgn,
+ BIG_V(yp),ysz,ysgn,
+ BIG_V(r)),sign);
+ else
+ return big_norm(r,I_band(BIG_V(yp),ysz,ysgn,
+ BIG_V(xp),xsz,xsgn,
+ BIG_V(r)),sign);
+}
+
+
+Eterm big_bor(Eterm x, Eterm y, Eterm *r)
+{
+ Eterm* xp = big_val(x);
+ Eterm* yp = big_val(y);
+ short xsgn = BIG_SIGN(xp);
+ short ysgn = BIG_SIGN(yp);
+ short sign = (xsgn || ysgn);
+ dsize_t xsz = BIG_SIZE(xp);
+ dsize_t ysz = BIG_SIZE(yp);
+
+ if (xsz >= ysz)
+ return big_norm(r,I_bor(BIG_V(xp),xsz,xsgn,
+ BIG_V(yp),ysz,ysgn,
+ BIG_V(r)),sign);
+ else
+ return big_norm(r,I_bor(BIG_V(yp),ysz,ysgn,
+ BIG_V(xp),xsz,xsgn,
+ BIG_V(r)),sign);
+}
+
+
+Eterm big_bxor(Eterm x, Eterm y, Eterm *r)
+{
+ Eterm* xp = big_val(x);
+ Eterm* yp = big_val(y);
+ short xsgn = BIG_SIGN(xp);
+ short ysgn = BIG_SIGN(yp);
+ short sign = (xsgn != ysgn);
+ dsize_t xsz = BIG_SIZE(xp);
+ dsize_t ysz = BIG_SIZE(yp);
+
+ if (xsz >= ysz)
+ return big_norm(r,I_bxor(BIG_V(xp),xsz,xsgn,
+ BIG_V(yp),ysz,ysgn,
+ BIG_V(r)),sign);
+ else
+ return big_norm(r,I_bxor(BIG_V(yp),ysz,ysgn,
+ BIG_V(xp),xsz,xsgn,
+ BIG_V(r)),sign);
+}
+
+Eterm big_bnot(Eterm x, Eterm *r)
+{
+ Eterm* xp = big_val(x);
+ short sign = !BIG_SIGN(xp);
+ dsize_t xsz = BIG_SIZE(xp);
+
+ return big_norm(r, I_bnot(BIG_V(xp), xsz, sign, BIG_V(r)), sign);
+}
+
+Eterm big_lshift(Eterm x, Sint y, Eterm *r)
+{
+ Eterm* xp = big_val(x);
+ short sign = BIG_SIGN(xp);
+ dsize_t xsz = BIG_SIZE(xp);
+
+ return big_norm(r, I_lshift(BIG_V(xp), xsz, y, sign, BIG_V(r)), sign);
+}
+
+
+/* add unsigned small int y to x */
+
+Eterm big_plus_small(Eterm x, Uint y, Eterm *r)
+{
+ Eterm* xp = big_val(x);
+
+ if (BIG_SIGN(xp))
+ return big_norm(r, D_sub(BIG_V(xp),BIG_SIZE(xp), (ErtsDigit) y,
+ BIG_V(r)), (short) BIG_SIGN(xp));
+ else
+ return big_norm(r, D_add(BIG_V(xp),BIG_SIZE(xp), (ErtsDigit) y,
+ BIG_V(r)), (short) BIG_SIGN(xp));
+}
+
+Eterm big_times_small(Eterm x, Uint y, Eterm *r)
+{
+ Eterm* xp = big_val(x);
+
+ return big_norm(r, D_mul(BIG_V(xp),BIG_SIZE(xp), (ErtsDigit) y,
+ BIG_V(r)), (short) BIG_SIGN(xp));
+}
+
+/*
+** Expects the big to fit.
+*/
+Uint32 big_to_uint32(Eterm b)
+{
+ Uint u;
+ if (!term_to_Uint(b, &u)) {
+ ASSERT(0);
+ return 0;
+ }
+ return u;
+}
+
+/*
+ * Check if a fixnum or bignum equals 2^32.
+ */
+int term_equals_2pow32(Eterm x)
+{
+ if (sizeof(Uint) > 4) {
+ Uint u;
+ if (!term_to_Uint(x, &u))
+ return 0;
+ return (u & 0xFFFFFFFF) == 0 && ((u >> 16) >> 16) == 1;
+ } else {
+ Eterm *bp;
+ if (!is_big(x))
+ return 0;
+ bp = big_val(x);
+#if D_EXP == 16 /* 16-bit platforms are not really supported! */
+ return (BIG_SIZE(bp) == 3) && !BIG_DIGIT(bp,0) && !BIG_DIGIT(bp,1) &&
+ BIG_DIGIT(bp,2) == 1;
+#elif D_EXP == 32
+ return (BIG_SIZE(bp) == 2) && !BIG_DIGIT(bp,0) &&
+ BIG_DIGIT(bp,1) == 1;
+#elif D_EXP == 64
+ return (BIG_SIZE(bp) == 1) &&
+ ((BIG_DIGIT(bp,0) & 0xffffffff) == 0) &&
+ ((BIG_DIGIT(bp,0) >> 32) == 1);
+#endif
+ return 0;
+ }
+}
diff --git a/erts/emulator/beam/big.h b/erts/emulator/beam/big.h
new file mode 100644
index 0000000000..b8e38d482c
--- /dev/null
+++ b/erts/emulator/beam/big.h
@@ -0,0 +1,155 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 1996-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+#ifndef __BIG_H__
+#define __BIG_H__
+
+#ifndef __SYS_H__
+#include "sys.h"
+#endif
+
+#ifndef __CONFIG_H__
+#include "erl_vm.h"
+#endif
+
+#ifndef __GLOBAL_H__
+#include "global.h"
+#endif
+
+typedef Uint ErtsDigit;
+
+#if (SIZEOF_VOID_P == 4) && defined(SIZEOF_LONG_LONG) && (SIZEOF_LONG_LONG == 8)
+/* Assume 32-bit machine with long long support */
+typedef Uint64 ErtsDoubleDigit;
+typedef Uint16 ErtsHalfDigit;
+#define BIG_HAVE_DOUBLE_DIGIT 1
+
+#elif (SIZEOF_VOID_P == 4)
+/* Assume 32-bit machine with no long long support */
+#undef BIG_HAVE_DOUBLE_DIGIT
+typedef Uint16 ErtsHalfDigit;
+
+#elif (SIZEOF_VOID_P == 8)
+/* Assume 64-bit machine; there is no 128-bit double-digit type */
+#undef BIG_HAVE_DOUBLE_DIGIT
+typedef Uint32 ErtsHalfDigit;
+#else
+#error "can not determine machine size"
+#endif
+
+#define D_DECIMAL_EXP 9
+#define D_DECIMAL_BASE 1000000000
+
+typedef Uint dsize_t; /* Vector size type */
+
+#define D_EXP (SIZEOF_VOID_P*8)
+#define D_MASK ((ErtsDigit)(-1)) /* D_BASE-1 */
+
+/* macros for bignum objects */
+#define big_v(x) BIG_V(big_val(x))
+#define big_sign(x) BIG_SIGN(big_val(x))
+#define big_arity(x) BIG_ARITY(big_val(x))
+#define big_digit(x,i) BIG_DIGIT(big_val(x),i)
+#define big_size(x) BIG_SIZE(big_val(x))
+
+
+/* macros for thing pointers */
+
+#define BIG_V(xp) ((ErtsDigit*)((xp)+1))
+#define BIG_SIGN(xp) (!!bignum_header_is_neg(*xp))
+#define BIG_ARITY(xp) ((Uint)bignum_header_arity(*(xp)))
+#define BIG_DIGIT(xp,i) *(BIG_V(xp)+(i))
+#define BIG_DIGITS_PER_WORD (sizeof(Uint)/sizeof(ErtsDigit))
+
+#define BIG_SIZE(xp) BIG_ARITY(xp)
+
+/* Check for small */
+#define IS_USMALL(sgn,x) ((sgn) ? ((x) <= MAX_SMALL+1) : ((x) <= MAX_SMALL))
+#define IS_SSMALL(x) (((x) >= MIN_SMALL) && ((x) <= MAX_SMALL))
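+/*
+ * The small range is asymmetric, MIN_SMALL == -(MAX_SMALL+1), which is
+ * why IS_USMALL admits the magnitude MAX_SMALL+1 when the sign is set.
+ */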
+
+/* The heap size needed for a bignum */
+#define BIG_NEED_SIZE(x) ((x) + 1)
+
+#define BIG_UINT_HEAP_SIZE (1 + 1) /* always, since sizeof(Uint) <= sizeof(Eterm) */
+
+#ifdef ARCH_32
+
+#define ERTS_UINT64_BIG_HEAP_SIZE__(X) \
+ ((X) >= (((Uint64) 1) << 32) ? (1 + 2) : (1 + 1))
+#define ERTS_SINT64_HEAP_SIZE(X) \
+ (IS_SSMALL((X)) \
+ ? 0 \
+ : ERTS_UINT64_BIG_HEAP_SIZE__((X) >= 0 ? (X) : -(X)))
+#define ERTS_UINT64_HEAP_SIZE(X) \
+ (IS_USMALL(0, (X)) ? 0 : ERTS_UINT64_BIG_HEAP_SIZE__((X)))
+
+#else
+
+#define ERTS_SINT64_HEAP_SIZE(X) \
+ (IS_SSMALL((X)) ? 0 : (1 + 1))
+#define ERTS_UINT64_HEAP_SIZE(X) \
+ (IS_USMALL(0, (X)) ? 0 : (1 + 1))
+
+#endif
+
+int big_decimal_estimate(Eterm);
+Eterm erts_big_to_list(Eterm, Eterm**);
+char *erts_big_to_string(Eterm x, char *buf, Uint buf_sz);
+
+Eterm small_times(Sint, Sint, Eterm*);
+
+Eterm big_plus(Eterm, Eterm, Eterm*);
+Eterm big_minus(Eterm, Eterm, Eterm*);
+Eterm big_times(Eterm, Eterm, Eterm*);
+Eterm big_div(Eterm, Eterm, Eterm*);
+Eterm big_rem(Eterm, Eterm, Eterm*);
+Eterm big_neg(Eterm, Eterm*);
+
+Eterm big_minus_small(Eterm, Uint, Eterm*);
+Eterm big_plus_small(Eterm, Uint, Eterm*);
+Eterm big_times_small(Eterm, Uint, Eterm*);
+
+Eterm big_band(Eterm, Eterm, Eterm*);
+Eterm big_bor(Eterm, Eterm, Eterm*);
+Eterm big_bxor(Eterm, Eterm, Eterm*);
+Eterm big_bnot(Eterm, Eterm*);
+
+Eterm big_lshift(Eterm, Sint, Eterm*);
+int big_comp (Eterm, Eterm);
+int big_ucomp (Eterm, Eterm);
+int big_to_double(Eterm x, double* resp);
+Eterm small_to_big(Sint, Eterm*);
+Eterm uint_to_big(Uint, Eterm*);
+Eterm erts_make_integer(Uint, Process *);
+
+dsize_t big_bytes(Eterm);
+Eterm bytes_to_big(byte*, dsize_t, int, Eterm*);
+byte* big_to_bytes(Eterm, byte*);
+
+int term_to_Uint(Eterm, Uint*);
+int term_to_Sint(Eterm, Sint*);
+
+Uint32 big_to_uint32(Eterm b);
+int term_equals_2pow32(Eterm);
+
+Eterm erts_uint64_to_big(Uint64, Eterm **);
+Eterm erts_sint64_to_big(Sint64, Eterm **);
+
+#endif
+
diff --git a/erts/emulator/beam/binary.c b/erts/emulator/beam/binary.c
new file mode 100644
index 0000000000..49bc0d6457
--- /dev/null
+++ b/erts/emulator/beam/binary.c
@@ -0,0 +1,677 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 1996-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+
+#include "sys.h"
+#include "erl_vm.h"
+#include "global.h"
+#include "erl_process.h"
+#include "error.h"
+#include "bif.h"
+#include "big.h"
+#include "erl_binary.h"
+#include "erl_bits.h"
+
+#ifdef DEBUG
+static int list_to_bitstr_buf(Eterm obj, char* buf, int len);
+#else
+static int list_to_bitstr_buf(Eterm obj, char* buf);
+#endif
+static Sint bitstr_list_len(Eterm obj);
+
+void
+erts_init_binary(void)
+{
+ /* Verify Binary alignment... */
+ if ((((Uint) &((Binary *) 0)->orig_bytes[0]) % ((Uint) 8)) != 0) {
+ /* I assume that any compiler should be able to optimize this
+ away. If not, this test is not very expensive... */
+ erl_exit(ERTS_ABORT_EXIT,
+ "Internal error: Address of orig_bytes[0] of a Binary"
+ "is *not* 8-byte aligned\n");
+ }
+}
+
+/*
+ * Create a brand new binary from scratch.
+ */
+
+Eterm
+new_binary(Process *p, byte *buf, int len)
+{
+ ProcBin* pb;
+ Binary* bptr;
+
+ if (len <= ERL_ONHEAP_BIN_LIMIT) {
+ ErlHeapBin* hb = (ErlHeapBin *) HAlloc(p, heap_bin_size(len));
+ hb->thing_word = header_heap_bin(len);
+ hb->size = len;
+ if (buf != NULL) {
+ sys_memcpy(hb->data, buf, len);
+ }
+ return make_binary(hb);
+ }
+
+ /*
+ * Allocate the binary struct itself.
+ */
+ bptr = erts_bin_nrml_alloc(len);
+ bptr->flags = 0;
+ bptr->orig_size = len;
+ erts_refc_init(&bptr->refc, 1);
+ if (buf != NULL) {
+ sys_memcpy(bptr->orig_bytes, buf, len);
+ }
+
+ /*
+ * Now allocate the ProcBin on the heap.
+ */
+ pb = (ProcBin *) HAlloc(p, PROC_BIN_SIZE);
+ pb->thing_word = HEADER_PROC_BIN;
+ pb->size = len;
+ pb->next = MSO(p).mso;
+ MSO(p).mso = pb;
+ pb->val = bptr;
+ pb->bytes = (byte*) bptr->orig_bytes;
+ pb->flags = 0;
+
+ /*
+     * Miscellaneous updates. Return the tagged binary.
+ */
+ MSO(p).overhead += pb->size / sizeof(Eterm);
+ return make_binary(pb);
+}
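+/*
+ * new_binary() thus picks the representation automatically: payloads
+ * of up to ERL_ONHEAP_BIN_LIMIT bytes live directly on the process
+ * heap as a heap binary, while larger ones become reference-counted
+ * off-heap binaries linked into the process's MSO list so that the
+ * garbage collector can release the reference.
+ */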
+
+/*
+ * When heap binary is not desired...
+ */
+
+Eterm erts_new_mso_binary(Process *p, byte *buf, int len)
+{
+ ProcBin* pb;
+ Binary* bptr;
+
+ /*
+ * Allocate the binary struct itself.
+ */
+ bptr = erts_bin_nrml_alloc(len);
+ bptr->flags = 0;
+ bptr->orig_size = len;
+ erts_refc_init(&bptr->refc, 1);
+ if (buf != NULL) {
+ sys_memcpy(bptr->orig_bytes, buf, len);
+ }
+
+ /*
+ * Now allocate the ProcBin on the heap.
+ */
+ pb = (ProcBin *) HAlloc(p, PROC_BIN_SIZE);
+ pb->thing_word = HEADER_PROC_BIN;
+ pb->size = len;
+ pb->next = MSO(p).mso;
+ MSO(p).mso = pb;
+ pb->val = bptr;
+ pb->bytes = (byte*) bptr->orig_bytes;
+ pb->flags = 0;
+
+ /*
+ * Miscellanous updates. Return the tagged binary.
+ */
+ MSO(p).overhead += pb->size / sizeof(Eterm);
+ return make_binary(pb);
+}
+
+/*
+ * Create a brand new binary from scratch on the heap.
+ */
+
+Eterm
+erts_new_heap_binary(Process *p, byte *buf, int len, byte** datap)
+{
+ ErlHeapBin* hb = (ErlHeapBin *) HAlloc(p, heap_bin_size(len));
+
+ hb->thing_word = header_heap_bin(len);
+ hb->size = len;
+ if (buf != NULL) {
+ sys_memcpy(hb->data, buf, len);
+ }
+ *datap = (byte*) hb->data;
+ return make_binary(hb);
+}
+
+Eterm
+erts_realloc_binary(Eterm bin, size_t size)
+{
+ Eterm* bval = binary_val(bin);
+
+ if (thing_subtag(*bval) == HEAP_BINARY_SUBTAG) {
+ ASSERT(size <= binary_size(bin));
+ binary_size(bin) = size;
+ } else { /* REFC */
+ ProcBin* pb = (ProcBin *) bval;
+ Binary* newbin = erts_bin_realloc(pb->val, size);
+ newbin->orig_size = size;
+ pb->val = newbin;
+ pb->size = size;
+ pb->bytes = (byte*) newbin->orig_bytes;
+ pb->flags = 0;
+ bin = make_binary(pb);
+ }
+ return bin;
+}
+
+byte*
+erts_get_aligned_binary_bytes(Eterm bin, byte** base_ptr)
+{
+ byte* bytes;
+ Eterm* real_bin;
+ Uint byte_size;
+ Uint offs = 0;
+ Uint bit_offs = 0;
+
+ if (is_not_binary(bin)) {
+ return NULL;
+ }
+ byte_size = binary_size(bin);
+ real_bin = binary_val(bin);
+ if (*real_bin == HEADER_SUB_BIN) {
+ ErlSubBin* sb = (ErlSubBin *) real_bin;
+ if (sb->bitsize) {
+ return NULL;
+ }
+ offs = sb->offs;
+ bit_offs = sb->bitoffs;
+ real_bin = binary_val(sb->orig);
+ }
+ if (*real_bin == HEADER_PROC_BIN) {
+ bytes = ((ProcBin *) real_bin)->bytes + offs;
+ } else {
+ bytes = (byte *)(&(((ErlHeapBin *) real_bin)->data)) + offs;
+ }
+ if (bit_offs) {
+ byte* buf = (byte *) erts_alloc(ERTS_ALC_T_TMP, byte_size);
+
+ erts_copy_bits(bytes, bit_offs, 1, buf, 0, 1, byte_size*8);
+ *base_ptr = buf;
+ bytes = buf;
+ }
+ return bytes;
+}
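+/*
+ * Note that when the binary had a bit offset, the returned pointer is
+ * a freshly allocated ERTS_ALC_T_TMP copy stored in *base_ptr; the
+ * caller is responsible for freeing it again after use.
+ */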
+
+static Eterm
+bin_bytes_to_list(Eterm previous, Eterm* hp, byte* bytes, Uint size, Uint bitoffs)
+{
+ if (bitoffs == 0) {
+ while (size) {
+ previous = CONS(hp, make_small(bytes[--size]), previous);
+ hp += 2;
+ }
+ } else {
+ byte present;
+ byte next;
+ next = bytes[size];
+ while (size) {
+ present = next;
+ next = bytes[--size];
+ previous = CONS(hp, make_small(((present >> (8-bitoffs)) |
+ (next << bitoffs)) & 255), previous);
+ hp += 2;
+ }
+ }
+ return previous;
+}
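+/*
+ * For a nonzero bit offset each logical byte is spliced together from
+ * two consecutive source bytes. Example with bitoffs == 4 and source
+ * bytes 0xAB 0xCD: ((0xAB << 4) | (0xCD >> 4)) & 255 == 0xBC. The
+ * list is built back to front so that the cons cells can be filled
+ * sequentially from one allocated block.
+ */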
+
+
+BIF_RETTYPE binary_to_list_1(BIF_ALIST_1)
+{
+ Eterm real_bin;
+ Uint offset;
+ Uint size;
+ Uint bitsize;
+ Uint bitoffs;
+
+ if (is_not_binary(BIF_ARG_1)) {
+ goto error;
+ }
+ size = binary_size(BIF_ARG_1);
+ ERTS_GET_REAL_BIN(BIF_ARG_1, real_bin, offset, bitoffs, bitsize);
+ if (bitsize != 0) {
+ goto error;
+ }
+ if (size == 0) {
+ BIF_RET(NIL);
+ } else {
+ Eterm* hp = HAlloc(BIF_P, 2 * size);
+ byte* bytes = binary_bytes(real_bin)+offset;
+
+ BIF_RET(bin_bytes_to_list(NIL, hp, bytes, size, bitoffs));
+ }
+
+ error:
+ BIF_ERROR(BIF_P, BADARG);
+}
+
+BIF_RETTYPE binary_to_list_3(BIF_ALIST_3)
+{
+ byte* bytes;
+ Uint size;
+ Uint bitoffs;
+ Uint bitsize;
+ Uint i;
+ Uint start;
+ Uint stop;
+ Eterm* hp;
+
+ if (is_not_binary(BIF_ARG_1)) {
+ goto error;
+ }
+ if (!term_to_Uint(BIF_ARG_2, &start) || !term_to_Uint(BIF_ARG_3, &stop)) {
+ goto error;
+ }
+ size = binary_size(BIF_ARG_1);
+ ERTS_GET_BINARY_BYTES(BIF_ARG_1, bytes, bitoffs, bitsize);
+ if (start < 1 || start > size || stop < 1 ||
+ stop > size || stop < start ) {
+ goto error;
+ }
+ i = stop-start+1;
+ hp = HAlloc(BIF_P, 2*i);
+ BIF_RET(bin_bytes_to_list(NIL, hp, bytes+start-1, i, bitoffs));
+
+ error:
+ BIF_ERROR(BIF_P, BADARG);
+}
+
+BIF_RETTYPE bitstring_to_list_1(BIF_ALIST_1)
+{
+ Eterm real_bin;
+ Uint offset;
+ Uint size;
+ Uint bitsize;
+ Uint bitoffs;
+ byte* bytes;
+ Eterm previous = NIL;
+ Eterm* hp;
+
+ if (is_not_binary(BIF_ARG_1)) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+ size = binary_size(BIF_ARG_1);
+ ERTS_GET_REAL_BIN(BIF_ARG_1, real_bin, offset, bitoffs, bitsize);
+ bytes = binary_bytes(real_bin)+offset;
+ if (bitsize == 0) {
+ hp = HAlloc(BIF_P, 2 * size);
+ } else if (size == 0) {
+ hp = HAlloc(BIF_P, 2);
+ BIF_RET(CONS(hp,BIF_ARG_1,NIL));
+ } else {
+ ErlSubBin* last;
+
+ hp = HAlloc(BIF_P, ERL_SUB_BIN_SIZE+2+2*size);
+ last = (ErlSubBin *) hp;
+ last->thing_word = HEADER_SUB_BIN;
+ last->size = 0;
+ last->bitsize = bitsize;
+ last->offs = offset+size;
+ last->bitoffs = bitoffs;
+ last->orig = real_bin;
+ last->is_writable = 0;
+ hp += ERL_SUB_BIN_SIZE;
+ previous = CONS(hp, make_binary(last), previous);
+ hp += 2;
+ }
+ BIF_RET(bin_bytes_to_list(previous, hp, bytes, size, bitoffs));
+}
+
+
+/* Turn a possibly deep list of ints (and binaries) into */
+/* one large binary object. */
+
+BIF_RETTYPE list_to_binary_1(BIF_ALIST_1)
+{
+ Eterm bin;
+ int i;
+ int offset;
+ byte* bytes;
+ if (is_nil(BIF_ARG_1)) {
+ BIF_RET(new_binary(BIF_P,(byte*)"",0));
+ }
+ if (is_not_list(BIF_ARG_1)) {
+ goto error;
+ }
+ if ((i = io_list_len(BIF_ARG_1)) < 0) {
+ goto error;
+ }
+ bin = new_binary(BIF_P, (byte *)NULL, i);
+ bytes = binary_bytes(bin);
+ offset = io_list_to_buf(BIF_ARG_1, (char*) bytes, i);
+ ASSERT(offset == 0);
+ BIF_RET(bin);
+
+ error:
+ BIF_ERROR(BIF_P, BADARG);
+}
+
+/* Turn a possibly deep list of ints (and binaries) into */
+/* one large binary object. */
+
+BIF_RETTYPE iolist_to_binary_1(BIF_ALIST_1)
+{
+ Eterm bin;
+ int i;
+ int offset;
+ byte* bytes;
+
+ if (is_binary(BIF_ARG_1)) {
+ BIF_RET(BIF_ARG_1);
+ }
+ if (is_nil(BIF_ARG_1)) {
+ BIF_RET(new_binary(BIF_P,(byte*)"",0));
+ }
+ if (is_not_list(BIF_ARG_1)) {
+ goto error;
+ }
+ if ((i = io_list_len(BIF_ARG_1)) < 0) {
+ goto error;
+ }
+ bin = new_binary(BIF_P, (byte *)NULL, i);
+ bytes = binary_bytes(bin);
+ offset = io_list_to_buf(BIF_ARG_1, (char*) bytes, i);
+ ASSERT(offset == 0);
+ BIF_RET(bin);
+
+ error:
+ BIF_ERROR(BIF_P, BADARG);
+}
+
+BIF_RETTYPE list_to_bitstring_1(BIF_ALIST_1)
+{
+ Eterm bin;
+ int i,offset;
+ byte* bytes;
+ ErlSubBin* sb1;
+ Eterm* hp;
+
+ if (is_nil(BIF_ARG_1)) {
+ BIF_RET(new_binary(BIF_P,(byte*)"",0));
+ }
+ if (is_not_list(BIF_ARG_1)) {
+ goto error;
+ }
+ if ((i = bitstr_list_len(BIF_ARG_1)) < 0) {
+ goto error;
+ }
+ bin = new_binary(BIF_P, (byte *)NULL, i);
+ bytes = binary_bytes(bin);
+#ifdef DEBUG
+ offset = list_to_bitstr_buf(BIF_ARG_1, (char*) bytes, i);
+#else
+ offset = list_to_bitstr_buf(BIF_ARG_1, (char*) bytes);
+#endif
+ ASSERT(offset >= 0);
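+    /*
+     * A positive return value is the number of trailing bits (1..7) that
+     * did not fill a whole byte; these are exposed below by wrapping the
+     * result in a sub binary whose bitsize is that remainder.
+     */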
+ if (offset > 0) {
+ hp = HAlloc(BIF_P, ERL_SUB_BIN_SIZE);
+ sb1 = (ErlSubBin *) hp;
+ sb1->thing_word = HEADER_SUB_BIN;
+ sb1->size = i-1;
+ sb1->offs = 0;
+ sb1->orig = bin;
+ sb1->bitoffs = 0;
+ sb1->bitsize = offset;
+ sb1->is_writable = 0;
+ hp += ERL_SUB_BIN_SIZE;
+ bin = make_binary(sb1);
+ }
+
+ BIF_RET(bin);
+
+ error:
+ BIF_ERROR(BIF_P, BADARG);
+}
+
+BIF_RETTYPE split_binary_2(BIF_ALIST_2)
+{
+ Uint pos;
+ ErlSubBin* sb1;
+ ErlSubBin* sb2;
+ size_t orig_size;
+ Eterm orig;
+ Uint offset;
+ Uint bit_offset;
+ Uint bit_size;
+ Eterm* hp;
+
+ if (is_not_binary(BIF_ARG_1)) {
+ goto error;
+ }
+ if (!term_to_Uint(BIF_ARG_2, &pos)) {
+ goto error;
+ }
+ if ((orig_size = binary_size(BIF_ARG_1)) < pos) {
+ goto error;
+ }
+ hp = HAlloc(BIF_P, 2*ERL_SUB_BIN_SIZE+3);
+ ERTS_GET_REAL_BIN(BIF_ARG_1, orig, offset, bit_offset, bit_size);
+ sb1 = (ErlSubBin *) hp;
+ sb1->thing_word = HEADER_SUB_BIN;
+ sb1->size = pos;
+ sb1->offs = offset;
+ sb1->orig = orig;
+ sb1->bitoffs = bit_offset;
+ sb1->bitsize = 0;
+ sb1->is_writable = 0;
+ hp += ERL_SUB_BIN_SIZE;
+
+ sb2 = (ErlSubBin *) hp;
+ sb2->thing_word = HEADER_SUB_BIN;
+ sb2->size = orig_size - pos;
+ sb2->offs = offset + pos;
+ sb2->orig = orig;
+ sb2->bitoffs = bit_offset;
+ sb2->bitsize = bit_size; /* The extra bits go into the second binary. */
+ sb2->is_writable = 0;
+ hp += ERL_SUB_BIN_SIZE;
+
+ return TUPLE2(hp, make_binary(sb1), make_binary(sb2));
+
+ error:
+ BIF_ERROR(BIF_P, BADARG);
+}
+
+void
+erts_cleanup_mso(ProcBin* pb)
+{
+ while (pb != NULL) {
+ ProcBin* next = pb->next;
+ if (erts_refc_dectest(&pb->val->refc, 0) == 0)
+ erts_bin_free(pb->val);
+ pb = next;
+ }
+}
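+
+/*
+ * Note on erts_cleanup_mso() above: erts_refc_dectest() returns the
+ * decremented reference count, so the underlying Binary is freed exactly
+ * when the last ProcBin referring to it is cleaned up.
+ */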
+
+/*
+ * Local functions.
+ */
+
+/*
+ * The input list is assumed to be type-correct and the buffer is
+ * assumed to be of sufficient size. Those assumptions are verified in
+ * the DEBUG-built emulator.
+ */
+static int
+#ifdef DEBUG
+list_to_bitstr_buf(Eterm obj, char* buf, int len)
+#else
+list_to_bitstr_buf(Eterm obj, char* buf)
+#endif
+{
+ Eterm* objp;
+ int offset = 0;
+ DECLARE_ESTACK(s);
+ goto L_again;
+
+ while (!ESTACK_ISEMPTY(s)) {
+ obj = ESTACK_POP(s);
+ L_again:
+ if (is_list(obj)) {
+ L_iter_list:
+ objp = list_val(obj);
+ obj = CAR(objp);
+ if (is_byte(obj)) {
+ ASSERT(len > 0);
+ if (offset == 0) {
+ *buf++ = unsigned_val(obj);
+ } else {
+ *buf = (char)((unsigned_val(obj) >> offset) |
+ ((*buf >> (8-offset)) << (8-offset)));
+ buf++;
+ *buf = (unsigned_val(obj) << (8-offset));
+ }
+#ifdef DEBUG
+ len--;
+#endif
+ } else if (is_binary(obj)) {
+ byte* bptr;
+ size_t size = binary_size(obj);
+ Uint bitsize;
+ Uint bitoffs;
+ Uint num_bits;
+
+ ASSERT(size <= len);
+ ERTS_GET_BINARY_BYTES(obj, bptr, bitoffs, bitsize);
+ num_bits = 8*size+bitsize;
+ copy_binary_to_buffer(buf, offset, bptr, bitoffs, num_bits);
+ offset += bitsize;
+ buf += size + (offset>7);
+#ifdef DEBUG
+ len -= size + (offset>7);
+#endif
+ offset = offset & 7;
+ } else if (is_list(obj)) {
+ ESTACK_PUSH(s, CDR(objp));
+ goto L_iter_list; /* on head */
+ } else {
+ ASSERT(is_nil(obj));
+ }
+
+ obj = CDR(objp);
+ if (is_list(obj)) {
+ goto L_iter_list; /* on tail */
+ } else if (is_binary(obj)) {
+ byte* bptr;
+ size_t size = binary_size(obj);
+ Uint bitsize;
+ Uint bitoffs;
+ Uint num_bits;
+
+ ASSERT(size <= len);
+ ERTS_GET_BINARY_BYTES(obj, bptr, bitoffs, bitsize);
+ num_bits = 8*size+bitsize;
+ copy_binary_to_buffer(buf, offset, bptr, bitoffs, num_bits);
+ offset += bitsize;
+ buf += size+(offset>7);
+#ifdef DEBUG
+ len -= size+(offset>7);
+#endif
+ offset = offset & 7;
+ } else {
+ ASSERT(is_nil(obj));
+ }
+ } else if (is_binary(obj)) {
+ byte* bptr;
+ size_t size = binary_size(obj);
+ Uint bitsize;
+ Uint bitoffs;
+ Uint num_bits;
+
+ ASSERT(size <= len);
+ ERTS_GET_BINARY_BYTES(obj, bptr, bitoffs, bitsize);
+ num_bits = 8*size+bitsize;
+ copy_binary_to_buffer(buf, offset, bptr, bitoffs, num_bits);
+ offset += bitsize;
+ buf += size + (offset>7);
+#ifdef DEBUG
+ len -= size + (offset>7);
+#endif
+ offset = offset & 7;
+ } else {
+ ASSERT(is_nil(obj));
+ }
+ }
+
+ DESTROY_ESTACK(s);
+ return offset;
+}
+
+static Sint
+bitstr_list_len(Eterm obj)
+{
+ Eterm* objp;
+ Uint len = 0;
+ Uint offs = 0;
+ DECLARE_ESTACK(s);
+ goto L_again;
+
+ while (!ESTACK_ISEMPTY(s)) {
+ obj = ESTACK_POP(s);
+ L_again:
+ if (is_list(obj)) {
+ L_iter_list:
+ objp = list_val(obj);
+ /* Head */
+ obj = CAR(objp);
+ if (is_byte(obj)) {
+ len++;
+ } else if (is_binary(obj)) {
+ len += binary_size(obj);
+ offs += binary_bitsize(obj);
+ } else if (is_list(obj)) {
+ ESTACK_PUSH(s, CDR(objp));
+ goto L_iter_list; /* on head */
+ } else if (is_not_nil(obj)) {
+ goto L_type_error;
+ }
+ /* Tail */
+ obj = CDR(objp);
+ if (is_list(obj))
+ goto L_iter_list; /* on tail */
+ else if (is_binary(obj)) {
+ len += binary_size(obj);
+ offs += binary_bitsize(obj);
+ } else if (is_not_nil(obj)) {
+ goto L_type_error;
+ }
+ } else if (is_binary(obj)) {
+ len += binary_size(obj);
+ offs += binary_bitsize(obj);
+ } else if (is_not_nil(obj)) {
+ goto L_type_error;
+ }
+ }
+
+ DESTROY_ESTACK(s);
+ return (Sint) (len + (offs/8) + ((offs % 8) != 0));
+
+ L_type_error:
+ DESTROY_ESTACK(s);
+ return (Sint) -1;
+}
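+
+/*
+ * Worked example of the size computation above: a list of three 5-bit
+ * bitstrings gives len == 0 and offs == 15, so the required buffer is
+ * 0 + 15/8 + 1 = 2 bytes, with the last byte only partially used.
+ */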
diff --git a/erts/emulator/beam/break.c b/erts/emulator/beam/break.c
new file mode 100644
index 0000000000..5ea47e16f5
--- /dev/null
+++ b/erts/emulator/beam/break.c
@@ -0,0 +1,747 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 1996-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+/* This file contains functions that are called when a user hits ^C. */
+
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+
+#include "sys.h"
+#include "erl_vm.h"
+#include "global.h"
+#include "erl_process.h"
+#include "version.h"
+#include "error.h"
+#include "erl_db.h"
+#include "bif.h"
+#include "erl_version.h"
+#include "hash.h"
+#include "atom.h"
+#include "beam_load.h"
+#include "erl_instrument.h"
+#include "erl_bif_timer.h"
+
+#ifdef _OSE_
+#include "time.h"
+#endif
+
+/* Forward declarations -- should really appear somewhere else */
+static void process_killer(void);
+void do_break(void);
+void erl_crash_dump_v(char *file, int line, char* fmt, va_list args);
+void erl_crash_dump(char* file, int line, char* fmt, ...);
+
+#ifdef DEBUG
+static void bin_check(void);
+#endif
+
+static void print_garb_info(int to, void *to_arg, Process* p);
+#ifdef OPPROF
+static void dump_frequencies(void);
+#endif
+
+static void dump_attributes(int to, void *to_arg, byte* ptr, int size);
+
+extern char* erts_system_version[];
+
+static void
+port_info(int to, void *to_arg)
+{
+ int i;
+ for (i = 0; i < erts_max_ports; i++)
+ print_port_info(to, to_arg, i);
+}
+
+void
+process_info(int to, void *to_arg)
+{
+ int i;
+ for (i = 0; i < erts_max_processes; i++) {
+ if ((process_tab[i] != NULL) && (process_tab[i]->i != ENULL)) {
+ if (process_tab[i]->status != P_EXITING)
+ print_process_info(to, to_arg, process_tab[i]);
+ }
+ }
+
+ port_info(to, to_arg);
+}
+
+static void
+process_killer(void)
+{
+ int i, j;
+ Process* rp;
+
+ erts_printf("\n\nProcess Information\n\n");
+ erts_printf("--------------------------------------------------\n");
+ for (i = erts_max_processes-1; i >= 0; i--) {
+ if (((rp = process_tab[i]) != NULL) && rp->i != ENULL) {
+ int br;
+ print_process_info(ERTS_PRINT_STDOUT, NULL, rp);
+ erts_printf("(k)ill (n)ext (r)eturn:\n");
+ while(1) {
+ if ((j = sys_get_key(0)) <= 0)
+ halt_0(0);
+ switch(j) {
+ case 'k':
+ if (rp->status == P_WAITING) {
+ Uint32 rp_locks = ERTS_PROC_LOCKS_XSIG_SEND;
+ erts_smp_proc_inc_refc(rp);
+ erts_smp_proc_lock(rp, rp_locks);
+ (void) erts_send_exit_signal(NULL,
+ NIL,
+ rp,
+ &rp_locks,
+ am_kill,
+ NIL,
+ NULL,
+ 0);
+ erts_smp_proc_unlock(rp, rp_locks);
+ erts_smp_proc_dec_refc(rp);
+ }
+ else
+ erts_printf("Can only kill WAITING processes this way\n");
+
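+ /* Intentional fall-through to 'n': after a kill attempt we advance
+ * to the next process. */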
+ case 'n': br = 1; break;
+ case 'r': return;
+ default: return;
+ }
+ if (br == 1) break;
+ }
+ }
+ }
+}
+
+typedef struct {
+ int is_first;
+ int to;
+ void *to_arg;
+} PrintMonitorContext;
+
+static void doit_print_link(ErtsLink *lnk, void *vpcontext)
+{
+ PrintMonitorContext *pcontext = vpcontext;
+ int to = pcontext->to;
+ void *to_arg = pcontext->to_arg;
+
+ if (pcontext->is_first) {
+ pcontext->is_first = 0;
+ erts_print(to, to_arg, "%T", lnk->pid);
+ } else {
+ erts_print(to, to_arg, ", %T", lnk->pid);
+ }
+}
+
+
+static void doit_print_monitor(ErtsMonitor *mon, void *vpcontext)
+{
+ PrintMonitorContext *pcontext = vpcontext;
+ int to = pcontext->to;
+ void *to_arg = pcontext->to_arg;
+ char *prefix = ", ";
+
+ if (pcontext->is_first) {
+ pcontext->is_first = 0;
+ prefix = "";
+ }
+
+ if (mon->type == MON_ORIGIN) {
+ if (is_atom(mon->pid)) { /* dist by name */
+ ASSERT(is_node_name_atom(mon->pid));
+ erts_print(to, to_arg, "%s{to,{%T,%T},%T}", prefix, mon->name,
+ mon->pid, mon->ref);
+ } else if (is_atom(mon->name)){ /* local by name */
+ erts_print(to, to_arg, "%s{to,{%T,%T},%T}", prefix, mon->name,
+ erts_this_dist_entry->sysname, mon->ref);
+ } else { /* local and distributed by pid */
+ erts_print(to, to_arg, "%s{to,%T,%T}", prefix, mon->pid, mon->ref);
+ }
+ } else { /* MON_TARGET */
+ erts_print(to, to_arg, "%s{from,%T,%T}", prefix, mon->pid, mon->ref);
+ }
+}
+
+/* Display info about an individual Erlang process */
+void
+print_process_info(int to, void *to_arg, Process *p)
+{
+ int garbing = 0;
+ int running = 0;
+ struct saved_calls *scb;
+
+ /* display the PID */
+ erts_print(to, to_arg, "=proc:%T\n", p->id);
+
+ /* Display the state */
+ erts_print(to, to_arg, "State: ");
+ switch (p->status) {
+ case P_FREE:
+ erts_print(to, to_arg, "Non Existing\n"); /* Should never happen */
+ break;
+ case P_RUNABLE:
+ erts_print(to, to_arg, "Scheduled\n");
+ break;
+ case P_WAITING:
+ erts_print(to, to_arg, "Waiting\n");
+ break;
+ case P_SUSPENDED:
+ erts_print(to, to_arg, "Suspended\n");
+ break;
+ case P_RUNNING:
+ erts_print(to, to_arg, "Running\n");
+ running = 1;
+ break;
+ case P_EXITING:
+ erts_print(to, to_arg, "Exiting\n");
+ break;
+ case P_GARBING:
+ erts_print(to, to_arg, "Garbing\n");
+ garbing = 1;
+ running = 1;
+ break;
+ }
+
+ /*
+ * If the process is registered as a global process, display the
+ * registered name
+ */
+ if (p->reg != NULL)
+ erts_print(to, to_arg, "Name: %T\n", p->reg->name);
+
+ /*
+ * Display the initial function name
+ */
+ erts_print(to, to_arg, "Spawned as: %T:%T/%bpu\n",
+ p->initial[INITIAL_MOD],
+ p->initial[INITIAL_FUN],
+ p->initial[INITIAL_ARI]);
+
+ if (p->current != NULL) {
+ if (running) {
+ erts_print(to, to_arg, "Last scheduled in for: ");
+ } else {
+ erts_print(to, to_arg, "Current call: ");
+ }
+ erts_print(to, to_arg, "%T:%T/%bpu\n",
+ p->current[0],
+ p->current[1],
+ p->current[2]);
+ }
+
+ erts_print(to, to_arg, "Spawned by: %T\n", p->parent);
+
+ erts_print(to, to_arg, "Started: %s", ctime((time_t*)&p->started.tv_sec));
+ ERTS_SMP_MSGQ_MV_INQ2PRIVQ(p);
+ erts_print(to, to_arg, "Message queue length: %d\n", p->msg.len);
+
+ /* display the message queue only if there is anything in it */
+ if (!ERTS_IS_CRASH_DUMPING && p->msg.first != NULL && !garbing) {
+ ErlMessage* mp;
+ erts_print(to, to_arg, "Message queue: [");
+ for (mp = p->msg.first; mp; mp = mp->next)
+ erts_print(to, to_arg, mp->next ? "%T," : "%T", ERL_MESSAGE_TERM(mp));
+ erts_print(to, to_arg, "]\n");
+ }
+
+ {
+ long s = 0;
+ int frags = 0;
+ ErlHeapFragment *m = p->mbuf;
+ while (m != NULL) {
+ frags++;
+ s += m->size;
+ m = m->next;
+ }
+ erts_print(to, to_arg, "Number of heap fragments: %d\n", frags);
+ }
+ erts_print(to, to_arg, "Heap fragment data: %bpu\n", MBUF_SIZE(p));
+
+ scb = ERTS_PROC_GET_SAVED_CALLS_BUF(p);
+ if (scb) {
+ int i, j;
+
+ erts_print(to, to_arg, "Last calls:");
+ for (i = 0; i < scb->n; i++) {
+ erts_print(to, to_arg, " ");
+ j = scb->cur - i - 1;
+ if (j < 0)
+ j += scb->len;
+ if (scb->ct[j] == &exp_send)
+ erts_print(to, to_arg, "send");
+ else if (scb->ct[j] == &exp_receive)
+ erts_print(to, to_arg, "'receive'");
+ else if (scb->ct[j] == &exp_timeout)
+ erts_print(to, to_arg, "timeout");
+ else
+ erts_print(to, to_arg, "%T:%T/%bpu\n",
+ scb->ct[j]->code[0],
+ scb->ct[j]->code[1],
+ scb->ct[j]->code[2]);
+ }
+ erts_print(to, to_arg, "\n");
+ }
+
+ /* display the links only if there are any*/
+ if (p->nlinks != NULL || p->monitors != NULL) {
+ PrintMonitorContext context = {1, to, to_arg};
+ erts_print(to, to_arg,"Link list: [");
+ erts_doforall_links(p->nlinks, &doit_print_link, &context);
+ erts_doforall_monitors(p->monitors, &doit_print_monitor, &context);
+ erts_print(to, to_arg,"]\n");
+ }
+
+ if (!ERTS_IS_CRASH_DUMPING) {
+
+ /* and the dictionary */
+ if (p->dictionary != NULL && !garbing) {
+ erts_print(to, to_arg, "Dictionary: ");
+ erts_dictionary_dump(to, to_arg, p->dictionary);
+ erts_print(to, to_arg, "\n");
+ }
+ }
+
+ /* print the number of reductions etc */
+ erts_print(to, to_arg, "Reductions: %bpu\n", p->reds);
+
+ erts_print(to, to_arg, "Stack+heap: %bpu\n", p->heap_sz);
+ erts_print(to, to_arg, "OldHeap: %bpu\n",
+ (OLD_HEAP(p) == NULL) ? 0 :
+ (unsigned)(OLD_HEND(p) - OLD_HEAP(p)) );
+ erts_print(to, to_arg, "Heap unused: %bpu\n", (p->hend - p->htop));
+ erts_print(to, to_arg, "OldHeap unused: %bpu\n",
+ (OLD_HEAP(p) == NULL) ? 0 : (OLD_HEND(p) - OLD_HTOP(p)) );
+
+ if (garbing) {
+ print_garb_info(to, to_arg, p);
+ }
+
+ if (ERTS_IS_CRASH_DUMPING) {
+ erts_program_counter_info(to, to_arg, p);
+ } else {
+ erts_print(to, to_arg, "Stack dump:\n");
+#ifdef ERTS_SMP
+ if (!garbing)
+#endif
+ erts_stack_dump(to, to_arg, p);
+ }
+}
+
+static void
+print_garb_info(int to, void *to_arg, Process* p)
+{
+ /* ERTS_SMP: A scheduler is probably concurrently doing gc... */
+#ifndef ERTS_SMP
+ erts_print(to, to_arg, "New heap start: %bpX\n", p->heap);
+ erts_print(to, to_arg, "New heap top: %bpX\n", p->htop);
+ erts_print(to, to_arg, "Stack top: %bpX\n", p->stop);
+ erts_print(to, to_arg, "Stack end: %bpX\n", p->hend);
+ erts_print(to, to_arg, "Old heap start: %bpX\n", OLD_HEAP(p));
+ erts_print(to, to_arg, "Old heap top: %bpX\n", OLD_HTOP(p));
+ erts_print(to, to_arg, "Old heap end: %bpX\n", OLD_HEND(p));
+#endif
+}
+
+void
+info(int to, void *to_arg)
+{
+ erts_memory(&to, to_arg, NULL, THE_NON_VALUE);
+ atom_info(to, to_arg);
+ module_info(to, to_arg);
+ export_info(to, to_arg);
+ register_info(to, to_arg);
+ erts_fun_info(to, to_arg);
+ erts_node_table_info(to, to_arg);
+ erts_dist_table_info(to, to_arg);
+ erts_allocated_areas(&to, to_arg, NULL);
+ erts_allocator_info(to, to_arg);
+
+}
+
+void
+loaded(int to, void *to_arg)
+{
+ int i;
+ int old = 0;
+ int cur = 0;
+ Eterm* code;
+
+ /*
+ * Calculate and print totals.
+ */
+ for (i = 0; i < module_code_size(); i++) {
+ if (module_code(i) != NULL &&
+ ((module_code(i)->code_length != 0) ||
+ (module_code(i)->old_code_length != 0))) {
+ cur += module_code(i)->code_length;
+ if (module_code(i)->old_code_length != 0) {
+ old += module_code(i)->old_code_length;
+ }
+ }
+ }
+ erts_print(to, to_arg, "Current code: %d\n", cur);
+ erts_print(to, to_arg, "Old code: %d\n", old);
+
+ /*
+ * Print one line per module.
+ */
+
+ for (i = 0; i < module_code_size(); i++) {
+ if (!ERTS_IS_CRASH_DUMPING) {
+ /*
+ * Interactive dump; keep it brief.
+ */
+ if (module_code(i) != NULL &&
+ ((module_code(i)->code_length != 0) ||
+ (module_code(i)->old_code_length != 0))) {
+ erts_print(to, to_arg, "%T", make_atom(module_code(i)->module));
+ cur += module_code(i)->code_length;
+ erts_print(to, to_arg, " %d", module_code(i)->code_length );
+ if (module_code(i)->old_code_length != 0) {
+ erts_print(to, to_arg, " (%d old)",
+ module_code(i)->old_code_length );
+ old += module_code(i)->old_code_length;
+ }
+ erts_print(to, to_arg, "\n");
+ }
+ } else {
+ /*
+ * Writing to a crash dump; make it parseable.
+ */
+ if (module_code(i) != NULL &&
+ ((module_code(i)->code_length != 0) ||
+ (module_code(i)->old_code_length != 0))) {
+ erts_print(to, to_arg, "=mod:");
+ erts_print(to, to_arg, "%T", make_atom(module_code(i)->module));
+ erts_print(to, to_arg, "\n");
+ erts_print(to, to_arg, "Current size: %d\n",
+ module_code(i)->code_length);
+ code = module_code(i)->code;
+ if (code != NULL && code[MI_ATTR_PTR]) {
+ erts_print(to, to_arg, "Current attributes: ");
+ dump_attributes(to, to_arg, (byte *) code[MI_ATTR_PTR],
+ code[MI_ATTR_SIZE]);
+ }
+ if (code != NULL && code[MI_COMPILE_PTR]) {
+ erts_print(to, to_arg, "Current compilation info: ");
+ dump_attributes(to, to_arg, (byte *) code[MI_COMPILE_PTR],
+ code[MI_COMPILE_SIZE]);
+ }
+
+ if (module_code(i)->old_code_length != 0) {
+ erts_print(to, to_arg, "Old size: %d\n", module_code(i)->old_code_length);
+ code = module_code(i)->old_code;
+ if (code[MI_ATTR_PTR]) {
+ erts_print(to, to_arg, "Old attributes: ");
+ dump_attributes(to, to_arg, (byte *) code[MI_ATTR_PTR],
+ code[MI_ATTR_SIZE]);
+ }
+ if (code[MI_COMPILE_PTR]) {
+ erts_print(to, to_arg, "Old compilation info: ");
+ dump_attributes(to, to_arg, (byte *) code[MI_COMPILE_PTR],
+ code[MI_COMPILE_SIZE]);
+ }
+ }
+ }
+ }
+ }
+}
+
+
+static void
+dump_attributes(int to, void *to_arg, byte* ptr, int size)
+{
+ while (size-- > 0) {
+ erts_print(to, to_arg, "%02X", *ptr++);
+ }
+ erts_print(to, to_arg, "\n");
+}
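+
+/*
+ * Example: a three-byte buffer {0x83, 0x6C, 0x00} is printed by
+ * dump_attributes() as "836C00" followed by a newline.
+ */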
+
+
+void
+do_break(void)
+{
+ int i;
+#ifdef __WIN32__
+ char *mode; /* value of the ERL_CONSOLE_MODE environment variable */
+
+ /* check if we're in console mode and, if so,
+ halt immediately if break is called */
+ mode = erts_read_env("ERL_CONSOLE_MODE");
+ if (mode && strcmp(mode, "window") != 0)
+ erl_exit(0, "");
+ erts_free_read_env(mode);
+#endif /* __WIN32__ */
+
+ erts_printf("\n"
+ "BREAK: (a)bort (c)ontinue (p)roc info (i)nfo (l)oaded\n"
+ " (v)ersion (k)ill (D)b-tables (d)istribution\n");
+
+ while (1) {
+ if ((i = sys_get_key(0)) <= 0)
+ erl_exit(0, "");
+ switch (i) {
+ case 'q':
+ case 'a':
+ case '*': /*
+ * The asterisk is a read error on Windows,
+ * where sys_get_key isn't that great in console mode.
+ * The usual reason for a read error is Ctrl-C. Treat it as
+ * 'a' to avoid an infinite loop.
+ */
+ erl_exit(0, "");
+ case 'A': /* Halt generating crash dump */
+ erl_exit(1, "Crash dump requested by user");
+ case 'c':
+ return;
+ case 'p':
+ process_info(ERTS_PRINT_STDOUT, NULL);
+ return;
+ case 'm':
+ return;
+ case 'o':
+ port_info(ERTS_PRINT_STDOUT, NULL);
+ return;
+ case 'i':
+ info(ERTS_PRINT_STDOUT, NULL);
+ return;
+ case 'l':
+ loaded(ERTS_PRINT_STDOUT, NULL);
+ return;
+ case 'v':
+ erts_printf("Erlang (%s) emulator version "
+ ERLANG_VERSION "\n",
+ EMULATOR);
+ erts_printf("Compiled on " ERLANG_COMPILE_DATE "\n");
+ return;
+ case 'd':
+ distribution_info(ERTS_PRINT_STDOUT, NULL);
+ return;
+ case 'D':
+ db_info(ERTS_PRINT_STDOUT, NULL, 1);
+ return;
+ case 'k':
+ process_killer();
+ return;
+#ifdef OPPROF
+ case 'X':
+ dump_frequencies();
+ return;
+ case 'x':
+ {
+ int i;
+ for (i = 0; i <= HIGHEST_OP; i++) {
+ if (opc[i].name != NULL) {
+ erts_printf("%-16s %8d\n", opc[i].name, opc[i].count);
+ }
+ }
+ }
+ return;
+ case 'z':
+ {
+ int i;
+ for (i = 0; i <= HIGHEST_OP; i++)
+ opc[i].count = 0;
+ }
+ return;
+#endif
+#ifdef DEBUG
+ case 't':
+ p_slpq();
+ return;
+ case 'b':
+ bin_check();
+ return;
+ case 'C':
+ abort();
+#endif
+ case '\n':
+ continue;
+ default:
+ erts_printf("Eh?\n\n");
+ }
+ }
+
+}
+
+
+#ifdef OPPROF
+static void
+dump_frequencies(void)
+{
+ int i;
+ FILE* fp;
+ time_t now;
+ static char name[] = "op_freq.dump";
+
+ fp = fopen(name, "w");
+ if (fp == NULL) {
+ fprintf(stderr, "Failed to open %s for writing\n", name);
+ return;
+ }
+
+ time(&now);
+ fprintf(fp, "# Generated %s\n", ctime(&now));
+
+ for (i = 0; i <= HIGHEST_OP; i++) {
+ if (opc[i].name != NULL) {
+ fprintf(fp, "%s %d\n", opc[i].name, opc[i].count);
+ }
+ }
+ fclose(fp);
+ erts_printf("Frequencies dumped to %s\n", name);
+}
+#endif
+
+
+#ifdef DEBUG
+
+static void
+bin_check(void)
+{
+ Process *rp;
+ ProcBin *bp;
+ int i, printed;
+
+ for (i=0; i < erts_max_processes; i++) {
+ if ((rp = process_tab[i]) == NULL)
+ continue;
+ if (!(bp = rp->off_heap.mso))
+ continue;
+ printed = 0;
+ while (bp) {
+ if (printed == 0) {
+ erts_printf("Process %T holding binary data \n", rp->id);
+ printed = 1;
+ }
+ erts_printf("0x%08lx orig_size: %ld, norefs = %ld\n",
+ (unsigned long)bp->val,
+ (long)bp->val->orig_size,
+ erts_smp_atomic_read(&bp->val->refc));
+
+ bp = bp->next;
+ }
+ if (printed == 1)
+ erts_printf("--------------------------------------\n");
+ }
+ /* db_bin_check() has to be rewritten for the AVL trees... */
+ /*db_bin_check();*/
+}
+
+#endif
+
+/* XXX THIS SHOULD BE IN SYSTEM !!!! */
+void
+erl_crash_dump_v(char *file, int line, char* fmt, va_list args)
+{
+ int fd;
+ time_t now;
+ size_t dumpnamebufsize = MAXPATHLEN;
+ char dumpnamebuf[MAXPATHLEN];
+ char* dumpname;
+
+ if (ERTS_IS_CRASH_DUMPING)
+ return;
+
+ /* Wait for all threads to block. If all threads haven't blocked
+ * after a minute, we go ahead anyway and hope for the best...
+ *
+ * We do not release the system again. We expect an exit() or abort()
+ * after the dump has been written.
+ *
+ * NOTE: We allow GC, so it is important not to hold *any*
+ * process locks.
+ */
+ erts_smp_emergency_block_system(60000, ERTS_BS_FLG_ALLOW_GC);
+ /* Either worked or not... */
+
+ /* Allow us to pass certain places without locking... */
+#ifdef ERTS_SMP
+ erts_smp_atomic_inc(&erts_writing_erl_crash_dump);
+#else
+ erts_writing_erl_crash_dump = 1;
+#endif
+
+ erts_sys_prepare_crash_dump();
+
+ if (erts_sys_getenv("ERL_CRASH_DUMP",&dumpnamebuf[0],&dumpnamebufsize) != 0)
+ dumpname = "erl_crash.dump";
+ else
+ dumpname = &dumpnamebuf[0];
+
+ fd = open(dumpname,O_WRONLY | O_CREAT | O_TRUNC,0640);
+ if (fd < 0)
+ return; /* Can't create the crash dump, skip it */
+
+ time(&now);
+ erts_fdprintf(fd, "=erl_crash_dump:0.1\n%s", ctime(&now));
+
+ if (file != NULL)
+ erts_fdprintf(fd, "The error occurred in file %s, line %d\n", file, line);
+
+ if (fmt != NULL && *fmt != '\0') {
+ erts_fdprintf(fd, "Slogan: ");
+ erts_vfdprintf(fd, fmt, args);
+ }
+ erts_fdprintf(fd, "System version: ");
+ erts_print_system_version(fd, NULL, NULL);
+ erts_fdprintf(fd, "%s\n", "Compiled: " ERLANG_COMPILE_DATE);
+ erts_fdprintf(fd, "Atoms: %d\n", atom_table_size());
+ info(fd, NULL); /* General system info */
+ if (process_tab != NULL) /* XXX true at init */
+ process_info(fd, NULL); /* Info about each process and port */
+ db_info(fd, NULL, 0);
+ erts_print_bif_timer_info(fd, NULL);
+ distribution_info(fd, NULL);
+ erts_fdprintf(fd, "=loaded_modules\n");
+ loaded(fd, NULL);
+ erts_dump_fun_entries(fd, NULL);
+ erts_deep_process_dump(fd, NULL);
+ erts_fdprintf(fd, "=atoms\n");
+ dump_atoms(fd, NULL);
+
+ /* Keep the instrumentation data at the end of the dump */
+ if (erts_instr_memory_map || erts_instr_stat) {
+ erts_fdprintf(fd, "=instr_data\n");
+
+ if (erts_instr_stat) {
+ erts_fdprintf(fd, "=memory_status\n");
+ erts_instr_dump_stat_to_fd(fd, 0);
+ }
+ if (erts_instr_memory_map) {
+ erts_fdprintf(fd, "=memory_map\n");
+ erts_instr_dump_memory_map_to_fd(fd);
+ }
+ }
+
+ erts_fdprintf(fd, "=end\n");
+ close(fd);
+ erts_fprintf(stderr,"\nCrash dump was written to: %s\n", dumpname);
+}
+
+void
+erl_crash_dump(char* file, int line, char* fmt, ...)
+{
+ va_list args;
+
+ va_start(args, fmt);
+ erl_crash_dump_v(file, line, fmt, args);
+ va_end(args);
+}
diff --git a/erts/emulator/beam/copy.c b/erts/emulator/beam/copy.c
new file mode 100644
index 0000000000..0a5050b1fe
--- /dev/null
+++ b/erts/emulator/beam/copy.c
@@ -0,0 +1,981 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 1996-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+
+#include "sys.h"
+#include "erl_vm.h"
+#include "global.h"
+#include "erl_process.h"
+#include "erl_gc.h"
+#include "erl_nmgc.h"
+#include "big.h"
+#include "erl_binary.h"
+#include "erl_bits.h"
+
+#ifdef HYBRID
+MA_STACK_DECLARE(src);
+MA_STACK_DECLARE(dst);
+MA_STACK_DECLARE(offset);
+#endif
+
+void
+init_copy(void)
+{
+#ifdef HYBRID
+ MA_STACK_ALLOC(src);
+ MA_STACK_ALLOC(dst);
+ MA_STACK_ALLOC(offset);
+#endif
+}
+
+/*
+ * Copy the object "obj" to the heap of the process "to".
+ */
+Eterm
+copy_object(Eterm obj, Process* to)
+{
+ Uint size = size_object(obj);
+ Eterm* hp = HAlloc(to, size);
+ Eterm res;
+
+ res = copy_struct(obj, size, &hp, &to->off_heap);
+#ifdef DEBUG
+ if (eq(obj, res) == 0) {
+ erl_exit(ERTS_ABORT_EXIT, "copy not equal to source\n");
+ }
+#endif
+ return res;
+}
+
+/*
+ * Return the "flat" size of the object.
+ */
+
+Uint
+size_object(Eterm obj)
+{
+ Uint sum = 0;
+ Eterm* ptr;
+ int arity;
+
+ DECLARE_ESTACK(s);
+ for (;;) {
+ switch (primary_tag(obj)) {
+ case TAG_PRIMARY_LIST:
+ sum += 2;
+ ptr = list_val(obj);
+ obj = *ptr++;
+ if (!IS_CONST(obj)) {
+ ESTACK_PUSH(s, obj);
+ }
+ obj = *ptr;
+ break;
+ case TAG_PRIMARY_BOXED:
+ {
+ Eterm hdr = *boxed_val(obj);
+ ASSERT(is_header(hdr));
+ switch (hdr & _TAG_HEADER_MASK) {
+ case ARITYVAL_SUBTAG:
+ ptr = tuple_val(obj);
+ arity = header_arity(hdr);
+ sum += arity + 1;
+ if (arity == 0) { /* Empty tuple -- unusual. */
+ goto size_common;
+ }
+ while (arity-- > 1) {
+ obj = *++ptr;
+ if (!IS_CONST(obj)) {
+ ESTACK_PUSH(s, obj);
+ }
+ }
+ obj = *++ptr;
+ break;
+ case FUN_SUBTAG:
+ {
+ Eterm* bptr = fun_val(obj);
+ ErlFunThing* funp = (ErlFunThing *) bptr;
+ unsigned eterms = 1 /* creator */ + funp->num_free;
+ unsigned sz = thing_arityval(hdr);
+
+ sum += 1 /* header */ + sz + eterms;
+ bptr += 1 /* header */ + sz;
+ while (eterms-- > 1) {
+ obj = *bptr++;
+ if (!IS_CONST(obj)) {
+ ESTACK_PUSH(s, obj);
+ }
+ }
+ obj = *bptr;
+ break;
+ }
+ case SUB_BINARY_SUBTAG:
+ {
+ Eterm real_bin;
+ Uint offset; /* Not used. */
+ Uint bitsize;
+ Uint bitoffs;
+ Uint extra_bytes;
+ Eterm hdr;
+ ERTS_GET_REAL_BIN(obj, real_bin, offset, bitoffs, bitsize);
+ if ((bitsize + bitoffs) > 8) {
+ sum += ERL_SUB_BIN_SIZE;
+ extra_bytes = 2;
+ } else if ((bitsize + bitoffs) > 0) {
+ sum += ERL_SUB_BIN_SIZE;
+ extra_bytes = 1;
+ } else {
+ extra_bytes = 0;
+ }
+ hdr = *binary_val(real_bin);
+ if (thing_subtag(hdr) == REFC_BINARY_SUBTAG) {
+ sum += PROC_BIN_SIZE;
+ } else {
+ sum += heap_bin_size(binary_size(obj)+extra_bytes);
+ }
+ goto size_common;
+ }
+ break;
+ case BIN_MATCHSTATE_SUBTAG:
+ erl_exit(ERTS_ABORT_EXIT,
+ "size_object: matchstate term not allowed");
+ default:
+ sum += thing_arityval(hdr) + 1;
+ /* Fall through */
+ size_common:
+ if (ESTACK_ISEMPTY(s)) {
+ DESTROY_ESTACK(s);
+ return sum;
+ }
+ obj = ESTACK_POP(s);
+ break;
+ }
+ }
+ break;
+ case TAG_PRIMARY_IMMED1:
+ if (ESTACK_ISEMPTY(s)) {
+ DESTROY_ESTACK(s);
+ return sum;
+ }
+ obj = ESTACK_POP(s);
+ break;
+ default:
+ erl_exit(ERTS_ABORT_EXIT, "size_object: bad tag for %#x\n", obj);
+ }
+ }
+}
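+
+/*
+ * Illustrative example for size_object() above: the term {foo, [1,2]}
+ * has flat size 7 words: 3 for the tuple (header plus two elements) and
+ * 2 + 2 for the two list cells; the immediates foo, 1, 2 and NIL occupy
+ * no words of their own.
+ */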
+
+/*
+ * Copy a structure to a heap.
+ */
+Eterm
+copy_struct(Eterm obj, Uint sz, Eterm** hpp, ErlOffHeap* off_heap)
+{
+ char* hstart;
+ Uint hsize;
+ Eterm* htop;
+ Eterm* hbot;
+ Eterm* hp;
+ Eterm* objp;
+ Eterm* tp;
+ Eterm res;
+ Eterm elem;
+ Eterm* tailp;
+ Eterm* argp;
+ Eterm* const_tuple;
+ Eterm hdr;
+ int i;
+#ifdef DEBUG
+ Eterm org_obj = obj;
+ Uint org_sz = sz;
+#endif
+
+ if (IS_CONST(obj))
+ return obj;
+
+ hp = htop = *hpp;
+ hbot = htop + sz;
+ hstart = (char *)htop;
+ hsize = (char*) hbot - hstart;
+ const_tuple = 0;
+
+ /* Copy the object onto the heap */
+ switch (primary_tag(obj)) {
+ case TAG_PRIMARY_LIST: argp = &res; goto L_copy_list;
+ case TAG_PRIMARY_BOXED: argp = &res; goto L_copy_boxed;
+ default:
+ erl_exit(ERTS_ABORT_EXIT,
+ "%s, line %d: Internal error in copy_struct: 0x%08x\n",
+ __FILE__, __LINE__,obj);
+ }
+
+ L_copy:
+ while (hp != htop) {
+ obj = *hp;
+
+ switch (primary_tag(obj)) {
+ case TAG_PRIMARY_IMMED1:
+ hp++;
+ break;
+ case TAG_PRIMARY_LIST:
+ objp = list_val(obj);
+ if (in_area(objp,hstart,hsize)) {
+ hp++;
+ break;
+ }
+ argp = hp++;
+ /* Fall through */
+
+ L_copy_list:
+ tailp = argp;
+ while (is_list(obj)) {
+ objp = list_val(obj);
+ tp = tailp;
+ elem = *objp;
+ if (IS_CONST(elem)) {
+ *(hbot-2) = elem;
+ tailp = hbot-1;
+ hbot -= 2;
+ }
+ else {
+ *htop = elem;
+ tailp = htop+1;
+ htop += 2;
+ }
+ *tp = make_list(tailp - 1);
+ obj = *(objp+1);
+ }
+ switch (primary_tag(obj)) {
+ case TAG_PRIMARY_IMMED1: *tailp = obj; goto L_copy;
+ case TAG_PRIMARY_BOXED: argp = tailp; goto L_copy_boxed;
+ default:
+ erl_exit(ERTS_ABORT_EXIT,
+ "%s, line %d: Internal error in copy_struct: 0x%08x\n",
+ __FILE__, __LINE__,obj);
+ }
+
+ case TAG_PRIMARY_BOXED:
+ if (in_area(boxed_val(obj),hstart,hsize)) {
+ hp++;
+ break;
+ }
+ argp = hp++;
+
+ L_copy_boxed:
+ objp = boxed_val(obj);
+ hdr = *objp;
+ switch (hdr & _TAG_HEADER_MASK) {
+ case ARITYVAL_SUBTAG:
+ {
+ int const_flag = 1; /* assume constant tuple */
+ i = arityval(hdr);
+ *argp = make_tuple(htop);
+ tp = htop; /* tp is pointer to new arity value */
+ *htop++ = *objp++; /* copy arity value */
+ while (i--) {
+ elem = *objp++;
+ if (!IS_CONST(elem)) {
+ const_flag = 0;
+ }
+ *htop++ = elem;
+ }
+ if (const_flag) {
+ const_tuple = tp; /* this is the latest const_tuple */
+ }
+ }
+ break;
+ case REFC_BINARY_SUBTAG:
+ {
+ ProcBin* pb;
+
+ pb = (ProcBin *) objp;
+ if (pb->flags) {
+ erts_emasculate_writable_binary(pb);
+ }
+ i = thing_arityval(*objp) + 1;
+ hbot -= i;
+ tp = hbot;
+ while (i--) {
+ *tp++ = *objp++;
+ }
+ *argp = make_binary(hbot);
+ pb = (ProcBin*) hbot;
+ erts_refc_inc(&pb->val->refc, 2);
+ pb->next = off_heap->mso;
+ pb->flags = 0;
+ off_heap->mso = pb;
+ off_heap->overhead += pb->size / sizeof(Eterm);
+ }
+ break;
+ case SUB_BINARY_SUBTAG:
+ {
+ ErlSubBin* sb = (ErlSubBin *) objp;
+ Eterm real_bin = sb->orig;
+ Uint bit_offset = sb->bitoffs;
+ Uint bit_size = sb -> bitsize;
+ Uint offset = sb->offs;
+ size_t size = sb->size;
+ Uint extra_bytes;
+ Uint real_size;
+ if ((bit_size + bit_offset) > 8) {
+ extra_bytes = 2;
+ } else if ((bit_size + bit_offset) > 0) {
+ extra_bytes = 1;
+ } else {
+ extra_bytes = 0;
+ }
+ real_size = size+extra_bytes;
+ objp = binary_val(real_bin);
+ if (thing_subtag(*objp) == HEAP_BINARY_SUBTAG) {
+ ErlHeapBin* from = (ErlHeapBin *) objp;
+ ErlHeapBin* to;
+ i = heap_bin_size(real_size);
+ hbot -= i;
+ to = (ErlHeapBin *) hbot;
+ to->thing_word = header_heap_bin(real_size);
+ to->size = real_size;
+ sys_memcpy(to->data, ((byte *)from->data)+offset, real_size);
+ } else {
+ ProcBin* from = (ProcBin *) objp;
+ ProcBin* to;
+
+ ASSERT(thing_subtag(*objp) == REFC_BINARY_SUBTAG);
+ if (from->flags) {
+ erts_emasculate_writable_binary(from);
+ }
+ hbot -= PROC_BIN_SIZE;
+ to = (ProcBin *) hbot;
+ to->thing_word = HEADER_PROC_BIN;
+ to->size = real_size;
+ to->val = from->val;
+ erts_refc_inc(&to->val->refc, 2);
+ to->bytes = from->bytes + offset;
+ to->next = off_heap->mso;
+ to->flags = 0;
+ off_heap->mso = to;
+ off_heap->overhead += to->size / sizeof(Eterm);
+ }
+ *argp = make_binary(hbot);
+ if (extra_bytes != 0) {
+ ErlSubBin* res;
+ hbot -= ERL_SUB_BIN_SIZE;
+ res = (ErlSubBin *) hbot;
+ res->thing_word = HEADER_SUB_BIN;
+ res->size = size;
+ res->bitsize = bit_size;
+ res->bitoffs = bit_offset;
+ res->offs = 0;
+ res->is_writable = 0;
+ res->orig = *argp;
+ *argp = make_binary(hbot);
+ }
+ break;
+ }
+ break;
+ case FUN_SUBTAG:
+ {
+ ErlFunThing* funp = (ErlFunThing *) objp;
+
+ i = thing_arityval(hdr) + 2 + funp->num_free;
+ tp = htop;
+ while (i--) {
+ *htop++ = *objp++;
+ }
+#ifndef HYBRID /* FIND ME! */
+ funp = (ErlFunThing *) tp;
+ funp->next = off_heap->funs;
+ off_heap->funs = funp;
+ erts_refc_inc(&funp->fe->refc, 2);
+#endif
+ *argp = make_fun(tp);
+ }
+ break;
+ case EXTERNAL_PID_SUBTAG:
+ case EXTERNAL_PORT_SUBTAG:
+ case EXTERNAL_REF_SUBTAG:
+ {
+ ExternalThing *etp = (ExternalThing *) htop;
+
+ i = thing_arityval(hdr) + 1;
+ tp = htop;
+
+ while (i--) {
+ *htop++ = *objp++;
+ }
+
+ etp->next = off_heap->externals;
+ off_heap->externals = etp;
+ erts_refc_inc(&etp->node->refc, 2);
+
+ *argp = make_external(tp);
+ }
+ break;
+ case BIN_MATCHSTATE_SUBTAG:
+ erl_exit(ERTS_ABORT_EXIT,
+ "copy_struct: matchstate term not allowed");
+ default:
+ i = thing_arityval(hdr)+1;
+ hbot -= i;
+ tp = hbot;
+ *argp = make_boxed(hbot);
+ while (i--) {
+ *tp++ = *objp++;
+ }
+ }
+ break;
+ case TAG_PRIMARY_HEADER:
+ if (header_is_thing(obj) || hp == const_tuple) {
+ hp += header_arity(obj) + 1;
+ } else {
+ hp++;
+ }
+ break;
+ }
+ }
+
+#ifdef DEBUG
+ if (htop != hbot)
+ erl_exit(ERTS_ABORT_EXIT,
+ "Internal error in copy_struct() when copying %T:"
+ " htop=%p != hbot=%p (sz=%bpu)\n",
+ org_obj, htop, hbot, org_sz);
+#else
+ if (htop > hbot) {
+ erl_exit(ERTS_ABORT_EXIT,
+ "Internal error in copy_struct(): htop, hbot overrun\n");
+ }
+#endif
+ *hpp = (Eterm *) (hstart+hsize);
+ return res;
+}
+
+#ifdef HYBRID
+
+#ifdef BM_MESSAGE_SIZES
+# define BM_ADD(var,val) (var) += (val);
+#else
+# define BM_ADD(var,val)
+#endif
+
+#ifdef DEBUG
+# define CLEARMEM(PTR,SIZE) memset(PTR,0,SIZE*sizeof(Eterm))
+#else
+# define CLEARMEM(PTR,SIZE)
+#endif
+
+#ifdef INCREMENTAL
+#define GlobalAlloc(p, need, hp) \
+do { \
+ Uint n = (need); \
+ BM_ADD(words_copied,n); \
+ BM_SWAP_TIMER(copy,system); \
+ /* If a new collection cycle is started during copy, the message * \
+ * will end up in the old generation and all allocations * \
+ * thereafter must go directly into the old generation. */ \
+ if (alloc_old) { \
+ erts_incremental_gc((p),n,&dest,1); \
+ (hp) = erts_inc_alloc(n); \
+ } else { \
+ (hp) = IncAlloc((p),n,&dest,1); \
+ if (ma_gc_flags & GC_CYCLE_START) { \
+ alloc_old = 1; \
+ global_htop = global_heap; \
+ (hp) = erts_inc_alloc(n); \
+ } \
+ } \
+ CLEARMEM((hp),(n)); \
+ BM_SWAP_TIMER(system,copy); \
+} while(0)
+
+#else /* no INCREMENTAL */
+
+#define GlobalAlloc(p, need, hp) \
+do { \
+ Uint n = (need); \
+ total_need += n; \
+ if (total_need >= global_heap_sz) \
+ erl_exit(ERTS_ABORT_EXIT, "Copying a message (%d words) larger than the nursery simply won't work...\n", total_need); \
+ if (global_hend - n < global_htop) { \
+ BM_SWAP_TIMER(copy,system); \
+ erts_global_garbage_collect((p),total_need,NULL,0); \
+ BM_SWAP_TIMER(system,copy); \
+ total_need = 0; \
+ ma_src_top = 0; \
+ ma_dst_top = 0; \
+ ma_offset_top = 0; \
+ goto copy_start; \
+ } \
+ (hp) = global_htop; \
+ global_htop += n; \
+ BM_ADD(words_copied,n); \
+} while(0)
+#endif /* INCREMENTAL */
+
+/* Copy a message to the message area. */
+Eterm copy_struct_lazy(Process *from, Eterm orig, Uint offs)
+{
+ Eterm obj;
+ Eterm dest;
+#ifdef INCREMENTAL
+ int alloc_old = 0;
+#else
+ int total_need = 0;
+#endif
+
+ VERBOSE(DEBUG_MESSAGES,
+ ("COPY START; %T is sending a message @ 0x%016x\n%T\n",
+ from->id, orig, orig));
+
+#ifndef INCREMENTAL
+ copy_start:
+#endif
+ MA_STACK_PUSH(src,orig);
+ MA_STACK_PUSH(dst,&dest);
+ MA_STACK_PUSH(offset,offs);
+
+ while (ma_src_top > 0) {
+ obj = MA_STACK_POP(src);
+
+ /* copy_struct_lazy should never be called with something that
+ * does not need to be copied. Within the loop, nothing that does
+ * not need copying should be placed on the src stack.
+ */
+ ASSERT(!NO_COPY(obj));
+
+ switch (primary_tag(obj)) {
+ case TAG_PRIMARY_LIST: {
+ Eterm *hp;
+ Eterm *objp;
+
+ GlobalAlloc(from,2,hp);
+ objp = list_val(obj);
+
+ MA_STACK_UPDATE(dst,MA_STACK_POP(offset),make_list(hp));
+ MA_STACK_POP(dst);
+
+ /* TODO: Change the order below so that the CDR is pushed first. */
+
+ if (NO_COPY(*objp)) {
+ hp[0] = *objp;
+#ifdef INCREMENTAL
+ if (ptr_within(ptr_val(*objp),inc_fromspc,inc_fromend))
+ INC_STORE(gray,hp,2);
+#endif
+ } else {
+ MA_STACK_PUSH(src,*objp);
+ MA_STACK_PUSH(dst,hp);
+ MA_STACK_PUSH(offset,0);
+ }
+
+ objp++;
+
+ if (NO_COPY(*objp)) {
+ hp[1] = *objp;
+#ifdef INCREMENTAL
+ if (ptr_within(ptr_val(*objp),inc_fromspc,inc_fromend))
+ INC_STORE(gray,hp,2);
+#endif
+ }
+ else {
+ MA_STACK_PUSH(src,*objp);
+ MA_STACK_PUSH(dst,hp);
+ MA_STACK_PUSH(offset,1);
+ }
+ continue;
+ }
+
+ case TAG_PRIMARY_BOXED: {
+ Eterm *objp = boxed_val(obj);
+
+ switch (*objp & _TAG_HEADER_MASK) {
+ case ARITYVAL_SUBTAG: {
+ Uint ari = arityval(*objp);
+ Uint i;
+ Eterm *hp;
+ GlobalAlloc(from,ari + 1,hp);
+ /* A GC above might invalidate the value of objp */
+ objp = boxed_val(obj);
+ MA_STACK_UPDATE(dst,MA_STACK_POP(offset),make_tuple(hp));
+ MA_STACK_POP(dst);
+ *hp = *objp++;
+ for (i = 1; i <= ari; i++) {
+ switch (primary_tag(*objp)) {
+ case TAG_PRIMARY_LIST:
+ case TAG_PRIMARY_BOXED:
+ if (NO_COPY(*objp)) {
+ hp[i] = *objp;
+#ifdef INCREMENTAL
+ if (ptr_within(ptr_val(*objp),
+ inc_fromspc,inc_fromend))
+ INC_STORE(gray,hp,BOXED_NEED(hp,*hp));
+#endif
+ objp++;
+ } else {
+ MA_STACK_PUSH(src,*objp++);
+ MA_STACK_PUSH(dst,hp);
+ MA_STACK_PUSH(offset,i);
+ }
+ break;
+ default:
+ hp[i] = *objp++;
+ }
+ }
+ continue;
+ }
+
+ case REFC_BINARY_SUBTAG: {
+ ProcBin *pb;
+ Uint i = thing_arityval(*objp) + 1;
+ Eterm *hp;
+ GlobalAlloc(from,i,hp);
+ /* A GC above might invalidate the value of objp */
+ objp = boxed_val(obj);
+ MA_STACK_UPDATE(dst,MA_STACK_POP(offset),make_binary(hp));
+ MA_STACK_POP(dst);
+ pb = (ProcBin*) hp;
+ while (i--) {
+ *hp++ = *objp++;
+ }
+ erts_refc_inc(&pb->val->refc, 2);
+ pb->next = erts_global_offheap.mso;
+ erts_global_offheap.mso = pb;
+ erts_global_offheap.overhead += pb->size / sizeof(Eterm);
+ continue;
+ }
+
+ case FUN_SUBTAG: {
+ ErlFunThing *funp = (ErlFunThing*) objp;
+ Uint i = thing_arityval(*objp) + 1;
+ Uint j = i + 1 + funp->num_free;
+ Uint k = i;
+ Eterm *hp, *hp_start;
+ GlobalAlloc(from,j,hp);
+ /* A GC above might invalidate the value of objp */
+ objp = boxed_val(obj);
+ hp_start = hp;
+ MA_STACK_UPDATE(dst,MA_STACK_POP(offset),make_fun(hp));
+ MA_STACK_POP(dst);
+ funp = (ErlFunThing*) hp;
+ while (i--) {
+ *hp++ = *objp++;
+ }
+#ifndef HYBRID /* FIND ME! */
+ funp->next = erts_global_offheap.funs;
+ erts_global_offheap.funs = funp;
+ erts_refc_inc(&funp->fe->refc, 2);
+#endif
+ for (i = k; i < j; i++) {
+ switch (primary_tag(*objp)) {
+ case TAG_PRIMARY_LIST:
+ case TAG_PRIMARY_BOXED:
+ if (NO_COPY(*objp)) {
+#ifdef INCREMENTAL
+ if (ptr_within(ptr_val(*objp),
+ inc_fromspc,inc_fromend))
+ INC_STORE(gray,hp,BOXED_NEED(hp,*hp));
+#endif
+ *hp++ = *objp++;
+ } else {
+ MA_STACK_PUSH(src,*objp++);
+ MA_STACK_PUSH(dst,hp_start);
+ MA_STACK_PUSH(offset,i);
+ hp++;
+ }
+ break;
+ default:
+ *hp++ = *objp++;
+ }
+ }
+ continue;
+ }
+
+ case EXTERNAL_PID_SUBTAG:
+ case EXTERNAL_PORT_SUBTAG:
+ case EXTERNAL_REF_SUBTAG: {
+ ExternalThing *etp;
+ Uint i = thing_arityval(*objp) + 1;
+ Eterm *hp;
+ GlobalAlloc(from,i,hp);
+ /* A GC above might invalidate the value of objp */
+ objp = boxed_val(obj);
+ MA_STACK_UPDATE(dst,MA_STACK_POP(offset),make_external(hp));
+ MA_STACK_POP(dst);
+ etp = (ExternalThing*) hp;
+ while (i--) {
+ *hp++ = *objp++;
+ }
+
+ etp->next = erts_global_offheap.externals;
+ erts_global_offheap.externals = etp;
+ erts_refc_inc(&etp->node->refc, 2);
+ continue;
+ }
+
+ case SUB_BINARY_SUBTAG: {
+ ErlSubBin *sb = (ErlSubBin *) objp;
+ Eterm *hp;
+ Eterm res_binary;
+ Eterm real_bin = sb->orig;
+ Uint bit_offset = sb->bitoffs;
+ Uint bit_size = sb -> bitsize;
+ Uint sub_offset = sb->offs;
+ size_t size = sb->size;
+ Uint extra_bytes;
+ Uint real_size;
+ Uint sub_binary_heapneed;
+ if ((bit_size + bit_offset) > 8) {
+ extra_bytes = 2;
+ sub_binary_heapneed = ERL_SUB_BIN_SIZE;
+ } else if ((bit_size + bit_offset) > 0) {
+ extra_bytes = 1;
+ sub_binary_heapneed = ERL_SUB_BIN_SIZE;
+ } else {
+ extra_bytes = 0;
+ sub_binary_heapneed = 0;
+ }
+
+ real_size = size+extra_bytes;
+ objp = binary_val(real_bin);
+ if (thing_subtag(*objp) == HEAP_BINARY_SUBTAG) {
+ ErlHeapBin *from_bin;
+ ErlHeapBin *to_bin;
+ Uint i = heap_bin_size(real_size);
+ GlobalAlloc(from,i+sub_binary_heapneed,hp);
+ from_bin = (ErlHeapBin *) objp;
+ to_bin = (ErlHeapBin *) hp;
+ to_bin->thing_word = header_heap_bin(real_size);
+ to_bin->size = real_size;
+ sys_memcpy(to_bin->data, ((byte *)from_bin->data) +
+ sub_offset, real_size);
+ res_binary = make_binary(to_bin);
+ hp += i;
+ } else {
+ ProcBin *from_bin;
+ ProcBin *to_bin;
+
+ ASSERT(thing_subtag(*objp) == REFC_BINARY_SUBTAG);
+ from_bin = (ProcBin *) objp;
+ erts_refc_inc(&from_bin->val->refc, 2);
+ GlobalAlloc(from,PROC_BIN_SIZE+sub_binary_heapneed,hp);
+ to_bin = (ProcBin *) hp;
+ to_bin->thing_word = HEADER_PROC_BIN;
+ to_bin->size = real_size;
+ to_bin->val = from_bin->val;
+ to_bin->bytes = from_bin->bytes + sub_offset;
+ to_bin->next = erts_global_offheap.mso;
+ erts_global_offheap.mso = to_bin;
+ erts_global_offheap.overhead += to_bin->size / sizeof(Eterm);
+ res_binary=make_binary(to_bin);
+ hp += PROC_BIN_SIZE;
+ }
+ if (extra_bytes != 0) {
+ ErlSubBin* res;
+ res = (ErlSubBin *) hp;
+ res->thing_word = HEADER_SUB_BIN;
+ res->size = size;
+ res->bitsize = bit_size;
+ res->bitoffs = bit_offset;
+ res->offs = 0;
+ res->is_writable = 0;
+ res->orig = res_binary;
+ res_binary = make_binary(hp);
+ }
+ MA_STACK_UPDATE(dst,MA_STACK_POP(offset),res_binary);
+ MA_STACK_POP(dst);
+ continue;
+ }
+
+ case BIN_MATCHSTATE_SUBTAG:
+ erl_exit(ERTS_ABORT_EXIT,
+ "copy_struct_lazy: matchstate term not allowed");
+
+ default: {
+ Uint size = thing_arityval(*objp) + 1;
+ Eterm *hp;
+ GlobalAlloc(from,size,hp);
+ /* A GC above might invalidate the value of objp */
+ objp = boxed_val(obj);
+ MA_STACK_UPDATE(dst,MA_STACK_POP(offset),make_boxed(hp));
+ MA_STACK_POP(dst);
+ while (size--) {
+ *hp++ = *objp++;
+ }
+ continue;
+ }
+ }
+ continue;
+ }
+
+ case TAG_PRIMARY_HEADER:
+ ASSERT((obj & _TAG_HEADER_MASK) == ARITYVAL_SUBTAG);
+ {
+ Eterm *objp = &obj;
+ Uint ari = arityval(obj);
+ Uint i;
+ Eterm *hp;
+ GlobalAlloc(from,ari + 1,hp);
+ MA_STACK_UPDATE(dst,MA_STACK_POP(offset),make_tuple(hp));
+ MA_STACK_POP(dst);
+ *hp = *objp++;
+ for (i = 1; i <= ari; i++) {
+ switch (primary_tag(*objp)) {
+ case TAG_PRIMARY_LIST:
+ case TAG_PRIMARY_BOXED:
+ if (NO_COPY(*objp)) {
+#ifdef INCREMENTAL
+ if (ptr_within(ptr_val(*objp),inc_fromspc,inc_fromend))
+ INC_STORE(gray,hp,ari + 1);
+#endif
+ hp[i] = *objp++;
+ } else {
+ MA_STACK_PUSH(src,*objp++);
+ MA_STACK_PUSH(dst,hp);
+ MA_STACK_PUSH(offset,i);
+ }
+ break;
+ default:
+ hp[i] = *objp++;
+ }
+ }
+ continue;
+ }
+
+ default:
+ erl_exit(ERTS_ABORT_EXIT,
+ "%s, line %d: Internal error in copy_struct_lazy: 0x%08x\n",
+ __FILE__, __LINE__,obj);
+ }
+ }
+
+ VERBOSE(DEBUG_MESSAGES,
+ ("Copy allocated @ 0x%08lx:\n%T\n",
+ (unsigned long)ptr_val(dest),dest));
+
+ ma_gc_flags &= ~GC_CYCLE_START;
+
+ ASSERT(eq(orig, dest));
+ ASSERT(ma_src_top == 0);
+ ASSERT(ma_dst_top == 0);
+ ASSERT(ma_offset_top == 0);
+ return dest;
+}
+
+#undef NO_COPY
+#endif /* HYBRID */
+
+/*
+ * Copy a term that is guaranteed to be contained in a single
+ * heap block. The heap block is copied word by word, and any
+ * pointers are adjusted to point correctly into the new location.
+ *
+ * Typically used to copy a term from an ets table.
+ *
+ * NOTE: Assumes that term is a tuple (ptr is an untagged tuple ptr).
+ */
+Eterm
+copy_shallow(Eterm* ptr, Uint sz, Eterm** hpp, ErlOffHeap* off_heap)
+{
+ Eterm* tp = ptr;
+ Eterm* hp = *hpp;
+ Sint offs = hp - tp;
+
+ while (sz--) {
+ Eterm val = *tp++;
+
+ switch (primary_tag(val)) {
+ case TAG_PRIMARY_IMMED1:
+ *hp++ = val;
+ break;
+ case TAG_PRIMARY_LIST:
+ case TAG_PRIMARY_BOXED:
+ *hp++ = offset_ptr(val, offs);
+ break;
+ case TAG_PRIMARY_HEADER:
+ *hp++ = val;
+ switch (val & _HEADER_SUBTAG_MASK) {
+ case ARITYVAL_SUBTAG:
+ break;
+ case REFC_BINARY_SUBTAG:
+ {
+ ProcBin* pb = (ProcBin *) (hp-1);
+ int tari = thing_arityval(val);
+
+ sz -= tari;
+ while (tari--) {
+ *hp++ = *tp++;
+ }
+ erts_refc_inc(&pb->val->refc, 2);
+ pb->next = off_heap->mso;
+ off_heap->mso = pb;
+ off_heap->overhead += pb->size / sizeof(Eterm);
+ }
+ break;
+ case FUN_SUBTAG:
+ {
+#ifndef HYBRID /* FIND ME! */
+ ErlFunThing* funp = (ErlFunThing *) (hp-1);
+#endif
+ int tari = thing_arityval(val);
+
+ sz -= tari;
+ while (tari--) {
+ *hp++ = *tp++;
+ }
+#ifndef HYBRID /* FIND ME! */
+ funp->next = off_heap->funs;
+ off_heap->funs = funp;
+ erts_refc_inc(&funp->fe->refc, 2);
+#endif
+ }
+ break;
+ case EXTERNAL_PID_SUBTAG:
+ case EXTERNAL_PORT_SUBTAG:
+ case EXTERNAL_REF_SUBTAG:
+ {
+ ExternalThing* etp = (ExternalThing *) (hp-1);
+ int tari = thing_arityval(val);
+
+ sz -= tari;
+ while (tari--) {
+ *hp++ = *tp++;
+ }
+ etp->next = off_heap->externals;
+ off_heap->externals = etp;
+ erts_refc_inc(&etp->node->refc, 2);
+ }
+ break;
+ default:
+ {
+ int tari = header_arity(val);
+
+ sz -= tari;
+ while (tari--) {
+ *hp++ = *tp++;
+ }
+ }
+ break;
+ }
+ break;
+ }
+ }
+ *hpp = hp;
+ return make_tuple(ptr + offs);
+}
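+
+/*
+ * Illustrative example for copy_shallow() above: if an element of the
+ * source block is a list or boxed pointer into the same block, say to
+ * ptr+3, offset_ptr() rewrites the copied element to point at hp+3 in
+ * the destination; immediates and thing words are copied verbatim.
+ */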
diff --git a/erts/emulator/beam/decl.h b/erts/emulator/beam/decl.h
new file mode 100644
index 0000000000..da1be29d53
--- /dev/null
+++ b/erts/emulator/beam/decl.h
@@ -0,0 +1,55 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 1996-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+#ifndef __DECL_H__
+#define __DECL_H__
+
+#if defined(__STDC__) || defined(_MSC_VER)
+#define EXTERN_FUNCTION(t, f, x) extern t f x
+#define FUNCTION(t, f, x) t f x
+#define _DOTS_ ...
+#define _VOID_ void
+#elif defined(__cplusplus)
+#define EXTERN_FUNCTION(f, x) extern "C" { f x }
+#define FUNCTION(t, f, x) t f x
+#define _DOTS_ ...
+#define _VOID_ void
+#else
+#define EXTERN_FUNCTION(t, f, x) extern t f (/*x*/)
+#define FUNCTION(t, f, x) t f (/*x*/)
+#define _DOTS_
+#define _VOID_
+#endif
+
+/*
+** Example of declarations
+**
+** EXTERN_FUNCTION(void, foo, (int, int, char));
+** FUNCTION(void, bar, (int, char));
+**
+** struct funcs {
+** FUNCTION(int*, (*f1), (int, int));
+** FUNCTION(void, (*f2), (int, char));
+** FUNCTION(void, (*f3), (_VOID_));
+** FUNCTION(int, (*f4), (char*, _DOTS_));
+** };
+**
+*/
+
+#endif
diff --git a/erts/emulator/beam/dist.c b/erts/emulator/beam/dist.c
new file mode 100644
index 0000000000..e3094404e2
--- /dev/null
+++ b/erts/emulator/beam/dist.c
@@ -0,0 +1,3256 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 1996-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Distribution of Erlang messages to other nodes.
+ */
+
+
+/* define this to get a lot of debug output */
+/* #define ERTS_DIST_MSG_DBG */
+
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+
+#define ERTS_WANT_EXTERNAL_TAGS
+
+#include <stddef.h>
+#include "sys.h"
+#include "erl_vm.h"
+#include "global.h"
+#include "erl_process.h"
+#include "error.h"
+#include "dist.h"
+#include "bif.h"
+#include "external.h"
+#include "erl_binary.h"
+
+/* Turn this on to get printouts of all distribution messages
+ * which go on the line
+ */
+#if 0
+#define ERTS_DIST_MSG_DBG
+#endif
+#if 0
+#define ERTS_RAW_DIST_MSG_DBG
+#endif
+
+#if defined(ERTS_DIST_MSG_DBG) || defined(ERTS_RAW_DIST_MSG_DBG)
+static void bw(byte *buf, int sz)
+{
+ bin_write(ERTS_PRINT_STDERR,NULL,buf,sz);
+}
+#endif
+
+#ifdef ERTS_DIST_MSG_DBG
+static void
+dist_msg_dbg(ErtsDistExternal *edep, char *what, byte *buf, int sz)
+{
+ byte *extp = edep->extp;
+ Eterm msg;
+ Sint size = erts_decode_dist_ext_size(edep, 0);
+ if (size < 0) {
+ erts_fprintf(stderr,
+ "DIST MSG DEBUG: erts_decode_dist_ext_size(%s) failed:\n",
+ what);
+ bw(buf, sz);
+ }
+ else {
+ Eterm *hp;
+ ErlHeapFragment *mbuf = new_message_buffer(size);
+ hp = mbuf->mem;
+ msg = erts_decode_dist_ext(&hp, &mbuf->off_heap, edep);
+ if (is_value(msg))
+ erts_fprintf(stderr, " %s: %T\n", what, msg);
+ else {
+ erts_fprintf(stderr,
+ "DIST MSG DEBUG: erts_decode_dist_ext(%s) failed:\n",
+ what);
+ bw(buf, sz);
+ }
+ free_message_buffer(mbuf);
+ edep->extp = extp;
+ }
+}
+
+#endif
+
+
+
+#define PASS_THROUGH 'p' /* This code should go */
+
+int erts_is_alive; /* System must be blocked on change */
+
+/* distribution trap functions */
+Export* dsend2_trap = NULL;
+Export* dsend3_trap = NULL;
+/*Export* dsend_nosuspend_trap = NULL;*/
+Export* dlink_trap = NULL;
+Export* dunlink_trap = NULL;
+Export* dmonitor_node_trap = NULL;
+Export* dgroup_leader_trap = NULL;
+Export* dexit_trap = NULL;
+Export* dmonitor_p_trap = NULL;
+
+/* local variables */
+
+
+/* forward declarations */
+
+static void clear_dist_entry(DistEntry*);
+static int dsig_send(ErtsDSigData *, Eterm, Eterm, int);
+static void send_nodes_mon_msgs(Process *, Eterm, Eterm, Eterm, Eterm);
+static void init_nodes_monitors(void);
+
+static erts_smp_atomic_t no_caches;
+
+static void
+delete_cache(ErtsAtomCache *cache)
+{
+ if (cache) {
+ erts_free(ERTS_ALC_T_DCACHE, (void *) cache);
+ ASSERT(erts_smp_atomic_read(&no_caches) > 0);
+ erts_smp_atomic_dec(&no_caches);
+ }
+}
+
+
+static void
+create_cache(DistEntry *dep)
+{
+ int i;
+ ErtsAtomCache *cp;
+
+ ERTS_SMP_LC_ASSERT(
+ is_internal_port(dep->cid)
+ && erts_lc_is_port_locked(&erts_port[internal_port_index(dep->cid)]));
+ ASSERT(!dep->cache);
+
+ dep->cache = cp = (ErtsAtomCache*) erts_alloc(ERTS_ALC_T_DCACHE,
+ sizeof(ErtsAtomCache));
+ erts_smp_atomic_inc(&no_caches);
+ for (i = 0; i < sizeof(cp->in_arr)/sizeof(cp->in_arr[0]); i++) {
+ cp->in_arr[i] = THE_NON_VALUE;
+ cp->out_arr[i] = THE_NON_VALUE;
+ }
+}
+
+Uint erts_dist_cache_size(void)
+{
+ return (Uint) erts_smp_atomic_read(&no_caches)*sizeof(ErtsAtomCache);
+}
+
+static ErtsProcList *
+get_suspended_on_de(DistEntry *dep, Uint32 unset_qflgs)
+{
+ ERTS_SMP_LC_ASSERT(erts_smp_lc_spinlock_is_locked(&dep->qlock));
+ dep->qflgs &= ~unset_qflgs;
+ if (dep->qflgs & ERTS_DE_QFLG_EXIT) {
+ /* No resume when exit has been scheduled */
+ return NULL;
+ }
+ else {
+ ErtsProcList *plp;
+ plp = dep->suspended.first;
+ dep->suspended.first = NULL;
+ dep->suspended.last = NULL;
+ return plp;
+ }
+}
+
+/*
+** A full node name consists of "n@h".
+**
+** n must be a valid node name: a string matching ([a-zA-Z0-9_-])+
+**
+** h is not checked at all; we assume that we have a properly
+** configured machine where the networking is OK for the OS.
+**
+** We do check that there is not a second @ in the string, since
+** many distributed operations are guaranteed not to work then.
+*/
+
+
+static int is_node_name(char *ptr, int len)
+{
+ int c = '\0'; /* suppress use-before-set warning */
+ int pos = 0;
+
+ while (pos < len) {
+ c = ptr[pos++];
+ if (! ((c == '-') || (c == '_') ||
+ ((c >= 'a') && (c <= 'z')) ||
+ ((c >= 'A') && (c <= 'Z')) ||
+ ((c >= '0') && (c <= '9'))))
+ break;
+ }
+
+ /* Scanned past the name part: now we want to see a '@', and there
+ should be text both before and after it. */
+ if (c != '@' || pos < 2 || pos == len)
+ return 0;
+
+ while (pos < len) {
+ c = ptr[pos++];
+ if (c == '@')
+ return 0;
+ }
+
+ return 1;
+}
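+
+/*
+ * Illustrative examples for is_node_name() above:
+ * "foo@bar.local" -> 1 (name, '@', non-empty host)
+ * "@host" -> 0 (nothing before the '@')
+ * "a@b@c" -> 0 (a second '@' is rejected)
+ */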
+
+int is_node_name_atom(Eterm a)
+{
+ int i;
+ if(is_not_atom(a))
+ return 0;
+ i = atom_val(a);
+ ASSERT((i > 0) && (i < atom_table_size()) && (atom_tab(i) != NULL));
+ return is_node_name((char*)atom_tab(i)->name, atom_tab(i)->len);
+}
+
+typedef struct {
+ DistEntry *dep;
+} NetExitsContext;
+
+/*
+** This function is called when a distribution
+** port or process terminates
+*/
+static void doit_monitor_net_exits(ErtsMonitor *mon, void *vnecp)
+{
+ Process *rp;
+ ErtsMonitor *rmon;
+ DistEntry *dep = ((NetExitsContext *) vnecp)->dep;
+ ErtsProcLocks rp_locks = ERTS_PROC_LOCK_LINK;
+
+ rp = erts_pid2proc(NULL, 0, mon->pid, rp_locks);
+ if (!rp)
+ goto done;
+
+ if (mon->type == MON_ORIGIN) {
+ /* local pid is being monitored */
+ rmon = erts_remove_monitor(&(rp->monitors),mon->ref);
+ /* ASSERT(rmon != NULL); nope, can happen during process exit */
+ if (rmon != NULL) {
+ erts_destroy_monitor(rmon);
+ }
+ } else {
+ Eterm lhp[3];
+ Eterm watched;
+ ASSERT(mon->type == MON_TARGET);
+ rmon = erts_remove_monitor(&(rp->monitors),mon->ref);
+ /* ASSERT(rmon != NULL); can happen during process exit */
+ if (rmon != NULL) {
+ ASSERT(is_atom(rmon->name) || is_nil(rmon->name));
+ watched = (is_atom(rmon->name)
+ ? TUPLE2(lhp, rmon->name, dep->sysname)
+ : rmon->pid);
+#ifdef ERTS_SMP
+ rp_locks |= ERTS_PROC_LOCKS_MSG_SEND;
+ erts_smp_proc_lock(rp, ERTS_PROC_LOCKS_MSG_SEND);
+#endif
+ erts_queue_monitor_message(rp, &rp_locks, mon->ref, am_process,
+ watched, am_noconnection);
+ erts_destroy_monitor(rmon);
+ }
+ }
+ erts_smp_proc_unlock(rp, rp_locks);
+ done:
+ erts_destroy_monitor(mon);
+}
+
+typedef struct {
+ NetExitsContext *necp;
+ ErtsLink *lnk;
+} LinkNetExitsContext;
+
+/*
+** This is the function actually doing the job of sending exit messages
+** for links in a dist entry upon net_exit (the node goes down). NB:
+** only process links are handled here, not node monitors;
+** those reside in a separate tree....
+*/
+static void doit_link_net_exits_sub(ErtsLink *sublnk, void *vlnecp)
+{
+ ErtsLink *lnk = ((LinkNetExitsContext *) vlnecp)->lnk; /* the local pid */
+ ErtsLink *rlnk;
+ Process *rp;
+
+ ASSERT(lnk->type == LINK_PID);
+ if (is_internal_pid(lnk->pid)) {
+ int xres;
+ ErtsProcLocks rp_locks = ERTS_PROC_LOCK_LINK|ERTS_PROC_LOCKS_XSIG_SEND;
+
+ rp = erts_pid2proc(NULL, 0, lnk->pid, rp_locks);
+ if (!rp) {
+ goto done;
+ }
+
+ rlnk = erts_remove_link(&(rp->nlinks), sublnk->pid);
+ xres = erts_send_exit_signal(NULL,
+ sublnk->pid,
+ rp,
+ &rp_locks,
+ am_noconnection,
+ NIL,
+ NULL,
+ 0);
+
+ if (rlnk) {
+ erts_destroy_link(rlnk);
+ if (xres >= 0 && IS_TRACED_FL(rp, F_TRACE_PROCS)) {
+ /* We didn't exit the process and it is traced */
+ trace_proc(NULL, rp, am_getting_unlinked, sublnk->pid);
+ }
+ }
+ erts_smp_proc_unlock(rp, rp_locks);
+ }
+ done:
+ erts_destroy_link(sublnk);
+
+}
+
+
+/*
+** This function is called when a distribution
+** port or process terminates, once for each top-level link;
+** it in turn traverses the link subtree for that specific link node...
+*/
+static void doit_link_net_exits(ErtsLink *lnk, void *vnecp)
+{
+ LinkNetExitsContext lnec = {(NetExitsContext *) vnecp, lnk};
+ ASSERT(lnk->type == LINK_PID);
+ erts_sweep_links(ERTS_LINK_ROOT(lnk), &doit_link_net_exits_sub, (void *) &lnec);
+#ifdef DEBUG
+ ERTS_LINK_ROOT(lnk) = NULL;
+#endif
+ erts_destroy_link(lnk);
+}
+
+
+static void doit_node_link_net_exits(ErtsLink *lnk, void *vnecp)
+{
+ DistEntry *dep = ((NetExitsContext *) vnecp)->dep;
+ Eterm name = dep->sysname;
+ Process *rp;
+ ErtsLink *rlnk;
+ Uint i,n;
+ ASSERT(lnk->type == LINK_NODE);
+ if (is_internal_pid(lnk->pid)) {
+ ErtsProcLocks rp_locks = ERTS_PROC_LOCK_LINK;
+ rp = erts_pid2proc(NULL, 0, lnk->pid, rp_locks);
+ if (!rp) {
+ goto done;
+ }
+ rlnk = erts_remove_link(&(rp->nlinks), name);
+ if (rlnk != NULL) {
+ ASSERT(is_atom(rlnk->pid) && (rlnk->type == LINK_NODE));
+ erts_destroy_link(rlnk);
+ }
+ n = ERTS_LINK_REFC(lnk);
+ for (i = 0; i < n; ++i) {
+ ErlHeapFragment* bp;
+ ErlOffHeap *ohp;
+ Eterm tup;
+ Eterm *hp = erts_alloc_message_heap(3,&bp,&ohp,rp,&rp_locks);
+ tup = TUPLE2(hp, am_nodedown, name);
+ erts_queue_message(rp, &rp_locks, bp, tup, NIL);
+ }
+ erts_smp_proc_unlock(rp, rp_locks);
+ }
+ done:
+ erts_destroy_link(lnk);
+}
+
+
+/*
+ * Called from a currently running or exiting process.
+ */
+int erts_do_net_exits(DistEntry *dep, Eterm reason)
+{
+ Eterm nodename;
+
+ if (dep == erts_this_dist_entry) { /* Net kernel has died (clean up!!) */
+ Eterm nd_reason = (reason == am_no_network
+ ? am_no_network
+ : am_net_kernel_terminated);
+ erts_smp_rwmtx_rwlock(&erts_dist_table_rwmtx);
+
+ /* KILL all port controllers */
+ while(erts_visible_dist_entries || erts_hidden_dist_entries) {
+ DistEntry *tdep;
+ Eterm prt_id;
+ Port *prt;
+ if(erts_hidden_dist_entries)
+ tdep = erts_hidden_dist_entries;
+ else
+ tdep = erts_visible_dist_entries;
+ prt_id = tdep->cid;
+ ASSERT(is_internal_port(prt_id));
+ erts_smp_rwmtx_rwunlock(&erts_dist_table_rwmtx);
+
+ prt = erts_id2port(prt_id, NULL, 0);
+ if (prt) {
+ ASSERT(prt->status & ERTS_PORT_SFLG_DISTRIBUTION);
+ ASSERT(prt->dist_entry);
+ /* will call erts_do_net_exits() !!! */
+ erts_do_exit_port(prt, prt_id, nd_reason);
+ erts_port_release(prt);
+ }
+
+ erts_smp_rwmtx_rwlock(&erts_dist_table_rwmtx);
+ }
+
+ erts_smp_rwmtx_rwunlock(&erts_dist_table_rwmtx);
+
+ nodename = erts_this_dist_entry->sysname;
+ erts_smp_block_system(ERTS_BS_FLG_ALLOW_GC);
+ erts_set_this_node(am_Noname, 0);
+ erts_is_alive = 0;
+ send_nodes_mon_msgs(NULL, am_nodedown, nodename, am_visible, nd_reason);
+ erts_smp_release_system();
+
+ }
+ else { /* recursive call via erts_do_exit_port() will end up here */
+ NetExitsContext nec = {dep};
+ ErtsLink *nlinks;
+ ErtsLink *node_links;
+ ErtsMonitor *monitors;
+ Uint32 flags;
+
+ erts_smp_atomic_set(&dep->dist_cmd_scheduled, 1);
+ erts_smp_de_rwlock(dep);
+
+ ERTS_SMP_LC_ASSERT(is_internal_port(dep->cid)
+ && erts_lc_is_port_locked(&erts_port[internal_port_index(dep->cid)]));
+
+ if (erts_port_task_is_scheduled(&dep->dist_cmd))
+ erts_port_task_abort(dep->cid, &dep->dist_cmd);
+
+ if (dep->status & ERTS_DE_SFLG_EXITING) {
+#ifdef DEBUG
+ erts_smp_spin_lock(&dep->qlock);
+ ASSERT(dep->qflgs & ERTS_DE_QFLG_EXIT);
+ erts_smp_spin_unlock(&dep->qlock);
+#endif
+ }
+ else {
+ dep->status |= ERTS_DE_SFLG_EXITING;
+ erts_smp_spin_lock(&dep->qlock);
+ ASSERT(!(dep->qflgs & ERTS_DE_QFLG_EXIT));
+ dep->qflgs |= ERTS_DE_QFLG_EXIT;
+ erts_smp_spin_unlock(&dep->qlock);
+ }
+
+ erts_smp_de_links_lock(dep);
+ monitors = dep->monitors;
+ nlinks = dep->nlinks;
+ node_links = dep->node_links;
+ dep->monitors = NULL;
+ dep->nlinks = NULL;
+ dep->node_links = NULL;
+ erts_smp_de_links_unlock(dep);
+
+ nodename = dep->sysname;
+ flags = dep->flags;
+
+ erts_set_dist_entry_not_connected(dep);
+
+ erts_smp_de_rwunlock(dep);
+
+ erts_sweep_monitors(monitors, &doit_monitor_net_exits, (void *) &nec);
+ erts_sweep_links(nlinks, &doit_link_net_exits, (void *) &nec);
+ erts_sweep_links(node_links, &doit_node_link_net_exits, (void *) &nec);
+
+ send_nodes_mon_msgs(NULL,
+ am_nodedown,
+ nodename,
+ flags & DFLAG_PUBLISHED ? am_visible : am_hidden,
+ reason == am_normal ? am_connection_closed : reason);
+
+ clear_dist_entry(dep);
+
+ }
+ return 1;
+}
+
+static Export*
+trap_function(Eterm func, int arity)
+{
+ return erts_export_put(am_erlang, func, arity);
+}
+
+void init_dist(void)
+{
+ init_nodes_monitors();
+
+ erts_smp_atomic_init(&no_caches, 0);
+
+ /* Lookup/Install all references to trap functions */
+ dsend2_trap = trap_function(am_dsend,2);
+ dsend3_trap = trap_function(am_dsend,3);
+ /* dsend_nosuspend_trap = trap_function(am_dsend_nosuspend,2);*/
+ dlink_trap = trap_function(am_dlink,1);
+ dunlink_trap = trap_function(am_dunlink,1);
+ dmonitor_node_trap = trap_function(am_dmonitor_node,3);
+ dgroup_leader_trap = trap_function(am_dgroup_leader,2);
+ dexit_trap = trap_function(am_dexit, 2);
+ dmonitor_p_trap = trap_function(am_dmonitor_p, 2);
+}
+
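+/* Recover the enclosing Binary from an ErtsDistOutputBuf pointer. The
+ * obuf is allocated inside the binary's orig_bytes array (see
+ * alloc_dist_obuf() below), so a container_of-style offset calculation
+ * takes us back to the Binary header.
+ */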
+#define ErtsDistOutputBuf2Binary(OB) \
+ ((Binary *) (((char *) (OB)) - offsetof(Binary, orig_bytes)))
+
+static ERTS_INLINE ErtsDistOutputBuf *
+alloc_dist_obuf(Uint size)
+{
+ ErtsDistOutputBuf *obuf;
+ Uint obuf_size = sizeof(ErtsDistOutputBuf)+sizeof(byte)*(size-1);
+ Binary *bin = erts_bin_drv_alloc(obuf_size);
+ bin->flags = BIN_FLAG_DRV;
+ erts_refc_init(&bin->refc, 1);
+ bin->orig_size = (long) obuf_size;
+ obuf = (ErtsDistOutputBuf *) &bin->orig_bytes[0];
+#ifdef DEBUG
+ obuf->dbg_pattern = ERTS_DIST_OUTPUT_BUF_DBG_PATTERN;
+ ASSERT(bin == ErtsDistOutputBuf2Binary(obuf));
+#endif
+ return obuf;
+}
+
+static ERTS_INLINE void
+free_dist_obuf(ErtsDistOutputBuf *obuf)
+{
+ Binary *bin = ErtsDistOutputBuf2Binary(obuf);
+ ASSERT(obuf->dbg_pattern == ERTS_DIST_OUTPUT_BUF_DBG_PATTERN);
+ if (erts_refc_dectest(&bin->refc, 0) == 0)
+ erts_bin_free(bin);
+}
+
+static ERTS_INLINE Sint
+size_obuf(ErtsDistOutputBuf *obuf)
+{
+ Binary *bin = ErtsDistOutputBuf2Binary(obuf);
+ return bin->orig_size;
+}
+
+static void clear_dist_entry(DistEntry *dep)
+{
+ Sint obufsize = 0;
+ ErtsAtomCache *cache;
+ ErtsProcList *suspendees;
+ ErtsDistOutputBuf *obuf;
+
+ erts_smp_de_rwlock(dep);
+ cache = dep->cache;
+ dep->cache = NULL;
+
+#ifdef DEBUG
+ erts_smp_de_links_lock(dep);
+ ASSERT(!dep->nlinks);
+ ASSERT(!dep->node_links);
+ ASSERT(!dep->monitors);
+ erts_smp_de_links_unlock(dep);
+#endif
+
+ erts_smp_spin_lock(&dep->qlock);
+
+ if (!dep->out_queue.last)
+ obuf = dep->finalized_out_queue.first;
+ else {
+ dep->out_queue.last->next = dep->finalized_out_queue.first;
+ obuf = dep->out_queue.first;
+ }
+
+ dep->out_queue.first = NULL;
+ dep->out_queue.last = NULL;
+ dep->finalized_out_queue.first = NULL;
+ dep->finalized_out_queue.last = NULL;
+ dep->status = 0;
+ suspendees = get_suspended_on_de(dep, ERTS_DE_QFLGS_ALL);
+
+ erts_smp_spin_unlock(&dep->qlock);
+ erts_smp_atomic_set(&dep->dist_cmd_scheduled, 0);
+ dep->send = NULL;
+ erts_smp_de_rwunlock(dep);
+
+ erts_resume_processes(suspendees);
+
+ delete_cache(cache);
+
+ while (obuf) {
+ ErtsDistOutputBuf *fobuf;
+ fobuf = obuf;
+ obuf = obuf->next;
+ obufsize += size_obuf(fobuf);
+ free_dist_obuf(fobuf);
+ }
+
+ if (obufsize) {
+ erts_smp_spin_lock(&dep->qlock);
+ ASSERT(dep->qsize >= obufsize);
+ dep->qsize -= obufsize;
+ erts_smp_spin_unlock(&dep->qlock);
+ }
+}
+
+/*
+ * The erts_dsig_send_*() functions implemented below send asynchronous
+ * distributed signals to other Erlang nodes. Before sending a distributed
+ * signal, you need to prepare the operation by calling erts_dsig_prepare()
+ * (see dist.h).
+ *
+ * Note that the distributed signal send operation is truly asynchronous:
+ * the signal is not guaranteed to reach the receiver if the connection
+ * goes down before it has been delivered.
+ */
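+
+/* A minimal usage sketch (illustrative only and kept out of the build;
+ * 'dep', 'local' and 'remote' are hypothetical):
+ */
+#if 0
+ {
+ ErtsDSigData dsd;
+ int code = erts_dsig_prepare(&dsd, dep, NULL, ERTS_DSP_NO_LOCK, 0);
+ if (code == ERTS_DSIG_PREP_CONNECTED) {
+ code = erts_dsig_send_link(&dsd, local, remote);
+ /* ERTS_DSIG_SEND_OK, or ERTS_DSIG_SEND_YIELD if the caller
+ should yield after bumping reductions */
+ }
+ }
+#endif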
+
+/*
+** Send a DOP_LINK link message
+*/
+int
+erts_dsig_send_link(ErtsDSigData *dsdp, Eterm local, Eterm remote)
+{
+ Eterm ctl_heap[4];
+ Eterm ctl = TUPLE3(&ctl_heap[0], make_small(DOP_LINK), local, remote);
+
+ return dsig_send(dsdp, ctl, THE_NON_VALUE, 0);
+}
+
+int
+erts_dsig_send_unlink(ErtsDSigData *dsdp, Eterm local, Eterm remote)
+{
+ Eterm ctl_heap[4];
+ Eterm ctl = TUPLE3(&ctl_heap[0], make_small(DOP_UNLINK), local, remote);
+
+ return dsig_send(dsdp, ctl, THE_NON_VALUE, 0);
+}
+
+
+/* A local process that's being monitored by a remote one exits. We send:
+ {DOP_MONITOR_P_EXIT, Local pid or name, Remote pid, ref, reason},
+ which is rather sad as only the ref is needed, no pids... */
+int
+erts_dsig_send_m_exit(ErtsDSigData *dsdp, Eterm watcher, Eterm watched,
+ Eterm ref, Eterm reason)
+{
+ Eterm ctl;
+ Eterm ctl_heap[6];
+
+ ctl = TUPLE5(&ctl_heap[0], make_small(DOP_MONITOR_P_EXIT),
+ watched, watcher, ref, reason);
+
+#ifdef DEBUG
+ erts_smp_de_links_lock(dsdp->dep);
+ ASSERT(!erts_lookup_monitor(dsdp->dep->monitors, ref));
+ erts_smp_de_links_unlock(dsdp->dep);
+#endif
+
+ return dsig_send(dsdp, ctl, THE_NON_VALUE, 1);
+}
+
+/* We want to monitor a process (named or unnamed) on another node; we send:
+ {DOP_MONITOR_P, Local pid, Remote pid or name, Ref}, which is exactly what's
+ needed on the other side... */
+int
+erts_dsig_send_monitor(ErtsDSigData *dsdp, Eterm watcher, Eterm watched,
+ Eterm ref)
+{
+ Eterm ctl;
+ Eterm ctl_heap[5];
+
+ ctl = TUPLE4(&ctl_heap[0],
+ make_small(DOP_MONITOR_P),
+ watcher, watched, ref);
+
+ return dsig_send(dsdp, ctl, THE_NON_VALUE, 0);
+}
+
+/* A local process monitoring a remote one wants to stop monitoring, either
+ because of a demonitor bif call or because the local process died. We send
+ {DOP_DEMONITOR_P, Local pid, Remote pid or name, ref}, which is once again
+ rather redundant as only the ref will be needed on the other side... */
+int
+erts_dsig_send_demonitor(ErtsDSigData *dsdp, Eterm watcher,
+ Eterm watched, Eterm ref, int force)
+{
+ Eterm ctl;
+ Eterm ctl_heap[5];
+
+ ctl = TUPLE4(&ctl_heap[0],
+ make_small(DOP_DEMONITOR_P),
+ watcher, watched, ref);
+
+ return dsig_send(dsdp, ctl, THE_NON_VALUE, force);
+}
+
+int
+erts_dsig_send_msg(ErtsDSigData *dsdp, Eterm remote, Eterm message)
+{
+ Eterm ctl;
+ Eterm ctl_heap[5];
+ Eterm token = NIL;
+ Process *sender = dsdp->proc;
+
+ if (SEQ_TRACE_TOKEN(sender) != NIL) {
+ seq_trace_update_send(sender);
+ token = SEQ_TRACE_TOKEN(sender);
+ seq_trace_output(token, message, SEQ_TRACE_SEND, remote, sender);
+ }
+
+ if (token != NIL)
+ ctl = TUPLE4(&ctl_heap[0],
+ make_small(DOP_SEND_TT), am_Cookie, remote, token);
+ else
+ ctl = TUPLE3(&ctl_heap[0], make_small(DOP_SEND), am_Cookie, remote);
+ return dsig_send(dsdp, ctl, message, 0);
+}
+
+int
+erts_dsig_send_reg_msg(ErtsDSigData *dsdp, Eterm remote_name, Eterm message)
+{
+ Eterm ctl;
+ Eterm ctl_heap[6];
+ Eterm token = NIL;
+ Process *sender = dsdp->proc;
+
+ if (SEQ_TRACE_TOKEN(sender) != NIL) {
+ seq_trace_update_send(sender);
+ token = SEQ_TRACE_TOKEN(sender);
+ seq_trace_output(token, message, SEQ_TRACE_SEND, remote_name, sender);
+ }
+
+ if (token != NIL)
+ ctl = TUPLE5(&ctl_heap[0], make_small(DOP_REG_SEND_TT),
+ sender->id, am_Cookie, remote_name, token);
+ else
+ ctl = TUPLE4(&ctl_heap[0], make_small(DOP_REG_SEND),
+ sender->id, am_Cookie, remote_name);
+ return dsig_send(dsdp, ctl, message, 0);
+}
+
+/* local has died, deliver the exit signal to remote */
+int
+erts_dsig_send_exit_tt(ErtsDSigData *dsdp, Eterm local, Eterm remote,
+ Eterm reason, Eterm token)
+{
+ Eterm ctl;
+ Eterm ctl_heap[6];
+
+ if (token != NIL) {
+ seq_trace_update_send(dsdp->proc);
+ seq_trace_output_exit(token, reason, SEQ_TRACE_SEND, remote, local);
+ ctl = TUPLE5(&ctl_heap[0],
+ make_small(DOP_EXIT_TT), local, remote, token, reason);
+ } else {
+ ctl = TUPLE4(&ctl_heap[0], make_small(DOP_EXIT), local, remote, reason);
+ }
+ /* forced, i.e ignore busy */
+ return dsig_send(dsdp, ctl, THE_NON_VALUE, 1);
+}
+
+int
+erts_dsig_send_exit(ErtsDSigData *dsdp, Eterm local, Eterm remote, Eterm reason)
+{
+ Eterm ctl_heap[5];
+ Eterm ctl = TUPLE4(&ctl_heap[0],
+ make_small(DOP_EXIT), local, remote, reason);
+ /* forced, i.e ignore busy */
+ return dsig_send(dsdp, ctl, THE_NON_VALUE, 1);
+}
+
+int
+erts_dsig_send_exit2(ErtsDSigData *dsdp, Eterm local, Eterm remote, Eterm reason)
+{
+ Eterm ctl_heap[5];
+ Eterm ctl = TUPLE4(&ctl_heap[0],
+ make_small(DOP_EXIT2), local, remote, reason);
+
+ return dsig_send(dsdp, ctl, THE_NON_VALUE, 0);
+}
+
+
+int
+erts_dsig_send_group_leader(ErtsDSigData *dsdp, Eterm leader, Eterm remote)
+{
+ Eterm ctl_heap[4];
+ Eterm ctl = TUPLE3(&ctl_heap[0],
+ make_small(DOP_GROUP_LEADER), leader, remote);
+
+ return dsig_send(dsdp, ctl, THE_NON_VALUE, 0);
+}
+
+#if defined(PURIFY)
+# define PURIFY_MSG(msg) \
+ purify_printf("%s, line %d: %s", __FILE__, __LINE__, msg)
+#elif defined(VALGRIND)
+#include <valgrind/valgrind.h>
+#include <valgrind/memcheck.h>
+
+# define PURIFY_MSG(msg) \
+ do { \
+ char buf__[1]; size_t bufsz__ = sizeof(buf__); \
+ if (erts_sys_getenv("VALGRIND_LOG_XML", buf__, &bufsz__) >= 0) { \
+ VALGRIND_PRINTF("<erlang_error_log>" \
+ "%s, line %d: %s</erlang_error_log>\n", \
+ __FILE__, __LINE__, msg); \
+ } else { \
+ VALGRIND_PRINTF("%s, line %d: %s", __FILE__, __LINE__, msg); \
+ } \
+ } while (0)
+#else
+# define PURIFY_MSG(msg)
+#endif
+
+/*
+** Input from distribution port.
+** Input follows the distribution protocol v4.5
+**
+** The protocol is a 4 byte header protocol
+** the DOP_DATA is stripped by driver_output
+**
+** assert hlen == 0 !!!
+*/
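+
+/*
+** A rough sketch of what 'buf' holds once the header has been stripped
+** (orientation only):
+**
+**   old nodes: PASS_THROUGH byte | ctl tuple (ext format) | optional message
+**   with DFLAG_DIST_HDR_ATOM_CACHE: dist header | ctl tuple | optional message
+**
+** A zero length payload is a tick, see below.
+*/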
+int erts_net_message(Port *prt,
+ DistEntry *dep,
+ byte *hbuf,
+ int hlen,
+ byte *buf,
+ int len)
+{
+ ErtsDistExternal ede;
+ byte *t;
+ Sint ctl_len;
+ int orig_ctl_len;
+ Eterm arg;
+ Eterm from, to;
+ Eterm watcher, watched;
+ Eterm ref;
+ Eterm *tuple;
+ Eterm reason;
+ Process* rp;
+ Eterm ctl_default[64];
+ Eterm* ctl = ctl_default;
+ ErlOffHeap off_heap;
+ Eterm* hp;
+ Sint type;
+ Eterm token;
+ Uint token_size;
+ ErtsMonitor *mon;
+ ErtsLink *lnk;
+ int res;
+#ifdef ERTS_DIST_MSG_DBG
+ int orig_len = len;
+#endif
+
+ /* Thanks to Luke Gorrie */
+ off_heap.mso = NULL;
+#ifndef HYBRID /* FIND ME! */
+ off_heap.funs = NULL;
+#endif
+ off_heap.overhead = 0;
+ off_heap.externals = NULL;
+
+ ERTS_SMP_CHK_NO_PROC_LOCKS;
+
+ ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
+
+ if (!erts_is_alive)
+ return 0;
+ if (hlen > 0)
+ goto data_error;
+ if (len == 0) /* HANDLE TICK !!! */
+ return 0;
+
+#ifdef ERTS_RAW_DIST_MSG_DBG
+ erts_fprintf(stderr, "<< ");
+ bw(buf, len);
+#endif
+
+ if (dep->flags & DFLAG_DIST_HDR_ATOM_CACHE)
+ t = buf;
+ else {
+ /* Skip PASS_THROUGH */
+ t = buf+1;
+ len--;
+ }
+
+ if (len == 0) {
+ PURIFY_MSG("data error");
+ goto data_error;
+ }
+
+ res = erts_prepare_dist_ext(&ede, t, len, dep, dep->cache);
+
+ if (res >= 0)
+ res = ctl_len = erts_decode_dist_ext_size(&ede, 0);
+ else {
+#ifdef ERTS_DIST_MSG_DBG
+ erts_fprintf(stderr, "DIST MSG DEBUG: erts_prepare_dist_ext() failed:\n");
+ bw(buf, orig_len);
+#endif
+ ctl_len = 0;
+ }
+
+ if (res < 0) {
+#ifdef ERTS_DIST_MSG_DBG
+ erts_fprintf(stderr, "DIST MSG DEBUG: erts_decode_dist_ext_size(CTL) failed:\n");
+ bw(buf, orig_len);
+#endif
+ PURIFY_MSG("data error");
+ goto data_error;
+ }
+ orig_ctl_len = ctl_len;
+ if (ctl_len > sizeof(ctl_default)/sizeof(ctl_default[0])) {
+ ctl = erts_alloc(ERTS_ALC_T_DCTRL_BUF, ctl_len * sizeof(Eterm));
+ }
+ hp = ctl;
+
+ arg = erts_decode_dist_ext(&hp, &off_heap, &ede);
+ if (is_non_value(arg)) {
+#ifdef ERTS_DIST_MSG_DBG
+ erts_fprintf(stderr, "DIST MSG DEBUG: erts_dist_ext_size(CTL) failed:\n");
+ bw(buf, orig_len);
+#endif
+ PURIFY_MSG("data error");
+ goto data_error;
+ }
+ ctl_len = t - buf;
+
+#ifdef ERTS_DIST_MSG_DBG
+ erts_fprintf(stderr, "<<%s CTL: %T\n", len != orig_len ? "P" : " ", arg);
+#endif
+
+ if (is_not_tuple(arg) ||
+ (tuple = tuple_val(arg), arityval(*tuple) < 1) ||
+ is_not_small(tuple[1])) {
+ erts_dsprintf_buf_t *dsbufp = erts_create_logger_dsbuf();
+ erts_dsprintf(dsbufp, "Invalid distribution message: %.200T", arg);
+ erts_send_error_to_logger_nogl(dsbufp);
+ goto data_error;
+ }
+
+ token_size = 0;
+
+ switch (type = unsigned_val(tuple[1])) {
+ case DOP_LINK:
+ from = tuple[2];
+ to = tuple[3]; /* local proc to link to */
+
+ if (is_not_pid(from) || is_not_pid(to)) {
+ erts_dsprintf_buf_t *dsbufp = erts_create_logger_dsbuf();
+ PURIFY_MSG("data error");
+ erts_dsprintf(dsbufp,
+ "Invalid DOP_LINK distribution message: %.200T",
+ arg);
+ erts_send_error_to_logger_nogl(dsbufp);
+ goto data_error;
+ }
+
+ rp = erts_pid2proc_opt(NULL, 0,
+ to, ERTS_PROC_LOCK_LINK,
+ ERTS_P2P_FLG_ALLOW_OTHER_X);
+ if (!rp) {
+ /* This is tricky (we MUST force a distributed send) */
+ ErtsDSigData dsd;
+ int code;
+ code = erts_dsig_prepare(&dsd, dep, NULL, ERTS_DSP_NO_LOCK, 0);
+ if (code == ERTS_DSIG_PREP_CONNECTED) {
+ code = erts_dsig_send_exit(&dsd, to, from, am_noproc);
+ ASSERT(code == ERTS_DSIG_SEND_OK);
+ }
+ break;
+ }
+
+ erts_smp_de_links_lock(dep);
+ res = erts_add_link(&(rp->nlinks), LINK_PID, from);
+
+ if (res < 0) {
+ /* It was already there! Let's skip the rest... */
+ erts_smp_de_links_unlock(dep);
+ erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK);
+ break;
+ }
+ lnk = erts_add_or_lookup_link(&(dep->nlinks), LINK_PID, rp->id);
+ erts_add_link(&(ERTS_LINK_ROOT(lnk)), LINK_PID, from);
+ erts_smp_de_links_unlock(dep);
+
+ if (IS_TRACED_FL(rp, F_TRACE_PROCS))
+ trace_proc(NULL, rp, am_getting_linked, from);
+
+ erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK);
+ break;
+
+ case DOP_UNLINK: {
+ ErtsDistLinkData dld;
+ from = tuple[2];
+ to = tuple[3];
+
+ rp = erts_pid2proc_opt(NULL, 0,
+ to, ERTS_PROC_LOCK_LINK,
+ ERTS_P2P_FLG_ALLOW_OTHER_X);
+ if (!rp)
+ break;
+
+ lnk = erts_remove_link(&(rp->nlinks), from);
+
+ if (IS_TRACED_FL(rp, F_TRACE_PROCS) && lnk != NULL) {
+ trace_proc(NULL, rp, am_getting_unlinked, from);
+ }
+
+ erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK);
+
+ erts_remove_dist_link(&dld, to, from, dep);
+ erts_destroy_dist_link(&dld);
+ if (lnk)
+ erts_destroy_link(lnk);
+ break;
+ }
+
+ case DOP_MONITOR_P: {
+ /* A remote process wants to monitor us, we get:
+ {DOP_MONITOR_P, Remote pid, local pid or name, ref} */
+ Eterm name;
+
+ watcher = tuple[2];
+ watched = tuple[3]; /* local proc to monitor */
+ ref = tuple[4];
+
+ if (is_atom(watched)) {
+ name = watched;
+ rp = erts_whereis_process(NULL, 0,
+ watched, ERTS_PROC_LOCK_LINK,
+ ERTS_P2P_FLG_ALLOW_OTHER_X);
+ }
+ else {
+ name = NIL;
+ rp = erts_pid2proc_opt(NULL, 0,
+ watched, ERTS_PROC_LOCK_LINK,
+ ERTS_P2P_FLG_ALLOW_OTHER_X);
+ }
+
+ if (!rp) {
+ ErtsDSigData dsd;
+ int code;
+ code = erts_dsig_prepare(&dsd, dep, NULL, ERTS_DSP_NO_LOCK, 0);
+ if (code == ERTS_DSIG_PREP_CONNECTED) {
+ code = erts_dsig_send_m_exit(&dsd, watcher, watched, ref,
+ am_noproc);
+ ASSERT(code == ERTS_DSIG_SEND_OK);
+ }
+ }
+ else {
+ if (is_atom(watched))
+ watched = rp->id;
+ erts_smp_de_links_lock(dep);
+ erts_add_monitor(&(dep->monitors), MON_ORIGIN, ref, watched, name);
+ erts_add_monitor(&(rp->monitors), MON_TARGET, ref, watcher, name);
+ erts_smp_de_links_unlock(dep);
+ erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK);
+ }
+
+ break;
+ }
+
+ case DOP_DEMONITOR_P:
+ /* A remote node informs us that a local pid is no longer monitored.
+ We get {DOP_DEMONITOR_P, Remote pid, Local pid or name, ref},
+ We need only the ref of course */
+
+ /* watcher = tuple[2]; */
+ /* watched = tuple[3]; May be an atom in case of monitor name */
+ ref = tuple[4];
+
+ erts_smp_de_links_lock(dep);
+ mon = erts_remove_monitor(&(dep->monitors),ref);
+ erts_smp_de_links_unlock(dep);
+ /* ASSERT(mon != NULL); can happen in case of broken dist message */
+ if (mon == NULL) {
+ break;
+ }
+ watched = mon->pid;
+ erts_destroy_monitor(mon);
+ rp = erts_pid2proc_opt(NULL, 0,
+ watched, ERTS_PROC_LOCK_LINK,
+ ERTS_P2P_FLG_ALLOW_OTHER_X);
+ if (!rp) {
+ break;
+ }
+ mon = erts_remove_monitor(&(rp->monitors),ref);
+ erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK);
+ ASSERT(mon != NULL);
+ if (mon == NULL) {
+ break;
+ }
+ erts_destroy_monitor(mon);
+ break;
+
+ case DOP_NODE_LINK: /* XXX never sent ?? */
+ break;
+
+ case DOP_REG_SEND_TT:
+ token_size = size_object(tuple[5]);
+ /* Fall through ... */
+ case DOP_REG_SEND:
+ /* {DOP_REG_SEND, From, Cookie, ToName} -- Message */
+ /* {DOP_REG_SEND_TT, From, Cookie, ToName, TraceToken} -- Message */
+
+ /*
+ * There is intentionally no testing of the cookie (it is always '')
+ * from R9B and onwards.
+ */
+#ifdef ERTS_DIST_MSG_DBG
+ dist_msg_dbg(&ede, "MSG", buf, orig_len);
+#endif
+
+ from = tuple[2];
+ to = tuple[4];
+ rp = erts_whereis_process(NULL, 0, to, 0, ERTS_P2P_FLG_SMP_INC_REFC);
+ if (rp) {
+ Uint xsize = (type == DOP_REG_SEND
+ ? 0
+ : ERTS_HEAP_FRAG_SIZE(token_size));
+ ErtsProcLocks locks = 0;
+ ErtsDistExternal *ede_copy;
+
+ ede_copy = erts_make_dist_ext_copy(&ede, xsize);
+ if (type == DOP_REG_SEND) {
+ token = NIL;
+ } else {
+ ErlHeapFragment *heap_frag;
+ ErlOffHeap *ohp;
+ ASSERT(xsize);
+ heap_frag = erts_dist_ext_trailer(ede_copy);
+ ERTS_INIT_HEAP_FRAG(heap_frag, token_size);
+ hp = heap_frag->mem;
+ ohp = &heap_frag->off_heap;
+ token = tuple[5];
+ token = copy_struct(token, token_size, &hp, ohp);
+ }
+
+ erts_queue_dist_message(rp, &locks, ede_copy, token);
+ if (locks)
+ erts_smp_proc_unlock(rp, locks);
+ erts_smp_proc_dec_refc(rp);
+ }
+ break;
+
+ case DOP_SEND_TT:
+ token_size = size_object(tuple[4]);
+ /* Fall through ... */
+ case DOP_SEND:
+ /*
+ * There is intentionally no testing of the cookie (it is always '')
+ * from R9B and onwards.
+ */
+#ifdef ERTS_DIST_MSG_DBG
+ dist_msg_dbg(&ede, "MSG", buf, orig_len);
+#endif
+
+ to = tuple[3];
+ rp = erts_pid2proc_opt(NULL, 0, to, 0, ERTS_P2P_FLG_SMP_INC_REFC);
+ if (rp) {
+ Uint xsize = type == DOP_SEND ? 0 : ERTS_HEAP_FRAG_SIZE(token_size);
+ ErtsProcLocks locks = 0;
+ ErtsDistExternal *ede_copy;
+
+ ede_copy = erts_make_dist_ext_copy(&ede, xsize);
+ if (type == DOP_SEND) {
+ token = NIL;
+ } else {
+ ErlHeapFragment *heap_frag;
+ ErlOffHeap *ohp;
+ ASSERT(xsize);
+ heap_frag = erts_dist_ext_trailer(ede_copy);
+ ERTS_INIT_HEAP_FRAG(heap_frag, token_size);
+ hp = heap_frag->mem;
+ ohp = &heap_frag->off_heap;
+ token = tuple[4];
+ token = copy_struct(token, token_size, &hp, ohp);
+ }
+
+ erts_queue_dist_message(rp, &locks, ede_copy, token);
+ if (locks)
+ erts_smp_proc_unlock(rp, locks);
+ erts_smp_proc_dec_refc(rp);
+ }
+ break;
+
+ case DOP_MONITOR_P_EXIT: {
+ /* A process on the remote node that we are monitoring has died; we get
+ {DOP_MONITOR_P_EXIT, Remote pid or name, Local pid, ref, reason} */
+
+
+ Eterm lhp[3];
+ Eterm sysname;
+ ErtsProcLocks rp_locks = ERTS_PROC_LOCKS_MSG_SEND|ERTS_PROC_LOCK_LINK;
+
+ /* watched = tuple[2]; */ /* remote proc which died */
+ /* watcher = tuple[3]; */
+ ref = tuple[4];
+ reason = tuple[5];
+
+ erts_smp_de_links_lock(dep);
+ sysname = dep->sysname;
+ mon = erts_remove_monitor(&(dep->monitors), ref);
+ /*
+ * If demonitor was performed at the same time as the
+ * monitored process exits, monitoring side will have
+ * removed info about monitor. In this case, do nothing
+ * and everything will be as it should.
+ */
+ erts_smp_de_links_unlock(dep);
+ if (mon == NULL) {
+ break;
+ }
+ rp = erts_pid2proc(NULL, 0, mon->pid, rp_locks);
+ if (rp == NULL) {
+ break;
+ }
+
+ erts_destroy_monitor(mon);
+
+ mon = erts_remove_monitor(&(rp->monitors),ref);
+
+ if (mon == NULL) {
+ erts_smp_proc_unlock(rp, rp_locks);
+ break;
+ }
+
+ watched = (is_not_nil(mon->name)
+ ? TUPLE2(&lhp[0], mon->name, sysname)
+ : mon->pid);
+
+ erts_queue_monitor_message(rp, &rp_locks,
+ ref, am_process, watched, reason);
+ erts_smp_proc_unlock(rp, rp_locks);
+ erts_destroy_monitor(mon);
+ break;
+ }
+
+ case DOP_EXIT_TT:
+ case DOP_EXIT: {
+ ErtsDistLinkData dld;
+ ErtsProcLocks rp_locks = ERTS_PROC_LOCK_LINK|ERTS_PROC_LOCKS_XSIG_SEND;
+ /* 'from', which 'to' is linked to, died */
+ if (type == DOP_EXIT) {
+ from = tuple[2];
+ to = tuple[3];
+ reason = tuple[4];
+ token = NIL;
+ } else {
+ from = tuple[2];
+ to = tuple[3];
+ token = tuple[4];
+ reason = tuple[5];
+ }
+ if (is_not_internal_pid(to)) {
+ erts_dsprintf_buf_t *dsbufp = erts_create_logger_dsbuf();
+ PURIFY_MSG("data error");
+ erts_dsprintf(dsbufp,
+ "Invalid DOP_EXIT distribution message: %.200T",
+ arg);
+ erts_send_error_to_logger_nogl(dsbufp);
+ goto data_error;
+ }
+
+ rp = erts_pid2proc(NULL, 0, to, rp_locks);
+ if (!rp)
+ lnk = NULL;
+ else {
+ lnk = erts_remove_link(&(rp->nlinks), from);
+
+ /* If lnk == NULL, we have unlinked on this side, i.e.
+ * ignore exit.
+ */
+ if (lnk) {
+ int xres;
+#if 0
+ /* Arndt: Maybe it should never be 'kill', but it can be,
+ namely when a linked process does exit(kill). Until we know
+ whether that is incorrect and what should happen instead,
+ we leave the assertion out. */
+ ASSERT(reason != am_kill); /* should never be kill (killed) */
+#endif
+ xres = erts_send_exit_signal(NULL,
+ from,
+ rp,
+ &rp_locks,
+ reason,
+ token,
+ NULL,
+ ERTS_XSIG_FLG_IGN_KILL);
+ if (xres >= 0 && IS_TRACED_FL(rp, F_TRACE_PROCS)) {
+ /* We didn't exit the process and it is traced */
+ trace_proc(NULL, rp, am_getting_unlinked, from);
+ }
+ }
+ erts_smp_proc_unlock(rp, rp_locks);
+ }
+ erts_remove_dist_link(&dld, to, from, dep);
+ if (lnk)
+ erts_destroy_link(lnk);
+ erts_destroy_dist_link(&dld);
+ break;
+ }
+ case DOP_EXIT2_TT:
+ case DOP_EXIT2: {
+ ErtsProcLocks rp_locks = ERTS_PROC_LOCKS_XSIG_SEND;
+ /* 'from' sends an exit signal to 'to' */
+ if (type == DOP_EXIT2) {
+ from = tuple[2];
+ to = tuple[3];
+ reason = tuple[4];
+ token = NIL;
+ } else {
+ from = tuple[2];
+ to = tuple[3];
+ token = tuple[4];
+ reason = tuple[5];
+ }
+ rp = erts_pid2proc_opt(NULL, 0, to, rp_locks,
+ ERTS_P2P_FLG_SMP_INC_REFC);
+ if (rp) {
+ (void) erts_send_exit_signal(NULL,
+ from,
+ rp,
+ &rp_locks,
+ reason,
+ token,
+ NULL,
+ 0);
+ erts_smp_proc_unlock(rp, rp_locks);
+ erts_smp_proc_dec_refc(rp);
+ }
+ break;
+ }
+ case DOP_GROUP_LEADER:
+ from = tuple[2]; /* Group leader */
+ to = tuple[3]; /* new member */
+ if (is_not_pid(from))
+ break;
+
+ rp = erts_pid2proc(NULL, 0, to, ERTS_PROC_LOCK_MAIN);
+ if (!rp)
+ break;
+ rp->group_leader = STORE_NC_IN_PROC(rp, from);
+ erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_MAIN);
+ break;
+
+ default: {
+ erts_dsprintf_buf_t *dsbufp = erts_create_logger_dsbuf();
+ erts_dsprintf(dsbufp,
+ "Illegal value in distribution dispatch switch: "
+ "%.200T",
+ arg);
+ erts_send_error_to_logger_nogl(dsbufp);
+ PURIFY_MSG("data error");
+ goto data_error;
+ }
+ }
+
+ if (off_heap.mso) {
+ erts_cleanup_mso(off_heap.mso);
+ }
+ if (off_heap.externals) {
+ erts_cleanup_externals(off_heap.externals);
+ }
+#ifndef HYBRID /* FIND ME! */
+ if (off_heap.funs) {
+ erts_cleanup_funs(off_heap.funs);
+ }
+#endif
+ if (ctl != ctl_default) {
+ erts_free(ERTS_ALC_T_DCTRL_BUF, (void *) ctl);
+ }
+ ERTS_SMP_CHK_NO_PROC_LOCKS;
+ return 0;
+
+ data_error:
+ if (off_heap.mso) {
+ erts_cleanup_mso(off_heap.mso);
+ }
+ if (off_heap.externals) {
+ erts_cleanup_externals(off_heap.externals);
+ }
+#ifndef HYBRID /* FIND ME! */
+ if (off_heap.funs) {
+ erts_cleanup_funs(off_heap.funs);
+ }
+#endif
+ if (ctl != ctl_default) {
+ erts_free(ERTS_ALC_T_DCTRL_BUF, (void *) ctl);
+ }
+ erts_do_exit_port(prt, dep->cid, am_killed);
+ ERTS_SMP_CHK_NO_PROC_LOCKS;
+ return -1;
+}
+
+#define ERTS_DE_BUSY_LIMIT (128*1024)
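+/* Once more than ERTS_DE_BUSY_LIMIT bytes are queued on a dist entry,
+   the entry is marked busy (ERTS_DE_QFLG_BUSY) and senders are
+   suspended in dsig_send() below until erts_dist_command() has drained
+   the queue below the limit again. */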
+
+static int
+dsig_send(ErtsDSigData *dsdp, Eterm ctl, Eterm msg, int force_busy)
+{
+ Eterm cid;
+ int suspended = 0;
+ int resume = 0;
+ Uint32 pass_through_size;
+ Uint data_size, dhdr_ext_size;
+ ErtsAtomCacheMap *acmp;
+ ErtsDistOutputBuf *obuf;
+ DistEntry *dep = dsdp->dep;
+ Uint32 flags = dep->flags;
+ Process *c_p = dsdp->proc;
+
+ if (!c_p || dsdp->no_suspend)
+ force_busy = 1;
+
+ ERTS_SMP_LC_ASSERT(!c_p
+ || (ERTS_PROC_LOCK_MAIN
+ == erts_proc_lc_my_proc_locks(c_p)));
+
+ if (!erts_is_alive)
+ return ERTS_DSIG_SEND_OK;
+
+ if (flags & DFLAG_DIST_HDR_ATOM_CACHE) {
+ acmp = erts_get_atom_cache_map(c_p);
+ pass_through_size = 0;
+ }
+ else {
+ acmp = NULL;
+ pass_through_size = 1;
+ }
+
+#ifdef ERTS_DIST_MSG_DBG
+ erts_fprintf(stderr, ">>%s CTL: %T\n", pass_through_size ? "P" : " ", ctl);
+ if (is_value(msg))
+ erts_fprintf(stderr, " MSG: %T\n", msg);
+#endif
+
+ data_size = pass_through_size;
+ erts_reset_atom_cache_map(acmp);
+ data_size += erts_encode_dist_ext_size(ctl, flags, acmp);
+ if (is_value(msg))
+ data_size += erts_encode_dist_ext_size(msg, flags, acmp);
+ erts_finalize_atom_cache_map(acmp);
+
+ dhdr_ext_size = erts_encode_ext_dist_header_size(acmp);
+ data_size += dhdr_ext_size;
+
+ obuf = alloc_dist_obuf(data_size);
+ obuf->ext_endp = &obuf->data[0] + pass_through_size + dhdr_ext_size;
+
+ /* Encode internal version of dist header */
+ obuf->extp = erts_encode_ext_dist_header_setup(obuf->ext_endp, acmp);
+ /* Encode control message */
+ erts_encode_dist_ext(ctl, &obuf->ext_endp, flags, acmp);
+ if (is_value(msg)) {
+ /* Encode message */
+ erts_encode_dist_ext(msg, &obuf->ext_endp, flags, acmp);
+ }
+
+ ASSERT(obuf->extp < obuf->ext_endp);
+ ASSERT(&obuf->data[0] <= obuf->extp - pass_through_size);
+ ASSERT(obuf->ext_endp <= &obuf->data[0] + data_size);
+
+ data_size = obuf->ext_endp - obuf->extp;
+
+ /*
+ * Signal encoded; now verify that the connection still exists,
+ * and if so enqueue the signal and schedule it for send.
+ */
+ obuf->next = NULL;
+ erts_smp_de_rlock(dep);
+ cid = dep->cid;
+ if (cid != dsdp->cid
+ || dep->connection_id != dsdp->connection_id
+ || dep->status & ERTS_DE_SFLG_EXITING) {
+ /* Not the same connection as when we started; drop message... */
+ erts_smp_de_runlock(dep);
+ free_dist_obuf(obuf);
+ }
+ else {
+ ErtsProcList *plp = NULL;
+ erts_smp_spin_lock(&dep->qlock);
+ dep->qsize += size_obuf(obuf);
+ if (dep->qsize >= ERTS_DE_BUSY_LIMIT)
+ dep->qflgs |= ERTS_DE_QFLG_BUSY;
+ if (!force_busy && (dep->qflgs & ERTS_DE_QFLG_BUSY)) {
+ erts_smp_spin_unlock(&dep->qlock);
+
+ plp = erts_proclist_create(c_p);
+ plp->next = NULL;
+ erts_suspend(c_p, ERTS_PROC_LOCK_MAIN, NULL);
+ suspended = 1;
+ erts_smp_spin_lock(&dep->qlock);
+ }
+
+ /* Enqueue obuf on dist entry */
+ if (dep->out_queue.last)
+ dep->out_queue.last->next = obuf;
+ else
+ dep->out_queue.first = obuf;
+ dep->out_queue.last = obuf;
+
+ if (!force_busy) {
+ if (!(dep->qflgs & ERTS_DE_QFLG_BUSY)) {
+ if (suspended)
+ resume = 1; /* was busy when we started, but isn't now */
+ }
+ else {
+ /* Enqueue suspended process on dist entry */
+ ASSERT(plp);
+ if (dep->suspended.last)
+ dep->suspended.last->next = plp;
+ else
+ dep->suspended.first = plp;
+ dep->suspended.last = plp;
+ }
+ }
+
+ erts_smp_spin_unlock(&dep->qlock);
+ erts_schedule_dist_command(NULL, dep);
+ erts_smp_de_runlock(dep);
+
+ if (resume) {
+ erts_resume(c_p, ERTS_PROC_LOCK_MAIN);
+ erts_proclist_destroy(plp);
+ /*
+ * Note that the calling process still has to yield as if it
+ * had suspended. If not, the calling process could later be
+ * erroneously scheduled when it shouldn't be.
+ */
+ }
+ }
+
+ if (c_p) {
+ int reds;
+ /*
+ * Bump reductions on calling process.
+ *
+ * This is the reduction cost: always a base cost of 8 reductions,
+ * plus 16 reductions per kilobyte of generated external data.
+ */
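+ /* For example, 64 kB of external data costs 8 + 64*16 = 1032
+ reductions with the shift and cap below. */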
+
+ data_size >>= (10-4);
+#if defined(ARCH_64)
+ data_size &= 0x003fffffffffffff;
+#elif defined(ARCH_32)
+ data_size &= 0x003fffff;
+#else
+# error "Ohh come on ... !?!"
+#endif
+ reds = 8 + ((int) data_size > 1000000 ? 1000000 : (int) data_size);
+ BUMP_REDS(c_p, reds);
+ }
+
+ if (suspended) {
+ if (!resume && erts_system_monitor_flags.busy_dist_port)
+ monitor_generic(c_p, am_busy_dist_port, cid);
+ return ERTS_DSIG_SEND_YIELD;
+ }
+ return ERTS_DSIG_SEND_OK;
+}
+
+
+static Uint
+dist_port_command(Port *prt, ErtsDistOutputBuf *obuf)
+{
+ int fpe_was_unmasked;
+ Uint size = obuf->ext_endp - obuf->extp;
+
+ ERTS_SMP_CHK_NO_PROC_LOCKS;
+ ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
+
+ if (size > (Uint) INT_MAX)
+ erl_exit(ERTS_ABORT_EXIT,
+ "Absurdly large distribution output data buffer "
+ "(%bpu bytes) passed.\n",
+ size);
+
+ prt->caller = NIL;
+ fpe_was_unmasked = erts_block_fpe();
+ (*prt->drv_ptr->output)((ErlDrvData) prt->drv_data,
+ (char*) obuf->extp,
+ (int) size);
+ erts_unblock_fpe(fpe_was_unmasked);
+ return size;
+}
+
+static Uint
+dist_port_commandv(Port *prt, ErtsDistOutputBuf *obuf)
+{
+ int fpe_was_unmasked;
+ Uint size = obuf->ext_endp - obuf->extp;
+ SysIOVec iov[2];
+ ErlDrvBinary* bv[2];
+ ErlIOVec eiov;
+
+ ERTS_SMP_CHK_NO_PROC_LOCKS;
+ ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
+
+ if (size > (Uint) INT_MAX)
+ erl_exit(ERTS_ABORT_EXIT,
+ "Absurdly large distribution output data buffer "
+ "(%bpu bytes) passed.\n",
+ size);
+
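+ /* The first element of the I/O vector is left empty; by convention
+ it is reserved for header data, which distribution output does
+ not use. */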
+ iov[0].iov_base = NULL;
+ iov[0].iov_len = 0;
+ bv[0] = NULL;
+
+ iov[1].iov_base = obuf->extp;
+ iov[1].iov_len = size;
+ bv[1] = Binary2ErlDrvBinary(ErtsDistOutputBuf2Binary(obuf));
+
+ eiov.vsize = 2;
+ eiov.size = size;
+ eiov.iov = iov;
+ eiov.binv = bv;
+
+ ASSERT(prt->drv_ptr->outputv);
+
+ prt->caller = NIL;
+ fpe_was_unmasked = erts_block_fpe();
+ (*prt->drv_ptr->outputv)((ErlDrvData) prt->drv_data, &eiov);
+ erts_unblock_fpe(fpe_was_unmasked);
+
+ return size;
+}
+
+
+#if defined(ARCH_64)
+#define ERTS_PORT_REDS_MASK__ 0x003fffffffffffffL
+#elif defined(ARCH_32)
+#define ERTS_PORT_REDS_MASK__ 0x003fffff
+#else
+# error "Ohh come on ... !?!"
+#endif
+
+#define ERTS_PORT_REDS_DIST_CMD_START 5
+#define ERTS_PORT_REDS_DIST_CMD_FINALIZE 3
+#define ERTS_PORT_REDS_DIST_CMD_EXIT 200
+#define ERTS_PORT_REDS_DIST_CMD_RESUMED 5
+#define ERTS_PORT_REDS_DIST_CMD_DATA(SZ) \
+ ((SZ) < (1 << 10) \
+ ? ((Sint) 1) \
+ : ((((Sint) (SZ)) >> 10) & ((Sint) ERTS_PORT_REDS_MASK__)))
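+/* I.e. roughly one reduction per kilobyte written: a buffer under 1 kB
+   costs 1 reduction, and a 1 MB buffer costs 1024. */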
+
+int
+erts_dist_command(Port *prt, int reds_limit)
+{
+ Sint reds = ERTS_PORT_REDS_DIST_CMD_START;
+ int prt_busy;
+ int de_busy;
+ Uint32 status;
+ Uint32 flags;
+ Uint32 qflgs;
+ Sint obufsize = 0;
+ ErtsDistOutputQueue oq, foq;
+ DistEntry *dep = prt->dist_entry;
+ Uint (*send)(Port *prt, ErtsDistOutputBuf *obuf);
+
+ ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
+
+ erts_refc_inc(&dep->refc, 1); /* Otherwise dist_entry might be
+ removed if port command fails */
+
+ erts_smp_atomic_xchg(&dep->dist_cmd_scheduled, 0);
+
+ erts_smp_de_rlock(dep);
+ flags = dep->flags;
+ status = dep->status;
+ send = dep->send;
+ erts_smp_de_runlock(dep);
+
+ if (status & ERTS_DE_SFLG_EXITING) {
+ erts_do_exit_port(prt, prt->id, am_killed);
+ erts_deref_dist_entry(dep);
+ return reds + ERTS_PORT_REDS_DIST_CMD_EXIT;
+ }
+
+ ASSERT(send);
+
+ /*
+ * We need to remove both out queues from the
+ * dist entry while passing it to port command;
+ * otherwise, port command will free the buffers
+ * in the queues on failure and we'll end up with
+ * a mess.
+ */
+
+ erts_smp_spin_lock(&dep->qlock);
+ oq.first = dep->out_queue.first;
+ oq.last = dep->out_queue.last;
+ dep->out_queue.first = NULL;
+ dep->out_queue.last = NULL;
+ qflgs = dep->qflgs;
+ erts_smp_spin_unlock(&dep->qlock);
+
+ foq.first = dep->finalized_out_queue.first;
+ foq.last = dep->finalized_out_queue.last;
+ dep->finalized_out_queue.first = NULL;
+ dep->finalized_out_queue.last = NULL;
+
+ if (reds > reds_limit)
+ goto preempted;
+
+ prt_busy = (int) (prt->status & ERTS_PORT_SFLG_PORT_BUSY);
+ de_busy = (int) (qflgs & ERTS_DE_QFLG_BUSY);
+
+ if (prt_busy) {
+ if (!de_busy) {
+ erts_smp_spin_lock(&dep->qlock);
+ dep->qflgs |= ERTS_DE_QFLG_BUSY;
+ erts_smp_spin_unlock(&dep->qlock);
+ de_busy = 1;
+ }
+ }
+ else if (foq.first) {
+ int preempt = 0;
+ do {
+ Uint size;
+ ErtsDistOutputBuf *fob;
+
+ size = (*send)(prt, foq.first);
+#ifdef ERTS_RAW_DIST_MSG_DBG
+ erts_fprintf(stderr, ">> ");
+ bw(foq.first->extp, size);
+#endif
+ reds += ERTS_PORT_REDS_DIST_CMD_DATA(size);
+ fob = foq.first;
+ obufsize += size_obuf(fob);
+ foq.first = foq.first->next;
+ free_dist_obuf(fob);
+ preempt = reds > reds_limit || (prt->status & ERTS_PORT_SFLGS_DEAD);
+ if (prt->status & ERTS_PORT_SFLG_PORT_BUSY) {
+ erts_smp_spin_lock(&dep->qlock);
+ dep->qflgs |= ERTS_DE_QFLG_BUSY;
+ erts_smp_spin_unlock(&dep->qlock);
+ de_busy = prt_busy = 1;
+ break;
+ }
+ } while (foq.first && !preempt);
+ if (!foq.first)
+ foq.last = NULL;
+ if (preempt)
+ goto preempted;
+ }
+
+ if (prt_busy) {
+ if (oq.first) {
+ ErtsDistOutputBuf *ob;
+ int preempt;
+ finalize_only:
+ preempt = 0;
+ ob = oq.first;
+ ASSERT(ob);
+ do {
+ ob->extp = erts_encode_ext_dist_header_finalize(ob->extp,
+ dep->cache);
+ if (!(flags & DFLAG_DIST_HDR_ATOM_CACHE))
+ *--ob->extp = PASS_THROUGH; /* Old node; 'pass through'
+ needed */
+ ASSERT(&ob->data[0] <= ob->extp && ob->extp < ob->ext_endp);
+ reds += ERTS_PORT_REDS_DIST_CMD_FINALIZE;
+ preempt = reds > reds_limit;
+ if (preempt)
+ break;
+ ob = ob->next;
+ } while (ob);
+ /*
+ * At least one buffer was finalized; if we got preempted,
+ * ob points to the last buffer that we finalized.
+ */
+ if (foq.last)
+ foq.last->next = oq.first;
+ else
+ foq.first = oq.first;
+ if (!preempt) {
+ /* All buffers finalized */
+ foq.last = oq.last;
+ oq.first = oq.last = NULL;
+ }
+ else {
+ /* Not all buffers finalized; split oq. */
+ foq.last = ob;
+ oq.first = ob->next;
+ if (oq.first)
+ ob->next = NULL;
+ else
+ oq.last = NULL;
+ }
+ if (preempt)
+ goto preempted;
+ }
+ }
+ else {
+ int preempt = 0;
+ while (oq.first && !preempt) {
+ ErtsDistOutputBuf *fob;
+ Uint size;
+ oq.first->extp
+ = erts_encode_ext_dist_header_finalize(oq.first->extp,
+ dep->cache);
+ reds += ERTS_PORT_REDS_DIST_CMD_FINALIZE;
+ if (!(flags & DFLAG_DIST_HDR_ATOM_CACHE))
+ *--oq.first->extp = PASS_THROUGH; /* Old node; 'pass through'
+ needed */
+ ASSERT(&oq.first->data[0] <= oq.first->extp
+ && oq.first->extp < oq.first->ext_endp);
+ size = (*send)(prt, oq.first);
+#ifdef ERTS_RAW_DIST_MSG_DBG
+ erts_fprintf(stderr, ">> ");
+ bw(oq.first->extp, size);
+#endif
+ reds += ERTS_PORT_REDS_DIST_CMD_DATA(size);
+ fob = oq.first;
+ obufsize += size_obuf(fob);
+ oq.first = oq.first->next;
+ free_dist_obuf(fob);
+ preempt = reds > reds_limit || (prt->status & ERTS_PORT_SFLGS_DEAD);
+ if (prt->status & ERTS_PORT_SFLG_PORT_BUSY) {
+ erts_smp_spin_lock(&dep->qlock);
+ dep->qflgs |= ERTS_DE_QFLG_BUSY;
+ erts_smp_spin_unlock(&dep->qlock);
+ de_busy = prt_busy = 1;
+ if (oq.first && !preempt)
+ goto finalize_only;
+ }
+ }
+
+ ASSERT(!oq.first || preempt);
+
+ /*
+ * Preempt if not all buffers have been handled.
+ */
+ if (preempt && oq.first)
+ goto preempted;
+
+#ifdef DEBUG
+ oq.last = NULL;
+#endif
+ ASSERT(!oq.first);
+ ASSERT(!foq.first && !foq.last);
+
+ /*
+ * Everything that was buffered when we started has now been
+ * written to the port. If the port isn't busy but the dist
+ * entry is, and we haven't got too much queued on the dist
+ * entry, set the dist entry in a non-busy state and resume
+ * suspended processes.
+ */
+ erts_smp_spin_lock(&dep->qlock);
+ ASSERT(dep->qsize >= obufsize);
+ dep->qsize -= obufsize;
+ obufsize = 0;
+ if (de_busy && !prt_busy && dep->qsize < ERTS_DE_BUSY_LIMIT) {
+ ErtsProcList *suspendees;
+ int resumed;
+ suspendees = get_suspended_on_de(dep, ERTS_DE_QFLG_BUSY);
+ erts_smp_spin_unlock(&dep->qlock);
+
+ resumed = erts_resume_processes(suspendees);
+ reds += resumed*ERTS_PORT_REDS_DIST_CMD_RESUMED;
+ de_busy = 0;
+ }
+ else
+ erts_smp_spin_unlock(&dep->qlock);
+ }
+
+ ASSERT(!oq.first && !oq.last);
+
+ done:
+
+ if (obufsize != 0) {
+ ASSERT(obufsize > 0);
+ erts_smp_spin_lock(&dep->qlock);
+ ASSERT(dep->qsize >= obufsize);
+ dep->qsize -= obufsize;
+ erts_smp_spin_unlock(&dep->qlock);
+ }
+
+ ASSERT(foq.first || !foq.last);
+ ASSERT(!foq.first || foq.last);
+ ASSERT(!dep->finalized_out_queue.first);
+ ASSERT(!dep->finalized_out_queue.last);
+
+ if (foq.first) {
+ dep->finalized_out_queue.first = foq.first;
+ dep->finalized_out_queue.last = foq.last;
+ }
+
+ /* Avoid wrapping reduction counter... */
+ if (reds > INT_MAX/2)
+ reds = INT_MAX/2;
+
+ erts_deref_dist_entry(dep);
+
+ return reds;
+
+ preempted:
+
+ ASSERT(oq.first || !oq.last);
+ ASSERT(!oq.first || oq.last);
+
+ if (prt->status & ERTS_PORT_SFLGS_DEAD) {
+ /*
+ * Port died during port command; clean up 'oq'
+ * and 'foq'. Things buffered in the dist entry after
+ * we began processing the queues have already been
+ * cleaned up when the port terminated.
+ */
+
+ if (oq.first)
+ oq.last->next = foq.first;
+ else
+ oq.first = foq.first;
+
+ while (oq.first) {
+ ErtsDistOutputBuf *fob = oq.first;
+ oq.first = oq.first->next;
+ obufsize += size_obuf(fob);
+ free_dist_obuf(fob);
+ }
+
+ foq.first = NULL;
+ foq.last = NULL;
+
+#ifdef DEBUG
+ erts_smp_spin_lock(&dep->qlock);
+ ASSERT(dep->qsize == obufsize);
+ erts_smp_spin_unlock(&dep->qlock);
+#endif
+ }
+ else {
+ if (oq.first) {
+ /*
+ * Unhandled buffers need to be put back first
+ * in the out_queue.
+ */
+ erts_smp_spin_lock(&dep->qlock);
+ dep->qsize -= obufsize;
+ obufsize = 0;
+ oq.last->next = dep->out_queue.first;
+ dep->out_queue.first = oq.first;
+ if (!dep->out_queue.last)
+ dep->out_queue.last = oq.last;
+ erts_smp_spin_unlock(&dep->qlock);
+ }
+
+ erts_schedule_dist_command(prt, NULL);
+ }
+ goto done;
+}
+
+void
+erts_dist_port_not_busy(Port *prt)
+{
+ erts_schedule_dist_command(prt, NULL);
+}
+
+void
+erts_kill_dist_connection(DistEntry *dep, Uint32 connection_id)
+{
+ erts_smp_de_rwlock(dep);
+ if (is_internal_port(dep->cid)
+ && connection_id == dep->connection_id
+ && !(dep->status & ERTS_DE_SFLG_EXITING)) {
+
+ dep->status |= ERTS_DE_SFLG_EXITING;
+
+ erts_smp_spin_lock(&dep->qlock);
+ ASSERT(!(dep->qflgs & ERTS_DE_QFLG_EXIT));
+ dep->qflgs |= ERTS_DE_QFLG_EXIT;
+ erts_smp_spin_unlock(&dep->qlock);
+
+ erts_schedule_dist_command(NULL, dep);
+ }
+ erts_smp_de_rwunlock(dep);
+}
+
+struct print_to_data {
+ int to;
+ void *arg;
+};
+
+static void doit_print_monitor_info(ErtsMonitor *mon, void *vptdp)
+{
+ int to = ((struct print_to_data *) vptdp)->to;
+ void *arg = ((struct print_to_data *) vptdp)->arg;
+ Process *rp;
+ ErtsMonitor *rmon;
+ rp = erts_pid2proc_unlocked(mon->pid);
+ if (!rp || (rmon = erts_lookup_monitor(rp->monitors, mon->ref)) == NULL) {
+ erts_print(to, arg, "Warning, stray monitor for: %T\n", mon->pid);
+ } else if (mon->type == MON_ORIGIN) {
+ /* Local pid is being monitored */
+ erts_print(to, arg, "Remotely monitored by: %T %T\n",
+ mon->pid, rmon->pid);
+ } else {
+ erts_print(to, arg, "Remote monitoring: %T ", mon->pid);
+ if (is_not_atom(rmon->pid))
+ erts_print(to, arg, "%T\n", rmon->pid);
+ else
+ erts_print(to, arg, "{%T, %T}\n",
+ rmon->name,
+ rmon->pid); /* which in this case is the
+ remote system name... */
+ }
+}
+
+static void print_monitor_info(int to, void *arg, ErtsMonitor *mon)
+{
+ struct print_to_data ptd = {to, arg};
+ erts_doforall_monitors(mon,&doit_print_monitor_info,&ptd);
+}
+
+typedef struct {
+ struct print_to_data *ptdp;
+ Eterm from;
+} PrintLinkContext;
+
+static void doit_print_link_info2(ErtsLink *lnk, void *vpplc)
+{
+ PrintLinkContext *pplc = (PrintLinkContext *) vpplc;
+ erts_print(pplc->ptdp->to, pplc->ptdp->arg, "Remote link: %T %T\n",
+ pplc->from, lnk->pid);
+}
+
+static void doit_print_link_info(ErtsLink *lnk, void *vptdp)
+{
+ if (is_internal_pid(lnk->pid) && erts_pid2proc_unlocked(lnk->pid)) {
+ PrintLinkContext plc = {(struct print_to_data *) vptdp, lnk->pid};
+ erts_doforall_links(ERTS_LINK_ROOT(lnk), &doit_print_link_info2, &plc);
+ }
+}
+
+static void print_link_info(int to, void *arg, ErtsLink *lnk)
+{
+ struct print_to_data ptd = {to, arg};
+ erts_doforall_links(lnk, &doit_print_link_info, (void *) &ptd);
+}
+
+typedef struct {
+ struct print_to_data ptd;
+ Eterm sysname;
+} PrintNodeLinkContext;
+
+
+static void doit_print_nodelink_info(ErtsLink *lnk, void *vpcontext)
+{
+ PrintNodeLinkContext *pcontext = vpcontext;
+
+ if (is_internal_pid(lnk->pid) && erts_pid2proc_unlocked(lnk->pid))
+ erts_print(pcontext->ptd.to, pcontext->ptd.arg,
+ "Remote monitoring: %T %T\n", lnk->pid, pcontext->sysname);
+}
+
+static void print_nodelink_info(int to, void *arg, ErtsLink *lnk, Eterm sysname)
+{
+ PrintNodeLinkContext context = {{to, arg}, sysname};
+ erts_doforall_links(lnk, &doit_print_nodelink_info, &context);
+}
+
+
+static int
+info_dist_entry(int to, void *arg, DistEntry *dep, int visible, int connected)
+{
+
+ if (visible && connected) {
+ erts_print(to, arg, "=visible_node:");
+ } else if (connected) {
+ erts_print(to, arg, "=hidden_node:");
+ } else {
+ erts_print(to, arg, "=not_connected:");
+ }
+ erts_print(to, arg, "%d\n", dist_entry_channel_no(dep));
+
+ if(connected && is_nil(dep->cid)) {
+ erts_print(to, arg,
+ "Error: Not connected node still registered as connected:%T\n",
+ dep->sysname);
+ return 0;
+ }
+
+ if(!connected && is_not_nil(dep->cid)) {
+ erts_print(to, arg,
+ "Error: Connected node not registered as connected:%T\n",
+ dep->sysname);
+ return 0;
+ }
+
+ erts_print(to, arg, "Name: %T", dep->sysname);
+#ifdef DEBUG
+ erts_print(to, arg, " (refc=%d)", erts_refc_read(&dep->refc, 1));
+#endif
+ erts_print(to, arg, "\n");
+ if (!connected && is_nil(dep->cid)) {
+ if (dep->nlinks) {
+ erts_print(to, arg, "Error: Got links to not connected node:%T\n",
+ dep->sysname);
+ }
+ return 0;
+ }
+
+ erts_print(to, arg, "Controller: %T\n", dep->cid, to);
+
+ erts_print_node_info(to, arg, dep->sysname, NULL, NULL);
+ print_monitor_info(to, arg, dep->monitors);
+ print_link_info(to, arg, dep->nlinks);
+ print_nodelink_info(to, arg, dep->node_links, dep->sysname);
+
+ return 0;
+}
+
+int distribution_info(int to, void *arg) /* Called by break handler */
+{
+ DistEntry *dep;
+
+ erts_print(to, arg, "=node:%T\n", erts_this_dist_entry->sysname);
+
+ if (erts_this_node->sysname == am_Noname) {
+ erts_print(to, arg, "=no_distribution\n");
+ return(0);
+ }
+
+#if 0
+ if (!erts_visible_dist_entries && !erts_hidden_dist_entries)
+ erts_print(to, arg, "Alive but not holding any connections \n");
+#endif
+
+ for(dep = erts_visible_dist_entries; dep; dep = dep->next) {
+ info_dist_entry(to, arg, dep, 1, 1);
+ }
+
+ for(dep = erts_hidden_dist_entries; dep; dep = dep->next) {
+ info_dist_entry(to, arg, dep, 0, 1);
+ }
+
+ for (dep = erts_not_connected_dist_entries; dep; dep = dep->next) {
+ info_dist_entry(to, arg, dep, 0, 0);
+ }
+
+ return(0);
+}
+
+/****************************************************************************
+ DISTRIBUTION BIFS:
+
+ setnode/2 -- start distribution
+ setnode/3 -- set node controller
+
+ node/1 -- return objects node name
+ node/0 -- return this node name
+ nodes/0 -- return a list of all (non hidden) nodes
+ is_alive -- return true if distribution is running else false
+ monitor_node -- turn on/off node monitoring
+
+ node controller only:
+ dist_exit/3 -- send exit signals from remote to local process
+ dist_link/2 -- link a remote process to a local
+ dist_unlink/2 -- unlink a remote from a local
+****************************************************************************/
+
+
+
+/**********************************************************************
+ ** Set the node name of the current node; fail if the node name is
+ ** already set.
+ ** setnode(name@host, Creation)
+ ** Checks that pointers to the trap functions in module erlang have
+ ** been installed:
+ ** erlang:dsend/2
+ ** erlang:dlink/1
+ ** erlang:dunlink/1
+ ** erlang:dmonitor_node/3
+ ** erlang:dgroup_leader/2
+ ** erlang:dexit/2
+ ** -- are these needed ?
+ ** dexit/1
+ ***********************************************************************/
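+
+/* Called from the Erlang side when starting distribution, as
+   erlang:setnode(Node, Creation); Creation must be in 0..3 and
+   net_kernel must be registered (see the checks below). */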
+
+BIF_RETTYPE setnode_2(BIF_ALIST_2)
+{
+ Process *net_kernel;
+ Uint creation;
+
+ /* valid creation ? */
+ if(!term_to_Uint(BIF_ARG_2, &creation))
+ goto error;
+ if(creation > 3)
+ goto error;
+
+ /* valid node name ? */
+ if (!is_node_name_atom(BIF_ARG_1))
+ goto error;
+
+ if (BIF_ARG_1 == am_Noname) /* can't use this name !! */
+ goto error;
+ if (erts_is_alive) /* must not be alive! */
+ goto error;
+
+ /* Check that all trap functions are defined !! */
+ if (dsend2_trap->address == NULL ||
+ dsend3_trap->address == NULL ||
+ /* dsend_nosuspend_trap->address == NULL ||*/
+ dlink_trap->address == NULL ||
+ dunlink_trap->address == NULL ||
+ dmonitor_node_trap->address == NULL ||
+ dgroup_leader_trap->address == NULL ||
+ dmonitor_p_trap->address == NULL ||
+ dexit_trap->address == NULL) {
+ goto error;
+ }
+
+ net_kernel = erts_whereis_process(BIF_P, ERTS_PROC_LOCK_MAIN,
+ am_net_kernel, ERTS_PROC_LOCK_MAIN, 0);
+ if (!net_kernel)
+ goto error;
+
+ /* By setting dist_entry==erts_this_dist_entry and F_DISTRIBUTION on
+ net_kernel, erts_do_net_exits() will be called when net_kernel
+ is terminated !! */
+ (void *) ERTS_PROC_SET_DIST_ENTRY(net_kernel,
+ ERTS_PROC_LOCK_MAIN,
+ erts_this_dist_entry);
+ erts_refc_inc(&erts_this_dist_entry->refc, 2);
+ net_kernel->flags |= F_DISTRIBUTION;
+
+ if (net_kernel != BIF_P)
+ erts_smp_proc_unlock(net_kernel, ERTS_PROC_LOCK_MAIN);
+
+#ifdef DEBUG
+ erts_smp_rwmtx_rwlock(&erts_dist_table_rwmtx);
+ ASSERT(!erts_visible_dist_entries && !erts_hidden_dist_entries);
+ erts_smp_rwmtx_rwunlock(&erts_dist_table_rwmtx);
+#endif
+
+ erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ erts_smp_block_system(ERTS_BS_FLG_ALLOW_GC);
+ erts_set_this_node(BIF_ARG_1, (Uint32) creation);
+ erts_is_alive = 1;
+ send_nodes_mon_msgs(NULL, am_nodeup, BIF_ARG_1, am_visible, NIL);
+ erts_smp_release_system();
+ erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
+
+ BIF_RET(am_true);
+
+ error:
+ BIF_ERROR(BIF_P, BADARG);
+}
+
+/**********************************************************************
+ ** Allocate a dist entry, set the node name, and install the connection
+ ** handler.
+ ** setnode_3(Node, Cid, {Flags, Version, IC, OC})
+ ** Flags = flag field, where the flags are specified in dist.h
+ ** Version = distribution version, >= 1
+ ** IC = in_cookie (ignored)
+ ** OC = out_cookie (ignored)
+ **
+ ** Note that in distribution protocols above 1, the cookies are always
+ ** the atom ''; cookies are not sent in the distribution messages but
+ ** are only used in the handshake.
+ **
+ ***********************************************************************/
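+
+/* A plausible call from the Erlang side (concrete values illustrative
+   only; Flags must include DFLAG_EXTENDED_REFERENCES):
+       erlang:setnode(Node, Port, {Flags, 5, '', ''})
+*/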
+
+BIF_RETTYPE setnode_3(BIF_ALIST_3)
+{
+ BIF_RETTYPE ret;
+ Uint flags;
+ unsigned long version;
+ Eterm ic, oc;
+ Eterm *tp;
+ DistEntry *dep = NULL;
+ Port *pp = NULL;
+
+ /* Prepare for success */
+ ERTS_BIF_PREP_RET(ret, am_true);
+
+ /*
+ * Check and pick out arguments
+ */
+
+ if (!is_node_name_atom(BIF_ARG_1) ||
+ is_not_internal_port(BIF_ARG_2) ||
+ (erts_this_node->sysname == am_Noname)) {
+ goto badarg;
+ }
+
+ if (!is_tuple(BIF_ARG_3))
+ goto badarg;
+ tp = tuple_val(BIF_ARG_3);
+ if (*tp++ != make_arityval(4))
+ goto badarg;
+ if (!is_small(*tp))
+ goto badarg;
+ flags = unsigned_val(*tp++);
+ if (!is_small(*tp) || (version = unsigned_val(*tp)) == 0)
+ goto badarg;
+ ic = *(++tp);
+ oc = *(++tp);
+ if (!is_atom(ic) || !is_atom(oc))
+ goto badarg;
+
+ /* DFLAG_EXTENDED_REFERENCES is compulsory from R9 and forward */
+ if (!(DFLAG_EXTENDED_REFERENCES & flags)) {
+ erts_dsprintf_buf_t *dsbufp = erts_create_logger_dsbuf();
+ erts_dsprintf(dsbufp, "%T", BIF_P->id);
+ if (BIF_P->reg)
+ erts_dsprintf(dsbufp, " (%T)", BIF_P->reg->name);
+ erts_dsprintf(dsbufp,
+ " attempted to enable connection to node %T "
+ "which is not able to handle extended references.\n",
+ BIF_ARG_1);
+ erts_send_error_to_logger(BIF_P->group_leader, dsbufp);
+ goto badarg;
+ }
+
+ /*
+ * Arguments seem to be in order.
+ */
+
+ /* get dist_entry */
+ dep = erts_find_or_insert_dist_entry(BIF_ARG_1);
+ if (dep == erts_this_dist_entry)
+ goto badarg;
+ else if (!dep)
+ goto system_limit; /* Should never happen!!! */
+
+ pp = erts_id2port(BIF_ARG_2, BIF_P, ERTS_PROC_LOCK_MAIN);
+ erts_smp_de_rwlock(dep);
+
+ if (!pp || (pp->status & ERTS_PORT_SFLG_EXITING))
+ goto badarg;
+
+ if ((pp->drv_ptr->flags & ERL_DRV_FLAG_SOFT_BUSY) == 0)
+ goto badarg;
+
+ if (dep->cid == BIF_ARG_2 && pp->dist_entry == dep)
+ goto done; /* Already set */
+
+ if (dep->status & ERTS_DE_SFLG_EXITING) {
+ /* Suspend on dist entry waiting for the exit to finish */
+ ErtsProcList *plp = erts_proclist_create(BIF_P);
+ plp->next = NULL;
+ erts_suspend(BIF_P, ERTS_PROC_LOCK_MAIN, NULL);
+ erts_smp_spin_lock(&dep->qlock);
+ if (dep->suspended.last)
+ dep->suspended.last->next = plp;
+ else
+ dep->suspended.first = plp;
+ dep->suspended.last = plp;
+ erts_smp_spin_unlock(&dep->qlock);
+ goto yield;
+ }
+
+ ASSERT(!(dep->status & ERTS_DE_SFLG_EXITING));
+
+ if (pp->dist_entry || is_not_nil(dep->cid))
+ goto badarg;
+
+ erts_port_status_bor_set(pp, ERTS_PORT_SFLG_DISTRIBUTION);
+
+ pp->dist_entry = dep;
+
+ dep->version = version;
+ dep->creation = 0;
+
+ ASSERT(pp->drv_ptr->outputv || pp->drv_ptr->output);
+
+#if 1
+ dep->send = (pp->drv_ptr->outputv
+ ? dist_port_commandv
+ : dist_port_command);
+#else
+ dep->send = dist_port_command;
+#endif
+ ASSERT(dep->send);
+
+#ifdef DEBUG
+ erts_smp_spin_lock(&dep->qlock);
+ ASSERT(dep->qsize == 0);
+ erts_smp_spin_unlock(&dep->qlock);
+#endif
+
+ erts_set_dist_entry_connected(dep, BIF_ARG_2, flags);
+
+ if (flags & DFLAG_DIST_HDR_ATOM_CACHE)
+ create_cache(dep);
+
+ erts_smp_de_rwunlock(dep);
+ dep = NULL; /* inc of refc transferred to port (dist_entry field) */
+
+ send_nodes_mon_msgs(BIF_P,
+ am_nodeup,
+ BIF_ARG_1,
+ flags & DFLAG_PUBLISHED ? am_visible : am_hidden,
+ NIL);
+ done:
+
+ if (dep && dep != erts_this_dist_entry) {
+ erts_smp_de_rwunlock(dep);
+ erts_deref_dist_entry(dep);
+ }
+
+ if (pp)
+ erts_smp_port_unlock(pp);
+
+ return ret;
+
+ yield:
+ ERTS_BIF_PREP_YIELD3(ret, bif_export[BIF_setnode_3], BIF_P,
+ BIF_ARG_1, BIF_ARG_2, BIF_ARG_3);
+ goto done;
+
+ badarg:
+ ERTS_BIF_PREP_ERROR(ret, BIF_P, BADARG);
+ goto done;
+
+ system_limit:
+ ERTS_BIF_PREP_ERROR(ret, BIF_P, SYSTEM_LIMIT);
+ goto done;
+}
+
+
+/**********************************************************************/
+/* dist_exit(Local, Term, Remote) -> Bool */
+
+BIF_RETTYPE dist_exit_3(BIF_ALIST_3)
+{
+ Eterm local;
+ Eterm remote;
+ DistEntry *rdep;
+
+ local = BIF_ARG_1;
+ remote = BIF_ARG_3;
+
+ /* Check that remote is a remote process */
+ if (is_not_external_pid(remote))
+ goto error;
+
+ rdep = external_dist_entry(remote);
+
+ if(rdep == erts_this_dist_entry)
+ goto error;
+
+ /* Check that local is local */
+ if (is_internal_pid(local)) {
+ Process *lp;
+ ErtsProcLocks lp_locks;
+ if (BIF_P->id == local) {
+ lp_locks = ERTS_PROC_LOCKS_ALL;
+ lp = BIF_P;
+ erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCKS_ALL_MINOR);
+ }
+ else {
+ lp_locks = ERTS_PROC_LOCKS_XSIG_SEND;
+ lp = erts_pid2proc_opt(BIF_P, ERTS_PROC_LOCK_MAIN,
+ local, lp_locks,
+ ERTS_P2P_FLG_SMP_INC_REFC);
+ if (!lp) {
+ BIF_RET(am_true); /* ignore */
+ }
+ }
+
+ (void) erts_send_exit_signal(BIF_P,
+ remote,
+ lp,
+ &lp_locks,
+ BIF_ARG_2,
+ NIL,
+ NULL,
+ 0);
+#ifdef ERTS_SMP
+ if (lp == BIF_P)
+ lp_locks &= ~ERTS_PROC_LOCK_MAIN;
+#endif
+ erts_smp_proc_unlock(lp, lp_locks);
+ if (lp != BIF_P)
+ erts_smp_proc_dec_refc(lp);
+ else {
+ /*
+ * We may have exited current process and may have to take action.
+ */
+ ERTS_BIF_CHK_EXITED(BIF_P);
+ ERTS_SMP_BIF_CHK_PENDING_EXIT(BIF_P, ERTS_PROC_LOCK_MAIN);
+ }
+ }
+ else if (is_external_pid(local)
+ && external_dist_entry(local) == erts_this_dist_entry) {
+ BIF_RET(am_true); /* ignore */
+ }
+ else
+ goto error;
+ BIF_RET(am_true);
+
+ error:
+ BIF_ERROR(BIF_P, BADARG);
+}
+
+/**********************************************************************/
+/* node(Object) -> Node */
+
+BIF_RETTYPE node_1(BIF_ALIST_1)
+{
+ if (is_not_node_container(BIF_ARG_1))
+ BIF_ERROR(BIF_P, BADARG);
+ BIF_RET(node_container_node_name(BIF_ARG_1));
+}
+
+/**********************************************************************/
+/* node() -> Node */
+
+BIF_RETTYPE node_0(BIF_ALIST_0)
+{
+ BIF_RET(erts_this_dist_entry->sysname);
+}
+
+
+/**********************************************************************/
+/* nodes() -> [ Node ] */
+
+#if 0 /* Done in erlang.erl instead. */
+BIF_RETTYPE nodes_0(BIF_ALIST_0)
+{
+ return nodes_1(BIF_P, am_visible);
+}
+#endif
+
+
+BIF_RETTYPE nodes_1(BIF_ALIST_1)
+{
+ Eterm result;
+ int length;
+ Eterm* hp;
+ int not_connected = 0;
+ int visible = 0;
+ int hidden = 0;
+ int this = 0;
+ Uint buf[2]; /* For one cons-cell */
+ DistEntry *dep;
+ Eterm arg_list = BIF_ARG_1;
+#ifdef DEBUG
+ Eterm* endp;
+#endif
+ if (is_atom(BIF_ARG_1))
+ arg_list = CONS(buf, BIF_ARG_1, NIL);
+
+ while (is_list(arg_list)) {
+ switch(CAR(list_val(arg_list))) {
+ case am_visible: visible = 1; break;
+ case am_hidden: hidden = 1; break;
+ case am_known: visible = hidden = not_connected = this = 1; break;
+ case am_this: this = 1; break;
+ case am_connected: visible = hidden = 1; break;
+ default: BIF_ERROR(BIF_P, BADARG); break;
+ }
+ arg_list = CDR(list_val(arg_list));
+ }
+
+ if (is_not_nil(arg_list))
+ BIF_ERROR(BIF_P, BADARG);
+
+ length = 0;
+
+ erts_smp_rwmtx_rwlock(&erts_dist_table_rwmtx);
+
+ ASSERT(erts_no_of_not_connected_dist_entries >= 0);
+ ASSERT(erts_no_of_hidden_dist_entries >= 0);
+ ASSERT(erts_no_of_visible_dist_entries >= 0);
+ if(not_connected)
+ length += erts_no_of_not_connected_dist_entries;
+ if(hidden)
+ length += erts_no_of_hidden_dist_entries;
+ if(visible)
+ length += erts_no_of_visible_dist_entries;
+ if(this)
+ length++;
+
+ result = NIL;
+
+ if (length == 0) {
+ erts_smp_rwmtx_rwunlock(&erts_dist_table_rwmtx);
+ BIF_RET(result);
+ }
+
+ hp = HAlloc(BIF_P, 2*length);
+
+#ifdef DEBUG
+ endp = hp + length*2;
+#endif
+ if(not_connected)
+ for(dep = erts_not_connected_dist_entries; dep; dep = dep->next) {
+ result = CONS(hp, dep->sysname, result);
+ hp += 2;
+ }
+ if(hidden)
+ for(dep = erts_hidden_dist_entries; dep; dep = dep->next) {
+ result = CONS(hp, dep->sysname, result);
+ hp += 2;
+ }
+ if(visible)
+ for(dep = erts_visible_dist_entries; dep; dep = dep->next) {
+ result = CONS(hp, dep->sysname, result);
+ hp += 2;
+ }
+ if(this) {
+ result = CONS(hp, erts_this_dist_entry->sysname, result);
+ hp += 2;
+ }
+ ASSERT(endp == hp);
+ erts_smp_rwmtx_rwunlock(&erts_dist_table_rwmtx);
+ BIF_RET(result);
+}
+
+/**********************************************************************/
+/* is_alive() -> Bool */
+
+BIF_RETTYPE is_alive_0(BIF_ALIST_0)
+{
+ Eterm res = erts_is_alive ? am_true : am_false;
+ BIF_RET(res);
+}
+
+/**********************************************************************/
+/* erlang:monitor_node(Node, Bool, Options) -> Bool */
+
+BIF_RETTYPE monitor_node_3(BIF_ALIST_3)
+{
+ DistEntry *dep;
+ ErtsLink *lnk;
+ Eterm l;
+
+ for (l = BIF_ARG_3; l != NIL && is_list(l); l = CDR(list_val(l))) {
+ Eterm t = CAR(list_val(l));
+	/* allow_passive_connect is the only available option right now */
+ if (t != am_allow_passive_connect) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+ }
+ if (l != NIL) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+
+ if (is_not_atom(BIF_ARG_1) ||
+ ((BIF_ARG_2 != am_true) && (BIF_ARG_2 != am_false)) ||
+ ((erts_this_node->sysname == am_Noname)
+ && (BIF_ARG_1 != erts_this_node->sysname))) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+ dep = erts_sysname_to_connected_dist_entry(BIF_ARG_1);
+ if (!dep) {
+ do_trap:
+ BIF_TRAP3(dmonitor_node_trap, BIF_P, BIF_ARG_1, BIF_ARG_2, BIF_ARG_3);
+ }
+ if (dep == erts_this_dist_entry)
+ goto done;
+
+ erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_LINK);
+ erts_smp_de_rlock(dep);
+ if (ERTS_DE_IS_NOT_CONNECTED(dep)) {
+ erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_LINK);
+ erts_smp_de_runlock(dep);
+ goto do_trap;
+ }
+ erts_smp_de_links_lock(dep);
+ erts_smp_de_runlock(dep);
+
+ if (BIF_ARG_2 == am_true) {
+ ASSERT(dep->cid != NIL);
+ lnk = erts_add_or_lookup_link(&(dep->node_links), LINK_NODE,
+ BIF_P->id);
+ ++ERTS_LINK_REFC(lnk);
+ lnk = erts_add_or_lookup_link(&(BIF_P->nlinks), LINK_NODE, BIF_ARG_1);
+ ++ERTS_LINK_REFC(lnk);
+ }
+ else {
+ lnk = erts_lookup_link(dep->node_links, BIF_P->id);
+ if (lnk != NULL) {
+ if ((--ERTS_LINK_REFC(lnk)) == 0) {
+ erts_destroy_link(erts_remove_link(&(dep->node_links),
+ BIF_P->id));
+ }
+ }
+ lnk = erts_lookup_link(BIF_P->nlinks, BIF_ARG_1);
+ if (lnk != NULL) {
+ if ((--ERTS_LINK_REFC(lnk)) == 0) {
+ erts_destroy_link(erts_remove_link(&(BIF_P->nlinks),
+ BIF_ARG_1));
+ }
+ }
+ }
+
+ erts_smp_de_links_unlock(dep);
+ erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_LINK);
+
+ done:
+ erts_deref_dist_entry(dep);
+ BIF_RET(am_true);
+}
+
+/* monitor_node(Node, Bool) -> Bool */
+
+BIF_RETTYPE monitor_node_2(BIF_ALIST_2)
+{
+ BIF_RET(monitor_node_3(BIF_P,BIF_ARG_1,BIF_ARG_2,NIL));
+}
+
+BIF_RETTYPE net_kernel_dflag_unicode_io_1(BIF_ALIST_1)
+{
+ DistEntry *de;
+ Uint32 f;
+ if (is_not_pid(BIF_ARG_1)) {
+ BIF_ERROR(BIF_P,BADARG);
+ }
+ de = pid_dist_entry(BIF_ARG_1);
+ ASSERT(de != NULL);
+ if (de == erts_this_dist_entry) {
+ BIF_RET(am_true);
+ }
+ erts_smp_de_rlock(de);
+ f = de->flags;
+ erts_smp_de_runlock(de);
+ BIF_RET(((f & DFLAG_UNICODE_IO) ? am_true : am_false));
+}
+
+/*
+ * The major part of the implementation of net_kernel:monitor_nodes/[1,2]
+ * follows.
+ *
+ * Currently net_kernel:monitor_nodes/[1,2] calls process_flag/2 which in
+ * turn calls erts_monitor_nodes(). If the process_flag() call fails (with
+ * badarg), the code in net_kernel determines what type of error to return.
+ * This is done in order to simplify the task of remaining backward compatible.
+ */
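+
+/*
+ * For reference (derived from the option parsing in erts_monitor_nodes()
+ * below), the option list passed from the Erlang side maps onto the
+ * ERTS_NODES_MON_OPT_* bits as follows:
+ *
+ *    nodedown_reason        -> ERTS_NODES_MON_OPT_DOWN_REASON
+ *    {node_type, visible}   -> ERTS_NODES_MON_OPT_TYPE_VISIBLE
+ *    {node_type, hidden}    -> ERTS_NODES_MON_OPT_TYPE_HIDDEN
+ *    {node_type, all}       -> ERTS_NODES_MON_OPT_TYPES
+ */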
+
+#define ERTS_NODES_MON_OPT_TYPE_VISIBLE (((Uint16) 1) << 0)
+#define ERTS_NODES_MON_OPT_TYPE_HIDDEN (((Uint16) 1) << 1)
+#define ERTS_NODES_MON_OPT_DOWN_REASON (((Uint16) 1) << 2)
+
+#define ERTS_NODES_MON_OPT_TYPES \
+ (ERTS_NODES_MON_OPT_TYPE_VISIBLE|ERTS_NODES_MON_OPT_TYPE_HIDDEN)
+
+typedef struct ErtsNodesMonitor_ ErtsNodesMonitor;
+struct ErtsNodesMonitor_ {
+ ErtsNodesMonitor *prev;
+ ErtsNodesMonitor *next;
+ Process *proc;
+ Uint16 opts;
+ Uint16 no;
+};
+
+static erts_smp_mtx_t nodes_monitors_mtx;
+static ErtsNodesMonitor *nodes_monitors;
+static ErtsNodesMonitor *nodes_monitors_end;
+
+/*
+ * Nodes monitors are stored in a double linked list. 'nodes_monitors'
+ * points to the beginning of the list and 'nodes_monitors_end' points
+ * to the end of the list.
+ *
+ * There might be more than one entry per process in the list. If so,
+ * they are located in sequence. The 'nodes_monitors' field of the
+ * process struct refers to the first element in the sequence
+ * corresponding to the process in question.
+ */
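+
+/*
+ * A minimal traversal sketch (not compiled in); it mirrors the loop in
+ * remove_nodes_monitors() below and uses only names defined in this file.
+ * nodes_monitors_mtx must be held by the caller:
+ */
+#if 0
+static void example_walk_nodes_monitors(Process *p)
+{
+    ErtsNodesMonitor *nmp;
+    /* The sequence for p starts at p->nodes_monitors and ends at the
+       first entry owned by another process (or at the end of the list). */
+    for (nmp = p->nodes_monitors; nmp && nmp->proc == p; nmp = nmp->next) {
+	/* inspect nmp->opts and nmp->no here */
+    }
+}
+#endif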
+
+static void
+init_nodes_monitors(void)
+{
+ erts_smp_mtx_init(&nodes_monitors_mtx, "nodes_monitors");
+ nodes_monitors = NULL;
+ nodes_monitors_end = NULL;
+}
+
+static ERTS_INLINE Uint
+nodes_mon_msg_sz(ErtsNodesMonitor *nmp, Eterm what, Eterm reason)
+{
+ Uint sz;
+ if (!nmp->opts) {
+ sz = 3;
+ }
+ else {
+ sz = 0;
+
+ if (nmp->opts & ERTS_NODES_MON_OPT_TYPES)
+ sz += 2 + 3;
+
+ if (what == am_nodedown
+ && (nmp->opts & ERTS_NODES_MON_OPT_DOWN_REASON)) {
+ if (is_not_immed(reason))
+ sz += size_object(reason);
+ sz += 2 + 3;
+ }
+
+ sz += 4;
+ }
+ return sz;
+}
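+
+/*
+ * The accounting above mirrors the message built in send_nodes_mon_msg()
+ * below: a 2-tuple occupies 3 heap words and a 3-tuple 4 (arity word plus
+ * elements), while each cons cell in the info list occupies 2. E.g. a
+ * {nodedown, Node, [{nodedown_reason, Reason}]} message with an immediate
+ * Reason needs 4 + 2 + 3 = 9 words.
+ */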
+
+static ERTS_INLINE void
+send_nodes_mon_msg(Process *rp,
+ ErtsProcLocks *rp_locksp,
+ ErtsNodesMonitor *nmp,
+ Eterm node,
+ Eterm what,
+ Eterm type,
+ Eterm reason,
+ Uint sz)
+{
+ Eterm msg;
+ ErlHeapFragment* bp;
+ ErlOffHeap *ohp;
+ Eterm *hp = erts_alloc_message_heap(sz, &bp, &ohp, rp, rp_locksp);
+#ifdef DEBUG
+ Eterm *hend = hp + sz;
+#endif
+
+ if (!nmp->opts) {
+ msg = TUPLE2(hp, what, node);
+#ifdef DEBUG
+ hp += 3;
+#endif
+ }
+ else {
+ Eterm tup;
+ Eterm info = NIL;
+
+ if (nmp->opts & (ERTS_NODES_MON_OPT_TYPE_VISIBLE
+ | ERTS_NODES_MON_OPT_TYPE_HIDDEN)) {
+
+ tup = TUPLE2(hp, am_node_type, type);
+ hp += 3;
+ info = CONS(hp, tup, info);
+ hp += 2;
+ }
+
+ if (what == am_nodedown
+ && (nmp->opts & ERTS_NODES_MON_OPT_DOWN_REASON)) {
+ Eterm rsn_cpy;
+
+ if (is_immed(reason))
+ rsn_cpy = reason;
+ else {
+ Eterm rsn_sz = size_object(reason);
+ rsn_cpy = copy_struct(reason, rsn_sz, &hp, ohp);
+ }
+
+ tup = TUPLE2(hp, am_nodedown_reason, rsn_cpy);
+ hp += 3;
+ info = CONS(hp, tup, info);
+ hp += 2;
+ }
+
+ msg = TUPLE3(hp, what, node, info);
+#ifdef DEBUG
+ hp += 4;
+#endif
+ }
+
+ ASSERT(hend == hp);
+ erts_queue_message(rp, rp_locksp, bp, msg, NIL);
+}
+
+static void
+send_nodes_mon_msgs(Process *c_p, Eterm what, Eterm node, Eterm type, Eterm reason)
+{
+ ErtsNodesMonitor *nmp;
+ ErtsProcLocks rp_locks = 0; /* Init to shut up false warning */
+ Process *rp = NULL;
+
+ ASSERT(is_immed(what));
+ ASSERT(is_immed(node));
+ ASSERT(is_immed(type));
+
+ ERTS_SMP_LC_ASSERT(!c_p
+ || (erts_proc_lc_my_proc_locks(c_p)
+ == ERTS_PROC_LOCK_MAIN));
+ erts_smp_mtx_lock(&nodes_monitors_mtx);
+
+ for (nmp = nodes_monitors; nmp; nmp = nmp->next) {
+ int i;
+ Uint16 no;
+ Uint sz;
+
+ ASSERT(nmp->proc != NULL);
+
+ if (!nmp->opts) {
+ if (type != am_visible)
+ continue;
+ }
+ else {
+ switch (type) {
+ case am_hidden:
+ if (!(nmp->opts & ERTS_NODES_MON_OPT_TYPE_HIDDEN))
+ continue;
+ break;
+ case am_visible:
+ if ((nmp->opts & ERTS_NODES_MON_OPT_TYPES)
+ && !(nmp->opts & ERTS_NODES_MON_OPT_TYPE_VISIBLE))
+ continue;
+ break;
+ default:
+ erl_exit(ERTS_ABORT_EXIT, "Bad node type found\n");
+ }
+ }
+
+ if (rp != nmp->proc) {
+ if (rp) {
+ if (rp == c_p)
+ rp_locks &= ~ERTS_PROC_LOCK_MAIN;
+ erts_smp_proc_unlock(rp, rp_locks);
+ }
+
+ rp = nmp->proc;
+ rp_locks = 0;
+ if (rp == c_p)
+ rp_locks |= ERTS_PROC_LOCK_MAIN;
+ }
+
+ ASSERT(rp);
+
+ sz = nodes_mon_msg_sz(nmp, what, reason);
+
+ for (i = 0, no = nmp->no; i < no; i++)
+ send_nodes_mon_msg(rp,
+ &rp_locks,
+ nmp,
+ node,
+ what,
+ type,
+ reason,
+ sz);
+ }
+
+ if (rp) {
+ if (rp == c_p)
+ rp_locks &= ~ERTS_PROC_LOCK_MAIN;
+ erts_smp_proc_unlock(rp, rp_locks);
+ }
+
+ erts_smp_mtx_unlock(&nodes_monitors_mtx);
+}
+
+static Eterm
+insert_nodes_monitor(Process *c_p, Uint32 opts)
+{
+ Uint16 no = 1;
+ Eterm res = am_false;
+ ErtsNodesMonitor *xnmp, *nmp;
+
+ ERTS_SMP_LC_ASSERT(erts_smp_lc_mtx_is_locked(&nodes_monitors_mtx));
+ ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p) & ERTS_PROC_LOCK_MAIN);
+
+ xnmp = c_p->nodes_monitors;
+ if (xnmp) {
+ ASSERT(!xnmp->prev || xnmp->prev->proc != c_p);
+
+ while (1) {
+ ASSERT(xnmp->proc == c_p);
+ if (xnmp->opts == opts)
+ break;
+ if (!xnmp->next || xnmp->next->proc != c_p)
+ break;
+ xnmp = xnmp->next;
+ }
+ ASSERT(xnmp);
+ ASSERT(xnmp->proc == c_p);
+ ASSERT(xnmp->opts == opts
+ || !xnmp->next
+ || xnmp->next->proc != c_p);
+
+ if (xnmp->opts != opts)
+ goto alloc_new;
+ else {
+ res = am_true;
+ no = xnmp->no++;
+ if (!xnmp->no) {
+ /*
+		 * 'no' wrapped; transfer all previous monitors to a new
+ * element (which will be the next element in the list)
+ * and set this to one...
+ */
+ xnmp->no = 1;
+ goto alloc_new;
+ }
+ }
+ }
+ else {
+ alloc_new:
+ nmp = erts_alloc(ERTS_ALC_T_NODES_MON, sizeof(ErtsNodesMonitor));
+ nmp->proc = c_p;
+ nmp->opts = opts;
+ nmp->no = no;
+
+ if (xnmp) {
+ ASSERT(nodes_monitors);
+ ASSERT(c_p->nodes_monitors);
+ nmp->next = xnmp->next;
+ nmp->prev = xnmp;
+ xnmp->next = nmp;
+ if (nmp->next) {
+ ASSERT(nodes_monitors_end != xnmp);
+ ASSERT(nmp->next->prev == xnmp);
+ nmp->next->prev = nmp;
+ }
+ else {
+ ASSERT(nodes_monitors_end == xnmp);
+ nodes_monitors_end = nmp;
+ }
+ }
+ else {
+ ASSERT(!c_p->nodes_monitors);
+ c_p->nodes_monitors = nmp;
+ nmp->next = NULL;
+ nmp->prev = nodes_monitors_end;
+ if (nodes_monitors_end) {
+ ASSERT(nodes_monitors);
+ nodes_monitors_end->next = nmp;
+ }
+ else {
+ ASSERT(!nodes_monitors);
+ nodes_monitors = nmp;
+ }
+ nodes_monitors_end = nmp;
+ }
+ }
+ return res;
+}
+
+static Eterm
+remove_nodes_monitors(Process *c_p, Uint32 opts, int all)
+{
+ Eterm res = am_false;
+ ErtsNodesMonitor *nmp;
+
+ ERTS_SMP_LC_ASSERT(erts_smp_lc_mtx_is_locked(&nodes_monitors_mtx));
+ ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p) & ERTS_PROC_LOCK_MAIN);
+
+ nmp = c_p->nodes_monitors;
+ ASSERT(!nmp || !nmp->prev || nmp->prev->proc != c_p);
+
+ while (nmp && nmp->proc == c_p) {
+ if (!all && nmp->opts != opts)
+ nmp = nmp->next;
+ else { /* if (all || nmp->opts == opts) */
+ ErtsNodesMonitor *free_nmp;
+ res = am_true;
+ if (nmp->prev) {
+ ASSERT(nodes_monitors != nmp);
+ nmp->prev->next = nmp->next;
+ }
+ else {
+ ASSERT(nodes_monitors == nmp);
+ nodes_monitors = nmp->next;
+ }
+ if (nmp->next) {
+ ASSERT(nodes_monitors_end != nmp);
+ nmp->next->prev = nmp->prev;
+ }
+ else {
+ ASSERT(nodes_monitors_end == nmp);
+ nodes_monitors_end = nmp->prev;
+ }
+ free_nmp = nmp;
+ nmp = nmp->next;
+ if (c_p->nodes_monitors == free_nmp)
+ c_p->nodes_monitors = nmp && nmp->proc == c_p ? nmp : NULL;
+ erts_free(ERTS_ALC_T_NODES_MON, free_nmp);
+ }
+ }
+
+ ASSERT(!all || !c_p->nodes_monitors);
+ return res;
+}
+
+void
+erts_delete_nodes_monitors(Process *c_p, ErtsProcLocks locks)
+{
+#if defined(ERTS_ENABLE_LOCK_CHECK) && defined(ERTS_SMP)
+ if (c_p) {
+ ErtsProcLocks might_unlock = locks & ~ERTS_PROC_LOCK_MAIN;
+ if (might_unlock)
+ erts_proc_lc_might_unlock(c_p, might_unlock);
+ }
+#endif
+ if (erts_smp_mtx_trylock(&nodes_monitors_mtx) == EBUSY) {
+ ErtsProcLocks unlock_locks = locks & ~ERTS_PROC_LOCK_MAIN;
+ if (c_p && unlock_locks)
+ erts_smp_proc_unlock(c_p, unlock_locks);
+ erts_smp_mtx_lock(&nodes_monitors_mtx);
+ if (c_p && unlock_locks)
+ erts_smp_proc_lock(c_p, unlock_locks);
+ }
+ remove_nodes_monitors(c_p, 0, 1);
+ erts_smp_mtx_unlock(&nodes_monitors_mtx);
+}
+
+Eterm
+erts_monitor_nodes(Process *c_p, Eterm on, Eterm olist)
+{
+ Eterm res;
+ Eterm opts_list = olist;
+ Uint16 opts = (Uint16) 0;
+
+ ASSERT(c_p);
+ ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p) == ERTS_PROC_LOCK_MAIN);
+
+ if (on != am_true && on != am_false)
+ return THE_NON_VALUE;
+
+ if (is_not_nil(opts_list)) {
+ int all = 0, visible = 0, hidden = 0;
+
+ while (is_list(opts_list)) {
+ Eterm *cp = list_val(opts_list);
+ Eterm opt = CAR(cp);
+ opts_list = CDR(cp);
+ if (opt == am_nodedown_reason)
+ opts |= ERTS_NODES_MON_OPT_DOWN_REASON;
+ else if (is_tuple(opt)) {
+ Eterm* tp = tuple_val(opt);
+ if (arityval(tp[0]) != 2)
+ return THE_NON_VALUE;
+ switch (tp[1]) {
+ case am_node_type:
+ switch (tp[2]) {
+ case am_visible:
+ if (hidden || all)
+ return THE_NON_VALUE;
+ opts |= ERTS_NODES_MON_OPT_TYPE_VISIBLE;
+ visible = 1;
+ break;
+ case am_hidden:
+ if (visible || all)
+ return THE_NON_VALUE;
+ opts |= ERTS_NODES_MON_OPT_TYPE_HIDDEN;
+ hidden = 1;
+ break;
+ case am_all:
+ if (visible || hidden)
+ return THE_NON_VALUE;
+ opts |= ERTS_NODES_MON_OPT_TYPES;
+ all = 1;
+ break;
+ default:
+ return THE_NON_VALUE;
+ }
+ break;
+ default:
+ return THE_NON_VALUE;
+ }
+ }
+ else {
+ return THE_NON_VALUE;
+ }
+ }
+
+ if (is_not_nil(opts_list))
+ return THE_NON_VALUE;
+ }
+
+ erts_smp_mtx_lock(&nodes_monitors_mtx);
+
+ if (on == am_true)
+ res = insert_nodes_monitor(c_p, opts);
+ else
+ res = remove_nodes_monitors(c_p, opts, 0);
+
+ erts_smp_mtx_unlock(&nodes_monitors_mtx);
+
+ return res;
+}
+
+/*
+ * Note: this function is only used for debugging.
+ */
+
+Eterm
+erts_processes_monitoring_nodes(Process *c_p)
+{
+ ErtsNodesMonitor *nmp;
+ Eterm res;
+ Eterm *hp;
+ Eterm **hpp;
+ Uint sz;
+ Uint *szp;
+#ifdef DEBUG
+ Eterm *hend;
+#endif
+
+ ASSERT(c_p);
+ ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p) == ERTS_PROC_LOCK_MAIN);
+ erts_smp_mtx_lock(&nodes_monitors_mtx);
+
+ sz = 0;
+ szp = &sz;
+ hpp = NULL;
+
+ bld_result:
+ res = NIL;
+
+ for (nmp = nodes_monitors_end; nmp; nmp = nmp->prev) {
+ Uint16 i;
+ for (i = 0; i < nmp->no; i++) {
+ Eterm olist = NIL;
+ if (nmp->opts & ERTS_NODES_MON_OPT_TYPES) {
+ Eterm type;
+ switch (nmp->opts & ERTS_NODES_MON_OPT_TYPES) {
+ case ERTS_NODES_MON_OPT_TYPES: type = am_all; break;
+ case ERTS_NODES_MON_OPT_TYPE_VISIBLE: type = am_visible; break;
+ case ERTS_NODES_MON_OPT_TYPE_HIDDEN: type = am_hidden; break;
+ default: erl_exit(ERTS_ABORT_EXIT, "Bad node type found\n");
+ }
+ olist = erts_bld_cons(hpp, szp,
+ erts_bld_tuple(hpp, szp, 2,
+ am_node_type,
+ type),
+ olist);
+ }
+ if (nmp->opts & ERTS_NODES_MON_OPT_DOWN_REASON)
+ olist = erts_bld_cons(hpp, szp, am_nodedown_reason, olist);
+ res = erts_bld_cons(hpp, szp,
+ erts_bld_tuple(hpp, szp, 2,
+ nmp->proc->id,
+ olist),
+ res);
+ }
+ }
+
+ if (!hpp) {
+ hp = HAlloc(c_p, sz);
+#ifdef DEBUG
+ hend = hp + sz;
+#endif
+ hpp = &hp;
+ szp = NULL;
+ goto bld_result;
+ }
+
+ ASSERT(hp == hend);
+
+ erts_smp_mtx_unlock(&nodes_monitors_mtx);
+
+ return res;
+}
diff --git a/erts/emulator/beam/dist.h b/erts/emulator/beam/dist.h
new file mode 100644
index 0000000000..ea1abcaeed
--- /dev/null
+++ b/erts/emulator/beam/dist.h
@@ -0,0 +1,290 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 1996-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+#ifndef __DIST_H__
+#define __DIST_H__
+
+#include "erl_process.h"
+#include "erl_node_tables.h"
+
+#define DFLAG_PUBLISHED 0x01
+#define DFLAG_ATOM_CACHE 0x02
+#define DFLAG_EXTENDED_REFERENCES 0x04
+#define DFLAG_DIST_MONITOR 0x08
+#define DFLAG_FUN_TAGS 0x10
+#define DFLAG_DIST_MONITOR_NAME 0x20
+#define DFLAG_HIDDEN_ATOM_CACHE 0x40
+#define DFLAG_NEW_FUN_TAGS 0x80
+#define DFLAG_EXTENDED_PIDS_PORTS 0x100
+#define DFLAG_EXPORT_PTR_TAG 0x200
+#define DFLAG_BIT_BINARIES 0x400
+#define DFLAG_NEW_FLOATS 0x800
+#define DFLAG_UNICODE_IO 0x1000
+#define DFLAG_DIST_HDR_ATOM_CACHE 0x2000
+#define DFLAG_SMALL_ATOM_TAGS 0x4000
+
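+/*
+ * A connection's capabilities are tested by masking these bits against
+ * the flags field of its DistEntry; a sketch of the pattern (as used by,
+ * for instance, net_kernel_dflag_unicode_io_1() in dist.c, with the
+ * entry read-locked):
+ */
+#if 0
+    if (dep->flags & DFLAG_DIST_HDR_ATOM_CACHE) {
+	/* the peer supports the distribution header atom cache */
+    }
+#endif
+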
+/* All flags that should be enabled when term_to_binary/1 is used. */
+#define TERM_TO_BINARY_DFLAGS (DFLAG_EXTENDED_REFERENCES \
+ | DFLAG_NEW_FUN_TAGS \
+ | DFLAG_EXTENDED_PIDS_PORTS \
+ | DFLAG_EXPORT_PTR_TAG \
+ | DFLAG_BIT_BINARIES)
+
+/* opcodes used in distribution messages */
+#define DOP_LINK 1
+#define DOP_SEND 2
+#define DOP_EXIT 3
+#define DOP_UNLINK 4
+#define DOP_NODE_LINK 5
+#define DOP_REG_SEND 6
+#define DOP_GROUP_LEADER 7
+#define DOP_EXIT2 8
+
+#define DOP_SEND_TT 12
+#define DOP_EXIT_TT 13
+#define DOP_REG_SEND_TT 16
+#define DOP_EXIT2_TT 18
+
+#define DOP_MONITOR_P 19
+#define DOP_DEMONITOR_P 20
+#define DOP_MONITOR_P_EXIT 21
+
+/* distribution trap functions */
+extern Export* dsend2_trap;
+extern Export* dsend3_trap;
+/*extern Export* dsend_nosuspend_trap;*/
+extern Export* dlink_trap;
+extern Export* dunlink_trap;
+extern Export* dmonitor_node_trap;
+extern Export* dgroup_leader_trap;
+extern Export* dexit_trap;
+extern Export* dmonitor_p_trap;
+
+typedef enum {
+ ERTS_DSP_NO_LOCK,
+ ERTS_DSP_RLOCK,
+ ERTS_DSP_RWLOCK
+} ErtsDSigPrepLock;
+
+
+typedef struct {
+ Process *proc;
+ DistEntry *dep;
+ Eterm cid;
+ Eterm connection_id;
+ int no_suspend;
+} ErtsDSigData;
+
+#define ERTS_DE_IS_NOT_CONNECTED(DEP) \
+ (ERTS_SMP_LC_ASSERT(erts_lc_rwmtx_is_rlocked(&(DEP)->rwmtx) \
+ || erts_lc_rwmtx_is_rwlocked(&(DEP)->rwmtx)), \
+ (is_nil((DEP)->cid) || ((DEP)->status & ERTS_DE_SFLG_EXITING)))
+
+#define ERTS_DE_IS_CONNECTED(DEP) \
+ (!ERTS_DE_IS_NOT_CONNECTED((DEP)))
+
+
+extern int erts_is_alive;
+
+/*
+ * erts_dsig_prepare() prepares the send of a distributed signal.
+ * One of the values defined below is returned. If the returned
+ * value is anything other than ERTS_DSIG_PREP_CONNECTED, the
+ * distributed signal cannot be sent before appropriate actions
+ * have been taken. Appropriate actions would typically be setting
+ * up the connection.
+ */
+
+/* Connected; signal can be sent. */
+#define ERTS_DSIG_PREP_CONNECTED 0
+/* Not connected; connection needs to be set up. */
+#define ERTS_DSIG_PREP_NOT_CONNECTED 1
+/* Caller would be suspended on send operation. */
+#define ERTS_DSIG_PREP_WOULD_SUSPEND 2
+/* System not alive (distributed) */
+#define ERTS_DSIG_PREP_NOT_ALIVE 3
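+
+/*
+ * A minimal usage sketch (hypothetical caller, not part of this header);
+ * erts_dsig_prepare() and the erts_dsig_send_*() functions declared
+ * further down are the pieces actually provided:
+ */
+#if 0
+static int example_send(Process *p, DistEntry *dep, Eterm to, Eterm msg)
+{
+    ErtsDSigData dsd;
+    switch (erts_dsig_prepare(&dsd, dep, p, ERTS_DSP_NO_LOCK, 0)) {
+    case ERTS_DSIG_PREP_CONNECTED:
+	/* returns ERTS_DSIG_SEND_OK or ERTS_DSIG_SEND_YIELD */
+	return erts_dsig_send_msg(&dsd, to, msg);
+    case ERTS_DSIG_PREP_NOT_CONNECTED:
+	/* set up the connection (typically via a trap) and retry */
+    default:
+	return -1;
+    }
+}
+#endif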
+
+ERTS_GLB_INLINE int erts_dsig_prepare(ErtsDSigData *,
+ DistEntry *,
+ Process *,
+ ErtsDSigPrepLock,
+ int);
+
+ERTS_GLB_INLINE
+void erts_schedule_dist_command(Port *, DistEntry *);
+
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+
+ERTS_GLB_INLINE int
+erts_dsig_prepare(ErtsDSigData *dsdp,
+ DistEntry *dep,
+ Process *proc,
+ ErtsDSigPrepLock dspl,
+ int no_suspend)
+{
+ int failure;
+ if (!erts_is_alive)
+ return ERTS_DSIG_PREP_NOT_ALIVE;
+ if (!dep)
+ return ERTS_DSIG_PREP_NOT_CONNECTED;
+ if (dspl == ERTS_DSP_RWLOCK)
+ erts_smp_de_rwlock(dep);
+ else
+ erts_smp_de_rlock(dep);
+ if (ERTS_DE_IS_NOT_CONNECTED(dep)) {
+ failure = ERTS_DSIG_PREP_NOT_CONNECTED;
+ goto fail;
+ }
+ if (no_suspend) {
+ failure = ERTS_DSIG_PREP_CONNECTED;
+ erts_smp_spin_lock(&dep->qlock);
+ if (dep->qflgs & ERTS_DE_QFLG_BUSY)
+ failure = ERTS_DSIG_PREP_WOULD_SUSPEND;
+ erts_smp_spin_unlock(&dep->qlock);
+ if (failure == ERTS_DSIG_PREP_WOULD_SUSPEND)
+ goto fail;
+ }
+ dsdp->proc = proc;
+ dsdp->dep = dep;
+ dsdp->cid = dep->cid;
+ dsdp->connection_id = dep->connection_id;
+ dsdp->no_suspend = no_suspend;
+ if (dspl == ERTS_DSP_NO_LOCK)
+ erts_smp_de_runlock(dep);
+ return ERTS_DSIG_PREP_CONNECTED;
+
+ fail:
+ if (dspl == ERTS_DSP_RWLOCK)
+ erts_smp_de_rwunlock(dep);
+ else
+ erts_smp_de_runlock(dep);
+ return failure;
+
+}
+
+ERTS_GLB_INLINE
+void erts_schedule_dist_command(Port *prt, DistEntry *dist_entry)
+{
+ DistEntry *dep;
+ Eterm id;
+
+ if (prt) {
+ ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
+ ASSERT((erts_port_status_get(prt) & ERTS_PORT_SFLGS_DEAD) == 0);
+ ASSERT(prt->dist_entry);
+
+ dep = prt->dist_entry;
+ id = prt->id;
+ }
+ else {
+ ASSERT(dist_entry);
+ ERTS_SMP_LC_ASSERT(erts_lc_rwmtx_is_rlocked(&dist_entry->rwmtx)
+ || erts_lc_rwmtx_is_rwlocked(&dist_entry->rwmtx));
+ ASSERT(is_internal_port(dist_entry->cid));
+
+ dep = dist_entry;
+ id = dep->cid;
+ }
+
+ if (!erts_smp_atomic_xchg(&dep->dist_cmd_scheduled, 1)) {
+ (void) erts_port_task_schedule(id,
+ &dep->dist_cmd,
+ ERTS_PORT_TASK_DIST_CMD,
+ (ErlDrvEvent) -1,
+ NULL);
+ }
+}
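+
+/*
+ * Note on the xchg above: dist_cmd_scheduled acts as a once-flag. Only
+ * the caller that flips it from 0 to 1 schedules the port task, so
+ * concurrent callers cannot enqueue the dist command twice; the flag is
+ * presumably cleared again once the scheduled command has executed.
+ */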
+
+#endif
+
+typedef struct {
+ ErtsLink *d_lnk;
+ ErtsLink *d_sub_lnk;
+} ErtsDistLinkData;
+
+ERTS_GLB_INLINE void erts_remove_dist_link(ErtsDistLinkData *,
+ Eterm,
+ Eterm,
+ DistEntry *);
+ERTS_GLB_INLINE int erts_was_dist_link_removed(ErtsDistLinkData *);
+ERTS_GLB_INLINE void erts_destroy_dist_link(ErtsDistLinkData *);
+
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+
+ERTS_GLB_INLINE void
+erts_remove_dist_link(ErtsDistLinkData *dldp,
+ Eterm lid,
+ Eterm rid,
+ DistEntry *dep)
+{
+ erts_smp_de_links_lock(dep);
+ dldp->d_lnk = erts_lookup_link(dep->nlinks, lid);
+ if (!dldp->d_lnk)
+ dldp->d_sub_lnk = NULL;
+ else {
+ dldp->d_sub_lnk = erts_remove_link(&ERTS_LINK_ROOT(dldp->d_lnk), rid);
+ dldp->d_lnk = (ERTS_LINK_ROOT(dldp->d_lnk)
+ ? NULL
+ : erts_remove_link(&dep->nlinks, lid));
+ }
+ erts_smp_de_links_unlock(dep);
+}
+
+ERTS_GLB_INLINE int
+erts_was_dist_link_removed(ErtsDistLinkData *dldp)
+{
+ return dldp->d_sub_lnk != NULL;
+}
+
+ERTS_GLB_INLINE void
+erts_destroy_dist_link(ErtsDistLinkData *dldp)
+{
+ if (dldp->d_lnk)
+ erts_destroy_link(dldp->d_lnk);
+ if (dldp->d_sub_lnk)
+ erts_destroy_link(dldp->d_sub_lnk);
+}
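+
+/*
+ * As the code above suggests, distribution links are kept in two levels:
+ * dep->nlinks is keyed on the local id, and each entry holds, via
+ * ERTS_LINK_ROOT(), the set of remote ids linked to it. A remote id is
+ * removed first, and the local entry is unlinked only when its root set
+ * thereby becomes empty.
+ */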
+
+#endif
+
+/*
+ * erts_dsig_send_* return values.
+ */
+#define ERTS_DSIG_SEND_OK 0
+#define ERTS_DSIG_SEND_YIELD 1
+
+extern int erts_dsig_send_link(ErtsDSigData *, Eterm, Eterm);
+extern int erts_dsig_send_msg(ErtsDSigData *, Eterm, Eterm);
+extern int erts_dsig_send_exit_tt(ErtsDSigData *, Eterm, Eterm, Eterm, Eterm);
+extern int erts_dsig_send_unlink(ErtsDSigData *, Eterm, Eterm);
+extern int erts_dsig_send_reg_msg(ErtsDSigData *, Eterm, Eterm);
+extern int erts_dsig_send_group_leader(ErtsDSigData *, Eterm, Eterm);
+extern int erts_dsig_send_exit(ErtsDSigData *, Eterm, Eterm, Eterm);
+extern int erts_dsig_send_exit2(ErtsDSigData *, Eterm, Eterm, Eterm);
+extern int erts_dsig_send_demonitor(ErtsDSigData *, Eterm, Eterm, Eterm, int);
+extern int erts_dsig_send_monitor(ErtsDSigData *, Eterm, Eterm, Eterm);
+extern int erts_dsig_send_m_exit(ErtsDSigData *, Eterm, Eterm, Eterm, Eterm);
+
+extern int erts_dist_command(Port *prt, int reds);
+extern void erts_dist_port_not_busy(Port *prt);
+extern void erts_kill_dist_connection(DistEntry *dep, Uint32);
+
+extern Uint erts_dist_cache_size(void);
+
+#endif
diff --git a/erts/emulator/beam/elib_malloc.c b/erts/emulator/beam/elib_malloc.c
new file mode 100644
index 0000000000..b18c48d8d6
--- /dev/null
+++ b/erts/emulator/beam/elib_malloc.c
@@ -0,0 +1,2334 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 1997-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+** Description: Faster malloc().
+*/
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+
+#include "sys.h"
+
+#ifdef ENABLE_ELIB_MALLOC
+
+#undef THREAD_SAFE_ELIB_MALLOC
+#ifdef USE_THREADS
+#define THREAD_SAFE_ELIB_MALLOC 1
+#else
+#define THREAD_SAFE_ELIB_MALLOC 0
+#endif
+
+#include "erl_driver.h"
+#include "erl_threads.h"
+#include "elib_stat.h"
+#include <stdio.h>
+#include <stdlib.h>
+
+/* To avoid clobbering of names because of reclaim on VxWorks,
+   we undefine all possible malloc, calloc etc. */
+#undef malloc
+#undef calloc
+#undef free
+#undef realloc
+
+#define ELIB_INLINE /* inline all possible functions */
+
+#ifndef ELIB_ALIGN
+#define ELIB_ALIGN sizeof(double)
+#endif
+
+#ifndef ELIB_HEAP_SIZE
+#define ELIB_HEAP_SIZE (64*1024) /* Default 64K */
+#endif
+
+#ifndef ELIB_HEAP_INCREAMENT
+#define ELIB_HEAP_INCREAMENT (32*1024) /* Default 32K */
+#endif
+
+#ifndef ELIB_FAILURE
+#define ELIB_FAILURE abort()
+#endif
+
+#undef ASSERT
+#ifdef DEBUG
+#define ASSERT(B) \
+ ((void) ((B) ? 1 : (fprintf(stderr, "%s:%d: Assertion failed: %s\n", \
+ __FILE__, __LINE__, #B), abort(), 0)))
+#else
+#define ASSERT(B) ((void) 1)
+#endif
+
+#ifndef USE_RECURSIVE_MALLOC_MUTEX
+#define USE_RECURSIVE_MALLOC_MUTEX 0
+#endif
+
+#if USE_RECURSIVE_MALLOC_MUTEX
+static erts_mtx_t malloc_mutex = ERTS_REC_MTX_INITER;
+#else /* #if USE_RECURSIVE_MALLOC_MUTEX */
+static erts_mtx_t malloc_mutex = ERTS_MTX_INITER;
+#if THREAD_SAFE_ELIB_MALLOC
+static erts_cnd_t malloc_cond = ERTS_CND_INITER;
+#endif
+#endif /* #if USE_RECURSIVE_MALLOC_MUTEX */
+
+typedef unsigned long EWord; /* Assume 32-bit in this implementation */
+typedef unsigned short EHalfWord; /* Assume 16-bit in this implementation */
+typedef unsigned char EByte; /* Assume 8-bit byte */
+
+
+#define elib_printf fprintf
+#define elib_putc fputc
+
+
+#if defined(__STDC__) || defined(__WIN32__)
+#define CONCAT(x,y) x##y
+#else
+#define CONCAT(x,y) x/**/y
+#endif
+
+
+#ifdef ELIB_DEBUG
+#define ELIB_PREFIX(fun, args) CONCAT(elib__,fun) args
+#else
+#define ELIB_PREFIX(fun, args) CONCAT(elib_,fun) args
+#endif
+
+#if defined(__STDC__)
+void *ELIB_PREFIX(malloc, (size_t));
+void *ELIB_PREFIX(calloc, (size_t, size_t));
+void ELIB_PREFIX(cfree, (EWord *));
+void ELIB_PREFIX(free, (EWord *));
+void *ELIB_PREFIX(realloc, (EWord *, size_t));
+void* ELIB_PREFIX(memresize, (EWord *, int));
+void* ELIB_PREFIX(memalign, (int, int));
+void* ELIB_PREFIX(valloc, (int));
+void* ELIB_PREFIX(pvalloc, (int));
+int ELIB_PREFIX(memsize, (EWord *));
+/* Extern interfaces used by VxWorks */
+size_t elib_sizeof(void *);
+void elib_init(EWord *, EWord);
+void elib_force_init(EWord *, EWord);
+#endif
+
+#if defined(__STDC__)
+/* define prototypes for missing */
+void* memalign(size_t a, size_t s);
+void* pvalloc(size_t nb);
+void* memresize(void *p, int nb);
+int memsize(void *p);
+#endif
+
+/* bytes to pages */
+#define PAGES(x) (((x)+page_size-1) / page_size)
+#define PAGE_ALIGN(p) ((char*)((((EWord)(p))+page_size-1)&~(page_size-1)))
+
+/* bytes to words */
+#define WORDS(x) (((x)+sizeof(EWord)-1) / sizeof(EWord))
+
+/* Align an address */
+#define ALIGN(p) ((EWord*)((((EWord)(p)+ELIB_ALIGN-1)&~(ELIB_ALIGN-1))))
+
+/* Calculate the size needed to keep alignment */
+
+#define ALIGN_BSZ(nb) ((nb+sizeof(EWord)+ELIB_ALIGN-1) & ~(ELIB_ALIGN-1))
+
+#define ALIGN_WSZ(nb) WORDS(ALIGN_BSZ(nb))
+
+#define ALIGN_SIZE(nb) (ALIGN_WSZ(nb) - 1)
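+
+/*
+ * Worked example (assuming a 32-bit EWord and ELIB_ALIGN == 8, i.e.
+ * sizeof(double)): for nb == 10, ALIGN_BSZ(10) = (10+4+7) & ~7 = 16
+ * bytes, ALIGN_WSZ(10) = 4 words, and ALIGN_SIZE(10) = 3 payload words;
+ * the fourth word of the aligned size is the block header.
+ */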
+
+
+/* PARAMETERS */
+
+#if defined(ELIB_HEAP_SBRK)
+
+#undef PAGE_SIZE
+
+/* Get the system page size (NEED MORE DEFINES HERE) */
+#ifdef _SC_PAGESIZE
+#define PAGE_SIZE sysconf(_SC_PAGESIZE)
+#elif defined(_MSC_VER)
+# ifdef _M_ALPHA
+# define PAGE_SIZE 0x2000
+# else
+# define PAGE_SIZE 0x1000
+# endif
+#else
+#define PAGE_SIZE getpagesize()
+#endif
+
+#define ELIB_EXPAND(need) expand_sbrk(need)
+static FUNCTION(int, expand_sbrk, (EWord));
+
+#elif defined(ELIB_HEAP_FIXED)
+
+#define PAGE_SIZE 1024
+#define ELIB_EXPAND(need) -1
+static EWord fix_heap[WORDS(ELIB_HEAP_SIZE)];
+
+#elif defined(ELIB_HEAP_USER)
+
+#define PAGE_SIZE 1024
+#define ELIB_EXPAND(need) -1
+
+#else
+
+#error "ELIB HEAP TYPE NOT SET"
+
+#endif
+
+
+#define STAT_ALLOCED_BLOCK(SZ) \
+do { \
+ tot_allocated += (SZ); \
+ if (max_allocated < tot_allocated) \
+ max_allocated = tot_allocated; \
+} while (0)
+
+#define STAT_FREED_BLOCK(SZ) \
+do { \
+ tot_allocated -= (SZ); \
+} while (0)
+
+static int max_allocated = 0;
+static int tot_allocated = 0;
+static EWord* eheap; /* Align heap start */
+static EWord* eheap_top; /* Point to end of heap */
+EWord page_size = 0; /* Set by elib_init */
+
+#if defined(ELIB_DEBUG) || defined(DEBUG)
+#define ALIGN_CHECK(a, p) \
+ do { \
+ if ((EWord)(p) & (a-1)) { \
+ elib_printf(stderr, \
+ "RUNTIME ERROR: bad alignment (0x%lx:%d:%d)\n", \
+ (unsigned long) (p), (int) a, __LINE__); \
+ ELIB_FAILURE; \
+ } \
+ } while(0)
+#define ELIB_ALIGN_CHECK(p) ALIGN_CHECK(ELIB_ALIGN, p)
+#else
+#define ALIGN_CHECK(a, p)
+#define ELIB_ALIGN_CHECK(p)
+#endif
+
+#define DYNAMIC 32
+
+/*
+** Free block layout
+** 1 1 30
+** +--------------------------+
+** |F|P| Size |
+** +--------------------------+
+**
+** Where F is the free bit
+** P is the free above bit
+** Size is measured in words and does not include the hdr word
+**
+** If block is on the free list the size is also stored last in the block.
+**
+*/
+typedef struct _free_block FreeBlock;
+struct _free_block {
+ EWord hdr;
+ Uint flags;
+ FreeBlock* parent;
+ FreeBlock* left;
+ FreeBlock* right;
+ EWord v[1];
+};
+
+typedef struct _allocated_block {
+ EWord hdr;
+ EWord v[5];
+} AllocatedBlock;
+
+
+/*
+ * Interface to tree routines.
+ */
+typedef Uint Block_t;
+
+static Block_t* get_free_block(Uint);
+static void link_free_block(Block_t *);
+static void unlink_free_block(Block_t *del);
+
+#define FREE_BIT 0x80000000
+#define FREE_ABOVE_BIT 0x40000000
+#define SIZE_MASK 0x3fffffff /* 2^30 words = 2^32 bytes */
+
+/* Work on both FreeBlock and AllocatedBlock */
+#define SIZEOF(p) ((p)->hdr & SIZE_MASK)
+#define IS_FREE(p) (((p)->hdr & FREE_BIT) != 0)
+#define IS_FREE_ABOVE(p) (((p)->hdr & FREE_ABOVE_BIT) != 0)
+
+/* Given that we have a free block above, find its size */
+#define SIZEOF_ABOVE(p) *(((EWord*) (p)) - 1)
+
+#define MIN_BLOCK_SIZE (sizeof(FreeBlock)/sizeof(EWord))
+#define MIN_WORD_SIZE (MIN_BLOCK_SIZE-1)
+#define MIN_BYTE_SIZE (sizeof(FreeBlock)-sizeof(EWord))
+
+#define MIN_ALIGN_SIZE ALIGN_SIZE(MIN_BYTE_SIZE)
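+
+/*
+ * Decoding a block header with the macros above; a sketch (not compiled
+ * in) using only names defined in this file:
+ */
+#if 0
+static void example_describe_block(AllocatedBlock* p)
+{
+    EWord sz = SIZEOF(p);	/* payload size in words */
+    if (IS_FREE(p))
+	elib_printf(stderr, "free block, %lu words\n", (unsigned long) sz);
+    else
+	elib_printf(stderr, "used block, %lu words%s\n",
+		    (unsigned long) sz,
+		    IS_FREE_ABOVE(p) ? " (free above)" : "");
+}
+#endif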
+
+
+static AllocatedBlock* heap_head = 0;
+static AllocatedBlock* heap_tail = 0;
+static EWord eheap_size = 0;
+
+static int heap_locked;
+
+static int elib_need_init = 1;
+#if THREAD_SAFE_ELIB_MALLOC
+static int elib_is_initing = 0;
+#endif
+
+typedef FreeBlock RBTree_t;
+
+static RBTree_t* root = NULL;
+
+
+static FUNCTION(void, deallocate, (AllocatedBlock*, int));
+
+/*
+ * Unlink a free block
+ */
+
+#define mark_allocated(p, szp) do { \
+ (p)->hdr = ((p)->hdr & FREE_ABOVE_BIT) | (szp); \
+ (p)->v[szp] &= ~FREE_ABOVE_BIT; \
+ } while(0)
+
+#define mark_free(p, szp) do { \
+ (p)->hdr = FREE_BIT | (szp); \
+ ((FreeBlock *)p)->v[szp-sizeof(FreeBlock)/sizeof(EWord)+1] = (szp); \
+ } while(0)
+
+#if 0
+/* Helper macros for log2 */
+#define LOG_1(x) (((x) > 1) ? 1 : 0)
+#define LOG_2(x) (((x) > 3) ? 2+LOG_1((x) >> 2) : LOG_1(x))
+#define LOG_4(x) (((x) > 15) ? 4+LOG_2((x) >> 4) : LOG_2(x))
+#define LOG_8(x) (((x) > 255) ? 8+LOG_4((x)>>8) : LOG_4(x))
+#define LOG_16(x) (((x) > 65535) ? 16+LOG_8((x)>>16) : LOG_8(x))
+
+#define log2(x) LOG_16(x)
+#endif
+
+/*
+ * Split a block to be allocated.
+ * Mark block as ALLOCATED and clear
+ * FREE_ABOVE_BIT on next block
+ *
+ * nw is SIZE aligned and szp is SIZE aligned + 1
+ */
+static void
+split_block(FreeBlock* p, EWord nw, EWord szp)
+{
+ EWord szq;
+ FreeBlock* q;
+
+ szq = szp - nw;
+ /* Preserve FREE_ABOVE bit in p->hdr !!! */
+
+ if (szq >= MIN_ALIGN_SIZE+1) {
+ szq--;
+ p->hdr = (p->hdr & FREE_ABOVE_BIT) | nw;
+
+ q = (FreeBlock*) (((EWord*) p) + nw + 1);
+ mark_free(q, szq);
+ link_free_block((Block_t *) q);
+
+ q = (FreeBlock*) (((EWord*) q) + szq + 1);
+ q->hdr |= FREE_ABOVE_BIT;
+ }
+ else {
+ mark_allocated((AllocatedBlock*)p, szp);
+ }
+}
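+
+/*
+ * Example (32-bit words assumed): splitting a 20-word free block for an
+ * 8-word allocation leaves szq == 12 words; one of them becomes the new
+ * block header, so an 11-word free block is linked back into the tree.
+ */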
+
+/*
+ * Find a free block
+ */
+static FreeBlock*
+alloc_block(EWord nw)
+{
+ for (;;) {
+ FreeBlock* p = (FreeBlock *) get_free_block(nw);
+
+ if (p != NULL) {
+ return p;
+ } else if (ELIB_EXPAND(nw+MIN_WORD_SIZE)) {
+ return 0;
+ }
+ }
+}
+
+
+size_t elib_sizeof(void *p)
+{
+ AllocatedBlock* pp;
+
+ if (p != 0) {
+ pp = (AllocatedBlock*) (((char *)p)-1);
+ return SIZEOF(pp);
+ }
+ return 0;
+}
+
+static void locked_elib_init(EWord*, EWord);
+static void init_elib_malloc(EWord*, EWord);
+
+/*
+** Initialize the elib
+** The addr and sz are only used when compiled with EXPAND_ADDR
+*/
+/* Not static, this is used by VxWorks */
+void elib_init(EWord* addr, EWord sz)
+{
+ if (!elib_need_init)
+ return;
+ erts_mtx_lock(&malloc_mutex);
+ locked_elib_init(addr, sz);
+ erts_mtx_unlock(&malloc_mutex);
+}
+
+static void locked_elib_init(EWord* addr, EWord sz)
+{
+ if (!elib_need_init)
+ return;
+
+#if THREAD_SAFE_ELIB_MALLOC
+
+#if !USE_RECURSIVE_MALLOC_MUTEX
+ {
+ static erts_tid_t initer_tid;
+
+ if(elib_is_initing) {
+
+ if(erts_equal_tids(initer_tid, erts_thr_self()))
+ return;
+
+ /* Wait until initializing thread is done with initialization */
+
+ while(elib_need_init)
+ erts_cnd_wait(&malloc_cond, &malloc_mutex);
+
+ return;
+ }
+ else {
+ initer_tid = erts_thr_self();
+ elib_is_initing = 1;
+ }
+ }
+#else
+ if(elib_is_initing)
+ return;
+ elib_is_initing = 1;
+#endif
+
+#endif /* #if THREAD_SAFE_ELIB_MALLOC */
+
+ /* Do the actual initialization of the malloc implementation */
+ init_elib_malloc(addr, sz);
+
+#if THREAD_SAFE_ELIB_MALLOC
+
+#if !USE_RECURSIVE_MALLOC_MUTEX
+ erts_mtx_unlock(&malloc_mutex);
+#endif
+
+ /* Recursive calls to malloc are allowed here... */
+ erts_mtx_set_forksafe(&malloc_mutex);
+
+#if !USE_RECURSIVE_MALLOC_MUTEX
+ erts_mtx_lock(&malloc_mutex);
+ elib_is_initing = 0;
+#endif
+
+#endif /* #if THREAD_SAFE_ELIB_MALLOC */
+
+ elib_need_init = 0;
+
+#if THREAD_SAFE_ELIB_MALLOC && !USE_RECURSIVE_MALLOC_MUTEX
+ erts_cnd_broadcast(&malloc_cond);
+#endif
+
+}
+
+static void init_elib_malloc(EWord* addr, EWord sz)
+{
+ int i;
+ FreeBlock* freep;
+ EWord tmp_sz;
+#ifdef ELIB_HEAP_SBRK
+ char* top;
+ EWord n;
+#endif
+
+ max_allocated = 0;
+ tot_allocated = 0;
+ root = NULL;
+
+ /* Get the page size (may involve system call!!!) */
+ page_size = PAGE_SIZE;
+
+#if defined(ELIB_HEAP_SBRK)
+ sz = PAGES(ELIB_HEAP_SIZE)*page_size;
+
+ if ((top = (char*) sbrk(0)) == (char*)-1) {
+ elib_printf(stderr, "could not initialize elib, sbrk(0)");
+ ELIB_FAILURE;
+ }
+ n = PAGE_ALIGN(top) - top;
+ if ((top = (char*) sbrk(n)) == (char*)-1) {
+ elib_printf(stderr, "could not initialize elib, sbrk(n)");
+ ELIB_FAILURE;
+ }
+ if ((eheap = (EWord*) sbrk(sz)) == (EWord*)-1) {
+ elib_printf(stderr, "could not initialize elib, sbrk(SIZE)");
+ ELIB_FAILURE;
+ }
+ sz = WORDS(ELIB_HEAP_SIZE);
+#elif defined(ELIB_HEAP_FIXED)
+ eheap = fix_heap;
+ sz = WORDS(ELIB_HEAP_SIZE);
+#elif defined(ELIB_HEAP_USER)
+ eheap = addr;
+ sz = WORDS(sz);
+#else
+    return; /* void function; unreachable since a heap type is enforced above */
+#endif
+ eheap_size = 0;
+
+ /* Make sure that the first word of the heap_head is aligned */
+ addr = ALIGN(eheap+1);
+ sz -= ((addr - 1) - eheap); /* Subtract unusable size */
+ eheap_top = eheap = addr - 1; /* Set new aligned heap start */
+
+ eheap_top[sz-1] = 0; /* Heap stop mark */
+
+ addr = eheap;
+ heap_head = (AllocatedBlock*) addr;
+ heap_head->hdr = MIN_ALIGN_SIZE;
+ for (i = 0; i < MIN_ALIGN_SIZE; i++)
+ heap_head->v[i] = 0;
+
+ addr += (MIN_ALIGN_SIZE+1);
+ freep = (FreeBlock*) addr;
+ tmp_sz = sz - (((MIN_ALIGN_SIZE+1) + MIN_BLOCK_SIZE) + 1 + 1);
+ mark_free(freep, tmp_sz);
+ link_free_block((Block_t *) freep);
+
+ /* No need to align heap tail */
+ heap_tail = (AllocatedBlock*) &eheap_top[sz-MIN_BLOCK_SIZE-1];
+ heap_tail->hdr = FREE_ABOVE_BIT | MIN_WORD_SIZE;
+ heap_tail->v[0] = 0;
+ heap_tail->v[1] = 0;
+ heap_tail->v[2] = 0;
+
+ eheap_top += sz;
+ eheap_size += sz;
+
+ heap_locked = 0;
+}
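+
+/*
+ * After initialization the heap thus looks like
+ *
+ *   [head block (allocated, MIN_ALIGN_SIZE words)]
+ *   [one large free block]
+ *   [tail block (allocated, MIN_WORD_SIZE words, FREE_ABOVE_BIT set)]
+ *   [stop mark (0)]
+ *
+ * where the sentinel head/tail blocks and the stop mark let the scan
+ * loops terminate on SIZEOF(p) == 0 without explicit boundary checks.
+ */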
+
+#ifdef ELIB_HEAP_USER
+void elib_force_init(EWord* addr, EWord sz)
+{
+ elib_need_init = 1;
+ elib_init(addr,sz);
+}
+#endif
+
+#ifdef ELIB_HEAP_SBRK
+
+/*
+** 'sz' is the needed size in number of words (should include head and tail words)
+*/
+static int expand_sbrk(EWord sz)
+{
+ EWord* p;
+ EWord bytes = sz * sizeof(EWord);
+ EWord size;
+ AllocatedBlock* tail;
+
+ if (bytes < ELIB_HEAP_SIZE)
+ size = PAGES(ELIB_HEAP_INCREAMENT)*page_size;
+ else
+ size = PAGES(bytes)*page_size;
+
+ if ((p = (EWord*) sbrk(size)) == ((EWord*) -1))
+ return -1;
+
+ if (p != eheap_top) {
+ elib_printf(stderr, "panic: sbrk moved\n");
+ ELIB_FAILURE;
+ }
+
+ sz = WORDS(size);
+
+    /* Set new end-of-heap marker and a new heap tail */
+ eheap_top[sz-1] = 0;
+
+ tail = (AllocatedBlock*) &eheap_top[sz-MIN_BLOCK_SIZE-1];
+ tail->hdr = FREE_ABOVE_BIT | MIN_WORD_SIZE;
+ tail->v[0] = 0;
+ tail->v[1] = 0;
+ tail->v[2] = 0;
+
+ /* Patch old tail with new appended size */
+ heap_tail->hdr = (heap_tail->hdr & FREE_ABOVE_BIT) |
+ (MIN_WORD_SIZE+1+(sz-MIN_BLOCK_SIZE-1));
+ deallocate(heap_tail, 0);
+
+ heap_tail = tail;
+
+ eheap_size += sz;
+ eheap_top += sz;
+
+ return 0;
+}
+
+#endif /* ELIB_HEAP_SBRK */
+
+
+/*
+** Scan the heap and check for corruption
+*/
+int elib_check_heap(void)
+{
+ AllocatedBlock* p = heap_head;
+ EWord sz;
+
+ if (heap_locked) {
+ elib_printf(stderr, "heap is locked no info avaiable\n");
+ return 0;
+ }
+
+ while((sz = SIZEOF(p)) != 0) {
+ if (IS_FREE(p)) {
+ if (p->v[sz-1] != sz) {
+ elib_printf(stderr, "panic: heap corrupted\r\n");
+ ELIB_FAILURE;
+ }
+ p = (AllocatedBlock*) (p->v + sz);
+ if (!IS_FREE_ABOVE(p)) {
+ elib_printf(stderr, "panic: heap corrupted\r\n");
+ ELIB_FAILURE;
+ }
+ }
+ else
+ p = (AllocatedBlock*) (p->v + sz);
+ }
+ return 1;
+}
+
+/*
+** Load the byte vector pointed to by v, of length vsz,
+** with a heap image.
+** The scale is defined by vsz and the current heap size:
+** free = 0, full = 255.
+*/
+int elib_heap_map(EByte* v, int vsz)
+{
+ AllocatedBlock* p = heap_head;
+ EWord sz;
+    int gsz = eheap_size / vsz;  /* The granularity used */
+ int fsz = 0;
+ int usz = 0;
+
+ if (gsz == 0)
+	return -1;  /* resolution too fine */
+
+ while((sz = SIZEOF(p)) != 0) {
+ if (IS_FREE(p)) {
+ fsz += sz;
+ if ((fsz + usz) > gsz) {
+ *v++ = (255*usz)/gsz;
+ fsz -= (gsz - usz);
+ usz = 0;
+ while(fsz >= gsz) {
+ *v++ = 0;
+ fsz -= gsz;
+ }
+ }
+ }
+ else {
+ usz += sz;
+ if ((fsz + usz) > gsz) {
+ *v++ = 255 - (255*fsz)/gsz;
+ usz -= (gsz - fsz);
+ fsz = 0;
+ while(usz >= gsz) {
+ *v++ = 255;
+ usz -= gsz;
+ }
+ }
+ }
+ p = (AllocatedBlock*) (p->v + sz);
+ }
+ return 0;
+}
+
+/*
+** Generate a histogram of free/allocated block sizes.
+** A count granularity (base) of 10 gives the buckets
+** [1-10), [10-100), [100-1000), [1000-10000), ...
+** and a base of 2 gives [1-2), [2-4), [4-8), [8-16), ...
+** A negative base selects linear buckets of width |base|.
+*/
+static int i_logb(EWord size, int base)
+{
+ int lg = 0;
+ while(size >= base) {
+ size /= base;
+ lg++;
+ }
+ return lg;
+}
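+
+/*
+ * Example: with base 10, i_logb(5, 10) == 0, i_logb(10, 10) == 1 and
+ * i_logb(500, 10) == 2, i.e. bucket k holds sizes in [base^k, base^(k+1)).
+ */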
+
+int elib_histo(EWord* vf, EWord* va, int vsz, int base)
+{
+ AllocatedBlock* p = heap_head;
+ EWord sz;
+ int i;
+ int linear;
+
+ if ((vsz <= 1) || (vf == 0 && va == 0))
+ return -1;
+
+ if (base < 0) {
+ linear = 1;
+ base = -base;
+ }
+ else
+ linear = 0;
+
+ if (base <= 1)
+ return -1;
+
+ if (vf != 0) {
+ for (i = 0; i < vsz; i++)
+ vf[i] = 0;
+ }
+ if (va != 0) {
+ for (i = 0; i < vsz; i++)
+ va[i] = 0;
+ }
+
+ while((sz = SIZEOF(p)) != 0) {
+ if (IS_FREE(p)) {
+ if (vf != 0) {
+ int val;
+ if (linear)
+ val = sz / base;
+ else
+ val = i_logb(sz, base);
+ if (val >= vsz)
+ vf[vsz-1]++;
+ else
+ vf[val]++;
+ }
+ }
+ else {
+ if (va != 0) {
+ int val;
+ if (linear)
+ val = sz / base;
+ else
+ val = i_logb(sz, base);
+ if (val >= vsz)
+ va[vsz-1]++;
+ else
+ va[val]++;
+ }
+ }
+ p = (AllocatedBlock*) (p->v + sz);
+ }
+ return 0;
+}
+
+/*
+** Fill the info structure with actual values
+** Total
+** Allocated
+** Free
+** Max free block
+*/
+void elib_stat(struct elib_stat* info)
+{
+ EWord blks = 0;
+ EWord sz_free = 0;
+ EWord sz_alloc = 0;
+ EWord sz_max_free = 0;
+ EWord sz_min_used = 0x7fffffff;
+ EWord sz;
+ EWord num_free = 0;
+ AllocatedBlock* p = heap_head;
+
+ info->mem_total = eheap_size;
+
+ p = (AllocatedBlock*) (p->v + SIZEOF(p));
+
+ while((sz = SIZEOF(p)) != 0) {
+ blks++;
+ if (IS_FREE(p)) {
+ if (sz > sz_max_free)
+ sz_max_free = sz;
+ sz_free += sz;
+ ++num_free;
+ }
+ else {
+ if (sz < sz_min_used)
+ sz_min_used = sz;
+ sz_alloc += sz;
+ }
+ p = (AllocatedBlock*) (p->v + sz);
+ }
+ info->mem_blocks = blks;
+ info->free_blocks = num_free;
+ info->mem_alloc = sz_alloc;
+ info->mem_free = sz_free;
+ info->min_used = sz_min_used;
+ info->max_free = sz_max_free;
+ info->mem_max_alloc = max_allocated;
+ ASSERT(sz_alloc == tot_allocated);
+}
+
+/*
+** Dump the heap
+*/
+void elib_heap_dump(char* label)
+{
+ AllocatedBlock* p = heap_head;
+ EWord sz;
+
+ elib_printf(stderr, "HEAP DUMP (%s)\n", label);
+ if (!elib_check_heap())
+ return;
+
+ while((sz = SIZEOF(p)) != 0) {
+ if (IS_FREE(p)) {
+ elib_printf(stderr, "%p: FREE, size = %d\n", p, (int) sz);
+ }
+ else {
+ elib_printf(stderr, "%p: USED, size = %d %s\n", p, (int) sz,
+ IS_FREE_ABOVE(p)?"(FREE ABOVE)":"");
+ }
+ p = (AllocatedBlock*) (p->v + sz);
+ }
+}
+
+/*
+** Scan heaps and count:
+** free_size, allocated_size, max_free_block
+*/
+void elib_statistics(void* to)
+{
+ struct elib_stat info;
+ EWord frag;
+
+ if (!elib_check_heap())
+ return;
+
+ elib_stat(&info);
+
+ frag = 1000 - ((1000 * info.max_free) / info.mem_free);
+
+ elib_printf(to, "Heap Statistics: total(%d), blocks(%d), frag(%d.%d%%)\n",
+ info.mem_total, info.mem_blocks,
+ (int) frag/10, (int) frag % 10);
+
+ elib_printf(to, " allocated(%d), free(%d), "
+ "free_blocks(%d)\n",
+ info.mem_alloc, info.mem_free,info.free_blocks);
+ elib_printf(to, " max_free(%d), min_used(%d)\n",
+ info.max_free, info.min_used);
+}
+
+/*
+** Allocate at least nb bytes with alignment a.
+** Algorithm:
+** 1) Try to locate an exactly matching block by direct index.
+** 2) Try to use a fixed block of greater size.
+** 3) Try to locate a block by searching in lists where block sizes
+** X may vary between 2^i < X <= 2^(i+1).
+**
+** Reset memory to zero if clear is true.
+*/
+static AllocatedBlock* allocate(EWord nb, EWord a, int clear)
+{
+ FreeBlock* p;
+ EWord nw;
+
+ if (a == ELIB_ALIGN) {
+ /*
+ * Common case: Called by malloc(), realloc(), calloc().
+ */
+ nw = nb < MIN_BYTE_SIZE ? MIN_ALIGN_SIZE : ALIGN_SIZE(nb);
+
+ if ((p = alloc_block(nw)) == 0)
+ return NULL;
+ } else {
+ /*
+ * Special case: Called by memalign().
+ */
+ EWord asz, szp, szq, tmpsz;
+ FreeBlock *q;
+
+ if ((p = alloc_block((1+MIN_ALIGN_SIZE)*sizeof(EWord)+a-1+nb)) == 0)
+ return NULL;
+
+ asz = a - ((EWord) ((AllocatedBlock *)p)->v) % a;
+
+ if (asz != a) {
+	    /* Enforce the alignment requirement by cutting off a free
+	       block at the beginning of the block. */
+
+ if (asz < (1+MIN_ALIGN_SIZE)*sizeof(EWord) && !IS_FREE_ABOVE(p)) {
+		/* Not enough room to cut off a free block;
+		   increase align size */
+ asz += (((1+MIN_ALIGN_SIZE)*sizeof(EWord) + a - 1)/a)*a;
+ }
+
+ szq = ALIGN_SIZE(asz - sizeof(EWord));
+ szp = SIZEOF(p) - szq - 1;
+
+ q = p;
+ p = (FreeBlock*) (((EWord*) q) + szq + 1);
+ p->hdr = FREE_ABOVE_BIT | FREE_BIT | szp;
+
+ if (IS_FREE_ABOVE(q)) { /* This should not be possible I think,
+ but just in case... */
+ tmpsz = SIZEOF_ABOVE(q) + 1;
+ szq += tmpsz;
+ q = (FreeBlock*) (((EWord*) q) - tmpsz);
+ unlink_free_block((Block_t *) q);
+ q->hdr = (q->hdr & FREE_ABOVE_BIT) | FREE_BIT | szq;
+ }
+ mark_free(q, szq);
+ link_free_block((Block_t *) q);
+
+ } /* else already had the correct alignment */
+
+ nw = nb < MIN_BYTE_SIZE ? MIN_ALIGN_SIZE : ALIGN_SIZE(nb);
+ }
+
+ split_block(p, nw, SIZEOF(p));
+
+ STAT_ALLOCED_BLOCK(SIZEOF(p));
+
+ if (clear) {
+ EWord* pp = ((AllocatedBlock*)p)->v;
+
+ while(nw--)
+ *pp++ = 0;
+ }
+
+ return (AllocatedBlock*) p;
+}
+
+
+/*
+** Deallocate memory pointed to by p
+** 1. Merge with the block above if that block is free
+** 2. Merge with the block below if that block is free
+** Link the block to the correct free list
+**
+** p points to the block header!
+**
+*/
+static void deallocate(AllocatedBlock* p, int stat_count)
+{
+ FreeBlock* q;
+ EWord szq;
+ EWord szp;
+
+ szp = SIZEOF(p);
+
+ if (stat_count)
+ STAT_FREED_BLOCK(SIZEOF(p));
+
+ if (IS_FREE_ABOVE(p)) {
+ szq = SIZEOF_ABOVE(p);
+ q = (FreeBlock*) ( ((EWord*) p) - szq - 1);
+ unlink_free_block((Block_t *) q);
+
+ p = (AllocatedBlock*) q;
+ szp += (szq + 1);
+ }
+ q = (FreeBlock*) (p->v + szp);
+ if (IS_FREE(q)) {
+ szq = SIZEOF(q);
+ unlink_free_block((Block_t *) q);
+ szp += (szq + 1);
+ }
+ else
+ q->hdr |= FREE_ABOVE_BIT;
+
+ /* The block above p can NEVER be free !!! */
+ p->hdr = FREE_BIT | szp;
+ p->v[szp-1] = szp;
+
+ link_free_block((Block_t *) p);
+}
+
+/*
+** Reallocate memory
+** If preserve is true then data is moved if necessary
+*/
+static AllocatedBlock* reallocate(AllocatedBlock* p, EWord nb, int preserve)
+{
+ EWord szp;
+ EWord szq;
+ EWord sz;
+ EWord nw;
+ FreeBlock* q;
+
+ if (nb < MIN_BYTE_SIZE)
+ nw = MIN_ALIGN_SIZE;
+ else
+ nw = ALIGN_SIZE(nb);
+
+ sz = szp = SIZEOF(p);
+
+ STAT_FREED_BLOCK(szp);
+
+ /* Merge with block below */
+ q = (FreeBlock*) (p->v + szp);
+ if (IS_FREE(q)) {
+ szq = SIZEOF(q);
+ unlink_free_block((Block_t *) q);
+ szp += (szq + 1);
+ }
+
+ if (nw <= szp) {
+ split_block((FreeBlock *) p, nw, szp);
+ STAT_ALLOCED_BLOCK(SIZEOF(p));
+ return p;
+ }
+ else {
+ EWord* dp = p->v;
+ AllocatedBlock* npp;
+
+ if (IS_FREE_ABOVE(p)) {
+ szq = SIZEOF_ABOVE(p);
+ if (szq + szp + 1 >= nw) {
+ q = (FreeBlock*) (((EWord*) p) - szq - 1);
+ unlink_free_block((Block_t * )q);
+ szp += (szq + 1);
+ p = (AllocatedBlock*) q;
+
+ if (preserve) {
+ EWord* pp = p->v;
+ while(sz--)
+ *pp++ = *dp++;
+ }
+ split_block((FreeBlock *) p, nw, szp);
+ STAT_ALLOCED_BLOCK(SIZEOF(p));
+ return p;
+ }
+ }
+
+ /*
+	 * Update p so that allocate() and deallocate() work.
+	 * (Note that allocate() may call expand_sbrk(), which
+	 * in turn calls deallocate().)
+ */
+
+ p->hdr = (p->hdr & FREE_ABOVE_BIT) | szp;
+ p->v[szp] &= ~FREE_ABOVE_BIT;
+
+ npp = allocate(nb, ELIB_ALIGN, 0);
+ if(npp == NULL)
+ return NULL;
+ if (preserve) {
+ EWord* pp = npp->v;
+ while(sz--)
+ *pp++ = *dp++;
+ }
+ deallocate(p, 0);
+ return npp;
+ }
+}
+
+/*
+** What malloc() and friends should do (and return) when the heap is
+** exhausted. [sverkerw]
+*/
+static void* heap_exhausted(void)
+{
+ /* Choose behaviour */
+#if 0
+ /* Crash-and-burn --- leave a usable corpse (hopefully) */
+ abort();
+#endif
+ /* The usual ANSI-compliant behaviour */
+ return NULL;
+}
+
+/*
+** Allocate size bytes of memory
+*/
+void* ELIB_PREFIX(malloc, (size_t nb))
+{
+ void *res;
+ AllocatedBlock* p;
+
+ erts_mtx_lock(&malloc_mutex);
+ if (elib_need_init)
+ locked_elib_init(NULL,(EWord)0);
+
+ if (nb == 0)
+ res = NULL;
+ else if ((p = allocate(nb, ELIB_ALIGN, 0)) != 0) {
+ ELIB_ALIGN_CHECK(p->v);
+ res = p->v;
+ }
+ else
+ res = heap_exhausted();
+
+ erts_mtx_unlock(&malloc_mutex);
+
+ return res;
+}
+
+
+void* ELIB_PREFIX(calloc, (size_t nelem, size_t size))
+{
+ void *res;
+ int nb;
+ AllocatedBlock* p;
+
+ erts_mtx_lock(&malloc_mutex);
+ if (elib_need_init)
+ locked_elib_init(NULL,(EWord)0);
+
+ if ((nb = nelem * size) == 0)
+ res = NULL;
+ else if ((p = allocate(nb, ELIB_ALIGN, 1)) != 0) {
+ ELIB_ALIGN_CHECK(p->v);
+ res = p->v;
+ }
+ else
+ res = heap_exhausted();
+
+ erts_mtx_unlock(&malloc_mutex);
+
+ return res;
+}
+
+/*
+** Free memory allocated by malloc
+*/
+
+void ELIB_PREFIX(free, (EWord* p))
+{
+ erts_mtx_lock(&malloc_mutex);
+ if (elib_need_init)
+ locked_elib_init(NULL,(EWord)0);
+
+ if (p != 0)
+ deallocate((AllocatedBlock*)(p-1), 1);
+
+ erts_mtx_unlock(&malloc_mutex);
+}
+
+void ELIB_PREFIX(cfree, (EWord* p))
+{
+ ELIB_PREFIX(free, (p));
+}
+
+
+/*
+** Realloc the memory allocated in p to nb number of bytes
+**
+*/
+
+void* ELIB_PREFIX(realloc, (EWord* p, size_t nb))
+{
+ void *res = NULL;
+ AllocatedBlock* pp;
+
+ erts_mtx_lock(&malloc_mutex);
+ if (elib_need_init)
+ locked_elib_init(NULL,(EWord)0);
+
+ if (p != 0) {
+ pp = (AllocatedBlock*) (p-1);
+ if (nb > 0) {
+ if ((pp = reallocate(pp, nb, 1)) != 0) {
+ ELIB_ALIGN_CHECK(pp->v);
+ res = pp->v;
+ }
+ }
+ else
+ deallocate(pp, 1);
+ }
+ else if (nb > 0) {
+ if ((pp = allocate(nb, ELIB_ALIGN, 0)) != 0) {
+ ELIB_ALIGN_CHECK(pp->v);
+ res = pp->v;
+ }
+ else
+ res = heap_exhausted();
+ }
+
+ erts_mtx_unlock(&malloc_mutex);
+
+ return res;
+}
+
+/*
+** Resize the memory area pointed to by p with nb number of bytes
+*/
+void* ELIB_PREFIX(memresize, (EWord* p, int nb))
+{
+ void *res = NULL;
+ AllocatedBlock* pp;
+
+ erts_mtx_lock(&malloc_mutex);
+ if (elib_need_init)
+ locked_elib_init(NULL,(EWord)0);
+
+ if (p != 0) {
+ pp = (AllocatedBlock*) (p-1);
+ if (nb > 0) {
+ if ((pp = reallocate(pp, nb, 0)) != 0) {
+ ELIB_ALIGN_CHECK(pp->v);
+ res = pp->v;
+ }
+ }
+ else
+ deallocate(pp, 1);
+ }
+ else if (nb > 0) {
+ if ((pp = allocate(nb, ELIB_ALIGN, 0)) != 0) {
+ ELIB_ALIGN_CHECK(pp->v);
+ res = pp->v;
+ }
+ else
+ res = heap_exhausted();
+ }
+
+ erts_mtx_unlock(&malloc_mutex);
+
+ return res;
+}
+
+
+/* Create aligned memory; a must be a power of 2 !!! */
+
+void* ELIB_PREFIX(memalign, (int a, int nb))
+{
+ void *res;
+ AllocatedBlock* p;
+
+ erts_mtx_lock(&malloc_mutex);
+ if (elib_need_init)
+ locked_elib_init(NULL,(EWord)0);
+
+ if (nb == 0 || a <= 0)
+ res = NULL;
+ else if ((p = allocate(nb, a, 0)) != 0) {
+ ALIGN_CHECK(a, p->v);
+ res = p->v;
+ }
+ else
+ res = heap_exhausted();
+
+ erts_mtx_unlock(&malloc_mutex);
+
+ return res;
+}
+
+void* ELIB_PREFIX(valloc, (int nb))
+{
+ return ELIB_PREFIX(memalign, (page_size, nb));
+}
+
+
+void* ELIB_PREFIX(pvalloc, (int nb))
+{
+ return ELIB_PREFIX(memalign, (page_size, PAGES(nb)*page_size));
+}
+/* Return memory size for pointer p in bytes */
+
+int ELIB_PREFIX(memsize, (EWord* p))
+{
+    return SIZEOF((AllocatedBlock*)(p-1))*sizeof(EWord);
+}
+
+
+/*
+** --------------------------------------------------------------------------
+** DEBUG LIBRARY
+** --------------------------------------------------------------------------
+*/
+
+#ifdef ELIB_DEBUG
+
+#define IN_HEAP(p) (((p) >= (char*) eheap) && (p) < (char*) eheap_top)
+/*
+** ptr_to_block: return the pointer to heap block pointed into by ptr
+** Returns 0 if not pointing into a block
+*/
+
+static EWord* ptr_to_block(char* ptr)
+{
+ AllocatedBlock* p = heap_head;
+ EWord sz;
+
+ while((sz = SIZEOF(p)) != 0) {
+ if ((ptr >= (char*) p->v) && (ptr < (char*)(p->v+sz)))
+ return p->v;
+ p = (AllocatedBlock*) (p->v + sz);
+ }
+ return 0;
+}
+
+/*
+** Validate a pointer
+** returns:
+**   0 - if ptr points into a heap block
+**   1 - if ptr points into the heap but not into any block
+**  -1 - if ptr points outside the heap
+**
+*/
+static int check_pointer(char* ptr)
+{
+ if (IN_HEAP(ptr)) {
+ if (ptr_to_block(ptr) == 0)
+ return 1;
+ return 0;
+ }
+ return -1;
+}
+
+/*
+** Validate a memory area
+** returns:
+** 0 - if area is included in a block
+** -1 - if area overlap a heap block
+** 1 - if area is outside heap
+*/
+static int check_area(char* ptr, int n)
+{
+ if (IN_HEAP(ptr)) {
+ if (IN_HEAP(ptr+n-1)) {
+ EWord* p1 = ptr_to_block(ptr);
+ EWord* p2 = ptr_to_block(ptr+n-1);
+
+ if (p1 == p2)
+ return (p1 == 0) ? -1 : 0;
+ return -1;
+ }
+ }
+ else if (IN_HEAP(ptr+n-1))
+ return -1;
+ return 1;
+}
+
+/*
+** Check if a block write will overwrite heap block
+*/
+static void check_write(char* ptr, int n, char* file, int line, char* fun)
+{
+ if (check_area(ptr, n) == -1) {
+ elib_printf(stderr, "RUNTIME ERROR: %s heap overwrite\n", fun);
+ elib_printf(stderr, "File: %s Line: %d\n", file, line);
+ ELIB_FAILURE;
+ }
+}
+
+/*
+** Check if a pointer is an allocated object
+*/
+static void check_allocated_block(char* ptr, char* file, int line, char* fun)
+{
+ EWord* q;
+
+ if (!IN_HEAP(ptr) || ((q=ptr_to_block(ptr)) == 0) || (ptr != (char*) q)) {
+ elib_printf(stderr, "RUNTIME ERROR: %s non heap pointer\n", fun);
+ elib_printf(stderr, "File: %s Line: %d\n", file, line);
+ ELIB_FAILURE;
+ }
+
+ if (IS_FREE((AllocatedBlock*)(q-1))) {
+ elib_printf(stderr, "RUNTIME ERROR: %s free pointer\n", fun);
+ elib_printf(stderr, "File: %s Line: %d\n", file, line);
+ ELIB_FAILURE;
+ }
+
+}
+
+/*
+** --------------------------------------------------------------------------
+** DEBUG VERSIONS (COMPILED WITH THE ELIB.H)
+** --------------------------------------------------------------------------
+*/
+
+void* elib_dbg_malloc(int n, char* file, int line)
+{
+ return elib__malloc(n);
+}
+
+void* elib_dbg_calloc(int n, int s, char* file, int line)
+{
+ return elib__calloc(n, s);
+}
+
+void* elib_dbg_realloc(EWord* p, int n, char* file, int line)
+{
+ if (p == 0)
+ return elib__malloc(n);
+ check_allocated_block(p, file, line, "elib_realloc");
+ return elib__realloc(p, n);
+}
+
+void elib_dbg_free(EWord* p, char* file, int line)
+{
+ if (p == 0)
+ return;
+ check_allocated_block(p, file, line, "elib_free");
+ elib__free(p);
+}
+
+void elib_dbg_cfree(EWord* p, char* file, int line)
+{
+ if (p == 0)
+ return;
+ check_allocated_block(p, file, line, "elib_free");
+ elib__cfree(p);
+}
+
+void* elib_dbg_memalign(int a, int n, char* file, int line)
+{
+ return elib__memalign(a, n);
+}
+
+void* elib_dbg_valloc(int n, char* file, int line)
+{
+ return elib__valloc(n);
+}
+
+void* elib_dbg_pvalloc(int n, char* file, int line)
+{
+ return elib__pvalloc(n);
+}
+
+void* elib_dbg_memresize(EWord* p, int n, char* file, int line)
+{
+ if (p == 0)
+ return elib__malloc(n);
+ check_allocated_block(p, file, line, "elib_memresize");
+ return elib__memresize(p, n);
+}
+
+int elib_dbg_memsize(void* p, char* file, int line)
+{
+ check_allocated_block(p, file, line, "elib_memsize");
+ return elib__memsize(p);
+}
+
+/*
+** --------------------------------------------------------------------------
+** LINK TIME FUNCTIONS (NOT COMPILED CALLS)
+** --------------------------------------------------------------------------
+*/
+
+void* elib_malloc(int n)
+{
+ return elib_dbg_malloc(n, "", -1);
+}
+
+void* elib_calloc(int n, int s)
+{
+ return elib_dbg_calloc(n, s, "", -1);
+}
+
+void* elib_realloc(EWord* p, int n)
+{
+ return elib_dbg_realloc(p, n, "", -1);
+}
+
+void elib_free(EWord* p)
+{
+ elib_dbg_free(p, "", -1);
+}
+
+void elib_cfree(EWord* p)
+{
+ elib_dbg_cfree(p, "", -1);
+}
+
+void* elib_memalign(int a, int n)
+{
+ return elib_dbg_memalign(a, n, "", -1);
+}
+
+void* elib_valloc(int n)
+{
+ return elib_dbg_valloc(n, "", -1);
+}
+
+void* elib_pvalloc(int n)
+{
+ return elib_dbg_pvalloc(n, "", -1);
+}
+
+void* elib_memresize(EWord* p, int n)
+{
+ return elib_dbg_memresize(p, n, "", -1);
+}
+
+
+int elib_memsize(EWord* p)
+{
+ return elib_dbg_memsize(p, "", -1);
+}
+
+#endif /* ELIB_DEBUG */
+
+/*
+** --------------------------------------------------------------------------
+** Map c library functions to elib
+** --------------------------------------------------------------------------
+*/
+
+#if defined(ELIB_ALLOC_IS_CLIB)
+void* malloc(size_t nb)
+{
+ return elib_malloc(nb);
+}
+
+void* calloc(size_t nelem, size_t size)
+{
+ return elib_calloc(nelem, size);
+}
+
+
+void free(void *p)
+{
+ elib_free(p);
+}
+
+void cfree(void *p)
+{
+ elib_cfree(p);
+}
+
+void* realloc(void* p, size_t nb)
+{
+ return elib_realloc(p, nb);
+}
+
+
+void* memalign(size_t a, size_t s)
+{
+ return elib_memalign(a, s);
+}
+
+void* valloc(size_t nb)
+{
+ return elib_valloc(nb);
+}
+
+void* pvalloc(size_t nb)
+{
+ return elib_pvalloc(nb);
+}
+
+#if 0
+void* memresize(void* p, int nb)
+{
+ return elib_memresize(p, nb);
+}
+
+int memsize(void* p)
+{
+ return elib_memsize(p);
+}
+#endif
+#endif /* ELIB_ALLOC_IS_CLIB */
+
+#endif /* ENABLE_ELIB_MALLOC */
+
+void elib_ensure_initialized(void)
+{
+#ifdef ENABLE_ELIB_MALLOC
+#ifndef ELIB_DONT_INITIALIZE
+ elib_init(NULL, 0);
+#endif
+#endif
+}
+
+#ifdef ENABLE_ELIB_MALLOC
+/**
+ ** A slightly modified version of the "address order best fit" algorithm
+ ** used in erl_bestfit_alloc.c. Comments refer to that implementation.
+ **/
+
+/*
+ * Description: A combined "address order best fit"/"best fit" allocator
+ * based on a Red-Black (binary search) Tree. The search,
+ * insert, and delete operations are all O(log n) operations
+ * on a Red-Black Tree. In the "address order best fit" case
+ * n equals number of free blocks, and in the "best fit" case
+ * n equals number of distinct sizes of free blocks. Red-Black
+ * Trees are described in "Introduction to Algorithms", by
+ * Thomas H. Cormen, Charles E. Leiserson, and
+ *              Ronald L. Rivest.
+ *
+ * This module is a callback-module for erl_alloc_util.c
+ *
+ * Author: Rickard Green
+ */
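+
+/*
+ * Ordering sketch (comment only, not part of the allocator): the tree
+ * keeps free blocks ordered by (size, address). A hypothetical
+ * stand-alone comparator equivalent to the tests inlined in
+ * link_free_block() and check_tree() below would be:
+ *
+ *   static int blk_before(RBTree_t *a, RBTree_t *b)
+ *   {
+ *       Uint sa = BLK_SZ(a), sb = BLK_SZ(b);
+ *       return sa < sb || (sa == sb && a < b);
+ *   }
+ */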
+
+#ifdef DEBUG
+#if 0
+#define HARD_DEBUG
+#endif
+#else
+#undef HARD_DEBUG
+#endif
+
+#define SZ_MASK SIZE_MASK
+#define FLG_MASK (~(SZ_MASK))
+
+#define BLK_SZ(B) (*((Block_t *) (B)) & SZ_MASK)
+
+#define TREE_NODE_FLG (((Uint) 1) << 0)
+#define RED_FLG (((Uint) 1) << 1)
+#ifdef HARD_DEBUG
+# define LEFT_VISITED_FLG (((Uint) 1) << 2)
+# define RIGHT_VISITED_FLG (((Uint) 1) << 3)
+#endif
+
+#define IS_TREE_NODE(N) (((RBTree_t *) (N))->flags & TREE_NODE_FLG)
+#define IS_LIST_ELEM(N) (!IS_TREE_NODE(((RBTree_t *) (N))))
+
+#define SET_TREE_NODE(N) (((RBTree_t *) (N))->flags |= TREE_NODE_FLG)
+#define SET_LIST_ELEM(N) (((RBTree_t *) (N))->flags &= ~TREE_NODE_FLG)
+
+#define IS_RED(N) (((RBTree_t *) (N)) \
+ && ((RBTree_t *) (N))->flags & RED_FLG)
+#define IS_BLACK(N) (!IS_RED(((RBTree_t *) (N))))
+
+#define SET_RED(N) (((RBTree_t *) (N))->flags |= RED_FLG)
+#define SET_BLACK(N) (((RBTree_t *) (N))->flags &= ~RED_FLG)
+
+#undef ASSERT
+#define ASSERT ASSERT_EXPR
+
+#if 1
+#define RBT_ASSERT ASSERT
+#else
+#define RBT_ASSERT(x)
+#endif
+
+
+#ifdef HARD_DEBUG
+static RBTree_t * check_tree(Uint);
+#endif
+
+#ifdef ERTS_INLINE
+# ifndef ERTS_CAN_INLINE
+# define ERTS_CAN_INLINE 1
+# endif
+#else
+# if defined(__GNUC__)
+# define ERTS_CAN_INLINE 1
+# define ERTS_INLINE __inline__
+# elif defined(__WIN32__)
+# define ERTS_CAN_INLINE 1
+# define ERTS_INLINE __inline
+# else
+# define ERTS_CAN_INLINE 0
+# define ERTS_INLINE
+# endif
+#endif
+
+/* Types... */
+#if 0
+typedef struct RBTree_t_ RBTree_t;
+
+struct RBTree_t_ {
+ Block_t hdr;
+ Uint flags;
+ RBTree_t *parent;
+ RBTree_t *left;
+ RBTree_t *right;
+};
+#endif
+
+#if 0
+typedef struct {
+ RBTree_t t;
+ RBTree_t *next;
+} RBTreeList_t;
+
+#define LIST_NEXT(N) (((RBTreeList_t *) (N))->next)
+#define LIST_PREV(N) (((RBTreeList_t *) (N))->t.parent)
+#endif
+
+#ifdef DEBUG
+
+/* Destroy all tree fields */
+#define DESTROY_TREE_NODE(N) \
+ sys_memset((void *) (((Block_t *) (N)) + 1), \
+ 0xff, \
+ (sizeof(RBTree_t) - sizeof(Block_t)))
+
+/* Destroy all tree and list fields */
+#define DESTROY_LIST_ELEM(N) \
+ sys_memset((void *) (((Block_t *) (N)) + 1), \
+ 0xff, \
+ (sizeof(RBTreeList_t) - sizeof(Block_t)))
+
+#else
+
+#define DESTROY_TREE_NODE(N)
+#define DESTROY_LIST_ELEM(N)
+
+#endif
+
+
+/*
+ * Red-Black Tree operations needed
+ */
+
+static ERTS_INLINE void
+left_rotate(RBTree_t **root, RBTree_t *x)
+{
+ RBTree_t *y = x->right;
+ x->right = y->left;
+ if (y->left)
+ y->left->parent = x;
+ y->parent = x->parent;
+ if (!y->parent) {
+ RBT_ASSERT(*root == x);
+ *root = y;
+ }
+ else if (x == x->parent->left)
+ x->parent->left = y;
+ else {
+ RBT_ASSERT(x == x->parent->right);
+ x->parent->right = y;
+ }
+ y->left = x;
+ x->parent = y;
+}
+
+static ERTS_INLINE void
+right_rotate(RBTree_t **root, RBTree_t *x)
+{
+ RBTree_t *y = x->left;
+ x->left = y->right;
+ if (y->right)
+ y->right->parent = x;
+ y->parent = x->parent;
+ if (!y->parent) {
+ RBT_ASSERT(*root == x);
+ *root = y;
+ }
+ else if (x == x->parent->right)
+ x->parent->right = y;
+ else {
+ RBT_ASSERT(x == x->parent->left);
+ x->parent->left = y;
+ }
+ y->right = x;
+ x->parent = y;
+}
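+
+/*
+ * Rotation sketch: left_rotate(&root, x) rearranges
+ *
+ *        x                  y
+ *       / \                / \
+ *      a   y      ==>     x   c
+ *         / \            / \
+ *        b   c          a   b
+ *
+ * and right_rotate() is its mirror image. Only the parent/left/right
+ * links are updated; block headers and the (size, address) ordering are
+ * preserved.
+ */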
+
+
+/*
+ * Replace node x with node y
+ * NOTE: block header of y is not changed
+ */
+static ERTS_INLINE void
+replace(RBTree_t **root, RBTree_t *x, RBTree_t *y)
+{
+
+ if (!x->parent) {
+ RBT_ASSERT(*root == x);
+ *root = y;
+ }
+ else if (x == x->parent->left)
+ x->parent->left = y;
+ else {
+ RBT_ASSERT(x == x->parent->right);
+ x->parent->right = y;
+ }
+ if (x->left) {
+ RBT_ASSERT(x->left->parent == x);
+ x->left->parent = y;
+ }
+ if (x->right) {
+ RBT_ASSERT(x->right->parent == x);
+ x->right->parent = y;
+ }
+
+ y->flags = x->flags;
+ y->parent = x->parent;
+ y->right = x->right;
+ y->left = x->left;
+
+ DESTROY_TREE_NODE(x);
+
+}
+
+static void
+tree_insert_fixup(RBTree_t *blk)
+{
+ RBTree_t *x = blk, *y;
+
+ /*
+ * Rearrange the tree so that it satisfies the Red-Black Tree properties
+ */
+
+ RBT_ASSERT(x != root && IS_RED(x->parent));
+ do {
+
+ /*
+ * x and its parent are both red. Move the red pair up the tree
+ * until we get to the root or until we can separate them.
+ */
+
+ RBT_ASSERT(IS_RED(x));
+ RBT_ASSERT(IS_BLACK(x->parent->parent));
+ RBT_ASSERT(x->parent->parent);
+
+ if (x->parent == x->parent->parent->left) {
+ y = x->parent->parent->right;
+ if (IS_RED(y)) {
+ SET_BLACK(y);
+ x = x->parent;
+ SET_BLACK(x);
+ x = x->parent;
+ SET_RED(x);
+ }
+ else {
+
+ if (x == x->parent->right) {
+ x = x->parent;
+ left_rotate(&root, x);
+ }
+
+ RBT_ASSERT(x == x->parent->parent->left->left);
+ RBT_ASSERT(IS_RED(x));
+ RBT_ASSERT(IS_RED(x->parent));
+ RBT_ASSERT(IS_BLACK(x->parent->parent));
+ RBT_ASSERT(IS_BLACK(y));
+
+ SET_BLACK(x->parent);
+ SET_RED(x->parent->parent);
+ right_rotate(&root, x->parent->parent);
+
+ RBT_ASSERT(x == x->parent->left);
+ RBT_ASSERT(IS_RED(x));
+ RBT_ASSERT(IS_RED(x->parent->right));
+ RBT_ASSERT(IS_BLACK(x->parent));
+ break;
+ }
+ }
+ else {
+ RBT_ASSERT(x->parent == x->parent->parent->right);
+ y = x->parent->parent->left;
+ if (IS_RED(y)) {
+ SET_BLACK(y);
+ x = x->parent;
+ SET_BLACK(x);
+ x = x->parent;
+ SET_RED(x);
+ }
+ else {
+
+ if (x == x->parent->left) {
+ x = x->parent;
+ right_rotate(&root, x);
+ }
+
+ RBT_ASSERT(x == x->parent->parent->right->right);
+ RBT_ASSERT(IS_RED(x));
+ RBT_ASSERT(IS_RED(x->parent));
+ RBT_ASSERT(IS_BLACK(x->parent->parent));
+ RBT_ASSERT(IS_BLACK(y));
+
+ SET_BLACK(x->parent);
+ SET_RED(x->parent->parent);
+ left_rotate(&root, x->parent->parent);
+
+ RBT_ASSERT(x == x->parent->right);
+ RBT_ASSERT(IS_RED(x));
+ RBT_ASSERT(IS_RED(x->parent->left));
+ RBT_ASSERT(IS_BLACK(x->parent));
+ break;
+ }
+ }
+ } while (x != root && IS_RED(x->parent));
+
+ SET_BLACK(root);
+}
+
+static void
+unlink_free_block(Block_t *del)
+{
+ Uint spliced_is_black;
+ RBTree_t *x, *y, *z = (RBTree_t *) del;
+ RBTree_t null_x; /* null_x is used to get the fixup started when we
+ splice out a node without children. */
+
+ null_x.parent = NULL;
+
+#ifdef HARD_DEBUG
+ check_tree(0);
+#endif
+
+ /* Remove node from tree... */
+
+ /* Find node to splice out */
+ if (!z->left || !z->right)
+ y = z;
+ else
+	/* Set y to z's successor */
+ for(y = z->right; y->left; y = y->left);
+ /* splice out y */
+ x = y->left ? y->left : y->right;
+ spliced_is_black = IS_BLACK(y);
+ if (x) {
+ x->parent = y->parent;
+ }
+ else if (!x && spliced_is_black) {
+ x = &null_x;
+ x->flags = 0;
+ SET_BLACK(x);
+ x->right = x->left = NULL;
+ x->parent = y->parent;
+ y->left = x;
+ }
+
+ if (!y->parent) {
+ RBT_ASSERT(root == y);
+ root = x;
+ }
+ else if (y == y->parent->left)
+ y->parent->left = x;
+ else {
+ RBT_ASSERT(y == y->parent->right);
+ y->parent->right = x;
+ }
+ if (y != z) {
+ /* We spliced out the successor of z; replace z by the successor */
+ replace(&root, z, y);
+ }
+
+ if (spliced_is_black) {
+ /* We removed a black node which makes the resulting tree
+ violate the Red-Black Tree properties. Fixup tree... */
+
+ while (IS_BLACK(x) && x->parent) {
+
+ /*
+ * x has an "extra black" which we move up the tree
+ * until we reach the root or until we can get rid of it.
+ *
+	 * y is the sibling of x
+ */
+
+ if (x == x->parent->left) {
+ y = x->parent->right;
+ RBT_ASSERT(y);
+ if (IS_RED(y)) {
+ RBT_ASSERT(y->right);
+ RBT_ASSERT(y->left);
+ SET_BLACK(y);
+ RBT_ASSERT(IS_BLACK(x->parent));
+ SET_RED(x->parent);
+ left_rotate(&root, x->parent);
+ y = x->parent->right;
+ }
+ RBT_ASSERT(y);
+ RBT_ASSERT(IS_BLACK(y));
+ if (IS_BLACK(y->left) && IS_BLACK(y->right)) {
+ SET_RED(y);
+ x = x->parent;
+ }
+ else {
+ if (IS_BLACK(y->right)) {
+ SET_BLACK(y->left);
+ SET_RED(y);
+ right_rotate(&root, y);
+ y = x->parent->right;
+ }
+ RBT_ASSERT(y);
+ if (IS_RED(x->parent)) {
+
+ SET_BLACK(x->parent);
+ SET_RED(y);
+ }
+ RBT_ASSERT(y->right);
+ SET_BLACK(y->right);
+ left_rotate(&root, x->parent);
+ x = root;
+ break;
+ }
+ }
+ else {
+ RBT_ASSERT(x == x->parent->right);
+ y = x->parent->left;
+ RBT_ASSERT(y);
+ if (IS_RED(y)) {
+ RBT_ASSERT(y->right);
+ RBT_ASSERT(y->left);
+ SET_BLACK(y);
+ RBT_ASSERT(IS_BLACK(x->parent));
+ SET_RED(x->parent);
+ right_rotate(&root, x->parent);
+ y = x->parent->left;
+ }
+ RBT_ASSERT(y);
+ RBT_ASSERT(IS_BLACK(y));
+ if (IS_BLACK(y->right) && IS_BLACK(y->left)) {
+ SET_RED(y);
+ x = x->parent;
+ }
+ else {
+ if (IS_BLACK(y->left)) {
+ SET_BLACK(y->right);
+ SET_RED(y);
+ left_rotate(&root, y);
+ y = x->parent->left;
+ }
+ RBT_ASSERT(y);
+ if (IS_RED(x->parent)) {
+ SET_BLACK(x->parent);
+ SET_RED(y);
+ }
+ RBT_ASSERT(y->left);
+ SET_BLACK(y->left);
+ right_rotate(&root, x->parent);
+ x = root;
+ break;
+ }
+ }
+ }
+ SET_BLACK(x);
+
+ if (null_x.parent) {
+ if (null_x.parent->left == &null_x)
+ null_x.parent->left = NULL;
+ else {
+ RBT_ASSERT(null_x.parent->right == &null_x);
+ null_x.parent->right = NULL;
+ }
+ RBT_ASSERT(!null_x.left);
+ RBT_ASSERT(!null_x.right);
+ }
+ else if (root == &null_x) {
+ root = NULL;
+ RBT_ASSERT(!null_x.left);
+ RBT_ASSERT(!null_x.right);
+ }
+ }
+
+
+ DESTROY_TREE_NODE(del);
+
+#ifdef HARD_DEBUG
+ check_tree(0);
+#endif
+
+}
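+
+/*
+ * Note on null_x: when the spliced-out node is black and has no
+ * children, the fixup loop still needs a node to carry the "extra
+ * black", so the stack-allocated dummy is temporarily linked into the
+ * tree and unlinked again once the loop has terminated.
+ */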
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\
+ * "Address order best fit" specific callbacks. *
+\* */
+
+static void
+link_free_block(Block_t *block)
+{
+ RBTree_t *blk = (RBTree_t *) block;
+ Uint blk_sz = BLK_SZ(blk);
+
+ blk->flags = 0;
+ blk->left = NULL;
+ blk->right = NULL;
+
+ if (!root) {
+ blk->parent = NULL;
+ SET_BLACK(blk);
+ root = blk;
+ } else {
+ RBTree_t *x = root;
+ while (1) {
+ Uint size;
+
+ size = BLK_SZ(x);
+
+ if (blk_sz < size || (blk_sz == size && blk < x)) {
+ if (!x->left) {
+ blk->parent = x;
+ x->left = blk;
+ break;
+ }
+ x = x->left;
+ }
+ else {
+ if (!x->right) {
+ blk->parent = x;
+ x->right = blk;
+ break;
+ }
+ x = x->right;
+ }
+
+ }
+
+ /* Insert block into size tree */
+ RBT_ASSERT(blk->parent);
+
+ SET_RED(blk);
+ if (IS_RED(blk->parent)) {
+ tree_insert_fixup(blk);
+ }
+ }
+
+#ifdef HARD_DEBUG
+ check_tree(0);
+#endif
+}
+
+
+static Block_t *
+get_free_block(Uint size)
+{
+ RBTree_t *x = root;
+ RBTree_t *blk = NULL;
+ Uint blk_sz;
+
+ while (x) {
+ blk_sz = BLK_SZ(x);
+ if (blk_sz < size) {
+ x = x->right;
+ }
+ else {
+ blk = x;
+ x = x->left;
+ }
+ }
+
+ if (!blk)
+ return NULL;
+
+#ifdef HARD_DEBUG
+ ASSERT(blk == check_tree(size));
+#endif
+
+ unlink_free_block((Block_t *) blk);
+
+ return (Block_t *) blk;
+}
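+
+/*
+ * Search sketch: with free blocks of sizes {16, 24, 24, 40}, a request
+ * for 20 keeps the smallest block of size >= 20 seen so far while
+ * descending, and ends up at the lowest-addressed 24-unit block (sizes
+ * are illustrative, in the same units as BLK_SZ()).
+ */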
+
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\
+ * Debug functions                                                          *
+\* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+
+#ifdef HARD_DEBUG
+
+#define IS_LEFT_VISITED(FB) ((FB)->flags & LEFT_VISITED_FLG)
+#define IS_RIGHT_VISITED(FB) ((FB)->flags & RIGHT_VISITED_FLG)
+
+#define SET_LEFT_VISITED(FB) ((FB)->flags |= LEFT_VISITED_FLG)
+#define SET_RIGHT_VISITED(FB) ((FB)->flags |= RIGHT_VISITED_FLG)
+
+#define UNSET_LEFT_VISITED(FB) ((FB)->flags &= ~LEFT_VISITED_FLG)
+#define UNSET_RIGHT_VISITED(FB) ((FB)->flags &= ~RIGHT_VISITED_FLG)
+
+
+#if 0
+# define PRINT_TREE
+#else
+# undef PRINT_TREE
+#endif
+
+#ifdef PRINT_TREE
+static void print_tree(void);
+#endif
+
+/*
+ * Checks that the ordering between parents and children is correct and
+ * that the Red-Black Tree properties are satisfied. If size > 0,
+ * check_tree() returns the node that "address order best fit" would
+ * select for a request of that size.
+ *
+ * The Red-Black Tree properties are:
+ * 1. Every node is either red or black.
+ * 2. Every leaf (NIL) is black.
+ * 3. If a node is red, then both its children are black.
+ * 4. Every simple path from a node to a descendant leaf
+ * contains the same number of black nodes.
+ */
+
+static RBTree_t *
+check_tree(Uint size)
+{
+ RBTree_t *res = NULL;
+ Sint blacks;
+ Sint curr_blacks;
+ RBTree_t *x;
+
+#ifdef PRINT_TREE
+ print_tree();
+#endif
+
+ if (!root)
+ return res;
+
+ x = root;
+ ASSERT(IS_BLACK(x));
+ ASSERT(!x->parent);
+ curr_blacks = 1;
+ blacks = -1;
+
+ while (x) {
+ if (!IS_LEFT_VISITED(x)) {
+ SET_LEFT_VISITED(x);
+ if (x->left) {
+ x = x->left;
+ if (IS_BLACK(x))
+ curr_blacks++;
+ continue;
+ }
+ else {
+ if (blacks < 0)
+ blacks = curr_blacks;
+ ASSERT(blacks == curr_blacks);
+ }
+ }
+
+ if (!IS_RIGHT_VISITED(x)) {
+ SET_RIGHT_VISITED(x);
+ if (x->right) {
+ x = x->right;
+ if (IS_BLACK(x))
+ curr_blacks++;
+ continue;
+ }
+ else {
+ if (blacks < 0)
+ blacks = curr_blacks;
+ ASSERT(blacks == curr_blacks);
+ }
+ }
+
+
+ if (IS_RED(x)) {
+ ASSERT(IS_BLACK(x->right));
+ ASSERT(IS_BLACK(x->left));
+ }
+
+ ASSERT(x->parent || x == root);
+
+ if (x->left) {
+ ASSERT(x->left->parent == x);
+ ASSERT(BLK_SZ(x->left) < BLK_SZ(x)
+ || (BLK_SZ(x->left) == BLK_SZ(x) && x->left < x));
+ }
+
+ if (x->right) {
+ ASSERT(x->right->parent == x);
+ ASSERT(BLK_SZ(x->right) > BLK_SZ(x)
+ || (BLK_SZ(x->right) == BLK_SZ(x) && x->right > x));
+ }
+
+ if (size && BLK_SZ(x) >= size) {
+ if (!res
+ || BLK_SZ(x) < BLK_SZ(res)
+ || (BLK_SZ(x) == BLK_SZ(res) && x < res))
+ res = x;
+ }
+
+ UNSET_LEFT_VISITED(x);
+ UNSET_RIGHT_VISITED(x);
+ if (IS_BLACK(x))
+ curr_blacks--;
+ x = x->parent;
+
+ }
+
+ ASSERT(curr_blacks == 0);
+
+ UNSET_LEFT_VISITED(root);
+ UNSET_RIGHT_VISITED(root);
+
+ return res;
+
+}
+
+
+#ifdef PRINT_TREE
+#define INDENT_STEP 2
+
+#include <stdio.h>
+
+static void
+print_tree_aux(RBTree_t *x, int indent)
+{
+ int i;
+
+ if (!x) {
+ for (i = 0; i < indent; i++) {
+ putc(' ', stderr);
+ }
+ fprintf(stderr, "BLACK: nil\r\n");
+ }
+ else {
+ print_tree_aux(x->right, indent + INDENT_STEP);
+ for (i = 0; i < indent; i++) {
+ putc(' ', stderr);
+ }
+ fprintf(stderr, "%s: sz=%lu addr=0x%lx\r\n",
+ IS_BLACK(x) ? "BLACK" : "RED",
+ BLK_SZ(x),
+ (Uint) x);
+ print_tree_aux(x->left, indent + INDENT_STEP);
+ }
+}
+
+
+static void
+print_tree(void)
+{
+    fprintf(stderr, " --- Size-Address tree begin ---\r\n");
+    print_tree_aux(root, 0);
+    fprintf(stderr, " --- Size-Address tree end ---\r\n");
+}
+
+#endif
+
+#endif
+
+#endif /* ENABLE_ELIB_MALLOC */
diff --git a/erts/emulator/beam/elib_memmove.c b/erts/emulator/beam/elib_memmove.c
new file mode 100644
index 0000000000..d2fe8649ed
--- /dev/null
+++ b/erts/emulator/beam/elib_memmove.c
@@ -0,0 +1,113 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 1997-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Fallback memmove for systems that lack one. Although the fast path
+ * copies a word (or halfword) at a time, arbitrary alignment and length
+ * are handled: dst is first byte-aligned, and any remaining bytes are
+ * copied one at a time.
+ */
+
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+
+#ifndef HAVE_MEMMOVE
+
+#define MEMCPY_LIMIT 12
+typedef unsigned long u_long;
+typedef unsigned short u_short;
+
+static void copy_high(dst, src, n)
+char* dst; char* src; int n;
+{
+ dst += n;
+ src += n;
+
+ if (n >= MEMCPY_LIMIT) {
+ while(((u_long) dst) & 3) {
+ *--dst = *--src;
+ n--;
+ }
+ if ((((u_long) src) & 3) == 0) {
+ while(n >= sizeof(u_long)) {
+ src -= sizeof(u_long);
+ dst -= sizeof(u_long);
+ *((u_long*)dst) = *((u_long*)src);
+ n -= sizeof(u_long);
+ }
+ }
+	else if ((((u_long) src) & 3) == 2) {
+ while(n >= sizeof(u_short)) {
+ src -= sizeof(u_short);
+ dst -= sizeof(u_short);
+ *((u_short*)dst) = *((u_short*)src);
+ n -= sizeof(u_short);
+ }
+ }
+ }
+ while(n > 0) {
+ *--dst = *--src;
+ n--;
+ }
+}
+
+static void copy_low(dst, src, n)
+char* dst; char* src; int n;
+{
+ if (n >= MEMCPY_LIMIT) {
+ while(((u_long) dst) & 3) {
+ *dst++ = *src++;
+ n--;
+ }
+ if ((((u_long) src) & 3) == 0) {
+ while(n >= sizeof(u_long)) {
+ *((u_long*)dst) = *((u_long*)src);
+ src += sizeof(u_long);
+ dst += sizeof(u_long);
+ n -= sizeof(u_long);
+ }
+ }
+ else if ((((u_long) src) & 3) == 2) {
+ while(n >= sizeof(u_short)) {
+ *((u_short*)dst) = *((u_short*)src);
+ src += sizeof(u_short);
+ dst += sizeof(u_short);
+ n -= sizeof(u_short);
+ }
+ }
+ }
+ while(n > 0) {
+ *dst++ = *src++;
+ n--;
+ }
+}
+
+/*
+** Move memory (with overlap)
+*/
+void* memmove(dst, src, n)
+char* dst; char* src; int n;
+{
+ if (dst < src)
+ copy_low(dst, src, n);
+ else if (dst > src)
+ copy_high(dst, src, n);
+ return dst;
+}
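+
+/*
+** Overlap sketch: for memmove(buf+1, buf, n), dst > src, so copy_high()
+** copies from the top of the range downwards and no source byte is
+** overwritten before it has been read; for dst < src, copy_low() copies
+** from the bottom upwards for the same reason.
+*/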
+
+#endif /* HAVE_MEMMOVE */
diff --git a/erts/emulator/beam/elib_stat.h b/erts/emulator/beam/elib_stat.h
new file mode 100644
index 0000000000..d8c7f31737
--- /dev/null
+++ b/erts/emulator/beam/elib_stat.h
@@ -0,0 +1,45 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 1996-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+** Interface to elib statistics
+**
+*/
+#ifndef __ELIB_STAT_H__
+#define __ELIB_STAT_H__
+
+struct elib_stat {
+ int mem_total; /* Number of heap words */
+    int mem_blocks;   /* Number of blocks */
+ int mem_alloc; /* Number of words in use */
+ int mem_free; /* Number of words free */
+ int min_used; /* Size of the smallest block used */
+ int max_free; /* Size of the largest free block */
+ int free_blocks; /* Number of fragments in free list */
+ int mem_max_alloc;/* Max number of words in use */
+};
+
+EXTERN_FUNCTION(void, elib_statistics, (void*));
+EXTERN_FUNCTION(int, elib_check_heap, (_VOID_));
+EXTERN_FUNCTION(void, elib_heap_dump, (char*));
+EXTERN_FUNCTION(void, elib_stat, (struct elib_stat*));
+EXTERN_FUNCTION(int, elib_heap_map, (unsigned char*, int));
+EXTERN_FUNCTION(int, elib_histo, (unsigned long*, unsigned long*, int, int));
+
+#endif
diff --git a/erts/emulator/beam/erl_afit_alloc.c b/erts/emulator/beam/erl_afit_alloc.c
new file mode 100644
index 0000000000..e8b594bb47
--- /dev/null
+++ b/erts/emulator/beam/erl_afit_alloc.c
@@ -0,0 +1,256 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2003-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+
+/*
+ * Description:	A fast allocator intended for temporary allocation.
+ *              When allocating, only the first block in the free list
+ *              is inspected; if this block doesn't fit, a new carrier
+ *              is created. NOTE: this allocator can behave really badly
+ *              if misused.
+ *
+ * This module is a callback-module for erl_alloc_util.c
+ *
+ * Author: Rickard Green
+ */
+
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+#include "global.h"
+#define GET_ERL_AF_ALLOC_IMPL
+#include "erl_afit_alloc.h"
+
+
+#define MIN_MBC_SZ (16*1024)
+#define MIN_MBC_FIRST_FREE_SZ (4*1024)
+
+/* Prototypes of callback functions */
+static Block_t * get_free_block (Allctr_t *, Uint,
+ Block_t *, Uint);
+static void link_free_block (Allctr_t *, Block_t *);
+static void unlink_free_block (Allctr_t *, Block_t *);
+
+
+static Eterm info_options (Allctr_t *, char *, int *,
+ void *arg, Uint **, Uint *);
+static void init_atoms (void);
+
+static int atoms_initialized = 0;
+
+void
+erts_afalc_init(void)
+{
+ atoms_initialized = 0;
+}
+
+Allctr_t *
+erts_afalc_start(AFAllctr_t *afallctr,
+ AFAllctrInit_t *afinit,
+ AllctrInit_t *init)
+{
+ AFAllctr_t nulled_state = {{0}};
+ /* {{0}} is used instead of {0}, in order to avoid (an incorrect) gcc
+ warning. gcc warns if {0} is used as initializer of a struct when
+ the first member is a struct (not if, for example, the third member
+ is a struct). */
+ Allctr_t *allctr = (Allctr_t *) afallctr;
+
+ sys_memcpy((void *) afallctr, (void *) &nulled_state, sizeof(AFAllctr_t));
+
+ allctr->mbc_header_size = sizeof(Carrier_t);
+ allctr->min_mbc_size = MIN_MBC_SZ;
+ allctr->min_mbc_first_free_size = MIN_MBC_FIRST_FREE_SZ;
+ allctr->min_block_size = sizeof(AFFreeBlock_t);
+ allctr->vsn_str = ERTS_ALC_AF_ALLOC_VSN_STR;
+
+ /* Callback functions */
+ allctr->get_free_block = get_free_block;
+ allctr->link_free_block = link_free_block;
+ allctr->unlink_free_block = unlink_free_block;
+ allctr->info_options = info_options;
+
+ allctr->get_next_mbc_size = NULL;
+ allctr->creating_mbc = NULL;
+ allctr->destroying_mbc = NULL;
+ allctr->init_atoms = init_atoms;
+
+#ifdef ERTS_ALLOC_UTIL_HARD_DEBUG
+ allctr->check_block = NULL;
+ allctr->check_mbc = NULL;
+#endif
+
+ allctr->atoms_initialized = 0;
+
+ if (!erts_alcu_start(allctr, init))
+ return NULL;
+
+ return allctr;
+}
+
+static Block_t *
+get_free_block(Allctr_t *allctr, Uint size, Block_t *cand_blk, Uint cand_size)
+{
+ AFAllctr_t *afallctr = (AFAllctr_t *) allctr;
+
+ ASSERT(!cand_blk || cand_size >= size);
+
+ if (afallctr->free_list && BLK_SZ(afallctr->free_list) >= size) {
+ AFFreeBlock_t *res = afallctr->free_list;
+ afallctr->free_list = res->next;
+ if (res->next)
+ res->next->prev = NULL;
+ return (Block_t *) res;
+ }
+ else
+ return NULL;
+}
+
+static void
+link_free_block(Allctr_t *allctr, Block_t *block)
+{
+ AFFreeBlock_t *blk = (AFFreeBlock_t *) block;
+ AFAllctr_t *afallctr = (AFAllctr_t *) allctr;
+
+ if (afallctr->free_list && BLK_SZ(afallctr->free_list) > BLK_SZ(blk)) {
+ blk->next = afallctr->free_list->next;
+ blk->prev = afallctr->free_list;
+ afallctr->free_list->next = blk;
+ }
+ else {
+ blk->next = afallctr->free_list;
+ blk->prev = NULL;
+ afallctr->free_list = blk;
+ }
+
+ if (blk->next)
+ blk->next->prev = blk;
+}
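+
+/*
+ * List shape sketch: link_free_block() keeps the largest free block at
+ * the head of the list (a new, smaller block is inserted right after
+ * it), which is what lets get_free_block() inspect only the head.
+ * Freeing blocks of sizes 8, 32 and 16, in that order, yields the list
+ * [32, 16, 8].
+ */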
+
+static void
+unlink_free_block(Allctr_t *allctr, Block_t *block)
+{
+ AFFreeBlock_t *blk = (AFFreeBlock_t *) block;
+ AFAllctr_t *afallctr = (AFAllctr_t *) allctr;
+
+ if (blk->prev)
+ blk->prev->next = blk->next;
+ else
+ afallctr->free_list = blk->next;
+ if (blk->next)
+ blk->next->prev = blk->prev;
+}
+
+
+static struct {
+ Eterm as;
+ Eterm af;
+#ifdef DEBUG
+ Eterm end_of_atoms;
+#endif
+} am;
+
+static void ERTS_INLINE atom_init(Eterm *atom, char *name)
+{
+ *atom = am_atom_put(name, strlen(name));
+}
+#define AM_INIT(AM) atom_init(&am.AM, #AM)
+
+static void
+init_atoms(void)
+{
+#ifdef DEBUG
+ Eterm *atom;
+#endif
+
+ if (atoms_initialized)
+ return;
+
+#ifdef DEBUG
+ for (atom = (Eterm *) &am; atom <= &am.end_of_atoms; atom++) {
+ *atom = THE_NON_VALUE;
+ }
+#endif
+
+ AM_INIT(as);
+ AM_INIT(af);
+
+#ifdef DEBUG
+ for (atom = (Eterm *) &am; atom < &am.end_of_atoms; atom++) {
+ ASSERT(*atom != THE_NON_VALUE);
+ }
+#endif
+
+ atoms_initialized = 1;
+}
+
+
+#define bld_uint erts_bld_uint
+#define bld_cons erts_bld_cons
+#define bld_tuple erts_bld_tuple
+
+static ERTS_INLINE void
+add_2tup(Uint **hpp, Uint *szp, Eterm *lp, Eterm el1, Eterm el2)
+{
+ *lp = bld_cons(hpp, szp, bld_tuple(hpp, szp, 2, el1, el2), *lp);
+}
+
+static Eterm
+info_options(Allctr_t *allctr,
+ char *prefix,
+ int *print_to_p,
+ void *print_to_arg,
+ Uint **hpp,
+ Uint *szp)
+{
+ Eterm res = THE_NON_VALUE;
+
+ if (print_to_p) {
+ erts_print(*print_to_p, print_to_arg, "%sas: af\n", prefix);
+ }
+
+ if (hpp || szp) {
+
+ if (!atoms_initialized)
+ erl_exit(1, "%s:%d: Internal error: Atoms not initialized",
+		     __FILE__, __LINE__);
+
+ res = NIL;
+ add_2tup(hpp, szp, &res, am.as, am.af);
+ }
+
+ return res;
+}
+
+
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\
+ * NOTE: erts_afalc_test() is only supposed to be used for testing.         *
+ *                                                                          *
+ * Keep alloc_SUITE_data/allocator_test.h updated if changes are made       *
+ * to erts_afalc_test()                                                     *
+\* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+unsigned long
+erts_afalc_test(unsigned long op, unsigned long a1, unsigned long a2)
+{
+ switch (op) {
+ default: ASSERT(0); return ~((unsigned long) 0);
+ }
+}
diff --git a/erts/emulator/beam/erl_afit_alloc.h b/erts/emulator/beam/erl_afit_alloc.h
new file mode 100644
index 0000000000..ea408a7194
--- /dev/null
+++ b/erts/emulator/beam/erl_afit_alloc.h
@@ -0,0 +1,67 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2003-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+
+#ifndef ERL_AFIT_ALLOC__
+#define ERL_AFIT_ALLOC__
+
+#include "erl_alloc_util.h"
+
+#define ERTS_ALC_AF_ALLOC_VSN_STR "0.9"
+
+typedef struct AFAllctr_t_ AFAllctr_t;
+
+typedef struct {
+ int dummy;
+} AFAllctrInit_t;
+
+#define ERTS_DEFAULT_AF_ALLCTR_INIT { \
+ 0 /* dummy */\
+}
+
+void erts_afalc_init(void);
+Allctr_t *erts_afalc_start(AFAllctr_t *, AFAllctrInit_t *, AllctrInit_t *);
+
+#endif /* #ifndef ERL_AFIT_ALLOC__ */
+
+
+
+#if defined(GET_ERL_AF_ALLOC_IMPL) && !defined(ERL_AF_ALLOC_IMPL__)
+#define ERL_AF_ALLOC_IMPL__
+
+#define GET_ERL_ALLOC_UTIL_IMPL
+#include "erl_alloc_util.h"
+
+typedef struct AFFreeBlock_t_ AFFreeBlock_t;
+struct AFFreeBlock_t_ {
+ Block_t block_head;
+ AFFreeBlock_t *prev;
+ AFFreeBlock_t *next;
+};
+
+struct AFAllctr_t_ {
+ Allctr_t allctr; /* Has to be first! */
+
+ AFFreeBlock_t * free_list;
+};
+
+unsigned long erts_afalc_test(unsigned long, unsigned long, unsigned long);
+
+#endif /* #if defined(GET_ERL_AF_ALLOC_IMPL)
+ && !defined(ERL_AF_ALLOC_IMPL__) */
diff --git a/erts/emulator/beam/erl_alloc.c b/erts/emulator/beam/erl_alloc.c
new file mode 100644
index 0000000000..b853ec0f01
--- /dev/null
+++ b/erts/emulator/beam/erl_alloc.c
@@ -0,0 +1,3157 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2002-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+
+/*
+ * Description: Management of memory allocators.
+ *
+ * Author: Rickard Green
+ */
+
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+#define ERTS_ALLOC_C__
+#define ERTS_ALC_INTERNAL__
+#include "sys.h"
+#define ERL_THREADS_EMU_INTERNAL__
+#include "erl_threads.h"
+#include "global.h"
+#include "erl_db.h"
+#include "erl_binary.h"
+#include "erl_bits.h"
+#include "erl_instrument.h"
+#include "erl_mseg.h"
+#ifdef ELIB_ALLOC_IS_CLIB
+#include "erl_version.h"
+#endif
+#include "erl_monitors.h"
+#include "erl_bif_timer.h"
+#if defined(ERTS_ALC_T_DRV_SEL_D_STATE) || defined(ERTS_ALC_T_DRV_EV_D_STATE)
+#include "erl_check_io.h"
+#endif
+
+#define GET_ERL_GF_ALLOC_IMPL
+#include "erl_goodfit_alloc.h"
+#define GET_ERL_BF_ALLOC_IMPL
+#include "erl_bestfit_alloc.h"
+#define GET_ERL_AF_ALLOC_IMPL
+#include "erl_afit_alloc.h"
+
+#define ERTS_ALC_DEFAULT_MAX_THR_PREF 16
+
+#if defined(SMALL_MEMORY) || defined(PURIFY) || defined(VALGRIND)
+#define AU_ALLOC_DEFAULT_ENABLE(X) 0
+#else
+#define AU_ALLOC_DEFAULT_ENABLE(X) (X)
+#endif
+
+#ifdef DEBUG
+static Uint install_debug_functions(void);
+#endif
+extern void elib_ensure_initialized(void);
+
+ErtsAllocatorFunctions_t erts_allctrs[ERTS_ALC_A_MAX+1];
+ErtsAllocatorInfo_t erts_allctrs_info[ERTS_ALC_A_MAX+1];
+ErtsAllocatorThrSpec_t erts_allctr_thr_spec[ERTS_ALC_A_MAX+1];
+
+#define ERTS_MIN(A, B) ((A) < (B) ? (A) : (B))
+#define ERTS_MAX(A, B) ((A) > (B) ? (A) : (B))
+
+typedef union {
+ GFAllctr_t gfa;
+ char align_gfa[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(GFAllctr_t))];
+ BFAllctr_t bfa;
+ char align_bfa[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(BFAllctr_t))];
+ AFAllctr_t afa;
+ char align_afa[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(AFAllctr_t))];
+} ErtsAllocatorState_t;
+
+static ErtsAllocatorState_t sl_alloc_state;
+static ErtsAllocatorState_t std_alloc_state;
+static ErtsAllocatorState_t ll_alloc_state;
+static ErtsAllocatorState_t temp_alloc_state;
+static ErtsAllocatorState_t eheap_alloc_state;
+static ErtsAllocatorState_t binary_alloc_state;
+static ErtsAllocatorState_t ets_alloc_state;
+static ErtsAllocatorState_t driver_alloc_state;
+
+ErtsAlcType_t erts_fix_core_allocator_ix;
+#ifdef ERTS_ALC_N_MIN_A_FIXED_SIZE
+static void *(*fix_core_allocator)(ErtsAlcType_t, void *, Uint);
+static void *fix_core_extra;
+static void *fix_core_alloc(Uint size)
+{
+ void *res;
+ res = (*fix_core_allocator)(ERTS_ALC_T_UNDEF, fix_core_extra, size);
+ if (erts_mtrace_enabled)
+ erts_mtrace_crr_alloc(res,
+ ERTS_ALC_A_FIXED_SIZE,
+ erts_fix_core_allocator_ix,
+ size);
+ return res;
+}
+#endif
+
+enum allctr_type {
+ GOODFIT,
+ BESTFIT,
+ AFIT
+};
+
+struct au_init {
+ int enable;
+ int thr_spec;
+ enum allctr_type atype;
+ struct {
+ AllctrInit_t util;
+ GFAllctrInit_t gf;
+ BFAllctrInit_t bf;
+ AFAllctrInit_t af;
+ } init;
+ struct {
+ int mmbcs;
+ int lmbcs;
+ int smbcs;
+ int mmmbc;
+ } default_;
+};
+
+#define DEFAULT_ALLCTR_INIT { \
+ ERTS_DEFAULT_ALLCTR_INIT, \
+ ERTS_DEFAULT_GF_ALLCTR_INIT, \
+ ERTS_DEFAULT_BF_ALLCTR_INIT, \
+ ERTS_DEFAULT_AF_ALLCTR_INIT \
+}
+
+typedef struct {
+ int erts_alloc_config;
+#if HAVE_ERTS_MSEG
+ ErtsMsegInit_t mseg;
+#endif
+ int trim_threshold;
+ int top_pad;
+ AlcUInit_t alloc_util;
+ struct {
+ int stat;
+ int map;
+ char *mtrace;
+ char *nodename;
+ } instr;
+ struct au_init sl_alloc;
+ struct au_init std_alloc;
+ struct au_init ll_alloc;
+ struct au_init temp_alloc;
+ struct au_init eheap_alloc;
+ struct au_init binary_alloc;
+ struct au_init ets_alloc;
+ struct au_init driver_alloc;
+} erts_alc_hndl_args_init_t;
+
+#define ERTS_AU_INIT__ {0, 0, GOODFIT, DEFAULT_ALLCTR_INIT, {1,1,1,1}}
+
+#define SET_DEFAULT_ALLOC_OPTS(IP) \
+do { \
+ struct au_init aui__ = ERTS_AU_INIT__; \
+ sys_memcpy((void *) (IP), (void *) &aui__, sizeof(struct au_init)); \
+} while (0)
+
+static void
+set_default_sl_alloc_opts(struct au_init *ip)
+{
+ SET_DEFAULT_ALLOC_OPTS(ip);
+ ip->enable = AU_ALLOC_DEFAULT_ENABLE(1);
+ ip->thr_spec = 1;
+ ip->atype = GOODFIT;
+ ip->init.util.name_prefix = "sl_";
+ ip->init.util.mmmbc = 5;
+ ip->init.util.alloc_no = ERTS_ALC_A_SHORT_LIVED;
+#ifndef SMALL_MEMORY
+ ip->init.util.mmbcs = 128*1024; /* Main carrier size */
+#else
+ ip->init.util.mmbcs = 32*1024; /* Main carrier size */
+#endif
+ ip->init.util.ts = ERTS_ALC_MTA_SHORT_LIVED;
+ ip->init.util.rsbcst = 80;
+}
+
+static void
+set_default_std_alloc_opts(struct au_init *ip)
+{
+ SET_DEFAULT_ALLOC_OPTS(ip);
+ ip->enable = AU_ALLOC_DEFAULT_ENABLE(1);
+ ip->thr_spec = 1;
+ ip->atype = BESTFIT;
+ ip->init.util.name_prefix = "std_";
+ ip->init.util.mmmbc = 5;
+ ip->init.util.alloc_no = ERTS_ALC_A_STANDARD;
+#ifndef SMALL_MEMORY
+ ip->init.util.mmbcs = 128*1024; /* Main carrier size */
+#else
+ ip->init.util.mmbcs = 32*1024; /* Main carrier size */
+#endif
+ ip->init.util.ts = ERTS_ALC_MTA_STANDARD;
+}
+
+static void
+set_default_ll_alloc_opts(struct au_init *ip)
+{
+ SET_DEFAULT_ALLOC_OPTS(ip);
+ ip->enable = AU_ALLOC_DEFAULT_ENABLE(1);
+ ip->thr_spec = 0;
+ ip->atype = BESTFIT;
+ ip->init.bf.ao = 1;
+ ip->init.util.ramv = 0;
+ ip->init.util.mmsbc = 0;
+ ip->init.util.mmmbc = 0;
+ ip->init.util.sbct = ~((Uint) 0);
+ ip->init.util.name_prefix = "ll_";
+ ip->init.util.alloc_no = ERTS_ALC_A_LONG_LIVED;
+#ifndef SMALL_MEMORY
+ ip->init.util.mmbcs = 2*1024*1024; /* Main carrier size */
+#else
+ ip->init.util.mmbcs = 1*1024*1024; /* Main carrier size */
+#endif
+ ip->init.util.ts = ERTS_ALC_MTA_LONG_LIVED;
+ ip->init.util.asbcst = 0;
+ ip->init.util.rsbcst = 0;
+ ip->init.util.rsbcmt = 0;
+ ip->init.util.rmbcmt = 0;
+}
+
+static void
+set_default_temp_alloc_opts(struct au_init *ip)
+{
+ SET_DEFAULT_ALLOC_OPTS(ip);
+ ip->enable = AU_ALLOC_DEFAULT_ENABLE(1);
+ ip->thr_spec = 1;
+ ip->atype = AFIT;
+ ip->init.util.name_prefix = "temp_";
+ ip->init.util.alloc_no = ERTS_ALC_A_TEMPORARY;
+#ifndef SMALL_MEMORY
+ ip->init.util.mmbcs = 128*1024; /* Main carrier size */
+#else
+ ip->init.util.mmbcs = 32*1024; /* Main carrier size */
+#endif
+ ip->init.util.ts = ERTS_ALC_MTA_TEMPORARY;
+ ip->init.util.rsbcst = 90;
+ ip->init.util.rmbcmt = 100;
+}
+
+static void
+set_default_eheap_alloc_opts(struct au_init *ip)
+{
+ SET_DEFAULT_ALLOC_OPTS(ip);
+ ip->enable = AU_ALLOC_DEFAULT_ENABLE(1);
+ ip->thr_spec = 1;
+ ip->atype = GOODFIT;
+ ip->init.util.mmmbc = 100;
+ ip->init.util.name_prefix = "eheap_";
+ ip->init.util.alloc_no = ERTS_ALC_A_EHEAP;
+#ifndef SMALL_MEMORY
+ ip->init.util.mmbcs = 512*1024; /* Main carrier size */
+#else
+ ip->init.util.mmbcs = 256*1024; /* Main carrier size */
+#endif
+ ip->init.util.ts = ERTS_ALC_MTA_EHEAP;
+ ip->init.util.rsbcst = 50;
+}
+
+static void
+set_default_binary_alloc_opts(struct au_init *ip)
+{
+ SET_DEFAULT_ALLOC_OPTS(ip);
+ ip->enable = AU_ALLOC_DEFAULT_ENABLE(1);
+ ip->thr_spec = 1;
+ ip->atype = BESTFIT;
+ ip->init.util.mmmbc = 50;
+ ip->init.util.name_prefix = "binary_";
+ ip->init.util.alloc_no = ERTS_ALC_A_BINARY;
+#ifndef SMALL_MEMORY
+ ip->init.util.mmbcs = 128*1024; /* Main carrier size */
+#else
+ ip->init.util.mmbcs = 32*1024; /* Main carrier size */
+#endif
+ ip->init.util.ts = ERTS_ALC_MTA_BINARY;
+}
+
+static void
+set_default_ets_alloc_opts(struct au_init *ip)
+{
+ SET_DEFAULT_ALLOC_OPTS(ip);
+ ip->enable = AU_ALLOC_DEFAULT_ENABLE(1);
+ ip->thr_spec = 1;
+ ip->atype = BESTFIT;
+ ip->init.util.mmmbc = 100;
+ ip->init.util.name_prefix = "ets_";
+ ip->init.util.alloc_no = ERTS_ALC_A_ETS;
+#ifndef SMALL_MEMORY
+ ip->init.util.mmbcs = 128*1024; /* Main carrier size */
+#else
+ ip->init.util.mmbcs = 32*1024; /* Main carrier size */
+#endif
+ ip->init.util.ts = ERTS_ALC_MTA_ETS;
+}
+
+static void
+set_default_driver_alloc_opts(struct au_init *ip)
+{
+ SET_DEFAULT_ALLOC_OPTS(ip);
+ ip->enable = AU_ALLOC_DEFAULT_ENABLE(1);
+ ip->thr_spec = 1;
+ ip->atype = BESTFIT;
+ ip->init.util.name_prefix = "driver_";
+ ip->init.util.alloc_no = ERTS_ALC_A_DRIVER;
+#ifndef SMALL_MEMORY
+ ip->init.util.mmbcs = 128*1024; /* Main carrier size */
+#else
+ ip->init.util.mmbcs = 32*1024; /* Main carrier size */
+#endif
+ ip->init.util.ts = ERTS_ALC_MTA_DRIVER;
+}
+
+#ifdef ERTS_SMP
+
+static void
+adjust_tpref(struct au_init *ip, int no_sched)
+{
+ if (ip->thr_spec) {
+ Uint allocs;
+ if (ip->thr_spec < 0) {/* User specified amount */
+ allocs = abs(ip->thr_spec);
+ if (allocs > no_sched)
+ allocs = no_sched;
+ }
+ else if (no_sched > ERTS_ALC_DEFAULT_MAX_THR_PREF)
+ allocs = ERTS_ALC_DEFAULT_MAX_THR_PREF;
+ else
+ allocs = no_sched;
+ if (allocs <= 1)
+ ip->thr_spec = 0;
+ else {
+ ip->thr_spec = (int) allocs;
+ ip->thr_spec *= -1; /* thread preferred */
+
+ /* If default ... */
+
+ /* ... shrink main multi-block carrier size */
+ if (ip->default_.mmbcs)
+ ip->init.util.mmbcs /= ERTS_MIN(4, allocs);
+ /* ... shrink largest multi-block carrier size */
+ if (ip->default_.lmbcs)
+ ip->init.util.lmbcs /= ERTS_MIN(2, allocs);
+ /* ... shrink smallest multi-block carrier size */
+ if (ip->default_.smbcs)
+ ip->init.util.smbcs /= ERTS_MIN(4, allocs);
+	    /* ... and, with more than two allocators, shrink
+	       max mseg multi-block carriers */
+ if (ip->default_.mmmbc && allocs > 2) {
+ ip->init.util.mmmbc /= ERTS_MIN(4, allocs - 1);
+ if (ip->init.util.mmmbc < 3)
+ ip->init.util.mmmbc = 3;
+ }
+ }
+ }
+}
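+
+/*
+ * Example (sketch): with 8 schedulers and all defaults left in place, an
+ * allocator with thr_spec enabled gets min(8, 16) = 8 thread preferred
+ * instances, and its default main multi-block carrier size is divided by
+ * ERTS_MIN(4, 8), e.g. 128 KB becomes 32 KB per instance.
+ */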
+
+#endif
+
+static void handle_args(int *, char **, erts_alc_hndl_args_init_t *);
+
+static void
+set_au_allocator(ErtsAlcType_t alctr_n, struct au_init *init);
+
+static void
+start_au_allocator(ErtsAlcType_t alctr_n,
+ struct au_init *init,
+ ErtsAllocatorState_t *state);
+
+static void
+refuse_af_strategy(struct au_init *init)
+{
+ if (init->atype == AFIT)
+ init->atype = GOODFIT;
+}
+
+static void init_thr_ix(int static_ixs);
+
+void
+erts_alloc_init(int *argc, char **argv, ErtsAllocInitOpts *eaiop)
+{
+ Uint extra_block_size = 0;
+ int i;
+ erts_alc_hndl_args_init_t init = {
+ 0,
+#if HAVE_ERTS_MSEG
+ ERTS_MSEG_INIT_DEFAULT_INITIALIZER,
+#endif
+ ERTS_DEFAULT_TRIM_THRESHOLD,
+ ERTS_DEFAULT_TOP_PAD,
+ ERTS_DEFAULT_ALCU_INIT
+ };
+
+ erts_sys_alloc_init();
+ init_thr_ix(erts_no_schedulers);
+ erts_init_utils_mem();
+
+ set_default_sl_alloc_opts(&init.sl_alloc);
+ set_default_std_alloc_opts(&init.std_alloc);
+ set_default_ll_alloc_opts(&init.ll_alloc);
+ set_default_temp_alloc_opts(&init.temp_alloc);
+ set_default_eheap_alloc_opts(&init.eheap_alloc);
+ set_default_binary_alloc_opts(&init.binary_alloc);
+ set_default_ets_alloc_opts(&init.ets_alloc);
+ set_default_driver_alloc_opts(&init.driver_alloc);
+
+ if (argc && argv)
+ handle_args(argc, argv, &init);
+
+ if (erts_no_schedulers <= 1) {
+ init.sl_alloc.thr_spec = 0;
+ init.std_alloc.thr_spec = 0;
+ init.ll_alloc.thr_spec = 0;
+ init.eheap_alloc.thr_spec = 0;
+ init.binary_alloc.thr_spec = 0;
+ init.ets_alloc.thr_spec = 0;
+ init.driver_alloc.thr_spec = 0;
+ }
+
+ if (init.erts_alloc_config) {
+ /* Adjust flags that erts_alloc_config won't like */
+ init.temp_alloc.thr_spec = 0;
+ init.sl_alloc.thr_spec = 0;
+ init.std_alloc.thr_spec = 0;
+ init.ll_alloc.thr_spec = 0;
+ init.eheap_alloc.thr_spec = 0;
+ init.binary_alloc.thr_spec = 0;
+ init.ets_alloc.thr_spec = 0;
+ init.driver_alloc.thr_spec = 0;
+ }
+
+#ifdef ERTS_SMP
+ /* Only temp_alloc can use thread specific interface */
+ if (init.temp_alloc.thr_spec)
+ init.temp_alloc.thr_spec = erts_no_schedulers;
+
+ /* Others must use thread preferred interface */
+ adjust_tpref(&init.sl_alloc, erts_no_schedulers);
+ adjust_tpref(&init.std_alloc, erts_no_schedulers);
+ adjust_tpref(&init.ll_alloc, erts_no_schedulers);
+ adjust_tpref(&init.eheap_alloc, erts_no_schedulers);
+ adjust_tpref(&init.binary_alloc, erts_no_schedulers);
+ adjust_tpref(&init.ets_alloc, erts_no_schedulers);
+ adjust_tpref(&init.driver_alloc, erts_no_schedulers);
+
+#else
+ /* No thread specific if not smp */
+ init.temp_alloc.thr_spec = 0;
+#endif
+
+ /*
+ * The following allocators cannot be run with afit strategy.
+ * Make sure they don't...
+ */
+ refuse_af_strategy(&init.sl_alloc);
+ refuse_af_strategy(&init.std_alloc);
+ refuse_af_strategy(&init.ll_alloc);
+ refuse_af_strategy(&init.eheap_alloc);
+ refuse_af_strategy(&init.binary_alloc);
+ refuse_af_strategy(&init.ets_alloc);
+ refuse_af_strategy(&init.driver_alloc);
+
+#ifdef ERTS_SMP
+ if (!init.temp_alloc.thr_spec)
+ refuse_af_strategy(&init.temp_alloc);
+#endif
+
+ erts_mtrace_pre_init();
+#if HAVE_ERTS_MSEG
+ erts_mseg_init(&init.mseg);
+#endif
+ erts_alcu_init(&init.alloc_util);
+ erts_afalc_init();
+ erts_bfalc_init();
+ erts_gfalc_init();
+
+ for (i = ERTS_ALC_A_MIN; i <= ERTS_ALC_A_MAX; i++) {
+ erts_allctrs[i].alloc = NULL;
+ erts_allctrs[i].realloc = NULL;
+ erts_allctrs[i].free = NULL;
+ erts_allctrs[i].extra = NULL;
+ erts_allctrs_info[i].alloc_util = 0;
+ erts_allctrs_info[i].enabled = 0;
+ erts_allctrs_info[i].thr_spec = 0;
+ erts_allctrs_info[i].extra = NULL;
+ }
+
+#ifdef ERTS_ALC_N_MIN_A_FIXED_SIZE
+#if !defined(PURIFY) && !defined(VALGRIND)
+ erts_allctrs[ERTS_ALC_A_FIXED_SIZE].alloc = erts_fix_alloc;
+ erts_allctrs[ERTS_ALC_A_FIXED_SIZE].realloc = erts_fix_realloc;
+ erts_allctrs[ERTS_ALC_A_FIXED_SIZE].free = erts_fix_free;
+ erts_allctrs_info[ERTS_ALC_A_FIXED_SIZE].enabled = 1;
+#else
+ erts_allctrs[ERTS_ALC_A_FIXED_SIZE].alloc = erts_sys_alloc;
+ erts_allctrs[ERTS_ALC_A_FIXED_SIZE].realloc = erts_sys_realloc;
+ erts_allctrs[ERTS_ALC_A_FIXED_SIZE].free = erts_sys_free;
+ erts_allctrs_info[ERTS_ALC_A_FIXED_SIZE].enabled = 0;
+#endif
+#endif
+
+ erts_allctrs[ERTS_ALC_A_SYSTEM].alloc = erts_sys_alloc;
+ erts_allctrs[ERTS_ALC_A_SYSTEM].realloc = erts_sys_realloc;
+ erts_allctrs[ERTS_ALC_A_SYSTEM].free = erts_sys_free;
+ erts_allctrs_info[ERTS_ALC_A_SYSTEM].enabled = 1;
+
+ set_au_allocator(ERTS_ALC_A_TEMPORARY, &init.temp_alloc);
+ set_au_allocator(ERTS_ALC_A_SHORT_LIVED, &init.sl_alloc);
+ set_au_allocator(ERTS_ALC_A_STANDARD, &init.std_alloc);
+ set_au_allocator(ERTS_ALC_A_LONG_LIVED, &init.ll_alloc);
+ set_au_allocator(ERTS_ALC_A_EHEAP, &init.eheap_alloc);
+ set_au_allocator(ERTS_ALC_A_BINARY, &init.binary_alloc);
+ set_au_allocator(ERTS_ALC_A_ETS, &init.ets_alloc);
+ set_au_allocator(ERTS_ALC_A_DRIVER, &init.driver_alloc);
+
+ for (i = ERTS_ALC_A_MIN; i <= ERTS_ALC_A_MAX; i++) {
+ if (!erts_allctrs[i].alloc)
+ erl_exit(ERTS_ABORT_EXIT,
+ "Missing alloc function for %s\n", ERTS_ALC_A2AD(i));
+ if (!erts_allctrs[i].realloc)
+ erl_exit(ERTS_ABORT_EXIT,
+ "Missing realloc function for %s\n", ERTS_ALC_A2AD(i));
+ if (!erts_allctrs[i].free)
+ erl_exit(ERTS_ABORT_EXIT,
+ "Missing free function for %s\n", ERTS_ALC_A2AD(i));
+ }
+
+ sys_alloc_opt(SYS_ALLOC_OPT_TRIM_THRESHOLD, init.trim_threshold);
+ sys_alloc_opt(SYS_ALLOC_OPT_TOP_PAD, init.top_pad);
+
+ if (erts_allctrs_info[ERTS_FIX_CORE_ALLOCATOR].enabled)
+ erts_fix_core_allocator_ix = ERTS_FIX_CORE_ALLOCATOR;
+ else
+ erts_fix_core_allocator_ix = ERTS_ALC_A_SYSTEM;
+
+ erts_mtrace_init(init.instr.mtrace, init.instr.nodename);
+
+ start_au_allocator(ERTS_ALC_A_TEMPORARY,
+ &init.temp_alloc,
+ &temp_alloc_state);
+
+ start_au_allocator(ERTS_ALC_A_SHORT_LIVED,
+ &init.sl_alloc,
+ &sl_alloc_state);
+
+ start_au_allocator(ERTS_ALC_A_STANDARD,
+ &init.std_alloc,
+ &std_alloc_state);
+
+ start_au_allocator(ERTS_ALC_A_LONG_LIVED,
+ &init.ll_alloc,
+ &ll_alloc_state);
+
+ start_au_allocator(ERTS_ALC_A_EHEAP,
+ &init.eheap_alloc,
+ &eheap_alloc_state);
+
+ start_au_allocator(ERTS_ALC_A_BINARY,
+ &init.binary_alloc,
+ &binary_alloc_state);
+
+ start_au_allocator(ERTS_ALC_A_ETS,
+ &init.ets_alloc,
+ &ets_alloc_state);
+
+ start_au_allocator(ERTS_ALC_A_DRIVER,
+ &init.driver_alloc,
+ &driver_alloc_state);
+
+ fix_core_allocator = erts_allctrs[erts_fix_core_allocator_ix].alloc;
+ fix_core_extra = erts_allctrs[erts_fix_core_allocator_ix].extra;
+
+ erts_mtrace_install_wrapper_functions();
+ extra_block_size += erts_instr_init(init.instr.stat, init.instr.map);
+
+#ifdef DEBUG
+ extra_block_size += install_debug_functions();
+#endif
+
+#ifdef ERTS_ALC_N_MIN_A_FIXED_SIZE
+
+ erts_init_fix_alloc(extra_block_size, fix_core_alloc);
+
+
+#if !defined(PURIFY) && !defined(VALGRIND)
+ erts_set_fix_size(ERTS_ALC_T_PROC, sizeof(Process));
+ erts_set_fix_size(ERTS_ALC_T_DB_TABLE, sizeof(DbTable));
+ erts_set_fix_size(ERTS_ALC_T_ATOM, sizeof(Atom));
+ erts_set_fix_size(ERTS_ALC_T_EXPORT, sizeof(Export));
+ erts_set_fix_size(ERTS_ALC_T_MODULE, sizeof(Module));
+ erts_set_fix_size(ERTS_ALC_T_REG_PROC, sizeof(RegProc));
+ erts_set_fix_size(ERTS_ALC_T_MONITOR_SH, ERTS_MONITOR_SH_SIZE*sizeof(Uint));
+ erts_set_fix_size(ERTS_ALC_T_NLINK_SH, ERTS_LINK_SH_SIZE*sizeof(Uint));
+ erts_set_fix_size(ERTS_ALC_T_FUN_ENTRY, sizeof(ErlFunEntry));
+#ifdef ERTS_ALC_T_DRV_EV_D_STATE
+ erts_set_fix_size(ERTS_ALC_T_DRV_EV_D_STATE,
+ sizeof(ErtsDrvEventDataState));
+#endif
+#ifdef ERTS_ALC_T_DRV_SEL_D_STATE
+ erts_set_fix_size(ERTS_ALC_T_DRV_SEL_D_STATE,
+ sizeof(ErtsDrvSelectDataState));
+#endif
+#endif
+#endif
+
+}
+
+static void
+set_au_allocator(ErtsAlcType_t alctr_n, struct au_init *init)
+{
+ ErtsAllocatorFunctions_t *af = &erts_allctrs[alctr_n];
+ ErtsAllocatorInfo_t *ai = &erts_allctrs_info[alctr_n];
+ ErtsAllocatorThrSpec_t *tspec = &erts_allctr_thr_spec[alctr_n];
+
+ if (!init->enable) {
+ af->alloc = erts_sys_alloc;
+ af->realloc = erts_sys_realloc;
+ af->free = erts_sys_free;
+ af->extra = NULL;
+ ai->alloc_util = 0;
+ ai->enabled = 0;
+ ai->extra = NULL;
+ return;
+ }
+
+ tspec->enabled = 0;
+ tspec->all_thr_safe = 0;
+ ai->thr_spec = 0;
+#ifdef USE_THREADS
+ if (init->thr_spec) {
+ if (init->thr_spec > 0) {
+ af->alloc = erts_alcu_alloc_thr_spec;
+ if (init->init.util.ramv)
+ af->realloc = erts_alcu_realloc_mv_thr_spec;
+ else
+ af->realloc = erts_alcu_realloc_thr_spec;
+ af->free = erts_alcu_free_thr_spec;
+ }
+ else {
+ af->alloc = erts_alcu_alloc_thr_pref;
+ if (init->init.util.ramv)
+ af->realloc = erts_alcu_realloc_mv_thr_pref;
+ else
+ af->realloc = erts_alcu_realloc_thr_pref;
+ af->free = erts_alcu_free_thr_pref;
+ tspec->all_thr_safe = 1;
+ }
+
+ tspec->enabled = 1;
+ tspec->size = abs(init->thr_spec) + 1;
+
+ ai->thr_spec = tspec->size;
+ }
+ else if (init->init.util.ts) {
+ af->alloc = erts_alcu_alloc_ts;
+ if (init->init.util.ramv)
+ af->realloc = erts_alcu_realloc_mv_ts;
+ else
+ af->realloc = erts_alcu_realloc_ts;
+ af->free = erts_alcu_free_ts;
+ }
+ else
+#endif
+ {
+ af->alloc = erts_alcu_alloc;
+ if (init->init.util.ramv)
+ af->realloc = erts_alcu_realloc_mv;
+ else
+ af->realloc = erts_alcu_realloc;
+ af->free = erts_alcu_free;
+ }
+ af->extra = NULL;
+ ai->alloc_util = 1;
+ ai->enabled = 1;
+}
+
+static void
+start_au_allocator(ErtsAlcType_t alctr_n,
+ struct au_init *init,
+ ErtsAllocatorState_t *state)
+{
+ int i;
+ int size = 1;
+ void *as0;
+ enum allctr_type atype;
+ ErtsAllocatorFunctions_t *af = &erts_allctrs[alctr_n];
+ ErtsAllocatorInfo_t *ai = &erts_allctrs_info[alctr_n];
+ ErtsAllocatorThrSpec_t *tspec = &erts_allctr_thr_spec[alctr_n];
+
+ if (!init->enable)
+ return;
+
+ if (init->thr_spec) {
+ void *states = erts_sys_alloc(0,
+ NULL,
+ ((sizeof(Allctr_t *)
+ * (tspec->size + 1))
+ + (sizeof(ErtsAllocatorState_t)
+ * tspec->size)
+ + ERTS_CACHE_LINE_SIZE - 1));
+ if (!states)
+ erl_exit(ERTS_ABORT_EXIT,
+ "Failed to allocate allocator states for %salloc\n",
+ init->init.util.name_prefix);
+ tspec->allctr = (Allctr_t **) states;
+ states = ((char *) states) + sizeof(Allctr_t *) * (tspec->size + 1);
+ states = ((((Uint) states) & ERTS_CACHE_LINE_MASK)
+ ? (void *) ((((Uint) states) & ~ERTS_CACHE_LINE_MASK)
+ + ERTS_CACHE_LINE_SIZE)
+ : (void *) states);
+ tspec->allctr[0] = init->thr_spec > 0 ? (Allctr_t *) state : (Allctr_t *) NULL;
+ size = tspec->size;
+ for (i = 1; i < size; i++)
+ tspec->allctr[i] = (Allctr_t *)
+ &((ErtsAllocatorState_t *) states)[i-1];
+ }
+
+ for (i = 0; i < size; i++) {
+ void *as;
+ atype = init->atype;
+
+ if (!init->thr_spec)
+ as0 = state;
+ else {
+ as0 = (void *) tspec->allctr[i];
+ if (!as0)
+ continue;
+ if (i == 0) {
+ if (atype == AFIT)
+ atype = GOODFIT;
+ init->init.util.ts = 1;
+ }
+ else {
+ if (init->thr_spec < 0) {
+ init->init.util.ts = 1;
+ init->init.util.tspec = 0;
+ init->init.util.tpref = -1*init->thr_spec;
+ }
+ else {
+ init->init.util.ts = 0;
+ init->init.util.tspec = init->thr_spec + 1;
+ init->init.util.tpref = 0;
+ }
+ }
+ }
+
+ switch (atype) {
+ case GOODFIT:
+ as = (void *) erts_gfalc_start((GFAllctr_t *) as0,
+ &init->init.gf,
+ &init->init.util);
+ break;
+ case BESTFIT:
+ as = (void *) erts_bfalc_start((BFAllctr_t *) as0,
+ &init->init.bf,
+ &init->init.util);
+ break;
+ case AFIT:
+ as = (void *) erts_afalc_start((AFAllctr_t *) as0,
+ &init->init.af,
+ &init->init.util);
+ break;
+ default:
+ as = NULL;
+ ASSERT(0);
+ }
+
+ if (!as)
+ erl_exit(ERTS_ABORT_EXIT,
+ "Failed to start %salloc\n", init->init.util.name_prefix);
+
+ ASSERT(as == (void *) as0);
+ af->extra = as;
+ }
+
+ if (init->thr_spec) {
+ af->extra = tspec;
+ init->init.util.ts = 1;
+ }
+
+ ai->extra = af->extra;
+}
+
+
+static void bad_param(char *param_start, char *param_end)
+{
+ size_t len = param_end - param_start;
+ char param[100];
+ if (len > 99)
+ len = 99;
+ sys_memcpy((void *) param, (void *) param_start, len);
+ param[len] = '\0';
+ erts_fprintf(stderr, "bad \"%s\" parameter\n", param);
+ erts_usage();
+}
+
+static void bad_value(char *param_start, char *param_end, char *value)
+{
+ size_t len = param_end - param_start;
+ char param[100];
+ if (len > 99)
+ len = 99;
+ sys_memcpy((void *) param, (void *) param_start, len);
+ param[len] = '\0';
+ erts_fprintf(stderr, "bad \"%s\" value: %s\n", param, value);
+ erts_usage();
+}
+
+/* get_value() marks consumed arguments as handled by
+   putting NULL into argv */
+static char *
+get_value(char* rest, char** argv, int* ip)
+{
+ char *param = argv[*ip]+1;
+ argv[*ip] = NULL;
+ if (*rest == '\0') {
+ char *next = argv[*ip + 1];
+ if (next[0] == '-'
+ && next[1] == '-'
+ && next[2] == '\0') {
+ bad_value(param, rest, "");
+ }
+ (*ip)++;
+ argv[*ip] = NULL;
+ return next;
+ }
+ return rest;
+}
+
+static ERTS_INLINE int
+has_prefix(const char *prefix, const char *string)
+{
+ int i;
+ for (i = 0; prefix[i]; i++)
+ if (prefix[i] != string[i])
+ return 0;
+ return 1;
+}
+
+static int
+get_bool_value(char *param_end, char** argv, int* ip)
+{
+ char *param = argv[*ip]+1;
+ char *value = get_value(param_end, argv, ip);
+ if (strcmp(value, "true") == 0)
+ return 1;
+ else if (strcmp(value, "false") == 0)
+ return 0;
+ else
+ bad_value(param, param_end, value);
+ return -1;
+}
+
+static Uint
+get_kb_value(char *param_end, char** argv, int* ip)
+{
+ Sint tmp;
+ Uint max = ((~((Uint) 0))/1024) + 1;
+ char *rest;
+ char *param = argv[*ip]+1;
+ char *value = get_value(param_end, argv, ip);
+ errno = 0;
+ tmp = (Sint) strtol(value, &rest, 10);
+ if (errno != 0 || rest == value || tmp < 0 || max < ((Uint) tmp))
+ bad_value(param, param_end, value);
+ if (max == (Uint) tmp)
+ return ~((Uint) 0);
+ else
+ return ((Uint) tmp)*1024;
+}
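+
+/*
+ * Example (sketch): "+MBmmbcs 128" reaches get_kb_value() as the string
+ * "128" and is returned as 128*1024 bytes; a value equal to the largest
+ * accepted amount is returned as ~((Uint) 0), i.e. "unlimited".
+ */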
+
+static Uint
+get_amount_value(char *param_end, char** argv, int* ip)
+{
+ Sint tmp;
+ char *rest;
+ char *param = argv[*ip]+1;
+ char *value = get_value(param_end, argv, ip);
+ errno = 0;
+ tmp = (Sint) strtol(value, &rest, 10);
+ if (errno != 0 || rest == value || tmp < 0)
+ bad_value(param, param_end, value);
+ return (Uint) tmp;
+}
+
+static int
+get_bool_or_positive_amount_value(int *bool, Uint *amount,
+ char *param_end, char** argv, int* ip)
+{
+ char *param = argv[*ip]+1;
+ char *value = get_value(param_end, argv, ip);
+ if (strcmp(value, "true") == 0) {
+ *bool = 1;
+ return 1;
+ }
+ else if (strcmp(value, "false") == 0) {
+ *bool = 0;
+ return 1;
+ }
+ else {
+ Sint tmp;
+ char *rest;
+ errno = 0;
+ tmp = (Sint) strtol(value, &rest, 10);
+ if (errno != 0 || rest == value || tmp <= 0) {
+ bad_value(param, param_end, value);
+ return -1;
+ }
+ *amount = (Uint) tmp;
+ return 0;
+ }
+}
+
+static void
+handle_au_arg(struct au_init *auip,
+ char* sub_param,
+ char** argv,
+ int* ip)
+{
+ char *param = argv[*ip]+1;
+
+ switch (sub_param[0]) {
+ case 'a':
+ if(has_prefix("asbcst", sub_param)) {
+ auip->init.util.asbcst = get_kb_value(sub_param + 6, argv, ip);
+ }
+ else if(has_prefix("as", sub_param)) {
+ char *alg = get_value(sub_param + 2, argv, ip);
+ if (strcmp("bf", alg) == 0) {
+ auip->atype = BESTFIT;
+ auip->init.bf.ao = 0;
+ }
+ else if (strcmp("aobf", alg) == 0) {
+ auip->atype = BESTFIT;
+ auip->init.bf.ao = 1;
+ }
+ else if (strcmp("gf", alg) == 0) {
+ auip->atype = GOODFIT;
+ }
+ else if (strcmp("af", alg) == 0) {
+ auip->atype = AFIT;
+ }
+ else {
+ bad_value(param, sub_param + 1, alg);
+ }
+ }
+ else
+ goto bad_switch;
+ break;
+ case 'e':
+ auip->enable = get_bool_value(sub_param+1, argv, ip);
+ break;
+ case 'l':
+ if (has_prefix("lmbcs", sub_param)) {
+ auip->default_.lmbcs = 0;
+ auip->init.util.lmbcs = get_kb_value(sub_param + 5, argv, ip);
+ }
+ else
+ goto bad_switch;
+ break;
+ case 'm':
+ if (has_prefix("mbcgs", sub_param)) {
+ auip->init.util.mbcgs = get_amount_value(sub_param + 5, argv, ip);
+
+ }
+ else if (has_prefix("mbsd", sub_param)) {
+ auip->init.gf.mbsd = get_amount_value(sub_param + 4, argv, ip);
+ if (auip->init.gf.mbsd < 1)
+ auip->init.gf.mbsd = 1;
+ }
+ else if (has_prefix("mmbcs", sub_param)) {
+ auip->default_.mmbcs = 0;
+ auip->init.util.mmbcs = get_kb_value(sub_param + 5, argv, ip);
+ }
+ else if (has_prefix("mmmbc", sub_param)) {
+ auip->default_.mmmbc = 0;
+ auip->init.util.mmmbc = get_amount_value(sub_param + 5, argv, ip);
+ }
+ else if (has_prefix("mmsbc", sub_param)) {
+ auip->init.util.mmsbc = get_amount_value(sub_param + 5, argv, ip);
+ }
+ else
+ goto bad_switch;
+ break;
+ case 'r':
+ if(has_prefix("rsbcmt", sub_param)) {
+ auip->init.util.rsbcmt = get_amount_value(sub_param + 6, argv, ip);
+ if (auip->init.util.rsbcmt > 100)
+ auip->init.util.rsbcmt = 100;
+ }
+ else if(has_prefix("rsbcst", sub_param)) {
+ auip->init.util.rsbcst = get_amount_value(sub_param + 6, argv, ip);
+ if (auip->init.util.rsbcst > 100)
+ auip->init.util.rsbcst = 100;
+ }
+ else if (has_prefix("rmbcmt", sub_param)) {
+ auip->init.util.rmbcmt = get_amount_value(sub_param + 6, argv, ip);
+ if (auip->init.util.rmbcmt > 100)
+ auip->init.util.rmbcmt = 100;
+ }
+ else if (has_prefix("ramv", sub_param)) {
+ auip->init.util.ramv = get_bool_value(sub_param + 4, argv, ip);
+ }
+ else
+ goto bad_switch;
+ break;
+ case 's':
+ if(has_prefix("sbct", sub_param)) {
+ auip->init.util.sbct = get_kb_value(sub_param + 4, argv, ip);
+ }
+ else if (has_prefix("smbcs", sub_param)) {
+ auip->default_.smbcs = 0;
+ auip->init.util.smbcs = get_kb_value(sub_param + 5, argv, ip);
+ }
+ else
+ goto bad_switch;
+ break;
+ case 't': {
+ Uint no;
+ int enable;
+	int res = get_bool_or_positive_amount_value(&enable,
+ &no,
+ sub_param+1,
+ argv,
+ ip);
+ if (res > 0)
+ auip->thr_spec = enable ? 1 : 0;
+ else if (res == 0) {
+ int allocs = (int) no;
+ if (allocs < 0)
+ allocs = INT_MIN;
+ else {
+ allocs *= -1;
+ }
+ auip->thr_spec = allocs;
+ }
+ break;
+ }
+ default:
+ bad_switch:
+ bad_param(param, sub_param);
+ }
+}
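+
+/*
+ * Command line sketch (hypothetical invocations, shown only to
+ * illustrate the parsing above): "+MBas aobf" routes through
+ * handle_au_arg() for binary_alloc and selects the address order best
+ * fit strategy, while "+MSe false" disables sl_alloc.
+ */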
+
+static void
+handle_args(int *argc, char **argv, erts_alc_hndl_args_init_t *init)
+{
+ struct au_init *aui[] = {
+ &init->binary_alloc,
+ &init->std_alloc,
+ &init->ets_alloc,
+ &init->eheap_alloc,
+ &init->ll_alloc,
+ &init->driver_alloc,
+ &init->sl_alloc,
+ &init->temp_alloc
+ };
+ int aui_sz = (int) sizeof(aui)/sizeof(aui[0]);
+ char *arg;
+ char *rest;
+ int i, j;
+
+ i = 1;
+
+ ASSERT(argc && argv && init);
+
+ while (i < *argc) {
+ if(argv[i][0] == '-') {
+ char *param = argv[i]+1;
+ switch (argv[i][1]) {
+ case 'M':
+ switch (argv[i][2]) {
+ case 'B':
+ handle_au_arg(&init->binary_alloc, &argv[i][3], argv, &i);
+ break;
+ case 'D':
+ handle_au_arg(&init->std_alloc, &argv[i][3], argv, &i);
+ break;
+ case 'E':
+ handle_au_arg(&init->ets_alloc, &argv[i][3], argv, &i);
+ break;
+ case 'F': /* fix_alloc */
+ if (has_prefix("e", param+2)) {
+ arg = get_value(param+3, argv, &i);
+ if (strcmp("true", arg) != 0)
+ bad_value(param, param+3, arg);
+ }
+ else
+ bad_param(param, param+2);
+ break;
+ case 'H':
+ handle_au_arg(&init->eheap_alloc, &argv[i][3], argv, &i);
+ break;
+ case 'L':
+ handle_au_arg(&init->ll_alloc, &argv[i][3], argv, &i);
+ break;
+ case 'M':
+ if (has_prefix("amcbf", argv[i]+3)) {
+#if HAVE_ERTS_MSEG
+ init->mseg.amcbf =
+#endif
+ get_kb_value(argv[i]+8, argv, &i);
+ }
+ else if (has_prefix("rmcbf", argv[i]+3)) {
+#if HAVE_ERTS_MSEG
+ init->mseg.rmcbf =
+#endif
+ get_amount_value(argv[i]+8, argv, &i);
+ }
+ else if (has_prefix("mcs", argv[i]+3)) {
+#if HAVE_ERTS_MSEG
+ init->mseg.mcs =
+#endif
+ get_amount_value(argv[i]+6, argv, &i);
+ }
+ else if (has_prefix("cci", argv[i]+3)) {
+#if HAVE_ERTS_MSEG
+ init->mseg.cci =
+#endif
+ get_amount_value(argv[i]+6, argv, &i);
+ }
+ else {
+ bad_param(param, param+2);
+ }
+ break;
+ case 'R':
+ handle_au_arg(&init->driver_alloc, &argv[i][3], argv, &i);
+ break;
+ case 'S':
+ handle_au_arg(&init->sl_alloc, &argv[i][3], argv, &i);
+ break;
+ case 'T':
+ handle_au_arg(&init->temp_alloc, &argv[i][3], argv, &i);
+ break;
+ case 'Y': { /* sys_alloc */
+ if (has_prefix("tt", param+2)) {
+ /* set trim threshold */
+ arg = get_value(param+4, argv, &i);
+ errno = 0;
+ init->trim_threshold = (int) strtol(arg, &rest, 10);
+ if (errno != 0
+ || rest == arg
+ || init->trim_threshold < 0
+ || (INT_MAX/1024) < init->trim_threshold) {
+ bad_value(param, param+4, arg);
+ }
+ VERBOSE(DEBUG_SYSTEM,
+ ("using trim threshold: %d\n",
+ init->trim_threshold));
+ init->trim_threshold *= 1024;
+ }
+ else if (has_prefix("tp", param+2)) {
+ /* set top pad */
+ arg = get_value(param+4, argv, &i);
+ errno = 0;
+ init->top_pad = (int) strtol(arg, &rest, 10);
+ if (errno != 0
+ || rest == arg
+ || init->top_pad < 0
+ || (INT_MAX/1024) < init->top_pad) {
+ bad_value(param, param+4, arg);
+ }
+ VERBOSE(DEBUG_SYSTEM,
+ ("using top pad: %d\n",init->top_pad));
+ init->top_pad *= 1024;
+ }
+ else if (has_prefix("m", param+2)) {
+ /* Has been handled by erlexec */
+ (void) get_value(param+3, argv, &i);
+ }
+ else if (has_prefix("e", param+2)) {
+ arg = get_value(param+3, argv, &i);
+ if (strcmp("true", arg) != 0)
+ bad_value(param, param+3, arg);
+ }
+ else
+ bad_param(param, param+2);
+ break;
+ }
+ case 'e':
+ switch (argv[i][3]) {
+ case 'a': {
+ int a;
+ arg = get_value(argv[i]+4, argv, &i);
+ if (strcmp("min", arg) == 0) {
+ for (a = 0; a < aui_sz; a++)
+ aui[a]->enable = 0;
+ }
+ else if (strcmp("max", arg) == 0) {
+ for (a = 0; a < aui_sz; a++)
+ aui[a]->enable = 1;
+ }
+ else if (strcmp("config", arg) == 0) {
+ init->erts_alloc_config = 1;
+ }
+ else if (strcmp("r9c", arg) == 0
+ || strcmp("r10b", arg) == 0
+ || strcmp("r11b", arg) == 0) {
+ set_default_sl_alloc_opts(&init->sl_alloc);
+ set_default_std_alloc_opts(&init->std_alloc);
+ set_default_ll_alloc_opts(&init->ll_alloc);
+ set_default_temp_alloc_opts(&init->temp_alloc);
+ set_default_eheap_alloc_opts(&init->eheap_alloc);
+ set_default_binary_alloc_opts(&init->binary_alloc);
+ set_default_ets_alloc_opts(&init->ets_alloc);
+ set_default_driver_alloc_opts(&init->driver_alloc);
+
+ init->driver_alloc.enable = 0;
+ if (strcmp("r9c", arg) == 0) {
+ init->sl_alloc.enable = 0;
+ init->std_alloc.enable = 0;
+ init->binary_alloc.enable = 0;
+ init->ets_alloc.enable = 0;
+ }
+
+ for (a = 0; a < aui_sz; a++) {
+ aui[a]->thr_spec = 0;
+ aui[a]->init.util.ramv = 0;
+ aui[a]->init.util.mmmbc = 10;
+ aui[a]->init.util.lmbcs = 5*1024*1024;
+ }
+ }
+ else {
+ bad_param(param, param+3);
+ }
+ break;
+ }
+ default:
+ bad_param(param, param+1);
+ }
+ break;
+ case 'i':
+ switch (argv[i][3]) {
+ case 's':
+ arg = get_value(argv[i]+4, argv, &i);
+ if (strcmp("true", arg) == 0)
+ init->instr.stat = 1;
+ else if (strcmp("false", arg) == 0)
+ init->instr.stat = 0;
+ else
+ bad_value(param, param+3, arg);
+ break;
+ case 'm':
+ arg = get_value(argv[i]+4, argv, &i);
+ if (strcmp("true", arg) == 0)
+ init->instr.map = 1;
+ else if (strcmp("false", arg) == 0)
+ init->instr.map = 0;
+ else
+ bad_value(param, param+3, arg);
+ break;
+ case 't':
+ init->instr.mtrace = get_value(argv[i]+4, argv, &i);
+ break;
+ default:
+ bad_param(param, param+2);
+ }
+ break;
+ case 'u':
+ if (has_prefix("ycs", argv[i]+3)) {
+ init->alloc_util.ycs
+ = get_kb_value(argv[i]+6, argv, &i);
+ }
+ else if (has_prefix("mmc", argv[i]+3)) {
+ init->alloc_util.mmc
+ = get_amount_value(argv[i]+6, argv, &i);
+ }
+ else {
+ int a;
+ int start = i;
+ char *param = argv[i];
+ char *val = i+1 < *argc ? argv[i+1] : NULL;
+
+ for (a = 0; a < aui_sz; a++) {
+ if (a > 0) {
+ ASSERT(i == start || i == start+1);
+ argv[start] = param;
+ if (i != start)
+ argv[start + 1] = val;
+ i = start;
+ }
+ handle_au_arg(aui[a], &argv[i][3], argv, &i);
+ }
+ }
+ break;
+ default:
+ bad_param(param, param+1);
+ }
+ break;
+ case '-':
+ if (argv[i][2] == '\0') {
+ /* End of system flags reached */
+ if (init->instr.mtrace
+ /* || init->instr.stat
+ || init->instr.map */) {
+ while (i < *argc) {
+ if(strcmp(argv[i], "-sname") == 0
+ || strcmp(argv[i], "-name") == 0) {
+ if (i + 1 < *argc) {
+ init->instr.nodename = argv[i+1];
+ break;
+ }
+ }
+ i++;
+ }
+ }
+ goto args_parsed;
+ }
+ break;
+ default:
+ break;
+ }
+ }
+ i++;
+ }
+
+ args_parsed:
+ /* Handled arguments have been marked with NULL. Slide arguments
+ not handled towards the beginning of argv. */
+ for (i = 0, j = 0; i < *argc; i++) {
+ if (argv[i])
+ argv[j++] = argv[i];
+ }
+ *argc = j;
+
+}
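+
+/*
+ * Worked example (hypothetical command line):
+ *
+ *   erl +MBas aobf +Mea r10b +Muramv true
+ *
+ * "-MBas aobf" is routed to init->binary_alloc via handle_au_arg();
+ * "-Mea r10b" resets every allocator in aui[] to the R9C/R10B/R11B
+ * compatibility defaults; "-Muramv true" is replayed once per allocator
+ * in aui[], which is why the 'u' case above rewinds i (and restores the
+ * argv slots) between iterations. Handled argv entries are marked NULL
+ * by the get_*_value() helpers and compacted by the loop after
+ * args_parsed.
+ */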
+
+static char *type_no_str(ErtsAlcType_t n)
+{
+
+#if ERTS_ALC_N_MIN != 0
+ if (n < ERTS_ALC_N_MIN)
+ return NULL;
+#endif
+ if (n > ERTS_ALC_N_MAX)
+ return NULL;
+ return (char *) ERTS_ALC_N2TD(n);
+}
+
+#define type_str(T) type_no_str(ERTS_ALC_T2N((T)))
+
+erts_tsd_key_t thr_ix_key;
+erts_spinlock_t alloc_thr_ix_lock;
+int last_thr_ix;
+int first_dyn_thr_ix;
+
+static void
+init_thr_ix(int static_ixs)
+{
+ erts_tsd_key_create(&thr_ix_key);
+ erts_spinlock_init(&alloc_thr_ix_lock, "alloc_thr_ix_lock");
+ last_thr_ix = -4711;
+ first_dyn_thr_ix = static_ixs+1;
+}
+
+int
+erts_alc_get_thr_ix(void)
+{
+ int ix = (int)(long) erts_tsd_get(thr_ix_key);
+ if (ix == 0) {
+ erts_spin_lock(&alloc_thr_ix_lock);
+ last_thr_ix++;
+ if (last_thr_ix < 0)
+ last_thr_ix = first_dyn_thr_ix;
+ ix = last_thr_ix;
+ erts_spin_unlock(&alloc_thr_ix_lock);
+ erts_tsd_set(thr_ix_key, (void *)(long) ix);
+ }
+ ASSERT(ix > 0);
+ return ix;
+}
+
+void erts_alloc_reg_scheduler_id(Uint id)
+{
+ int ix = (int) id;
+ ASSERT(0 < ix && ix <= first_dyn_thr_ix);
+ ASSERT(0 == (int) (long) erts_tsd_get(thr_ix_key));
+ erts_tsd_set(thr_ix_key, (void *)(long) ix);
+}
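+
+/*
+ * Resulting thread index layout (assuming N scheduler threads):
+ *
+ *   scheduler k, 1 <= k <= N  -> index k, registered explicitly above
+ *   any other thread          -> first_dyn_thr_ix, first_dyn_thr_ix+1, ...
+ *                                assigned lazily on its first call to
+ *                                erts_alc_get_thr_ix()
+ */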
+
+__decl_noreturn void
+erts_alc_fatal_error(int error, int func, ErtsAlcType_t n, ...)
+{
+ char buf[10];
+ char *t_str;
+ char *allctr_str;
+
+ ASSERT(n >= ERTS_ALC_N_MIN);
+ ASSERT(n <= ERTS_ALC_N_MAX);
+
+
+ if (n < ERTS_ALC_N_MIN || ERTS_ALC_N_MAX < n)
+ allctr_str = "UNKNOWN";
+ else {
+ ErtsAlcType_t a = ERTS_ALC_T2A(ERTS_ALC_N2T(n));
+ if (erts_allctrs_info[a].enabled)
+ allctr_str = (char *) ERTS_ALC_A2AD(a);
+ else
+ allctr_str = (char *) ERTS_ALC_A2AD(ERTS_ALC_A_SYSTEM);
+ }
+
+ t_str = type_no_str(n);
+ if (!t_str) {
+ sprintf(buf, "%d", (int) n);
+ t_str = buf;
+ }
+
+ switch (error) {
+ case ERTS_ALC_E_NOTSUP: {
+ char *op_str;
+ switch (func) {
+ case ERTS_ALC_O_ALLOC: op_str = "alloc"; break;
+ case ERTS_ALC_O_REALLOC: op_str = "realloc"; break;
+ case ERTS_ALC_O_FREE: op_str = "free"; break;
+ default: op_str = "UNKNOWN"; break;
+ }
+ erl_exit(ERTS_ABORT_EXIT,
+ "%s: %s operation not supported (memory type: \"%s\")\n",
+ allctr_str, op_str, t_str);
+ break;
+ }
+ case ERTS_ALC_E_NOMEM: {
+ Uint size;
+ va_list argp;
+ char *op = func == ERTS_ALC_O_REALLOC ? "reallocate" : "allocate";
+
+
+ va_start(argp, n);
+ size = va_arg(argp, Uint);
+ va_end(argp);
+ erl_exit(1,
+ "%s: Cannot %s %lu bytes of memory (of type \"%s\").\n",
+ allctr_str, op, size, t_str);
+ break;
+ }
+ case ERTS_ALC_E_NOALLCTR:
+ erl_exit(ERTS_ABORT_EXIT,
+ "erts_alloc: Unknown allocator type: %d\n",
+ ERTS_ALC_T2A(ERTS_ALC_N2T(n)));
+ break;
+ default:
+ erl_exit(ERTS_ABORT_EXIT, "erts_alloc: Unknown error: %d\n", error);
+ break;
+ }
+}
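+
+/*
+ * For reference, the ERTS_ALC_E_NOMEM branch above produces the classic
+ * out-of-memory crash line, e.g. (size made up):
+ *
+ *   eheap_alloc: Cannot allocate 1048576 bytes of memory (of type "heap").
+ */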
+
+__decl_noreturn void
+erts_alloc_enomem(ErtsAlcType_t type, Uint size)
+{
+ erts_alloc_n_enomem(ERTS_ALC_T2N(type), size);
+}
+
+__decl_noreturn void
+erts_alloc_n_enomem(ErtsAlcType_t n, Uint size)
+{
+ erts_alc_fatal_error(ERTS_ALC_E_NOMEM, ERTS_ALC_O_ALLOC, n, size);
+}
+
+__decl_noreturn void
+erts_realloc_enomem(ErtsAlcType_t type, void *ptr, Uint size)
+{
+ erts_realloc_n_enomem(ERTS_ALC_T2N(type), ptr, size);
+}
+
+__decl_noreturn void
+erts_realloc_n_enomem(ErtsAlcType_t n, void *ptr, Uint size)
+{
+ erts_alc_fatal_error(ERTS_ALC_E_NOMEM, ERTS_ALC_O_REALLOC, n, size);
+}
+
+static ERTS_INLINE Uint
+alcu_size(ErtsAlcType_t ai)
+{
+ Uint res = 0;
+
+ ASSERT(erts_allctrs_info[ai].enabled);
+ ASSERT(erts_allctrs_info[ai].alloc_util);
+
+ if (!erts_allctrs_info[ai].thr_spec) {
+ Allctr_t *allctr = erts_allctrs_info[ai].extra;
+ AllctrSize_t asize;
+ erts_alcu_current_size(allctr, &asize);
+ res += asize.blocks;
+ }
+ else {
+ ErtsAllocatorThrSpec_t *tspec = &erts_allctr_thr_spec[ai];
+ int i;
+
+ ASSERT(tspec->all_thr_safe);
+
+ ASSERT(tspec->enabled);
+
+ for (i = tspec->size - 1; i >= 0; i--) {
+ Allctr_t *allctr = tspec->allctr[i];
+ AllctrSize_t asize;
+ if (allctr) {
+ erts_alcu_current_size(allctr, &asize);
+ res += asize.blocks;
+ }
+ }
+ }
+
+ return res;
+}
+
+Eterm
+erts_memory(int *print_to_p, void *print_to_arg, void *proc, Eterm earg)
+{
+#define ERTS_MEM_NEED_ALL_ALCU (!erts_instr_stat && want_tot_or_sys)
+ ErtsFixInfo efi;
+ struct {
+ int total;
+ int processes;
+ int processes_used;
+ int system;
+ int atom;
+ int atom_used;
+ int binary;
+ int code;
+ int ets;
+ int maximum;
+ } want = {0};
+ struct {
+ Uint total;
+ Uint processes;
+ Uint processes_used;
+ Uint system;
+ Uint atom;
+ Uint atom_used;
+ Uint binary;
+ Uint code;
+ Uint ets;
+ Uint maximum;
+ } size = {0};
+ Eterm atoms[sizeof(size)/sizeof(Uint)];
+ Uint *uintps[sizeof(size)/sizeof(Uint)];
+ Eterm euints[sizeof(size)/sizeof(Uint)];
+ int need_atom;
+ int want_tot_or_sys;
+ int length;
+ Eterm res = THE_NON_VALUE;
+ ErtsAlcType_t ai;
+ int only_one_value = 0;
+
+ /* Figure out what's wanted... */
+
+ length = 0;
+ if (is_non_value(earg)) { /* i.e. wants all */
+ want.total = 1;
+ atoms[length] = am_total;
+ uintps[length++] = &size.total;
+
+ want.processes = 1;
+ atoms[length] = am_processes;
+ uintps[length++] = &size.processes;
+
+ want.processes_used = 1;
+ atoms[length] = am_processes_used;
+ uintps[length++] = &size.processes_used;
+
+ want.system = 1;
+ atoms[length] = am_system;
+ uintps[length++] = &size.system;
+
+ want.atom = 1;
+ atoms[length] = am_atom;
+ uintps[length++] = &size.atom;
+
+ want.atom_used = 1;
+ atoms[length] = am_atom_used;
+ uintps[length++] = &size.atom_used;
+
+ want.binary = 1;
+ atoms[length] = am_binary;
+ uintps[length++] = &size.binary;
+
+ want.code = 1;
+ atoms[length] = am_code;
+ uintps[length++] = &size.code;
+
+ want.ets = 1;
+ atoms[length] = am_ets;
+ uintps[length++] = &size.ets;
+
+ want.maximum = erts_instr_stat;
+ if (want.maximum) {
+ atoms[length] = am_maximum;
+ uintps[length++] = &size.maximum;
+ }
+
+ }
+ else {
+ Eterm tmp_heap[2];
+ Eterm wanted_list;
+
+ if (is_nil(earg))
+ return NIL;
+
+ if (is_not_atom(earg))
+ wanted_list = earg;
+ else {
+ wanted_list = CONS(&tmp_heap[0], earg, NIL);
+ only_one_value = 1;
+ }
+
+ while (is_list(wanted_list)) {
+ switch (CAR(list_val(wanted_list))) {
+ case am_total:
+ if (!want.total) {
+ want.total = 1;
+ atoms[length] = am_total;
+ uintps[length++] = &size.total;
+ }
+ break;
+ case am_processes:
+ if (!want.processes) {
+ want.processes = 1;
+ atoms[length] = am_processes;
+ uintps[length++] = &size.processes;
+ }
+ break;
+ case am_processes_used:
+ if (!want.processes_used) {
+ want.processes_used = 1;
+ atoms[length] = am_processes_used;
+ uintps[length++] = &size.processes_used;
+ }
+ break;
+ case am_system:
+ if (!want.system) {
+ want.system = 1;
+ atoms[length] = am_system;
+ uintps[length++] = &size.system;
+ }
+ break;
+ case am_atom:
+ if (!want.atom) {
+ want.atom = 1;
+ atoms[length] = am_atom;
+ uintps[length++] = &size.atom;
+ }
+ break;
+ case am_atom_used:
+ if (!want.atom_used) {
+ want.atom_used = 1;
+ atoms[length] = am_atom_used;
+ uintps[length++] = &size.atom_used;
+ }
+ break;
+ case am_binary:
+ if (!want.binary) {
+ want.binary = 1;
+ atoms[length] = am_binary;
+ uintps[length++] = &size.binary;
+ }
+ break;
+ case am_code:
+ if (!want.code) {
+ want.code = 1;
+ atoms[length] = am_code;
+ uintps[length++] = &size.code;
+ }
+ break;
+ case am_ets:
+ if (!want.ets) {
+ want.ets = 1;
+ atoms[length] = am_ets;
+ uintps[length++] = &size.ets;
+ }
+ break;
+ case am_maximum:
+ if (erts_instr_stat) {
+ if (!want.maximum) {
+ want.maximum = 1;
+ atoms[length] = am_maximum;
+ uintps[length++] = &size.maximum;
+ }
+ }
+ else
+ return am_badarg;
+ break;
+ default:
+ return am_badarg;
+ }
+ wanted_list = CDR(list_val(wanted_list));
+ }
+ if (is_not_nil(wanted_list))
+ return am_badarg;
+ }
+
+ /* All alloc_util allocators *have* to be enabled */
+
+ for (ai = ERTS_ALC_A_MIN; ai <= ERTS_ALC_A_MAX; ai++) {
+ switch (ai) {
+ case ERTS_ALC_A_SYSTEM:
+ case ERTS_ALC_A_FIXED_SIZE:
+ break;
+ default:
+ if (!erts_allctrs_info[ai].enabled
+ || !erts_allctrs_info[ai].alloc_util) {
+ return am_notsup;
+ }
+ break;
+ }
+ }
+
+ ASSERT(length <= sizeof(atoms)/sizeof(Eterm));
+ ASSERT(length <= sizeof(euints)/sizeof(Eterm));
+ ASSERT(length <= sizeof(uintps)/sizeof(Uint));
+
+
+ if (proc) {
+ ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN
+ == erts_proc_lc_my_proc_locks(proc));
+ /* We'll need locks early in the lock order */
+ erts_smp_proc_unlock(proc, ERTS_PROC_LOCK_MAIN);
+ }
+
+ /* Calculate values needed... */
+
+ want_tot_or_sys = want.total || want.system;
+ need_atom = ERTS_MEM_NEED_ALL_ALCU || want.atom;
+
+ if (ERTS_MEM_NEED_ALL_ALCU) {
+ size.total = 0;
+
+ for (ai = ERTS_ALC_A_MIN; ai <= ERTS_ALC_A_MAX; ai++) {
+ if (erts_allctrs_info[ai].alloc_util) {
+ Uint *save;
+ Uint asz;
+ switch (ai) {
+ case ERTS_ALC_A_TEMPORARY:
+ /*
+ * Often not thread safe and usually never
+ * contains any allocated memory.
+ */
+ continue;
+ case ERTS_ALC_A_EHEAP:
+ save = &size.processes;
+ break;
+ case ERTS_ALC_A_ETS:
+ save = &size.ets;
+ break;
+ case ERTS_ALC_A_BINARY:
+ save = &size.binary;
+ break;
+ default:
+ save = NULL;
+ break;
+ }
+ asz = alcu_size(ai);
+ if (save)
+ *save = asz;
+ size.total += asz;
+ }
+ }
+ }
+
+
+
+ if (want_tot_or_sys || want.processes || want.processes_used) {
+ Uint tmp;
+
+ if (ERTS_MEM_NEED_ALL_ALCU)
+ tmp = size.processes;
+ else
+ tmp = alcu_size(ERTS_ALC_A_EHEAP);
+ tmp += erts_max_processes*sizeof(Process*);
+#ifdef HYBRID
+ tmp += erts_max_processes*sizeof(Process*);
+#endif
+ tmp += erts_bif_timer_memory_size();
+ tmp += erts_tot_link_lh_size();
+
+ size.processes = size.processes_used = tmp;
+
+ erts_fix_info(ERTS_ALC_T_NLINK_SH, &efi);
+ size.processes += efi.total;
+ size.processes_used += efi.used;
+
+ erts_fix_info(ERTS_ALC_T_MONITOR_SH, &efi);
+ size.processes += efi.total;
+ size.processes_used += efi.used;
+
+ erts_fix_info(ERTS_ALC_T_PROC, &efi);
+ size.processes += efi.total;
+ size.processes_used += efi.used;
+
+ erts_fix_info(ERTS_ALC_T_REG_PROC, &efi);
+ size.processes += efi.total;
+ size.processes_used += efi.used;
+
+ }
+
+ if (want.atom || want.atom_used) {
+ Uint reserved_atom_space, atom_space;
+ erts_atom_get_text_space_sizes(&reserved_atom_space, &atom_space);
+ size.atom = size.atom_used = atom_table_sz();
+ erts_fix_info(ERTS_ALC_T_ATOM, &efi);
+
+ if (want.atom) {
+ size.atom += reserved_atom_space;
+ size.atom += efi.total;
+ }
+
+ if (want.atom_used) {
+ size.atom_used += atom_space;
+ size.atom_used += efi.used;
+ }
+ }
+
+ if (!ERTS_MEM_NEED_ALL_ALCU && want.binary)
+ size.binary = alcu_size(ERTS_ALC_A_BINARY);
+
+ if (want.code) {
+ size.code = module_table_sz();
+ erts_fix_info(ERTS_ALC_T_MODULE, &efi);
+ size.code += efi.used;
+ size.code += export_table_sz();
+ erts_fix_info(ERTS_ALC_T_EXPORT, &efi);
+ size.code += efi.used;
+ size.code += erts_fun_table_sz();
+ erts_fix_info(ERTS_ALC_T_FUN_ENTRY, &efi);
+ size.code += efi.used;
+ size.code += allocated_modules*sizeof(Range);
+ size.code += erts_total_code_size;
+ }
+
+ if (want.ets) {
+ if (!ERTS_MEM_NEED_ALL_ALCU)
+ size.ets = alcu_size(ERTS_ALC_A_ETS);
+ size.ets += erts_get_ets_misc_mem_size();
+ }
+
+ if (erts_instr_stat && (want_tot_or_sys || want.maximum)) {
+ if (want_tot_or_sys) {
+ size.total = erts_instr_get_total();
+ size.system = size.total - size.processes;
+ }
+ size.maximum = erts_instr_get_max_total();
+ }
+ else if (want_tot_or_sys) {
+ size.system = size.total - size.processes;
+ }
+
+ if (print_to_p) {
+ int i;
+ int to = *print_to_p;
+ void *arg = print_to_arg;
+
+ /* Print result... */
+ erts_print(to, arg, "=memory\n");
+ for (i = 0; i < length; i++)
+ erts_print(to, arg, "%T: %bpu\n", atoms[i], *uintps[i]);
+ }
+
+ if (proc) {
+ /* Build erlang term result... */
+ Uint *hp;
+ Uint hsz;
+
+ erts_smp_proc_lock(proc, ERTS_PROC_LOCK_MAIN);
+
+ if (only_one_value) {
+ ASSERT(length == 1);
+ hsz = 0;
+ erts_bld_uint(NULL, &hsz, *uintps[0]);
+ hp = hsz ? HAlloc((Process *) proc, hsz) : NULL;
+ res = erts_bld_uint(&hp, NULL, *uintps[0]);
+ }
+ else {
+ Uint **hpp = NULL;
+ Uint *hszp = &hsz;
+ hsz = 0;
+
+ while (1) {
+ int i;
+ for (i = 0; i < length; i++)
+ euints[i] = erts_bld_uint(hpp, hszp, *uintps[i]);
+ res = erts_bld_2tup_list(hpp, hszp, length, atoms, euints);
+ if (hpp)
+ break;
+ hp = HAlloc((Process *) proc, hsz);
+ hpp = &hp;
+ hszp = NULL;
+ }
+ }
+ }
+
+ return res;
+
+#undef ERTS_MEM_NEED_ALL_ALCU
+}
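+
+/*
+ * Shape of the term built above (values are made up):
+ *
+ *   erlang:memory() ->
+ *       [{total,12345678},{processes,2345678},{processes_used,2300000},
+ *        {system,10000000},{atom,345678},{atom_used,333333},
+ *        {binary,456789},{code,1234567},{ets,234567}]
+ *
+ *   erlang:memory(total) -> 12345678      (the only_one_value path)
+ */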
+
+struct aa_values {
+ Uint arity;
+ const char *name;
+ Uint ui[2];
+};
+
+Eterm
+erts_allocated_areas(int *print_to_p, void *print_to_arg, void *proc)
+{
+#define MAX_AA_VALUES \
+ (20 + (ERTS_ALC_N_MAX_A_FIXED_SIZE - ERTS_ALC_N_MIN_A_FIXED_SIZE + 1))
+
+ struct aa_values values[MAX_AA_VALUES];
+ Eterm res = THE_NON_VALUE;
+ int i, length;
+ ErtsFixInfo efi;
+ Uint reserved_atom_space, atom_space;
+
+ if (proc) {
+ ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN
+ == erts_proc_lc_my_proc_locks(proc));
+
+ /* We'll need locks early in the lock order */
+ erts_smp_proc_unlock(proc, ERTS_PROC_LOCK_MAIN);
+ }
+
+ i = 0;
+
+ if (erts_instr_stat) {
+ values[i].arity = 2;
+ values[i].name = "total";
+ values[i].ui[0] = erts_instr_get_total();
+ i++;
+
+ values[i].arity = 2;
+ values[i].name = "maximum";
+ values[i].ui[0] = erts_instr_get_max_total();
+ i++;
+ }
+
+ values[i].arity = 2;
+ values[i].name = "sys_misc";
+ values[i].ui[0] = erts_sys_misc_mem_sz();
+ i++;
+
+ values[i].arity = 2;
+ values[i].name = "static";
+ values[i].ui[0] =
+ erts_max_ports*sizeof(Port) /* Port table */
+ + erts_timer_wheel_memory_size() /* Timer wheel */
+#ifdef SYS_TMP_BUF_SIZE
+ + SYS_TMP_BUF_SIZE /* tmp_buf in sys on vxworks & ose */
+#endif
+ ;
+ i++;
+
+ erts_atom_get_text_space_sizes(&reserved_atom_space, &atom_space);
+
+ values[i].arity = 3;
+ values[i].name = "atom_space";
+ values[i].ui[0] = reserved_atom_space;
+ values[i].ui[1] = atom_space;
+ i++;
+
+ values[i].arity = 2;
+ values[i].name = "atom_table";
+ values[i].ui[0] = atom_table_sz();
+ i++;
+
+ values[i].arity = 2;
+ values[i].name = "module_table";
+ values[i].ui[0] = module_table_sz();
+ i++;
+
+ values[i].arity = 2;
+ values[i].name = "export_table";
+ values[i].ui[0] = export_table_sz();
+ i++;
+
+ values[i].arity = 2;
+ values[i].name = "register_table";
+ values[i].ui[0] = process_reg_sz();
+ i++;
+
+ values[i].arity = 2;
+ values[i].name = "fun_table";
+ values[i].ui[0] = erts_fun_table_sz();
+ i++;
+
+ values[i].arity = 2;
+ values[i].name = "module_refs";
+ values[i].ui[0] = allocated_modules*sizeof(Range);
+ i++;
+
+ values[i].arity = 2;
+ values[i].name = "loaded_code";
+ values[i].ui[0] = erts_total_code_size;
+ i++;
+
+ values[i].arity = 2;
+ values[i].name = "dist_table";
+ values[i].ui[0] = erts_dist_table_size();
+ i++;
+
+ values[i].arity = 2;
+ values[i].name = "node_table";
+ values[i].ui[0] = erts_node_table_size();
+ i++;
+
+ values[i].arity = 2;
+ values[i].name = "bits_bufs_size";
+ values[i].ui[0] = erts_bits_bufs_size();
+ i++;
+
+ values[i].arity = 2;
+ values[i].name = "bif_timer";
+ values[i].ui[0] = erts_bif_timer_memory_size();
+ i++;
+
+ values[i].arity = 2;
+ values[i].name = "link_lh";
+ values[i].ui[0] = erts_tot_link_lh_size();
+ i++;
+
+ {
+ Uint n;
+
+ for (n = ERTS_ALC_N_MIN_A_FIXED_SIZE;
+ n <= ERTS_ALC_N_MAX_A_FIXED_SIZE;
+ n++) {
+ erts_fix_info(ERTS_ALC_N2T(n), &efi);
+
+ values[i].arity = 3;
+ values[i].name = ERTS_ALC_N2TD(n);
+ values[i].ui[0] = efi.total;
+ values[i].ui[1] = efi.used;
+ i++;
+ }
+
+ }
+
+ length = i;
+ ASSERT(length <= MAX_AA_VALUES);
+
+ if (print_to_p) {
+ /* Print result... */
+ int to = *print_to_p;
+ void *arg = print_to_arg;
+
+ erts_print(to, arg, "=allocated_areas\n");
+ for (i = 0; i < length; i++) {
+ switch (values[i].arity) {
+ case 2:
+ erts_print(to, arg, "%s: %bpu\n",
+ values[i].name, values[i].ui[0]);
+ break;
+ case 3:
+ erts_print(to, arg, "%s: %bpu %bpu\n",
+ values[i].name, values[i].ui[0], values[i].ui[1]);
+ break;
+ default:
+ erts_print(to, arg, "ERROR: internal_error\n");
+ ASSERT(0);
+ return am_internal_error;
+ }
+ }
+ }
+
+ if (proc) {
+ /* Build erlang term result... */
+ Eterm tuples[MAX_AA_VALUES];
+ Uint *hp;
+ Uint **hpp;
+ Uint hsz;
+ Uint *hszp;
+
+ erts_smp_proc_lock(proc, ERTS_PROC_LOCK_MAIN);
+
+ hpp = NULL;
+ hsz = 0;
+ hszp = &hsz;
+
+ while (1) {
+ int i;
+ for (i = 0; i < length; i++) {
+ Eterm atom;
+ if (hpp)
+ atom = am_atom_put(values[i].name,
+ (int) strlen(values[i].name));
+ else
+ atom = am_true;
+
+ switch (values[i].arity) {
+ case 2:
+ tuples[i] = erts_bld_tuple(hpp, hszp, 2,
+ atom,
+ erts_bld_uint(hpp, hszp,
+ values[i].ui[0]));
+ break;
+ case 3:
+ tuples[i] = erts_bld_tuple(hpp, hszp, 3,
+ atom,
+ erts_bld_uint(hpp, hszp,
+ values[i].ui[0]),
+ erts_bld_uint(hpp, hszp,
+ values[i].ui[1]));
+ break;
+ default:
+ ASSERT(0);
+ return am_internal_error;
+ }
+ }
+ res = erts_bld_list(hpp, hszp, length, tuples);
+ if (hpp)
+ break;
+ hp = HAlloc((Process *) proc, hsz);
+ hpp = &hp;
+ hszp = NULL;
+ }
+ }
+
+ return res;
+#undef MAX_AA_VALUES
+}
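+
+/*
+ * Example of the printed form (arity 2 entries carry one value, arity 3
+ * entries two; numbers are made up):
+ *
+ *   =allocated_areas
+ *   sys_misc: 21412
+ *   static: 1024000
+ *   atom_space: 98328 72573
+ */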
+
+Eterm
+erts_alloc_util_allocators(void *proc)
+{
+ Eterm res;
+ Uint *hp;
+ Uint sz;
+ int i;
+ /*
+ * Currently all allocators except sys_alloc and fix_alloc are
+ * alloc_util allocators.
+ */
+ sz = ((ERTS_ALC_A_MAX + 1 - ERTS_ALC_A_MIN) - 2)*2;
+ ASSERT(sz > 0);
+ hp = HAlloc((Process *) proc, sz);
+ res = NIL;
+ for (i = ERTS_ALC_A_MAX; i >= ERTS_ALC_A_MIN; i--) {
+ switch (i) {
+ case ERTS_ALC_A_SYSTEM:
+ case ERTS_ALC_A_FIXED_SIZE:
+ break;
+ default: {
+ char *alc_str = (char *) ERTS_ALC_A2AD(i);
+ Eterm alc = am_atom_put(alc_str, sys_strlen(alc_str));
+ res = CONS(hp, alc, res);
+ hp += 2;
+ break;
+ }
+ }
+ }
+ return res;
+}
+
+Eterm
+erts_allocator_info_term(void *proc, Eterm which_alloc, int only_sz)
+{
+#define ERTS_AIT_RET(R) \
+ do { res = (R); goto done; } while (0)
+#define ERTS_AIT_HALLOC(P, S) \
+ do { hp = HAlloc((P), (S)); hp_end = hp + (S); } while (0)
+
+ ErtsAlcType_t i;
+ Uint sz = 0;
+ Uint *hp = NULL;
+ Uint *hp_end = NULL;
+ Eterm res = am_undefined;
+
+ if (is_not_atom(which_alloc))
+ goto done;
+
+ for (i = ERTS_ALC_A_MIN; i <= ERTS_ALC_A_MAX; i++) {
+ if (erts_is_atom_str((char *) ERTS_ALC_A2AD(i), which_alloc)) {
+ if (!erts_allctrs_info[i].enabled)
+ ERTS_AIT_RET(am_false);
+ else {
+ if (erts_allctrs_info[i].alloc_util) {
+ Eterm ires, tmp;
+ Eterm **hpp;
+ Uint *szp;
+ Eterm (*info_func)(Allctr_t *,
+ int,
+ int *,
+ void *,
+ Uint **,
+ Uint *);
+
+ info_func = (only_sz
+ ? erts_alcu_sz_info
+ : erts_alcu_info);
+
+ if (erts_allctrs_info[i].thr_spec) {
+ ErtsAllocatorThrSpec_t *tspec = &erts_allctr_thr_spec[i];
+ int j;
+ int block_system = !tspec->all_thr_safe;
+
+ if (block_system) {
+ erts_smp_proc_unlock(proc, ERTS_PROC_LOCK_MAIN);
+ erts_smp_block_system(0);
+ }
+ ASSERT(tspec->enabled);
+
+ szp = &sz;
+ hpp = NULL;
+
+ while (1) {
+ ires = NIL;
+ for (j = tspec->size - 1; j >= 0; j--) {
+ Allctr_t *allctr = tspec->allctr[j];
+ if (allctr) {
+ tmp = erts_bld_tuple(hpp,
+ szp,
+ 3,
+ erts_bld_atom(hpp,
+ szp,
+ "instance"),
+ make_small((Uint) j),
+ (*info_func)(allctr,
+ hpp != NULL,
+ NULL,
+ NULL,
+ hpp,
+ szp));
+ ires = erts_bld_cons(hpp, szp, tmp, ires);
+ }
+ }
+ if (hpp)
+ break;
+ ERTS_AIT_HALLOC((Process *) proc, sz);
+ hpp = &hp;
+ szp = NULL;
+ }
+
+ if (block_system) {
+ erts_smp_release_system();
+ erts_smp_proc_lock(proc, ERTS_PROC_LOCK_MAIN);
+ }
+ }
+ else {
+ Allctr_t *allctr = erts_allctrs_info[i].extra;
+ szp = &sz;
+ hpp = NULL;
+ while (1) {
+ ires = NIL;
+ tmp = erts_bld_tuple(hpp,
+ szp,
+ 3,
+ erts_bld_atom(hpp,
+ szp,
+ "instance"),
+ make_small((Uint) 0),
+ (*info_func)(allctr,
+ hpp != NULL,
+ NULL,
+ NULL,
+ hpp,
+ szp));
+ ires = erts_bld_cons(hpp, szp, tmp, ires);
+ if (hpp)
+ break;
+ ERTS_AIT_HALLOC((Process *) proc, sz);
+ hpp = &hp;
+ szp = NULL;
+ }
+ }
+ ERTS_AIT_RET(ires);
+ }
+ else {
+ Eterm *szp, **hpp;
+
+ switch (i) {
+ case ERTS_ALC_A_SYSTEM: {
+ SysAllocStat sas;
+ Eterm opts_am;
+ Eterm opts;
+ Eterm as[4];
+ Eterm ts[4];
+ int l;
+
+ if (only_sz)
+ ERTS_AIT_RET(NIL);
+
+ sys_alloc_stat(&sas);
+ opts_am = am_atom_put("options", 7);
+
+ szp = &sz;
+ hpp = NULL;
+
+ restart_sys_alloc:
+ l = 0;
+ as[l] = am_atom_put("e", 1);
+ ts[l++] = am_true;
+#ifdef ELIB_ALLOC_IS_CLIB
+ as[l] = am_atom_put("m", 1);
+ ts[l++] = am_atom_put("elib", 4);
+#else
+ as[l] = am_atom_put("m", 1);
+ ts[l++] = am_atom_put("libc", 4);
+#endif
+ if(sas.trim_threshold >= 0) {
+ as[l] = am_atom_put("tt", 2);
+ ts[l++] = erts_bld_uint(hpp, szp,
+ (Uint) sas.trim_threshold);
+ }
+ if(sas.top_pad >= 0) {
+ as[l] = am_atom_put("tp", 2);
+ ts[l++] = erts_bld_uint(hpp, szp, (Uint) sas.top_pad);
+ }
+
+ opts = erts_bld_2tup_list(hpp, szp, l, as, ts);
+ res = erts_bld_2tup_list(hpp, szp, 1, &opts_am, &opts);
+
+ if (szp) {
+ ERTS_AIT_HALLOC((Process *) proc, sz);
+ szp = NULL;
+ hpp = &hp;
+ goto restart_sys_alloc;
+ }
+ ERTS_AIT_RET(res);
+ }
+ case ERTS_ALC_A_FIXED_SIZE: {
+ ErtsAlcType_t n;
+ Eterm as[2], vs[2];
+
+ if (only_sz)
+ ERTS_AIT_RET(NIL);
+
+ as[0] = am_atom_put("options", 7);
+ as[1] = am_atom_put("pools", 5);
+
+ szp = &sz;
+ hpp = NULL;
+
+ restart_fix_alloc:
+
+ vs[0] = erts_bld_cons(hpp, szp,
+ erts_bld_tuple(hpp, szp, 2,
+ am_atom_put("e",
+ 1),
+ am_true),
+ NIL);
+
+ vs[1] = NIL;
+ for (n = ERTS_ALC_N_MIN_A_FIXED_SIZE;
+ n <= ERTS_ALC_N_MAX_A_FIXED_SIZE;
+ n++) {
+ ErtsFixInfo efi;
+ erts_fix_info(ERTS_ALC_N2T(n), &efi);
+
+ vs[1] = erts_bld_cons(
+ hpp, szp,
+ erts_bld_tuple(
+ hpp, szp, 3,
+ am_atom_put((char *) ERTS_ALC_N2TD(n),
+ strlen(ERTS_ALC_N2TD(n))),
+ erts_bld_uint(hpp, szp, efi.total),
+ erts_bld_uint(hpp, szp, efi.used)),
+ vs[1]);
+
+ }
+
+ res = erts_bld_2tup_list(hpp, szp, 2, as, vs);
+ if (szp) {
+ ERTS_AIT_HALLOC((Process *) proc, sz);
+ szp = NULL;
+ hpp = &hp;
+ goto restart_fix_alloc;
+ }
+ ERTS_AIT_RET(res);
+ }
+ default:
+ ASSERT(0);
+ goto done;
+ }
+ }
+ }
+ }
+ }
+
+ if (ERTS_IS_ATOM_STR("mseg_alloc", which_alloc)) {
+#if HAVE_ERTS_MSEG
+ if (only_sz)
+ ERTS_AIT_RET(NIL);
+ erts_mseg_info(NULL, NULL, 0, NULL, &sz);
+ if (sz)
+ ERTS_AIT_HALLOC((Process *) proc, sz);
+ ERTS_AIT_RET(erts_mseg_info(NULL, NULL, 1, &hp, NULL));
+#else
+ ERTS_AIT_RET(am_false);
+#endif
+
+ }
+ else if (ERTS_IS_ATOM_STR("alloc_util", which_alloc)) {
+ if (only_sz)
+ ERTS_AIT_RET(NIL);
+ erts_alcu_au_info_options(NULL, NULL, NULL, &sz);
+ if (sz)
+ ERTS_AIT_HALLOC((Process *) proc, sz);
+ ERTS_AIT_RET(erts_alcu_au_info_options(NULL, NULL, &hp, NULL));
+ }
+
+ done:
+ if (hp) {
+ ASSERT(hp_end >= hp);
+ HRelease((Process *) proc, hp_end, hp);
+ }
+ return res;
+
+#undef ERTS_AIT_RET
+#undef ERTS_AIT_HALLOC
+}
+
+void
+erts_allocator_info(int to, void *arg)
+{
+ ErtsAlcType_t a;
+
+ ERTS_SMP_LC_ASSERT(erts_smp_is_system_blocked(0)
+ || (ERTS_IS_CRASH_DUMPING
+ && erts_smp_is_system_blocked(ERTS_BS_FLG_ALLOW_GC)));
+
+ for (a = ERTS_ALC_A_MIN; a <= ERTS_ALC_A_MAX; a++) {
+ int ai;
+ for (ai = 0; ai == 0 || ai < erts_allctrs_info[a].thr_spec; ai++) {
+ if (erts_allctrs_info[a].thr_spec) {
+ if (!erts_allctr_thr_spec[a].allctr[ai])
+ continue;
+ erts_print(to, arg, "=allocator:%s[%d]\n",
+ ERTS_ALC_A2AD(a), ai);
+ }
+ else {
+ erts_print(to, arg, "=allocator:%s\n", ERTS_ALC_A2AD(a));
+ }
+ if (!erts_allctrs_info[a].enabled)
+ erts_print(to, arg, "option e: false\n");
+ else {
+ if (erts_allctrs_info[a].alloc_util) {
+ void *as;
+ if (!erts_allctrs_info[a].thr_spec)
+ as = erts_allctrs_info[a].extra;
+ else {
+ ASSERT(erts_allctr_thr_spec[a].enabled);
+ as = erts_allctr_thr_spec[a].allctr[ai];
+ }
+ /* Binary alloc has its own thread safety... */
+ erts_alcu_info(as, 0, &to, arg, NULL, NULL);
+ }
+ else {
+ switch (a) {
+ case ERTS_ALC_A_SYSTEM: {
+ SysAllocStat sas;
+ erts_print(to, arg, "option e: true\n");
+#ifdef ELIB_ALLOC_IS_CLIB
+ erts_print(to, arg, "option m: elib\n");
+#else
+ erts_print(to, arg, "option m: libc\n");
+#endif
+ sys_alloc_stat(&sas);
+ if(sas.trim_threshold >= 0)
+ erts_print(to, arg, "option tt: %d\n", sas.trim_threshold);
+ if(sas.top_pad >= 0)
+ erts_print(to, arg, "option tp: %d\n", sas.top_pad);
+ break;
+ }
+ case ERTS_ALC_A_FIXED_SIZE: {
+ ErtsAlcType_t n;
+ erts_print(to, arg, "option e: true\n");
+
+ for (n = ERTS_ALC_N_MIN_A_FIXED_SIZE;
+ n <= ERTS_ALC_N_MAX_A_FIXED_SIZE;
+ n++) {
+ ErtsFixInfo efi;
+ erts_fix_info(ERTS_ALC_N2T(n), &efi);
+ erts_print(to, arg, "%s: %lu %lu\n",
+ ERTS_ALC_N2TD(n),
+ efi.total,
+ efi.used);
+ }
+ break;
+ }
+ default:
+ ASSERT(0);
+ break;
+ }
+ }
+ }
+ }
+ }
+
+#if HAVE_ERTS_MSEG
+ erts_print(to, arg, "=allocator:mseg_alloc\n");
+ erts_mseg_info(&to, arg, 0, NULL, NULL);
+#endif
+
+ erts_print(to, arg, "=allocator:alloc_util\n");
+ erts_alcu_au_info_options(&to, arg, NULL, NULL);
+
+ erts_print(to, arg, "=allocator:instr\n");
+ erts_print(to, arg, "option m: %s\n",
+ erts_instr_memory_map ? "true" : "false");
+ erts_print(to, arg, "option s: %s\n",
+ erts_instr_stat ? "true" : "false");
+ erts_print(to, arg, "option t: %s\n",
+ erts_mtrace_enabled ? "true" : "false");
+
+}
+
+Eterm
+erts_allocator_options(void *proc)
+{
+#if HAVE_ERTS_MSEG
+ int use_mseg = 0;
+#endif
+ Uint sz, *szp, *hp, **hpp;
+ Eterm res, features, settings;
+ Eterm atoms[ERTS_ALC_A_MAX-ERTS_ALC_A_MIN+5];
+ Uint terms[ERTS_ALC_A_MAX-ERTS_ALC_A_MIN+5];
+ int a, length;
+ SysAllocStat sas;
+ Uint *endp = NULL;
+
+ sys_alloc_stat(&sas);
+
+ /* First find out the heap size needed ... */
+ hpp = NULL;
+ szp = &sz;
+ sz = 0;
+
+ bld_term:
+
+ length = 0;
+ features = NIL;
+ settings = NIL;
+
+ for (a = ERTS_ALC_A_MIN; a <= ERTS_ALC_A_MAX; a++) {
+ Eterm tmp = NIL;
+ atoms[length] = am_atom_put((char *) ERTS_ALC_A2AD(a),
+ strlen(ERTS_ALC_A2AD(a)));
+ if (erts_allctrs_info[a].enabled) {
+ if (erts_allctrs_info[a].alloc_util) {
+ Allctr_t *allctr;
+#if HAVE_ERTS_MSEG
+ use_mseg++;
+#endif
+ if (erts_allctr_thr_spec[a].enabled)
+ allctr = erts_allctr_thr_spec[a].allctr[1];
+ else
+ allctr = erts_allctrs_info[a].extra;
+ tmp = erts_alcu_info_options(allctr, NULL, NULL, hpp, szp);
+ }
+ else {
+ int l = 0;
+ Eterm as[4];
+ Eterm ts[4];
+
+ as[l] = am_atom_put("e", 1);
+ ts[l++] = am_true;
+
+ switch (a) {
+ case ERTS_ALC_A_SYSTEM:
+#ifdef ELIB_ALLOC_IS_CLIB
+ as[l] = am_atom_put("m", 1);
+ ts[l++] = am_atom_put("elib", 4);
+#else
+ as[l] = am_atom_put("m", 1);
+ ts[l++] = am_atom_put("libc", 4);
+#endif
+ if(sas.trim_threshold >= 0) {
+ as[l] = am_atom_put("tt", 2);
+ ts[l++] = erts_bld_uint(hpp, szp,
+ (Uint) sas.trim_threshold);
+ }
+ if(sas.top_pad >= 0) {
+ as[l] = am_atom_put("tp", 2);
+ ts[l++] = erts_bld_uint(hpp, szp, (Uint) sas.top_pad);
+ }
+ break;
+ default:
+ break;
+ }
+
+ tmp = erts_bld_2tup_list(hpp, szp, l, as, ts);
+
+ }
+
+ }
+ else {
+ Eterm atom = am_atom_put("e", 1);
+ Eterm term = am_false;
+ tmp = erts_bld_2tup_list(hpp, szp, 1, &atom, &term);
+ }
+
+ terms[length++] = tmp;
+
+ }
+
+#if HAVE_ERTS_MSEG
+ if (use_mseg) {
+ atoms[length] = am_atom_put("mseg_alloc", 10);
+ terms[length++] = erts_mseg_info_options(NULL, NULL, hpp, szp);
+ }
+#endif
+
+ atoms[length] = am_atom_put("alloc_util", 10);
+ terms[length++] = erts_alcu_au_info_options(NULL, NULL, hpp, szp);
+
+ {
+ Eterm o[3], v[3];
+ o[0] = am_atom_put("m", 1);
+ v[0] = erts_instr_memory_map ? am_true : am_false;
+ o[1] = am_atom_put("s", 1);
+ v[1] = erts_instr_stat ? am_true : am_false;
+ o[2] = am_atom_put("t", 1);
+ v[2] = erts_mtrace_enabled ? am_true : am_false;
+
+ atoms[length] = am_atom_put("instr", 5);
+ terms[length++] = erts_bld_2tup_list(hpp, szp, 3, o, v);
+ }
+
+ settings = erts_bld_2tup_list(hpp, szp, length, atoms, terms);
+
+ length = 0;
+
+ for (a = ERTS_ALC_A_MIN; a <= ERTS_ALC_A_MAX; a++) {
+ if (erts_allctrs_info[a].enabled) {
+ terms[length++] = am_atom_put((char *) ERTS_ALC_A2AD(a),
+ strlen(ERTS_ALC_A2AD(a)));
+ }
+ }
+
+#if HAVE_ERTS_MSEG
+ if (use_mseg)
+ terms[length++] = am_atom_put("mseg_alloc", 10);
+#endif
+
+ features = length ? erts_bld_list(hpp, szp, length, terms) : NIL;
+
+#if defined(ELIB_ALLOC_IS_CLIB)
+ {
+ Eterm version;
+ int i;
+ int ver[5];
+ i = sscanf(ERLANG_VERSION,
+ "%d.%d.%d.%d.%d",
+ &ver[0], &ver[1], &ver[2], &ver[3], &ver[4]);
+
+ version = NIL;
+ for(i--; i >= 0; i--)
+ version = erts_bld_cons(hpp, szp, make_small(ver[i]), version);
+
+ res = erts_bld_tuple(hpp, szp, 4,
+ am_elib_malloc, version, features, settings);
+ }
+#elif defined(__GLIBC__)
+ {
+ Eterm AM_glibc = am_atom_put("glibc", 5);
+ Eterm version;
+
+ version = erts_bld_cons(hpp,
+ szp,
+ make_small(__GLIBC__),
+#ifdef __GLIBC_MINOR__
+ erts_bld_cons(hpp,
+ szp,
+ make_small(__GLIBC_MINOR__),
+ NIL)
+#else
+ NIL
+#endif
+ );
+
+ res = erts_bld_tuple(hpp, szp, 4,
+ AM_glibc, version, features, settings);
+ }
+
+#else /* unknown allocator */
+
+ res = erts_bld_tuple(hpp, szp, 4,
+ am_undefined, NIL, features, settings);
+
+#endif
+
+ if (szp) {
+ /* ... and then build the term */
+ hp = HAlloc((Process *) proc, sz);
+ endp = hp + sz;
+ hpp = &hp;
+ szp = NULL;
+ goto bld_term;
+ }
+
+ ASSERT(endp >= hp);
+ HRelease((Process *) proc, endp, hp);
+
+ return res;
+}
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\
+ *                           Deprecated functions                           *
+ *                                                                          *
+ * These functions are still defined since "non-OTP linked in drivers" may  *
+ * contain (illegal) calls to them.                                         *
+\*                                                                          */
+
+/* --- DO *NOT* USE THESE FUNCTIONS --- */
+
+void *sys_alloc(Uint sz)
+{ return erts_alloc_fnf(ERTS_ALC_T_UNDEF, sz); }
+void *sys_realloc(void *ptr, Uint sz)
+{ return erts_realloc_fnf(ERTS_ALC_T_UNDEF, ptr, sz); }
+void sys_free(void *ptr)
+{ erts_free(ERTS_ALC_T_UNDEF, ptr); }
+void *safe_alloc(Uint sz)
+{ return erts_alloc(ERTS_ALC_T_UNDEF, sz); }
+void *safe_realloc(void *ptr, Uint sz)
+{ return erts_realloc(ERTS_ALC_T_UNDEF, ptr, sz); }
+
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\
+ * NOTE: erts_alc_test() is only supposed to be used for testing.           *
+ *                                                                          *
+ * Keep alloc_SUITE_data/allocator_test.h updated if changes are made       *
+ * to erts_alc_test()                                                       *
+\*                                                                          */
+#define ERTS_ALC_TEST_ABORT \
+ erl_exit(ERTS_ABORT_EXIT, "%s:%d: Internal error\n", __FILE__, __LINE__)
+
+unsigned long erts_alc_test(unsigned long op,
+ unsigned long a1,
+ unsigned long a2,
+ unsigned long a3)
+{
+ switch (op >> 8) {
+ case 0x0: return erts_alcu_test(op, a1, a2);
+ case 0x1: return erts_gfalc_test(op, a1, a2);
+ case 0x2: return erts_bfalc_test(op, a1, a2);
+ case 0x3: return erts_afalc_test(op, a1, a2);
+ case 0x4: return erts_mseg_test(op, a1, a2, a3);
+ case 0xf:
+ switch (op) {
+ case 0xf00:
+#ifdef USE_THREADS
+ if (((Allctr_t *) a1)->thread_safe)
+ return (unsigned long) erts_alcu_alloc_ts(ERTS_ALC_T_UNDEF,
+ (void *) a1,
+ (Uint) a2);
+ else
+#endif
+ return (unsigned long) erts_alcu_alloc(ERTS_ALC_T_UNDEF,
+ (void *) a1,
+ (Uint) a2);
+ case 0xf01:
+#ifdef USE_THREADS
+ if (((Allctr_t *) a1)->thread_safe)
+ return (unsigned long) erts_alcu_realloc_ts(ERTS_ALC_T_UNDEF,
+ (void *) a1,
+ (void *) a2,
+ (Uint) a3);
+ else
+#endif
+ return (unsigned long) erts_alcu_realloc(ERTS_ALC_T_UNDEF,
+ (void *) a1,
+ (void *) a2,
+ (Uint) a3);
+ case 0xf02:
+#ifdef USE_THREADS
+ if (((Allctr_t *) a1)->thread_safe)
+ erts_alcu_free_ts(ERTS_ALC_T_UNDEF, (void *) a1, (void *) a2);
+ else
+#endif
+ erts_alcu_free(ERTS_ALC_T_UNDEF, (void *) a1, (void *) a2);
+ return 0;
+ case 0xf03: {
+ Allctr_t *allctr;
+ struct au_init init;
+
+ SET_DEFAULT_ALLOC_OPTS(&init);
+ init.enable = 1;
+ init.atype = GOODFIT;
+ init.init.util.name_prefix = (char *) a1;
+ init.init.util.ts = a2 ? 1 : 0;
+
+ if ((char **) a3) {
+ char **argv = (char **) a3;
+ int i = 0;
+ while (argv[i]) {
+ if (argv[i][0] == '-' && argv[i][1] == 't')
+ handle_au_arg(&init, &argv[i][2], argv, &i);
+ else
+ return (unsigned long) NULL;
+ i++;
+ }
+ }
+
+ switch (init.atype) {
+ case GOODFIT:
+ allctr = erts_gfalc_start((GFAllctr_t *)
+ erts_alloc(ERTS_ALC_T_UNDEF,
+ sizeof(GFAllctr_t)),
+ &init.init.gf,
+ &init.init.util);
+ break;
+ case BESTFIT:
+ allctr = erts_bfalc_start((BFAllctr_t *)
+ erts_alloc(ERTS_ALC_T_UNDEF,
+ sizeof(BFAllctr_t)),
+ &init.init.bf,
+ &init.init.util);
+ break;
+ case AFIT:
+ allctr = erts_afalc_start((AFAllctr_t *)
+ erts_alloc(ERTS_ALC_T_UNDEF,
+ sizeof(AFAllctr_t)),
+ &init.init.af,
+ &init.init.util);
+ break;
+ default:
+ ASSERT(0);
+ allctr = NULL;
+ break;
+ }
+
+ return (unsigned long) allctr;
+ }
+ case 0xf04:
+ erts_alcu_stop((Allctr_t *) a1);
+ erts_free(ERTS_ALC_T_UNDEF, (void *) a1);
+ break;
+#ifdef USE_THREADS
+ case 0xf05: return (unsigned long) 1;
+ case 0xf06: return (unsigned long) ((Allctr_t *) a1)->thread_safe;
+#ifdef ETHR_NO_FORKSAFETY
+ case 0xf07: return (unsigned long) 0;
+#else
+ case 0xf07: return (unsigned long) ((Allctr_t *) a1)->thread_safe;
+#endif
+ case 0xf08: {
+ ethr_mutex *mtx = erts_alloc(ERTS_ALC_T_UNDEF, sizeof(ethr_mutex));
+ if (ethr_mutex_init(mtx) != 0)
+ ERTS_ALC_TEST_ABORT;
+ return (unsigned long) mtx;
+ }
+ case 0xf09: {
+ ethr_mutex *mtx = (ethr_mutex *) a1;
+ if (ethr_mutex_destroy(mtx) != 0)
+ ERTS_ALC_TEST_ABORT;
+ erts_free(ERTS_ALC_T_UNDEF, (void *) mtx);
+ break;
+ }
+ case 0xf0a:
+ if (ethr_mutex_lock((ethr_mutex *) a1) != 0)
+ ERTS_ALC_TEST_ABORT;
+ break;
+ case 0xf0b:
+ if (ethr_mutex_unlock((ethr_mutex *) a1) != 0)
+ ERTS_ALC_TEST_ABORT;
+ break;
+ case 0xf0c: {
+ ethr_cond *cnd = erts_alloc(ERTS_ALC_T_UNDEF, sizeof(ethr_cond));
+ if (ethr_cond_init(cnd) != 0)
+ ERTS_ALC_TEST_ABORT;
+ return (unsigned long) cnd;
+ }
+ case 0xf0d: {
+ ethr_cond *cnd = (ethr_cond *) a1;
+ if (ethr_cond_destroy(cnd) != 0)
+ ERTS_ALC_TEST_ABORT;
+ erts_free(ERTS_ALC_T_UNDEF, (void *) cnd);
+ break;
+ }
+ case 0xf0e:
+ if (ethr_cond_broadcast((ethr_cond *) a1) != 0)
+ ERTS_ALC_TEST_ABORT;
+ break;
+ case 0xf0f: {
+ int res;
+ do {
+ res = ethr_cond_wait((ethr_cond *) a1, (ethr_mutex *) a2);
+ } while (res == EINTR);
+ if (res != 0)
+ ERTS_ALC_TEST_ABORT;
+ break;
+ }
+ case 0xf10: {
+ ethr_tid *tid = erts_alloc(ERTS_ALC_T_UNDEF, sizeof(ethr_tid));
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ if (erts_lcnt_thr_create(tid,
+ (void * (*)(void *)) a1,
+ (void *) a2,
+ NULL) != 0)
+#else
+ if (ethr_thr_create(tid,
+ (void * (*)(void *)) a1,
+ (void *) a2,
+ NULL) != 0)
+#endif
+ ERTS_ALC_TEST_ABORT;
+ return (unsigned long) tid;
+ }
+ case 0xf11: {
+ ethr_tid *tid = (ethr_tid *) a1;
+ if (ethr_thr_join(*tid, NULL) != 0)
+ ERTS_ALC_TEST_ABORT;
+ erts_free(ERTS_ALC_T_UNDEF, (void *) tid);
+ break;
+ }
+ case 0xf12:
+ ethr_thr_exit((void *) a1);
+ ERTS_ALC_TEST_ABORT;
+ break;
+#endif /* #ifdef USE_THREADS */
+ default:
+ break;
+ }
+ return (unsigned long) 0;
+ default:
+ break;
+ }
+
+ ASSERT(0);
+ return ~((unsigned long) 0);
+}
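+
+/*
+ * The op encoding dispatched above: the high bits (op >> 8) select the
+ * subsystem (0x0 alloc_util, 0x1 goodfit, 0x2 bestfit, 0x3 afit,
+ * 0x4 mseg, 0xf handled locally) and the low byte the operation, e.g.:
+ *
+ *   erts_alc_test(0x101, a1, a2, 0)     -> erts_gfalc_test(0x101, a1, a2)
+ *   erts_alc_test(0xf00, allctr, sz, 0) -> allocate sz bytes via allctr
+ */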
+
+#ifdef DEBUG
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\
+ *                               Debug stuff                                *
+\*                                                                          */
+
+#if 0
+#define PRINT_OPS
+#else
+#undef PRINT_OPS
+#endif
+
+
+#define FENCE_SZ (3*sizeof(Uint))
+
+#ifdef ARCH_64
+#define FENCE_PATTERN 0xABCDEF97ABCDEF97
+#else
+#define FENCE_PATTERN 0xABCDEF97
+#endif
+
+#define TYPE_PATTERN_MASK ERTS_ALC_N_MASK
+#define TYPE_PATTERN_SHIFT 16
+
+#define FIXED_FENCE_PATTERN_MASK \
+ (~((Uint) (TYPE_PATTERN_MASK << TYPE_PATTERN_SHIFT)))
+#define FIXED_FENCE_PATTERN \
+ (FENCE_PATTERN & FIXED_FENCE_PATTERN_MASK)
+
+#define MK_PATTERN(T) \
+ (FIXED_FENCE_PATTERN | (((T) & TYPE_PATTERN_MASK) << TYPE_PATTERN_SHIFT))
+
+#define GET_TYPE_OF_PATTERN(P) \
+ (((P) >> TYPE_PATTERN_SHIFT) & TYPE_PATTERN_MASK)
+
+
+static void *
+set_memory_fence(void *ptr, Uint sz, ErtsAlcType_t n)
+{
+ Uint *ui_ptr;
+ Uint pattern;
+
+ if (!ptr)
+ return NULL;
+
+ ui_ptr = (Uint *) ptr;
+ pattern = MK_PATTERN(n);
+
+ *(ui_ptr++) = sz;
+ *(ui_ptr++) = pattern;
+ memcpy((void *) (((char *) ui_ptr)+sz), (void *) &pattern, sizeof(Uint));
+
+ return (void *) ui_ptr;
+}
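+
+/*
+ * Resulting block layout (FENCE_SZ = 3 machine words around the data):
+ *
+ *   [ size | pre pattern | user data, sz bytes | post pattern ]
+ *     ^ block from the real allocator
+ *                         ^ pointer handed to the caller
+ *
+ * Both patterns are MK_PATTERN(n), which embeds the allocation type, so
+ * check_memory_fence() below can detect overruns as well as blocks freed
+ * or reallocated as the wrong type.
+ */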
+
+static void *
+check_memory_fence(void *ptr, Uint *size, ErtsAlcType_t n, int func)
+{
+ Uint sz;
+ Uint found_type;
+ Uint pre_pattern;
+ Uint post_pattern;
+ Uint *ui_ptr;
+
+ if (!ptr)
+ return NULL;
+
+ ui_ptr = (Uint *) ptr;
+ pre_pattern = *(--ui_ptr);
+ *size = sz = *(--ui_ptr);
+
+ found_type = GET_TYPE_OF_PATTERN(pre_pattern);
+ if (pre_pattern != MK_PATTERN(n)) {
+ if ((FIXED_FENCE_PATTERN_MASK & pre_pattern) != FIXED_FENCE_PATTERN)
+ erl_exit(ERTS_ABORT_EXIT,
+ "ERROR: Fence at beginning of memory block (p=0x%u) "
+ "clobbered.\n",
+ (unsigned long) ptr);
+ }
+
+ memcpy((void *) &post_pattern, (void *) (((char *)ptr)+sz), sizeof(Uint));
+
+ if (post_pattern != MK_PATTERN(n)
+ || pre_pattern != post_pattern) {
+ char fbuf[10];
+ char obuf[10];
+ char *ftype;
+ char *otype;
+ char *op_str;
+
+ if ((FIXED_FENCE_PATTERN_MASK & post_pattern) != FIXED_FENCE_PATTERN)
+ erl_exit(ERTS_ABORT_EXIT,
+ "ERROR: Fence at end of memory block (p=0x%u, sz=%u) "
+ "clobbered.\n",
+ (unsigned long) ptr, (unsigned long) sz);
+ if (found_type != GET_TYPE_OF_PATTERN(post_pattern))
+ erl_exit(ERTS_ABORT_EXIT,
+ "ERROR: Fence around memory block (p=0x%u, sz=%u) "
+ "clobbered.\n",
+ (unsigned long) ptr, (unsigned long) sz);
+
+ ftype = type_no_str(found_type);
+ if (!ftype) {
+ sprintf(fbuf, "%d", (int) found_type);
+ ftype = fbuf;
+ }
+ otype = type_no_str(n);
+ if (!otype) {
+ sprintf(obuf, "%d", (int) n);
+ otype = obuf;
+ }
+
+ switch (func) {
+ case ERTS_ALC_O_ALLOC: op_str = "allocated"; break;
+ case ERTS_ALC_O_REALLOC: op_str = "reallocated"; break;
+ case ERTS_ALC_O_FREE: op_str = "freed"; break;
+ default: op_str = "???"; break;
+ }
+
+ erl_exit(ERTS_ABORT_EXIT,
+ "ERROR: Memory block (p=0x%u, sz=%u) allocated as type \"%s\","
+ " but %s as type \"%s\".\n",
+ (unsigned long) ptr, (unsigned long) sz, ftype, op_str, otype);
+ }
+
+ return (void *) ui_ptr;
+}
+
+static ErtsAllocatorFunctions_t real_allctrs[ERTS_ALC_A_MAX+1];
+
+static void *
+debug_alloc(ErtsAlcType_t n, void *extra, Uint size)
+{
+ ErtsAllocatorFunctions_t *real_af = (ErtsAllocatorFunctions_t *) extra;
+ Uint dsize;
+ void *res;
+
+ ASSERT(ERTS_ALC_N_MIN <= n && n <= ERTS_ALC_N_MAX);
+ dsize = size + FENCE_SZ;
+ res = (*real_af->alloc)(n, real_af->extra, dsize);
+
+ res = set_memory_fence(res, size, n);
+
+#ifdef PRINT_OPS
+ fprintf(stderr, "0x%lx = alloc(%s, %lu)\r\n",
+ (Uint) res, ERTS_ALC_N2TD(n), size);
+#endif
+
+ return res;
+}
+
+
+static void *
+debug_realloc(ErtsAlcType_t n, void *extra, void *ptr, Uint size)
+{
+ ErtsAllocatorFunctions_t *real_af = (ErtsAllocatorFunctions_t *) extra;
+ Uint dsize;
+ Uint old_size;
+ void *dptr;
+ void *res;
+
+ ASSERT(ERTS_ALC_N_MIN <= n && n <= ERTS_ALC_N_MAX);
+
+ dsize = size + FENCE_SZ;
+ dptr = check_memory_fence(ptr, &old_size, n, ERTS_ALC_O_REALLOC);
+
+ if (old_size > size)
+ sys_memset((void *) (((char *) ptr) + size),
+ 0xf,
+ sizeof(Uint) + old_size - size);
+
+ res = (*real_af->realloc)(n, real_af->extra, dptr, dsize);
+
+ res = set_memory_fence(res, size, n);
+
+#ifdef PRINT_OPS
+ fprintf(stderr, "0x%lx = realloc(%s, 0x%lx, %lu)\r\n",
+ (Uint) res, ERTS_ALC_N2TD(n), (Uint) ptr, size);
+#endif
+
+ return res;
+}
+
+static void
+debug_free(ErtsAlcType_t n, void *extra, void *ptr)
+{
+ ErtsAllocatorFunctions_t *real_af = (ErtsAllocatorFunctions_t *) extra;
+ void *dptr;
+ Uint size;
+
+ ASSERT(ERTS_ALC_N_MIN <= n && n <= ERTS_ALC_N_MAX);
+
+ dptr = check_memory_fence(ptr, &size, n, ERTS_ALC_O_FREE);
+
+ sys_memset((void *) dptr, n, size + FENCE_SZ);
+
+ (*real_af->free)(n, real_af->extra, dptr);
+
+#ifdef PRINT_OPS
+ fprintf(stderr, "free(%s, 0x%lx)\r\n", ERTS_ALC_N2TD(n), (Uint) ptr);
+#endif
+
+}
+
+static Uint
+install_debug_functions(void)
+{
+ int i;
+ ASSERT(sizeof(erts_allctrs) == sizeof(real_allctrs));
+
+ sys_memcpy((void *)real_allctrs,(void *)erts_allctrs,sizeof(erts_allctrs));
+
+ for (i = ERTS_ALC_A_MIN; i <= ERTS_ALC_A_MAX; i++) {
+ erts_allctrs[i].alloc = debug_alloc;
+ erts_allctrs[i].realloc = debug_realloc;
+ erts_allctrs[i].free = debug_free;
+ erts_allctrs[i].extra = (void *) &real_allctrs[i];
+ }
+ return FENCE_SZ;
+}
+
+
+
+#endif /* #ifdef DEBUG */
diff --git a/erts/emulator/beam/erl_alloc.h b/erts/emulator/beam/erl_alloc.h
new file mode 100644
index 0000000000..e7a203002f
--- /dev/null
+++ b/erts/emulator/beam/erl_alloc.h
@@ -0,0 +1,564 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2002-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+#ifndef ERL_ALLOC_H__
+#define ERL_ALLOC_H__
+
+#include "erl_alloc_types.h"
+#include "erl_alloc_util.h"
+#ifdef USE_THREADS
+#include "erl_threads.h"
+#endif
+
+#ifdef DEBUG
+# undef ERTS_ALC_WANT_INLINE
+# define ERTS_ALC_WANT_INLINE 0
+#endif
+
+#ifndef ERTS_ALC_WANT_INLINE
+# define ERTS_ALC_WANT_INLINE 1
+#endif
+
+#if ERTS_CAN_INLINE && ERTS_ALC_WANT_INLINE
+# define ERTS_ALC_DO_INLINE 1
+# define ERTS_ALC_INLINE static ERTS_INLINE
+#else
+# define ERTS_ALC_DO_INLINE 0
+# define ERTS_ALC_INLINE
+#endif
+
+#define ERTS_FIX_CORE_ALLOCATOR ERTS_ALC_A_LONG_LIVED
+extern ErtsAlcType_t erts_fix_core_allocator_ix;
+
+typedef struct {
+ Uint total;
+ Uint used;
+} ErtsFixInfo;
+
+void erts_sys_alloc_init(void);
+void *erts_sys_alloc(ErtsAlcType_t, void *, Uint);
+void *erts_sys_realloc(ErtsAlcType_t, void *, void *, Uint);
+void erts_sys_free(ErtsAlcType_t, void *, void *);
+
+
+void erts_init_fix_alloc(Uint, void *(*)(Uint));
+Uint erts_get_fix_size(ErtsAlcType_t);
+void erts_set_fix_size(ErtsAlcType_t, Uint);
+void erts_fix_info(ErtsAlcType_t, ErtsFixInfo *);
+void *erts_fix_alloc(ErtsAlcType_t, void *, Uint);
+void *erts_fix_realloc(ErtsAlcType_t, void *, void*, Uint);
+void erts_fix_free(ErtsAlcType_t, void *, void*);
+
+
+Eterm erts_memory(int *, void *, void *, Eterm);
+Eterm erts_allocated_areas(int *, void *, void *);
+
+Eterm erts_alloc_util_allocators(void *proc);
+void erts_allocator_info(int, void *);
+Eterm erts_allocator_info_term(void *proc, Eterm which_alloc, int only_sz);
+Eterm erts_allocator_options(void *proc);
+
+#define ERTS_ALLOC_INIT_DEF_OPTS_INITER {0}
+typedef struct {
+ int dummy;
+} ErtsAllocInitOpts;
+
+void erts_alloc_init(int *argc, char **argv, ErtsAllocInitOpts *eaiop);
+
+#if defined(GET_ERTS_ALC_TEST) || defined(ERTS_ALC_INTERNAL__)
+/* Only for testing */
+unsigned long erts_alc_test(unsigned long,
+ unsigned long,
+ unsigned long,
+ unsigned long);
+#endif
+
+#define ERTS_ALC_O_ALLOC 0
+#define ERTS_ALC_O_REALLOC 1
+#define ERTS_ALC_O_FREE 2
+
+#define ERTS_ALC_E_NOTSUP 0
+#define ERTS_ALC_E_NOMEM 1
+#define ERTS_ALC_E_NOALLCTR 2
+
+#define ERTS_ALC_MIN_LONG_LIVED_TIME (10*60*1000)
+
+typedef struct {
+ int alloc_util;
+ int enabled;
+ int thr_spec;
+ void *extra;
+} ErtsAllocatorInfo_t;
+
+typedef struct {
+ void * (*alloc) (ErtsAlcType_t, void *, Uint);
+ void * (*realloc) (ErtsAlcType_t, void *, void *, Uint);
+ void (*free) (ErtsAlcType_t, void *, void *);
+ void *extra;
+} ErtsAllocatorFunctions_t;
+
+extern ErtsAllocatorFunctions_t erts_allctrs[ERTS_ALC_A_MAX+1];
+extern ErtsAllocatorInfo_t erts_allctrs_info[ERTS_ALC_A_MAX+1];
+
+typedef struct {
+ int enabled;
+ int all_thr_safe;
+ int size;
+ Allctr_t **allctr;
+} ErtsAllocatorThrSpec_t;
+
+extern ErtsAllocatorThrSpec_t erts_allctr_thr_spec[ERTS_ALC_A_MAX+1];
+
+int erts_alc_get_thr_ix(void);
+void erts_alloc_reg_scheduler_id(Uint id);
+
+__decl_noreturn void erts_alloc_enomem(ErtsAlcType_t,Uint)
+ __noreturn;
+__decl_noreturn void erts_alloc_n_enomem(ErtsAlcType_t,Uint)
+ __noreturn;
+__decl_noreturn void erts_realloc_enomem(ErtsAlcType_t,void*,Uint)
+ __noreturn;
+__decl_noreturn void erts_realloc_n_enomem(ErtsAlcType_t,void*,Uint)
+ __noreturn;
+__decl_noreturn void erts_alc_fatal_error(int,int,ErtsAlcType_t,...)
+ __noreturn;
+
+/* --- DO *NOT* USE THESE DEPRECATED FUNCTIONS --- Instead use: */
+void *safe_alloc(Uint) __deprecated; /* erts_alloc() */
+void *safe_realloc(void *, Uint) __deprecated; /* erts_realloc() */
+void sys_free(void *) __deprecated; /* erts_free() */
+void *sys_alloc(Uint ) __deprecated; /* erts_alloc_fnf() */
+void *sys_realloc(void *, Uint) __deprecated; /* erts_realloc_fnf() */
+
+/*
+ * erts_alloc[_fnf](), erts_realloc[_fnf](), and erts_free() work as
+ * malloc(), realloc(), and free() with the following exceptions:
+ *
+ * * They take an extra type argument as first argument which is
+ * the memory type to operate on. Memory types are generated
+ * (as ERTS_ALC_T_[SOMETHING] defines) from the erl_alloc.types
+ * configuration file.
+ * * The erts_alloc() and erts_realloc() functions terminate the
+ * emulator if memory cannot be obtained. The _fnf (Failure Not
+ * Fatal) suffixed versions return NULL if memory cannot be
+ * obtained.
+ * * They may be static functions so function pointers to "the same"
+ * function may differ.
+ *
+ * IMPORTANT: Memory allocated or reallocated as type X can only
+ * be reallocated or deallocated as type X.
+ */
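+
+/*
+ * A minimal usage sketch (illustrative only; ERTS_ALC_T_TMP is assumed
+ * here to be one of the types generated from erl_alloc.types):
+ *
+ *   char *p = erts_alloc(ERTS_ALC_T_TMP, 128);     -- aborts on OOM
+ *   p = erts_realloc(ERTS_ALC_T_TMP, p, 256);      -- must keep the type
+ *   erts_free(ERTS_ALC_T_TMP, p);
+ *
+ *   void *q = erts_alloc_fnf(ERTS_ALC_T_TMP, 128); -- NULL on OOM
+ *   if (q)
+ *       erts_free(ERTS_ALC_T_TMP, q);
+ */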
+
+#if !ERTS_ALC_DO_INLINE
+
+void *erts_alloc(ErtsAlcType_t type, Uint size);
+void *erts_realloc(ErtsAlcType_t type, void *ptr, Uint size);
+void erts_free(ErtsAlcType_t type, void *ptr);
+void *erts_alloc_fnf(ErtsAlcType_t type, Uint size);
+void *erts_realloc_fnf(ErtsAlcType_t type, void *ptr, Uint size);
+
+#endif /* #if !ERTS_ALC_DO_INLINE */
+
+#if ERTS_ALC_DO_INLINE || defined(ERTS_ALC_INTERNAL__)
+
+ERTS_ALC_INLINE
+void *erts_alloc(ErtsAlcType_t type, Uint size)
+{
+ void *res;
+ res = (*erts_allctrs[ERTS_ALC_T2A(type)].alloc)(
+ ERTS_ALC_T2N(type),
+ erts_allctrs[ERTS_ALC_T2A(type)].extra,
+ size);
+ if (!res)
+ erts_alloc_n_enomem(ERTS_ALC_T2N(type), size);
+ return res;
+}
+
+ERTS_ALC_INLINE
+void *erts_realloc(ErtsAlcType_t type, void *ptr, Uint size)
+{
+ void *res;
+ res = (*erts_allctrs[ERTS_ALC_T2A(type)].realloc)(
+ ERTS_ALC_T2N(type),
+ erts_allctrs[ERTS_ALC_T2A(type)].extra,
+ ptr,
+ size);
+ if (!res)
+ erts_realloc_n_enomem(ERTS_ALC_T2N(type), ptr, size);
+ return res;
+}
+
+ERTS_ALC_INLINE
+void erts_free(ErtsAlcType_t type, void *ptr)
+{
+ (*erts_allctrs[ERTS_ALC_T2A(type)].free)(
+ ERTS_ALC_T2N(type),
+ erts_allctrs[ERTS_ALC_T2A(type)].extra,
+ ptr);
+}
+
+
+ERTS_ALC_INLINE
+void *erts_alloc_fnf(ErtsAlcType_t type, Uint size)
+{
+ return (*erts_allctrs[ERTS_ALC_T2A(type)].alloc)(
+ ERTS_ALC_T2N(type),
+ erts_allctrs[ERTS_ALC_T2A(type)].extra,
+ size);
+}
+
+
+ERTS_ALC_INLINE
+void *erts_realloc_fnf(ErtsAlcType_t type, void *ptr, Uint size)
+{
+ return (*erts_allctrs[ERTS_ALC_T2A(type)].realloc)(
+ ERTS_ALC_T2N(type),
+ erts_allctrs[ERTS_ALC_T2A(type)].extra,
+ ptr,
+ size);
+}
+
+#endif /* #if ERTS_ALC_DO_INLINE || defined(ERTS_ALC_INTERNAL__) */
+
+#ifndef ERTS_CACHE_LINE_SIZE
+/* Assume a cache line size of 64 bytes */
+# define ERTS_CACHE_LINE_SIZE ((Uint) 64)
+# define ERTS_CACHE_LINE_MASK (ERTS_CACHE_LINE_SIZE - 1)
+#endif
+
+#define ERTS_ALC_CACHE_LINE_ALIGN_SIZE(SZ) \
+ (((((SZ) - 1) / ERTS_CACHE_LINE_SIZE) + 1) * ERTS_CACHE_LINE_SIZE)
+
+#define ERTS_QUALLOC_IMPL(NAME, TYPE, PASZ, ALCT) \
+ERTS_QUICK_ALLOC_IMPL(NAME, TYPE, PASZ, ALCT, \
+ (void) 0, (void) 0, (void) 0)
+
+#define ERTS_SMP_QUALLOC_IMPL(NAME, TYPE, PASZ, ALCT) \
+static erts_smp_spinlock_t NAME##_lck; \
+ERTS_QUICK_ALLOC_IMPL(NAME, TYPE, PASZ, ALCT, \
+ erts_smp_spinlock_init(&NAME##_lck, #NAME "_alloc_lock"),\
+ erts_smp_spin_lock(&NAME##_lck), \
+ erts_smp_spin_unlock(&NAME##_lck))
+
+#ifdef ERTS_SMP
+
+#define ERTS_TS_QUALLOC_IMPL(NAME, TYPE, PASZ, ALCT) \
+ERTS_SMP_QUALLOC_IMPL(NAME, TYPE, PASZ, ALCT)
+
+#else /* !ERTS_SMP */
+
+#define ERTS_TS_QUALLOC_IMPL(NAME, TYPE, PASZ, ALCT) \
+static erts_mtx_t NAME##_lck; \
+ERTS_QUICK_ALLOC_IMPL(NAME, TYPE, PASZ, ALCT, \
+ erts_mtx_init(&NAME##_lck, #NAME "_alloc_lock"), \
+ erts_mtx_lock(&NAME##_lck), \
+ erts_mtx_unlock(&NAME##_lck))
+
+
+#endif
+
+#define ERTS_PALLOC_IMPL(NAME, TYPE, PASZ) \
+ERTS_PRE_ALLOC_IMPL(NAME, TYPE, PASZ, (void) 0, (void) 0, (void) 0)
+
+#define ERTS_TS_PALLOC_IMPL(NAME, TYPE, PASZ) \
+static erts_spinlock_t NAME##_lck; \
+ERTS_PRE_ALLOC_IMPL(NAME, TYPE, PASZ, \
+ erts_spinlock_init(&NAME##_lck, #NAME "_alloc_lock"),\
+ erts_spin_lock(&NAME##_lck), \
+ erts_spin_unlock(&NAME##_lck))
+
+#ifdef ERTS_SMP
+
+#define ERTS_SMP_PALLOC_IMPL(NAME, TYPE, PASZ) \
+ ERTS_TS_PALLOC_IMPL(NAME, TYPE, PASZ)
+
+#else /* !ERTS_SMP */
+
+#define ERTS_SMP_PALLOC_IMPL(NAME, TYPE, PASZ) \
+ ERTS_PALLOC_IMPL(NAME, TYPE, PASZ)
+
+#endif
+
+#define ERTS_QUICK_ALLOC_IMPL(NAME, TYPE, PASZ, ALCT, ILCK, LCK, ULCK) \
+ERTS_PRE_ALLOC_IMPL(NAME##_pre, TYPE, PASZ, ILCK, LCK, ULCK) \
+static void \
+init_##NAME##_alloc(void) \
+{ \
+ init_##NAME##_pre_alloc(); \
+} \
+static ERTS_INLINE TYPE * \
+NAME##_alloc(void) \
+{ \
+ TYPE *res = NAME##_pre_alloc(); \
+ if (!res) \
+ res = erts_alloc(ALCT, sizeof(TYPE)); \
+ return res; \
+} \
+static ERTS_INLINE void \
+NAME##_free(TYPE *p) \
+{ \
+ if (!NAME##_pre_free(p)) \
+ erts_free(ALCT, (void *) p); \
+}
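+
+/*
+ * Example expansion (hypothetical user of the macro):
+ *
+ *   ERTS_QUALLOC_IMPL(foo, FooRec, 128, ERTS_ALC_T_UNDEF)
+ *
+ * generates init_foo_alloc(), foo_alloc() and foo_free(). The first 128
+ * FooRec blocks are served from a static array; once that is exhausted,
+ * foo_alloc() falls back to erts_alloc() and foo_free() hands such
+ * blocks back to erts_free().
+ */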
+
+#ifdef ERTS_SMP
+#define ERTS_SCHED_PREF_PALLOC_IMPL(NAME, TYPE, PASZ) \
+ ERTS_SCHED_PREF_PRE_ALLOC_IMPL(NAME, TYPE, PASZ)
+#else
+#define ERTS_SCHED_PREF_PALLOC_IMPL(NAME, TYPE, PASZ) \
+ ERTS_PRE_ALLOC_IMPL(NAME, TYPE, PASZ, (void) 0, (void) 0, (void) 0)
+#endif
+
+#ifdef ERTS_SMP
+#define ERTS_SCHED_PREF_AUX(NAME, TYPE, PASZ) \
+ERTS_SCHED_PREF_PRE_ALLOC_IMPL(NAME##_pre, TYPE, PASZ)
+#else
+#define ERTS_SCHED_PREF_AUX(NAME, TYPE, PASZ) \
+ERTS_PRE_ALLOC_IMPL(NAME##_pre, TYPE, PASZ, (void) 0, (void) 0, (void) 0)
+#endif
+
+#define ERTS_SCHED_PREF_QUICK_ALLOC_IMPL(NAME, TYPE, PASZ, ALCT) \
+ERTS_SCHED_PREF_AUX(NAME, TYPE, PASZ) \
+static void \
+init_##NAME##_alloc(void) \
+{ \
+ init_##NAME##_pre_alloc(); \
+} \
+static ERTS_INLINE TYPE * \
+NAME##_alloc(void) \
+{ \
+ TYPE *res = NAME##_pre_alloc(); \
+ if (!res) \
+ res = erts_alloc(ALCT, sizeof(TYPE)); \
+ return res; \
+} \
+static ERTS_INLINE void \
+NAME##_free(TYPE *p) \
+{ \
+ if (!NAME##_pre_free(p)) \
+ erts_free(ALCT, (void *) p); \
+}
+
+#ifdef DEBUG
+#define ERTS_PRE_ALLOC_SIZE(SZ) 2
+#define ERTS_PRE_ALLOC_CLOBBER(P, T) memset((void *) (P), 0xfd, sizeof(T))
+#else
+#define ERTS_PRE_ALLOC_SIZE(SZ) ((SZ) > 1 ? (SZ) : 1)
+#define ERTS_PRE_ALLOC_CLOBBER(P, T)
+#endif
+
+#define ERTS_PRE_ALLOC_IMPL(NAME, TYPE, PASZ, ILCK, LCK, ULCK) \
+union erts_qa_##NAME##__ { \
+ TYPE type; \
+ union erts_qa_##NAME##__ *next; \
+}; \
+static union erts_qa_##NAME##__ \
+ qa_prealcd_##NAME[ERTS_PRE_ALLOC_SIZE((PASZ))]; \
+static union erts_qa_##NAME##__ *qa_freelist_##NAME; \
+static void \
+init_##NAME##_alloc(void) \
+{ \
+ int i; \
+ qa_freelist_##NAME = &qa_prealcd_##NAME[0]; \
+ for (i = 1; i < ERTS_PRE_ALLOC_SIZE((PASZ)); i++) { \
+ ERTS_PRE_ALLOC_CLOBBER(&qa_prealcd_##NAME[i-1], \
+ union erts_qa_##NAME##__); \
+ qa_prealcd_##NAME[i-1].next = &qa_prealcd_##NAME[i]; \
+ } \
+ ERTS_PRE_ALLOC_CLOBBER(&qa_prealcd_##NAME[ERTS_PRE_ALLOC_SIZE((PASZ))-1],\
+ union erts_qa_##NAME##__); \
+ qa_prealcd_##NAME[ERTS_PRE_ALLOC_SIZE((PASZ))-1].next = NULL; \
+ ILCK; \
+} \
+static ERTS_INLINE TYPE * \
+NAME##_alloc(void) \
+{ \
+ TYPE *res; \
+ LCK; \
+ if (!qa_freelist_##NAME) \
+ res = NULL; \
+ else { \
+ res = &qa_freelist_##NAME->type; \
+ qa_freelist_##NAME = qa_freelist_##NAME->next; \
+ } \
+ ULCK; \
+ return res; \
+} \
+static ERTS_INLINE int \
+NAME##_free(TYPE *p) \
+{ \
+ union erts_qa_##NAME##__ * up; \
+ up = ((union erts_qa_##NAME##__ *) \
+ (((char *) p) \
+ - ((char *) &((union erts_qa_##NAME##__ *) 0)->type))); \
+ if (up > &qa_prealcd_##NAME[ERTS_PRE_ALLOC_SIZE((PASZ))-1] \
+ || up < &qa_prealcd_##NAME[0]) \
+ return 0; \
+ else { \
+ LCK; \
+ ERTS_PRE_ALLOC_CLOBBER(up, union erts_qa_##NAME##__); \
+ up->next = qa_freelist_##NAME; \
+ qa_freelist_##NAME = up; \
+ ULCK; \
+ return 1; \
+ } \
+}
+
+typedef struct {
+ void *start;
+ void *end;
+ int chunks_mem_size;
+} erts_sched_pref_quick_alloc_data_t;
+
+#ifdef DEBUG
+#define ERTS_SPPA_DBG_CHK_IN_CHNK(A, C, P) \
+do { \
+ ASSERT((void *) (C) < (void *) (P)); \
+ ASSERT((void *) (P) \
+ < (void *) (((char *) (C)) + (A)->chunks_mem_size)); \
+} while (0)
+#else
+#define ERTS_SPPA_DBG_CHK_IN_CHNK(A, C, P)
+#endif
+
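+/*
+ * The scheduler preferred pre-allocator below keeps one chunk per
+ * scheduler, each with its own spinlock and freelist, and aligns each
+ * chunk to a cache line boundary (limiting false sharing between
+ * schedulers). NAME##_alloc() uses the chunk of the calling scheduler
+ * (a caller that is not a scheduler gets NULL and is handled by the
+ * erts_alloc() fallback in the quick-alloc wrappers above), while
+ * NAME##_free() locates the owning chunk from the block address alone,
+ * so a block may be freed by any thread.
+ */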
+#define ERTS_SCHED_PREF_PRE_ALLOC_IMPL(NAME, TYPE, PASZ) \
+union erts_qa_##NAME##__ { \
+ TYPE type; \
+ union erts_qa_##NAME##__ *next; \
+}; \
+typedef struct { \
+ erts_smp_spinlock_t lock; \
+ union erts_qa_##NAME##__ *freelist; \
+ union erts_qa_##NAME##__ pre_alloced[1]; \
+} erts_qa_##NAME##_chunk__; \
+static erts_sched_pref_quick_alloc_data_t *qa_data_##NAME##__; \
+static ERTS_INLINE erts_qa_##NAME##_chunk__ * \
+get_##NAME##_chunk_ix(int cix) \
+{ \
+ char *ptr = (char *) qa_data_##NAME##__->start; \
+ ptr += cix*qa_data_##NAME##__->chunks_mem_size; \
+ return (erts_qa_##NAME##_chunk__ *) ptr; \
+} \
+static ERTS_INLINE erts_qa_##NAME##_chunk__ * \
+get_##NAME##_chunk_ptr(void *ptr) \
+{ \
+ int cix; \
+ size_t diff; \
+ if (ptr < qa_data_##NAME##__->start || qa_data_##NAME##__->end <= ptr)\
+ return NULL; \
+ diff = ((char *) ptr) - ((char *) qa_data_##NAME##__->start); \
+ cix = diff / qa_data_##NAME##__->chunks_mem_size; \
+ return get_##NAME##_chunk_ix(cix); \
+} \
+static void \
+init_##NAME##_alloc(void) \
+{ \
+ size_t tot_size; \
+ size_t chunk_mem_size; \
+ char *chunk_start; \
+ int cix; \
+ int no_blocks = ERTS_PRE_ALLOC_SIZE((PASZ)); \
+ int no_blocks_per_chunk = 2*((no_blocks-1)/erts_no_schedulers + 1); \
+ no_blocks = no_blocks_per_chunk * erts_no_schedulers; \
+ chunk_mem_size = sizeof(erts_qa_##NAME##_chunk__); \
+ chunk_mem_size += (sizeof(union erts_qa_##NAME##__) \
+ * (no_blocks_per_chunk - 1)); \
+ chunk_mem_size = ERTS_ALC_CACHE_LINE_ALIGN_SIZE(chunk_mem_size); \
+ tot_size = sizeof(erts_sched_pref_quick_alloc_data_t); \
+ tot_size += ERTS_CACHE_LINE_SIZE - 1; \
+ tot_size += chunk_mem_size*erts_no_schedulers; \
+ qa_data_##NAME##__ = erts_alloc(ERTS_ALC_T_PRE_ALLOC_DATA,tot_size);\
+ chunk_start = (((char *) qa_data_##NAME##__) \
+ + sizeof(erts_sched_pref_quick_alloc_data_t)); \
+ if ((((Uint) chunk_start) & ERTS_CACHE_LINE_MASK) != ((Uint) 0)) \
+ chunk_start = ((char *) \
+ ((((Uint) chunk_start) & ~ERTS_CACHE_LINE_MASK) \
+ + ERTS_CACHE_LINE_SIZE)); \
+ qa_data_##NAME##__->chunks_mem_size = chunk_mem_size; \
+ qa_data_##NAME##__->start = (void *) chunk_start; \
+ qa_data_##NAME##__->end = (chunk_start \
+ + chunk_mem_size*erts_no_schedulers); \
+ for (cix = 0; cix < erts_no_schedulers; cix++) { \
+ int i; \
+ erts_qa_##NAME##_chunk__ *chunk = get_##NAME##_chunk_ix(cix); \
+ erts_smp_spinlock_init(&chunk->lock, #NAME "_alloc_lock"); \
+ chunk->freelist = &chunk->pre_alloced[0]; \
+ for (i = 1; i < no_blocks_per_chunk; i++) { \
+ ERTS_PRE_ALLOC_CLOBBER(&chunk->pre_alloced[i-1], \
+ union erts_qa_##NAME##__); \
+ chunk->pre_alloced[i-1].next = &chunk->pre_alloced[i]; \
+ } \
+ ERTS_PRE_ALLOC_CLOBBER(&chunk->pre_alloced[no_blocks_per_chunk-1],\
+ union erts_qa_##NAME##__); \
+ chunk->pre_alloced[no_blocks_per_chunk-1].next = NULL; \
+ } \
+} \
+static ERTS_INLINE TYPE * \
+NAME##_alloc(void) \
+{ \
+ int cix = ((int) erts_get_scheduler_id()) - 1; \
+ TYPE *res; \
+ if (cix < 0) \
+ res = NULL; \
+ else { \
+ erts_qa_##NAME##_chunk__ *chunk = get_##NAME##_chunk_ix(cix); \
+ erts_smp_spin_lock(&chunk->lock); \
+ if (!chunk->freelist) \
+ res = NULL; \
+ else { \
+ res = &chunk->freelist->type; \
+ chunk->freelist = chunk->freelist->next; \
+ ERTS_SPPA_DBG_CHK_IN_CHNK(qa_data_##NAME##__, chunk, res); \
+ } \
+ erts_smp_spin_unlock(&chunk->lock); \
+ } \
+ return res; \
+} \
+static ERTS_INLINE int \
+NAME##_free(TYPE *p) \
+{ \
+ erts_qa_##NAME##_chunk__ *chunk; \
+ chunk = get_##NAME##_chunk_ptr((void *) p); \
+ if (!chunk) \
+ return 0; \
+ else { \
+ union erts_qa_##NAME##__ *up; \
+ ERTS_SPPA_DBG_CHK_IN_CHNK(qa_data_##NAME##__, chunk, p); \
+ up = ((union erts_qa_##NAME##__ *) \
+ (((char *) p) \
+ - ((char *) &((union erts_qa_##NAME##__ *) 0)->type))); \
+ erts_smp_spin_lock(&chunk->lock); \
+ ERTS_PRE_ALLOC_CLOBBER(up, union erts_qa_##NAME##__); \
+ up->next = chunk->freelist; \
+ chunk->freelist = up; \
+ erts_smp_spin_unlock(&chunk->lock); \
+ return 1; \
+ } \
+}
+
+#ifdef DEBUG
+#define ERTS_ALC_DBG_BLK_SZ(PTR) (*(((Uint *) (PTR)) - 2))
+#endif /* #ifdef DEBUG */
+
+#undef ERTS_ALC_INLINE
+#undef ERTS_ALC_ATTRIBUTES
+
+#endif /* #ifndef ERL_ALLOC_H__ */
+
+
diff --git a/erts/emulator/beam/erl_alloc.types b/erts/emulator/beam/erl_alloc.types
new file mode 100644
index 0000000000..f701f71c7d
--- /dev/null
+++ b/erts/emulator/beam/erl_alloc.types
@@ -0,0 +1,383 @@
+#
+# %CopyrightBegin%
+#
+# Copyright Ericsson AB 2003-2009. All Rights Reserved.
+#
+# The contents of this file are subject to the Erlang Public License,
+# Version 1.1, (the "License"); you may not use this file except in
+# compliance with the License. You should have received a copy of the
+# Erlang Public License along with this software. If not, it can be
+# retrieved online at http://www.erlang.org/.
+#
+# Software distributed under the License is distributed on an "AS IS"
+# basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+# the License for the specific language governing rights and limitations
+# under the License.
+#
+# %CopyrightEnd%
+#
+
+#
+# Rules:
+# * Types, allocators, and classes can be declared.
+# * Types, allocators, classes, and descriptions can only contain
+# alphanumeric characters.
+# * Allocators and classes have to be declared before they are used in
+# type declarations.
+# * Descriptions have only one name space (i.e. class descriptions,
+# allocator descriptions, and type descriptions are all in the same
+# name space).
+# * Types, allocators, classes, and descriptions have different name
+# spaces.
+# * The type, allocator, and class names INVALID are reserved and can
+# not be used.
+# * The descriptions invalid_allocator, invalid_class, and invalid_type
+# are reserved and can not be used.
+# * Declarations can be done conditionally by use of a
+# +if <boolean_variable>
+#
+# +else
+#
+# +endif
+# or a
+# +ifnot <boolean_variable>
+#
+# +else
+#
+# +endif
+# construct (else branches are optional). The boolean variable X is
+# true after a "+enable X" statement or if it has been passed as a
+# command line argument to make_alloc_types. The variable X is false
+# after a "+disable X" statement or if it has never been mentioned.
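+#
+# Example (hypothetical variable and type, for illustration only):
+#
+#   +if my_feature
+#   type MY_DATA STANDARD SYSTEM my_data
+#   +else
+#   type MY_DATA SYSTEM SYSTEM my_data
+#   +endif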
+
+
+# --- Allocator declarations -------------------------------------------------
+#
+# If, and only if, the same thread performs *all* allocations,
+# reallocations and deallocations of all memory types that are handled
+# by a specific allocator (<ALLOCATOR> in type declaration), set
+# <MULTI_THREAD> for this specific allocator to false; otherwise, set
+# it to true.
+#
+# Syntax: allocator <ALLOCATOR> <MULTI_THREAD> <DESCRIPTION>
+#
+# <ALLOCATOR> <MULTI_THREAD> <DESCRIPTION>
+
+allocator SYSTEM true sys_alloc
+
++if smp
+
+allocator TEMPORARY true temp_alloc
+allocator SHORT_LIVED true sl_alloc
+allocator STANDARD true std_alloc
+allocator LONG_LIVED true ll_alloc
+allocator EHEAP true eheap_alloc
+allocator ETS true ets_alloc
+allocator FIXED_SIZE true fix_alloc
+
++else # Non-SMP build
+
+allocator TEMPORARY false temp_alloc
+allocator SHORT_LIVED false sl_alloc
+allocator STANDARD false std_alloc
+allocator LONG_LIVED false ll_alloc
+allocator EHEAP false eheap_alloc
+allocator ETS false ets_alloc
+allocator FIXED_SIZE false fix_alloc
+
++endif
+
+allocator BINARY true binary_alloc
+allocator DRIVER true driver_alloc
+
+
+# --- Class declarations -----------------------------------------------------
+#
+# Syntax: class <CLASS> <DESCRIPTION>
+#
+# <CLASS> <DESCRIPTION>
+
+class PROCESSES process_data
+class ATOM atom_data
+class CODE code_data
+class ETS ets_data
+class BINARIES binary_data
+class SYSTEM system_data
+
+# --- Type declarations ------------------------------------------------------
+#
+# Syntax: type <TYPE> <ALLOCATOR> <CLASS> <DESCRIPTION>
+#
+# Use ERTS_ALC_T_<TYPE> as first parameter to erts_alloc(), erts_alloc_fnf(),
+# erts_realloc(), erts_realloc_fnf() or erts_free() in order to allocate,
+# reallocate or deallocate a memory block of type <TYPE>.
+#
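+# For example, C code managing memory blocks of a (hypothetical) type
+# MY_BUF would do:
+#
+#     ptr = erts_alloc(ERTS_ALC_T_MY_BUF, size);
+#     ptr = erts_realloc(ERTS_ALC_T_MY_BUF, ptr, new_size);
+#     erts_free(ERTS_ALC_T_MY_BUF, ptr);
+#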
+# NOTE: Only use temp_alloc for memory types that *really* are *temporarily*
+# allocated. A good rule of thumb: all memory allocated by temp_alloc
+# should be deallocated before the emulator starts executing Erlang
+# code again.
+#
+# NOTE: When adding or removing a type which uses the FIXED_SIZE allocator,
+# also add or remove initialization of the type in erts_alloc_init()
+# (erl_alloc.c).
+#
+# <TYPE> <ALLOCATOR> <CLASS> <DESCRIPTION>
+
+type PROC FIXED_SIZE PROCESSES proc
+type ATOM FIXED_SIZE ATOM atom_entry
+type EXPORT FIXED_SIZE CODE export_entry
+type MODULE FIXED_SIZE CODE module_entry
+type REG_PROC FIXED_SIZE PROCESSES reg_proc
+type LINK_LH STANDARD PROCESSES link_lh
+type MONITOR_SH FIXED_SIZE PROCESSES monitor_sh
+type MONITOR_LH STANDARD PROCESSES monitor_lh
+type NLINK_SH FIXED_SIZE PROCESSES nlink_sh
+type NLINK_LH STANDARD PROCESSES nlink_lh
+type SUSPEND_MON STANDARD PROCESSES suspend_monitor
+type PEND_SUSPEND SHORT_LIVED PROCESSES pending_suspend
+type PROC_LIST SHORT_LIVED PROCESSES proc_list
+type FUN_ENTRY FIXED_SIZE CODE fun_entry
+type ATOM_TXT LONG_LIVED ATOM atom_text
+type HEAP EHEAP PROCESSES heap
+type OLD_HEAP EHEAP PROCESSES old_heap
+type HEAP_FRAG EHEAP PROCESSES heap_frag
+type TMP_HEAP TEMPORARY PROCESSES tmp_heap
+type MSG_REF SHORT_LIVED PROCESSES msg_ref
+type MSG_ROOTS TEMPORARY PROCESSES msg_roots
+type ROOTSET TEMPORARY PROCESSES root_set
+type LOADER_TMP TEMPORARY CODE loader_tmp
+type BIF_TIMER_TABLE LONG_LIVED SYSTEM bif_timer_table
+type SL_BIF_TIMER SHORT_LIVED PROCESSES bif_timer_sl
+type LL_BIF_TIMER STANDARD PROCESSES bif_timer_ll
+type REG_TABLE STANDARD SYSTEM reg_tab
+type FUN_TABLE STANDARD CODE fun_tab
+type DIST_TABLE STANDARD SYSTEM dist_tab
+type NODE_TABLE STANDARD SYSTEM node_tab
+type ATOM_TABLE LONG_LIVED ATOM atom_tab
+type EXPORT_TABLE LONG_LIVED CODE export_tab
+type MODULE_TABLE LONG_LIVED CODE module_tab
+type TAINT LONG_LIVED CODE taint_list
+type MODULE_REFS STANDARD CODE module_refs
+type NC_TMP TEMPORARY SYSTEM nc_tmp
+type TMP TEMPORARY SYSTEM tmp
+type UNDEF SYSTEM SYSTEM undefined
+type DCACHE STANDARD SYSTEM dcache
+type DCTRL_BUF TEMPORARY SYSTEM dctrl_buf
+type DIST_ENTRY STANDARD SYSTEM dist_entry
+type NODE_ENTRY STANDARD SYSTEM node_entry
+type PROC_TABLE LONG_LIVED PROCESSES proc_tab
+type PORT_TABLE LONG_LIVED SYSTEM port_tab
+type TIMER_WHEEL LONG_LIVED SYSTEM timer_wheel
+type DRV DRIVER SYSTEM drv_internal
+type DRV_BINARY BINARY BINARIES drv_binary
+type DRIVER STANDARD SYSTEM driver
+type NIF DRIVER SYSTEM nif_internal
+type BINARY BINARY BINARIES binary
+type NBIF_TABLE SYSTEM SYSTEM nbif_tab
+type CODE LONG_LIVED CODE code
+type ARG_REG STANDARD PROCESSES arg_reg
+type PROC_DICT STANDARD PROCESSES proc_dict
+type CALLS_BUF STANDARD PROCESSES calls_buf
+type BPD STANDARD SYSTEM bpd
+type PORT_NAME STANDARD SYSTEM port_name
+type LINEBUF STANDARD SYSTEM line_buf
+type IOQ STANDARD SYSTEM io_queue
+type BITS_BUF STANDARD SYSTEM bits_buf
+type TMP_DIST_BUF TEMPORARY SYSTEM tmp_dist_buf
+type ASYNC_Q LONG_LIVED SYSTEM async_queue
+type ESTACK TEMPORARY SYSTEM estack
+type PORT_CALL_BUF TEMPORARY SYSTEM port_call_buf
+type DB_TABLE FIXED_SIZE ETS db_tab
+type DB_FIXATION SHORT_LIVED ETS db_fixation
+type DB_FIX_DEL SHORT_LIVED ETS fixed_del
+type DB_TABLES LONG_LIVED ETS db_tabs
+type DB_NTAB_ENT STANDARD ETS db_named_table_entry
+type DB_TMP TEMPORARY ETS db_tmp
+type DB_MC_STK TEMPORARY ETS db_mc_stack
+type DB_MS_PSDO_PROC LONG_LIVED ETS db_match_pseudo_proc
+type DB_MS_RUN_HEAP SHORT_LIVED ETS db_match_spec_run_heap
+type DB_MS_CMPL_HEAP TEMPORARY ETS db_match_spec_cmpl_heap
+type DB_SEG ETS ETS db_segment
+type DB_SEG_TAB ETS ETS db_segment_tab
+type DB_STK ETS ETS db_stack
+type DB_TRANS_TAB ETS ETS db_trans_tab
+type DB_SEL_LIST ETS ETS db_select_list
+type DB_DMC_ERROR ETS ETS db_dmc_error
+type DB_DMC_ERR_INFO ETS ETS db_dmc_error_info
+type DB_TERM ETS ETS db_term
+type DB_PROC_CLEANUP SHORT_LIVED ETS db_proc_cleanup_state
+type INSTR_INFO LONG_LIVED SYSTEM instr_info
+type LOGGER_DSBUF TEMPORARY SYSTEM logger_dsbuf
+type TMP_DSBUF TEMPORARY SYSTEM tmp_dsbuf
+type INFO_DSBUF SYSTEM SYSTEM info_dsbuf
+# INFO_DSBUF has to use the SYSTEM allocator; otherwise, a deadlock might occur
+type SCHDLR_DATA LONG_LIVED PROCESSES scheduler_data
+type RUNQS LONG_LIVED SYSTEM run_queues
+type DDLL_PROCESS STANDARD SYSTEM ddll_processes
+type DDLL_HANDLE STANDARD SYSTEM ddll_handle
+type DDLL_ERRCODES LONG_LIVED SYSTEM ddll_errcodes
+type DDLL_TMP_BUF TEMPORARY SYSTEM ddll_tmp_buf
+type PORT_TASK SHORT_LIVED SYSTEM port_task
+type PORT_TASKQ SHORT_LIVED SYSTEM port_task_queue
+type MISC_OP_LIST SHORT_LIVED SYSTEM misc_op_list
+type PORT_NAMES SHORT_LIVED SYSTEM port_names
+type PORT_DATA_LOCK STANDARD SYSTEM port_data_lock
+type NODES_MON STANDARD PROCESSES nodes_monitor
+type PROCS_TPROC_EL SHORT_LIVED PROCESSES processes_term_proc_el
+type PROCS_CNKINF SHORT_LIVED PROCESSES processes_chunk_info
+type PROCS_PIDS SHORT_LIVED PROCESSES processes_pids
+type RE_TMP_BUF TEMPORARY SYSTEM re_tmp_buf
+type RE_SUBJECT SHORT_LIVED SYSTEM re_subject
+type RE_HEAP STANDARD SYSTEM re_heap
+type RE_STACK SHORT_LIVED SYSTEM re_stack
+type UNICODE_BUFFER SHORT_LIVED SYSTEM unicode_buffer
+type PRE_ALLOC_DATA LONG_LIVED SYSTEM pre_alloc_data
+type DRV_THR_OPTS DRIVER SYSTEM driver_thread_opts
+type DRV_TID DRIVER SYSTEM driver_tid
+type DRV_MTX DRIVER SYSTEM driver_mutex
+type DRV_CND DRIVER SYSTEM driver_cond
+type DRV_RWLCK DRIVER SYSTEM driver_rwlock
+type DRV_TSD DRIVER SYSTEM driver_tsd
+type PSD STANDARD PROCESSES process_specific_data
+type PRTSD STANDARD SYSTEM port_specific_data
+type CPUDATA LONG_LIVED SYSTEM cpu_data
+type TMP_CPU_IDS SHORT_LIVED SYSTEM tmp_cpu_ids
+type EXT_TERM_DATA SHORT_LIVED PROCESSES external_term_data
+type ZLIB STANDARD SYSTEM zlib
+
++if smp
+type ASYNC SHORT_LIVED SYSTEM async
++else
+# sl_alloc is not thread safe in the non-SMP build; therefore, we use driver_alloc
+type ASYNC DRIVER SYSTEM async
++endif
+
++if smp
+type PORT_LOCK STANDARD SYSTEM port_lock
+type DRIVER_LOCK STANDARD SYSTEM driver_lock
+type XPORTS_LIST SHORT_LIVED SYSTEM extra_port_list
+type PROC_LCK_WTR LONG_LIVED SYSTEM proc_lock_waiter
+type PROC_LCK_QS LONG_LIVED SYSTEM proc_lock_queues
+type RUNQ_BLNS LONG_LIVED SYSTEM run_queue_balancing
++endif
+
+#
+# Types used for special emulators
+#
+
++if threads
+
+type ETHR_INTERNAL SYSTEM SYSTEM ethread_internal
+
++ifnot smp
+
+type ARCALLBACK LONG_LIVED SYSTEM async_ready_callback
+
++endif
+
++endif
+
++if shared_heap
+
+type STACK STANDARD PROCESSES stack
+type ACTIVE_PROCS STANDARD PROCESSES active_procs
+
++endif
+
++if hybrid
+
+type ACTIVE_PROCS STANDARD PROCESSES active_procs
+
+# Used for all memory involved in incremental GC of the message area,
+# that is, the young (x2) and old generations, forwarding pointers, and
+# the blackmap
+type MESSAGE_AREA LONG_LIVED PROCESSES message_area
+
+# Used in MA_STACK (global.h) and INC_STORAGE (erl_nmgc.h)
+type OBJECT_STACK STANDARD PROCESSES object_stack
+
++endif
+
++if smp
+type SL_PTIMER SHORT_LIVED SYSTEM ptimer_sl
+type LL_PTIMER STANDARD SYSTEM ptimer_ll
+type SYS_MSG_Q SHORT_LIVED PROCESSES system_messages_queue
+type FP_EXCEPTION LONG_LIVED SYSTEM fp_exception
++endif
+
++if hipe
+
+# Currently most hipe code uses this type.
+type HIPE SYSTEM SYSTEM hipe_data
+
++endif
+
++if heap_frag_elim_test
+
+type SSB SHORT_LIVED PROCESSES ssb
+
++endif
+
+
+#
+# Types used by system specific code
+#
+
+type DRV_TAB LONG_LIVED SYSTEM drv_tab
+type DRV_EV_STATE LONG_LIVED SYSTEM driver_event_state
+type DRV_EV_D_STATE FIXED_SIZE SYSTEM driver_event_data_state
+type DRV_SEL_D_STATE FIXED_SIZE SYSTEM driver_select_data_state
+type FD_LIST SHORT_LIVED SYSTEM fd_list
+type POLLSET LONG_LIVED SYSTEM pollset
+type POLLSET_UPDREQ SHORT_LIVED SYSTEM pollset_update_req
+type POLL_FDS LONG_LIVED SYSTEM poll_fds
+type POLL_RES_EVS LONG_LIVED SYSTEM poll_result_events
+type FD_STATUS LONG_LIVED SYSTEM fd_status
+
++if unix
+
+type SYS_READ_BUF TEMPORARY SYSTEM sys_read_buf
+type FD_TAB LONG_LIVED SYSTEM fd_tab
+type FD_ENTRY_BUF STANDARD SYSTEM fd_entry_buf
+type CS_PROG_PATH LONG_LIVED SYSTEM cs_prog_path
+type ENVIRONMENT TEMPORARY SYSTEM environment
+type PUTENV_STR SYSTEM SYSTEM putenv_string
+type PRT_REP_EXIT STANDARD SYSTEM port_report_exit
+
++endif
+
++if win32
+
+type DRV_DATA_BUF SYSTEM SYSTEM drv_data_buf
+type PRELOADED LONG_LIVED SYSTEM preloaded
+type PUTENV_STR SYSTEM SYSTEM putenv_string
+type WAITER_OBJ LONG_LIVED SYSTEM waiter_object
+type ENVIRONMENT SYSTEM SYSTEM environment
+type CON_VPRINTF_BUF TEMPORARY SYSTEM con_vprintf_buf
+
++endif
+
++if vxworks
+
+type SYS_TMP_BUF LONG_LIVED SYSTEM sys_tmp_buf
+type PEND_DATA SYSTEM SYSTEM pending_data
+type FD_TAB LONG_LIVED SYSTEM fd_tab
+type FD_ENTRY_BUF SYSTEM SYSTEM fd_entry_buf
+
++endif
+
++if ose
+
+type SYS_TMP_BUF LONG_LIVED SYSTEM sys_tmp_buf
+type PUTENV_STR SYSTEM SYSTEM putenv_string
+type GETENV_STR SYSTEM SYSTEM getenv_string
+type GETENV_STATE SYSTEM SYSTEM getenv_state
+type SIG_ENTRY SYSTEM SYSTEM sig_entry
+type DRIVER_DATA SYSTEM SYSTEM driver_data
+type PGM_TAB SYSTEM SYSTEM pgm_tab
+type PGM_ENTRY SYSTEM SYSTEM pgm_entry
+type PRT_TAB SYSTEM SYSTEM prt_tab
+type PRT_ENTRY SYSTEM SYSTEM prt_entry
+
++endif
+
+# ----------------------------------------------------------------------------
diff --git a/erts/emulator/beam/erl_alloc_util.c b/erts/emulator/beam/erl_alloc_util.c
new file mode 100644
index 0000000000..9b7bc24c1c
--- /dev/null
+++ b/erts/emulator/beam/erl_alloc_util.c
@@ -0,0 +1,3467 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2002-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+
+/*
+ * Description: A memory allocator utility. This utility provides
+ * management of (multiple) memory segments, coalescing
+ *              of free blocks, etc. Allocators are implemented by
+ *              supplying a callback interface which is invoked by
+ *              this utility. The only task a callback module has to
+ *              perform is to manage the free blocks.
+ *
+ * Author: Rickard Green
+ */
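+
+/*
+ * The callbacks an allocator implementation supplies (and that this
+ * utility invokes through the Allctr_t struct) include, among others:
+ *
+ *   get_free_block()      - find a free block of at least a given size
+ *   link_free_block()     - insert a block into the free block structure
+ *   unlink_free_block()   - remove a block from the free block structure
+ *   creating_mbc()        - notification: multi block carrier created
+ *   destroying_mbc()      - notification: multi block carrier destroyed
+ */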
+
+/*
+ * Alloc util will enforce 8-byte alignment if sys_alloc and mseg_alloc
+ * themselves enforce at least 8-byte alignment. If sys_alloc only
+ * enforces 4-byte alignment, then alloc util will do so too.
+ */
+
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+
+#include "global.h"
+#include "big.h"
+#include "erl_mtrace.h"
+#define GET_ERL_ALLOC_UTIL_IMPL
+#include "erl_alloc_util.h"
+#include "erl_mseg.h"
+#include "erl_threads.h"
+
+#ifdef ERTS_ENABLE_LOCK_COUNT
+#include "erl_lock_count.h"
+#endif
+
+#if defined(ERTS_ALLOC_UTIL_HARD_DEBUG) && defined(__GNUC__)
+#warning "* * * * * * * * * *"
+#warning "* * * * * * * * * *"
+#warning "* * NOTE: * *"
+#warning "* * Hard debug * *"
+#warning "* * is enabled! * *"
+#warning "* * * * * * * * * *"
+#warning "* * * * * * * * * *"
+#endif
+
+#define ALLOC_ZERO_EQ_NULL 0
+
+static int atoms_initialized = 0;
+static int initialized = 0;
+
+
+#if HAVE_ERTS_MSEG
+
+#define INV_MSEG_UNIT_MASK ((Uint) (mseg_unit_size - 1))
+#define MSEG_UNIT_MASK (~INV_MSEG_UNIT_MASK)
+#define MSEG_UNIT_FLOOR(X) ((X) & MSEG_UNIT_MASK)
+#define MSEG_UNIT_CEILING(X) MSEG_UNIT_FLOOR((X) + INV_MSEG_UNIT_MASK)
+
+#endif
+
+#define INV_SYS_ALLOC_CARRIER_MASK ((Uint) (sys_alloc_carrier_size - 1))
+#define SYS_ALLOC_CARRIER_MASK (~INV_SYS_ALLOC_CARRIER_MASK)
+#define SYS_ALLOC_CARRIER_FLOOR(X) ((X) & SYS_ALLOC_CARRIER_MASK)
+#define SYS_ALLOC_CARRIER_CEILING(X) \
+ SYS_ALLOC_CARRIER_FLOOR((X) + INV_SYS_ALLOC_CARRIER_MASK)
+
+#undef ASSERT
+#define ASSERT ASSERT_EXPR
+
+#define ERTS_ALCU_FLG_FAIL_REALLOC_MOVE ((Uint) 1)
+
+#if 0
+/* Can be useful for debugging */
+#define MBC_REALLOC_ALWAYS_MOVES
+#endif
+
+
+/* alloc_util global parameters */
+static Uint sys_alloc_carrier_size;
+#if HAVE_ERTS_MSEG
+static Uint max_mseg_carriers;
+static Uint mseg_unit_size;
+#endif
+
+#define ONE_GIGA (1000000000)
+
+#define INC_CC(CC) ((CC).no == ONE_GIGA - 1 \
+ ? ((CC).giga_no++, (CC).no = 0) \
+ : (CC).no++)
+
+#define DEC_CC(CC) ((CC).no == 0 \
+ ? ((CC).giga_no--, (CC).no = ONE_GIGA - 1) \
+ : (CC).no--)
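+
+/* Call counters are kept as a (giga_no, no) pair of words; the total
+ * number of calls is giga_no * ONE_GIGA + no, so a counter keeps
+ * counting correctly even after a plain word would have wrapped. */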
+
+/* ... */
+
+/* Blocks ... */
+
+#define SBC_BLK_FTR_FLG (((Uint) 1) << 0)
+#define UNUSED1_BLK_FTR_FLG (((Uint) 1) << 1)
+#define UNUSED2_BLK_FTR_FLG (((Uint) 1) << 2)
+
+#define ABLK_HDR_SZ (sizeof(Block_t))
+#define FBLK_FTR_SZ (sizeof(Uint))
+
+#define UMEMSZ2BLKSZ(AP, SZ) \
+ (ABLK_HDR_SZ + (SZ) <= (AP)->min_block_size \
+ ? (AP)->min_block_size \
+ : UNIT_CEILING(ABLK_HDR_SZ + (SZ)))
+
+#define UMEM2BLK(P) ((Block_t *) (((char *) (P)) - ABLK_HDR_SZ))
+#define BLK2UMEM(P) ((void *) (((char *) (P)) + ABLK_HDR_SZ))
+
+#define PREV_BLK_SZ(B) \
+ ((Uint) (*(((Uint *) (B)) - 1) & SZ_MASK))
+
+#define SET_BLK_SZ_FTR(B, SZ) \
+ (*((Uint *) (((char *) (B)) + (SZ) - sizeof(Uint))) = (SZ))
+
+#define THIS_FREE_BLK_HDR_FLG (((Uint) 1) << 0)
+#define PREV_FREE_BLK_HDR_FLG (((Uint) 1) << 1)
+#define LAST_BLK_HDR_FLG (((Uint) 1) << 2)
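+
+/* A block header is a single word: the block size (always a multiple of
+ * the allocation unit, so its low FLG_MASK bits are zero) or'ed with the
+ * three flag bits above; see SET_BLK_HDR() below. */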
+
+#define SET_BLK_SZ(B, SZ) \
+ (ASSERT(((SZ) & FLG_MASK) == 0), \
+ (*((Block_t *) (B)) = ((*((Block_t *) (B)) & FLG_MASK) | (SZ))))
+#define SET_BLK_FREE(B) \
+ (*((Block_t *) (B)) |= THIS_FREE_BLK_HDR_FLG)
+#define SET_BLK_ALLOCED(B) \
+ (*((Block_t *) (B)) &= ~THIS_FREE_BLK_HDR_FLG)
+#define SET_PREV_BLK_FREE(B) \
+ (*((Block_t *) (B)) |= PREV_FREE_BLK_HDR_FLG)
+#define SET_PREV_BLK_ALLOCED(B) \
+ (*((Block_t *) (B)) &= ~PREV_FREE_BLK_HDR_FLG)
+#define SET_LAST_BLK(B) \
+ (*((Block_t *) (B)) |= LAST_BLK_HDR_FLG)
+#define SET_NOT_LAST_BLK(B) \
+ (*((Block_t *) (B)) &= ~LAST_BLK_HDR_FLG)
+
+#define SBH_THIS_FREE THIS_FREE_BLK_HDR_FLG
+#define SBH_THIS_ALLOCED ((Uint) 0)
+#define SBH_PREV_FREE PREV_FREE_BLK_HDR_FLG
+#define SBH_PREV_ALLOCED ((Uint) 0)
+#define SBH_LAST_BLK LAST_BLK_HDR_FLG
+#define SBH_NOT_LAST_BLK ((Uint) 0)
+
+#define SET_BLK_HDR(B, Sz, F) \
+ (ASSERT(((Sz) & FLG_MASK) == 0), *((Block_t *) (B)) = ((Sz) | (F)))
+
+#define BLK_UMEM_SZ(B) \
+ (BLK_SZ(B) - (ABLK_HDR_SZ))
+#define IS_PREV_BLK_FREE(B) \
+ (*((Block_t *) (B)) & PREV_FREE_BLK_HDR_FLG)
+#define IS_PREV_BLK_ALLOCED(B) \
+ (!IS_PREV_BLK_FREE((B)))
+#define IS_FREE_BLK(B) \
+ (*((Block_t *) (B)) & THIS_FREE_BLK_HDR_FLG)
+#define IS_ALLOCED_BLK(B) \
+ (!IS_FREE_BLK((B)))
+#define IS_LAST_BLK(B) \
+ (*((Block_t *) (B)) & LAST_BLK_HDR_FLG)
+#define IS_NOT_LAST_BLK(B) \
+ (!IS_LAST_BLK((B)))
+
+#define GET_LAST_BLK_HDR_FLG(B) \
+ (*((Block_t*) (B)) & LAST_BLK_HDR_FLG)
+#define GET_THIS_FREE_BLK_HDR_FLG(B) \
+ (*((Block_t*) (B)) & THIS_FREE_BLK_HDR_FLG)
+#define GET_PREV_FREE_BLK_HDR_FLG(B) \
+ (*((Block_t*) (B)) & PREV_FREE_BLK_HDR_FLG)
+#define GET_BLK_HDR_FLGS(B) \
+ (*((Block_t*) (B)) & FLG_MASK)
+
+#define IS_FIRST_BLK(B) \
+ (IS_PREV_BLK_FREE((B)) && (PREV_BLK_SZ((B)) == 0))
+#define IS_NOT_FIRST_BLK(B) \
+ (!IS_FIRST_BLK((B)))
+
+#define SET_SBC_BLK_FTR(FTR) \
+ ((FTR) = (0 | SBC_BLK_FTR_FLG))
+#define SET_MBC_BLK_FTR(FTR) \
+ ((FTR) = 0)
+
+#define IS_SBC_BLK(B) \
+ (IS_PREV_BLK_FREE((B)) && (((Uint *) (B))[-1] & SBC_BLK_FTR_FLG))
+#define IS_MBC_BLK(B) \
+ (!IS_SBC_BLK((B)))
+
+#define NXT_BLK(B) \
+ ((Block_t *) (((char *) (B)) + BLK_SZ((B))))
+#define PREV_BLK(B) \
+ ((Block_t *) (((char *) (B)) - PREV_BLK_SZ((B))))
+
+/* Carriers ... */
+
+#define MSEG_CARRIER_HDR_FLAG (((Uint) 1) << 0)
+#define SBC_CARRIER_HDR_FLAG (((Uint) 1) << 1)
+
+#define SCH_SYS_ALLOC 0
+#define SCH_MSEG MSEG_CARRIER_HDR_FLAG
+#define SCH_MBC 0
+#define SCH_SBC SBC_CARRIER_HDR_FLAG
+
+#define SET_CARRIER_HDR(C, Sz, F) \
+ (ASSERT(((Sz) & FLG_MASK) == 0), (C)->chdr = ((Sz) | (F)))
+
+#define BLK2SBC(AP, B) \
+ ((Carrier_t *) (((char *) (B)) - (AP)->sbc_header_size))
+#define FBLK2MBC(AP, B) \
+ ((Carrier_t *) (((char *) (B)) - (AP)->mbc_header_size))
+
+#define MBC2FBLK(AP, P) \
+ ((Block_t *) (((char *) (P)) + (AP)->mbc_header_size))
+#define SBC2BLK(AP, P) \
+ ((Block_t *) (((char *) (P)) + (AP)->sbc_header_size))
+#define SBC2UMEM(AP, P) \
+ ((void *) (((char *) (P)) + ((AP)->sbc_header_size + ABLK_HDR_SZ)))
+
+#define IS_MSEG_CARRIER(C) \
+ ((C)->chdr & MSEG_CARRIER_HDR_FLAG)
+#define IS_SYS_ALLOC_CARRIER(C) \
+ (!IS_MSEG_CARRIER((C)))
+#define IS_SB_CARRIER(C) \
+ ((C)->chdr & SBC_CARRIER_HDR_FLAG)
+#define IS_MB_CARRIER(C) \
+ (!IS_SB_CARRIER((C)))
+
+#define SET_MSEG_CARRIER(C) \
+ ((C)->chdr |= MSEG_CARRIER_HDR_FLAG)
+#define SET_SYS_ALLOC_CARRIER(C) \
+ ((C)->chdr &= ~MSEG_CARRIER_HDR_FLAG)
+#define SET_SB_CARRIER(C) \
+ ((C)->chdr |= SBC_CARRIER_HDR_FLAG)
+#define SET_MB_CARRIER(C) \
+ ((C)->chdr &= ~SBC_CARRIER_HDR_FLAG)
+
+#define SET_CARRIER_SZ(C, SZ) \
+ (ASSERT(((SZ) & FLG_MASK) == 0), \
+ ((C)->chdr = ((C)->chdr & FLG_MASK) | (SZ)))
+
+#define CFLG_SBC (1 << 0)
+#define CFLG_MBC (1 << 1)
+#define CFLG_FORCE_MSEG (1 << 2)
+#define CFLG_FORCE_SYS_ALLOC (1 << 3)
+#define CFLG_FORCE_SIZE (1 << 4)
+#define CFLG_MAIN_CARRIER (1 << 5)
+
+#ifdef ERTS_ALLOC_UTIL_HARD_DEBUG
+static void check_blk_carrier(Allctr_t *, Block_t *);
+#define HARD_CHECK_BLK_CARRIER(A, B) check_blk_carrier((A), (B))
+#else
+#define HARD_CHECK_BLK_CARRIER(A, B)
+#endif
+
+
+/* Statistics updating ... */
+
+#ifdef DEBUG
+#define DEBUG_CHECK_CARRIER_NO_SZ(AP) \
+ ASSERT(((AP)->sbcs.curr_mseg.no && (AP)->sbcs.curr_mseg.size) \
+ || (!(AP)->sbcs.curr_mseg.no && !(AP)->sbcs.curr_mseg.size));\
+ ASSERT(((AP)->sbcs.curr_sys_alloc.no && (AP)->sbcs.curr_sys_alloc.size)\
+ || (!(AP)->sbcs.curr_sys_alloc.no && !(AP)->sbcs.curr_sys_alloc.size));\
+ ASSERT(((AP)->mbcs.curr_mseg.no && (AP)->mbcs.curr_mseg.size) \
+ || (!(AP)->mbcs.curr_mseg.no && !(AP)->mbcs.curr_mseg.size));\
+ ASSERT(((AP)->mbcs.curr_sys_alloc.no && (AP)->mbcs.curr_sys_alloc.size)\
+ || (!(AP)->mbcs.curr_sys_alloc.no && !(AP)->mbcs.curr_sys_alloc.size))
+
+#else
+#define DEBUG_CHECK_CARRIER_NO_SZ(AP)
+#endif
+
+#define STAT_SBC_ALLOC(AP, BSZ) \
+ (AP)->sbcs.blocks.curr.size += (BSZ); \
+ if ((AP)->sbcs.blocks.max.size < (AP)->sbcs.blocks.curr.size) \
+ (AP)->sbcs.blocks.max.size = (AP)->sbcs.blocks.curr.size; \
+ if ((AP)->sbcs.max.no < ((AP)->sbcs.curr_mseg.no \
+ + (AP)->sbcs.curr_sys_alloc.no)) \
+ (AP)->sbcs.max.no = ((AP)->sbcs.curr_mseg.no \
+ + (AP)->sbcs.curr_sys_alloc.no); \
+ if ((AP)->sbcs.max.size < ((AP)->sbcs.curr_mseg.size \
+ + (AP)->sbcs.curr_sys_alloc.size)) \
+ (AP)->sbcs.max.size = ((AP)->sbcs.curr_mseg.size \
+ + (AP)->sbcs.curr_sys_alloc.size)
+
+#define STAT_MSEG_SBC_ALLOC(AP, CSZ, BSZ) \
+do { \
+ (AP)->sbcs.curr_mseg.no++; \
+ (AP)->sbcs.curr_mseg.size += (CSZ); \
+ STAT_SBC_ALLOC((AP), (BSZ)); \
+ DEBUG_CHECK_CARRIER_NO_SZ((AP)); \
+} while (0)
+
+#define STAT_SYS_ALLOC_SBC_ALLOC(AP, CSZ, BSZ) \
+do { \
+ (AP)->sbcs.curr_sys_alloc.no++; \
+ (AP)->sbcs.curr_sys_alloc.size += (CSZ); \
+ STAT_SBC_ALLOC((AP), (BSZ)); \
+ DEBUG_CHECK_CARRIER_NO_SZ((AP)); \
+} while (0)
+
+
+#define STAT_SBC_FREE(AP, BSZ) \
+ ASSERT((AP)->sbcs.blocks.curr.size >= (BSZ)); \
+ (AP)->sbcs.blocks.curr.size -= (BSZ)
+
+#define STAT_MSEG_SBC_FREE(AP, CSZ, BSZ) \
+do { \
+ ASSERT((AP)->sbcs.curr_mseg.no > 0); \
+ (AP)->sbcs.curr_mseg.no--; \
+ ASSERT((AP)->sbcs.curr_mseg.size >= (CSZ)); \
+ (AP)->sbcs.curr_mseg.size -= (CSZ); \
+ STAT_SBC_FREE((AP), (BSZ)); \
+ DEBUG_CHECK_CARRIER_NO_SZ((AP)); \
+} while (0)
+
+#define STAT_SYS_ALLOC_SBC_FREE(AP, CSZ, BSZ) \
+do { \
+ ASSERT((AP)->sbcs.curr_sys_alloc.no > 0); \
+ (AP)->sbcs.curr_sys_alloc.no--; \
+ ASSERT((AP)->sbcs.curr_sys_alloc.size >= (CSZ)); \
+ (AP)->sbcs.curr_sys_alloc.size -= (CSZ); \
+ STAT_SBC_FREE((AP), (BSZ)); \
+ DEBUG_CHECK_CARRIER_NO_SZ((AP)); \
+} while (0)
+
+#define STAT_MBC_ALLOC(AP) \
+ if ((AP)->mbcs.max.no < ((AP)->mbcs.curr_mseg.no \
+ + (AP)->mbcs.curr_sys_alloc.no)) \
+ (AP)->mbcs.max.no = ((AP)->mbcs.curr_mseg.no \
+ + (AP)->mbcs.curr_sys_alloc.no); \
+ if ((AP)->mbcs.max.size < ((AP)->mbcs.curr_mseg.size \
+ + (AP)->mbcs.curr_sys_alloc.size)) \
+ (AP)->mbcs.max.size = ((AP)->mbcs.curr_mseg.size \
+ + (AP)->mbcs.curr_sys_alloc.size)
+
+
+#define STAT_MSEG_MBC_ALLOC(AP, CSZ) \
+do { \
+ (AP)->mbcs.curr_mseg.no++; \
+ (AP)->mbcs.curr_mseg.size += (CSZ); \
+ STAT_MBC_ALLOC((AP)); \
+ DEBUG_CHECK_CARRIER_NO_SZ((AP)); \
+} while (0)
+
+#define STAT_SYS_ALLOC_MBC_ALLOC(AP, CSZ) \
+do { \
+ (AP)->mbcs.curr_sys_alloc.no++; \
+ (AP)->mbcs.curr_sys_alloc.size += (CSZ); \
+ STAT_MBC_ALLOC((AP)); \
+ DEBUG_CHECK_CARRIER_NO_SZ((AP)); \
+} while (0)
+
+#define STAT_MSEG_MBC_FREE(AP, CSZ) \
+do { \
+ ASSERT((AP)->mbcs.curr_mseg.no > 0); \
+ (AP)->mbcs.curr_mseg.no--; \
+ ASSERT((AP)->mbcs.curr_mseg.size >= (CSZ)); \
+ (AP)->mbcs.curr_mseg.size -= (CSZ); \
+ DEBUG_CHECK_CARRIER_NO_SZ((AP)); \
+} while (0)
+
+#define STAT_SYS_ALLOC_MBC_FREE(AP, CSZ) \
+do { \
+ ASSERT((AP)->mbcs.curr_sys_alloc.no > 0); \
+ (AP)->mbcs.curr_sys_alloc.no--; \
+ ASSERT((AP)->mbcs.curr_sys_alloc.size >= (CSZ)); \
+ (AP)->mbcs.curr_sys_alloc.size -= (CSZ); \
+ DEBUG_CHECK_CARRIER_NO_SZ((AP)); \
+} while (0)
+
+#define STAT_MBC_BLK_ALLOC(AP, BSZ) \
+do { \
+ (AP)->mbcs.blocks.curr.no++; \
+ if ((AP)->mbcs.blocks.max.no < (AP)->mbcs.blocks.curr.no) \
+ (AP)->mbcs.blocks.max.no = (AP)->mbcs.blocks.curr.no; \
+ (AP)->mbcs.blocks.curr.size += (BSZ); \
+ if ((AP)->mbcs.blocks.max.size < (AP)->mbcs.blocks.curr.size) \
+ (AP)->mbcs.blocks.max.size = (AP)->mbcs.blocks.curr.size; \
+} while (0)
+
+#define STAT_MBC_BLK_FREE(AP, BSZ) \
+do { \
+ ASSERT((AP)->mbcs.blocks.curr.no > 0); \
+ (AP)->mbcs.blocks.curr.no--; \
+ ASSERT((AP)->mbcs.blocks.curr.size >= (BSZ)); \
+ (AP)->mbcs.blocks.curr.size -= (BSZ); \
+} while (0)
+
+/* Debug stuff... */
+#ifdef DEBUG
+static Uint carrier_alignment;
+#define DEBUG_SAVE_ALIGNMENT(C) \
+do { \
+ Uint algnmnt__ = sizeof(Unit_t) - (((Uint) (C)) % sizeof(Unit_t)); \
+ carrier_alignment = MIN(carrier_alignment, algnmnt__); \
+ ASSERT(((Uint) (C)) % sizeof(Uint) == 0); \
+} while (0)
+#define DEBUG_CHECK_ALIGNMENT(P) \
+do { \
+ ASSERT(sizeof(Unit_t) - (((Uint) (P)) % sizeof(Unit_t)) \
+ >= carrier_alignment); \
+ ASSERT(((Uint) (P)) % sizeof(Uint) == 0); \
+} while (0)
+
+#else
+#define DEBUG_SAVE_ALIGNMENT(C)
+#define DEBUG_CHECK_ALIGNMENT(P)
+#endif
+
+#ifdef DEBUG
+#ifdef USE_THREADS
+#define ERTS_ALCU_DBG_CHK_THR_SPEC(A) \
+do { \
+ if (!(A)->thread_safe) { \
+ if (!(A)->debug.saved_tid) \
+ (A)->debug.tid = erts_thr_self(); \
+ else { \
+ ASSERT(ethr_equal_tids((A)->debug.tid, erts_thr_self())); \
+ } \
+ } \
+} while (0)
+#else
+#define ERTS_ALCU_DBG_CHK_THR_SPEC(A)
+#endif
+#else
+#define ERTS_ALCU_DBG_CHK_THR_SPEC(A)
+#endif
+
+
+static void make_name_atoms(Allctr_t *allctr);
+
+
+/* mseg ... */
+
+#if HAVE_ERTS_MSEG
+
+static ERTS_INLINE void *
+alcu_mseg_alloc(Allctr_t *allctr, Uint *size_p)
+{
+ void *res;
+
+ res = erts_mseg_alloc_opt(allctr->alloc_no, size_p, &allctr->mseg_opt);
+ INC_CC(allctr->calls.mseg_alloc);
+ return res;
+}
+
+static ERTS_INLINE void *
+alcu_mseg_realloc(Allctr_t *allctr, void *seg, Uint old_size, Uint *new_size_p)
+{
+ void *res;
+
+ res = erts_mseg_realloc_opt(allctr->alloc_no, seg, old_size, new_size_p,
+ &allctr->mseg_opt);
+ INC_CC(allctr->calls.mseg_realloc);
+ return res;
+}
+
+static ERTS_INLINE void
+alcu_mseg_dealloc(Allctr_t *allctr, void *seg, Uint size)
+{
+ erts_mseg_dealloc_opt(allctr->alloc_no, seg, size, &allctr->mseg_opt);
+ INC_CC(allctr->calls.mseg_dealloc);
+}
+
+#endif
+
+static ERTS_INLINE void *
+alcu_sys_alloc(Allctr_t *allctr, Uint size)
+{
+ void *res;
+
+ res = erts_sys_alloc(0, NULL, size);
+ INC_CC(allctr->calls.sys_alloc);
+ if (erts_mtrace_enabled)
+ erts_mtrace_crr_alloc(res, allctr->alloc_no, ERTS_ALC_A_SYSTEM, size);
+ return res;
+}
+
+static ERTS_INLINE void *
+alcu_sys_realloc(Allctr_t *allctr, void *ptr, Uint size)
+{
+ void *res;
+
+ res = erts_sys_realloc(0, NULL, ptr, size);
+ INC_CC(allctr->calls.sys_realloc);
+ if (erts_mtrace_enabled)
+ erts_mtrace_crr_realloc(res,
+ allctr->alloc_no,
+ ERTS_ALC_A_SYSTEM,
+ ptr,
+ size);
+ return res;
+}
+
+static ERTS_INLINE void
+alcu_sys_free(Allctr_t *allctr, void *ptr)
+{
+ erts_sys_free(0, NULL, ptr);
+ INC_CC(allctr->calls.sys_free);
+ if (erts_mtrace_enabled)
+ erts_mtrace_crr_free(allctr->alloc_no, ERTS_ALC_A_SYSTEM, ptr);
+}
+
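+/*
+ * Carrier sizes grow linearly with the number of existing carriers.
+ * For example (illustrative numbers only): with smallest_mbc_size = 1 MB,
+ * largest_mbc_size = 10 MB and mbc_growth_stages = 10, carriers are
+ * requested at 1 MB, 1.9 MB, 2.8 MB, ..., 9.1 MB, and at 10 MB once
+ * ten carriers already exist.
+ */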
+static Uint
+get_next_mbc_size(Allctr_t *allctr)
+{
+ Uint size;
+ int cs = (allctr->mbcs.curr_mseg.no
+ + allctr->mbcs.curr_sys_alloc.no
+ - (allctr->main_carrier ? 1 : 0));
+
+ ASSERT(cs >= 0);
+ ASSERT(allctr->largest_mbc_size >= allctr->smallest_mbc_size);
+
+ if (cs >= allctr->mbc_growth_stages)
+ size = allctr->largest_mbc_size;
+ else
+ size = ((cs*(allctr->largest_mbc_size - allctr->smallest_mbc_size)
+ / allctr->mbc_growth_stages)
+ + allctr->smallest_mbc_size);
+
+ if (size < allctr->min_mbc_size)
+ size = allctr->min_mbc_size;
+
+ return size;
+}
+
+static ERTS_INLINE void
+link_carrier(CarrierList_t *cl, Carrier_t *crr)
+{
+ crr->next = NULL;
+ if (!cl->last) {
+ ASSERT(!cl->first);
+ cl->first = cl->last = crr;
+ crr->prev = NULL;
+ }
+ else {
+ ASSERT(cl->first);
+ ASSERT(!cl->first->prev);
+ ASSERT(cl->last);
+ ASSERT(!cl->last->next);
+ crr->prev = cl->last;
+ cl->last->next = crr;
+ cl->last = crr;
+ }
+ ASSERT(crr->next != crr);
+ ASSERT(crr->prev != crr);
+}
+
+static ERTS_INLINE void
+relink_carrier(CarrierList_t *cl, Carrier_t *crr)
+{
+ if (crr->next) {
+ if (crr->next->prev != crr)
+ crr->next->prev = crr;
+ }
+ else if (cl->last != crr)
+ cl->last = crr;
+
+ if (crr->prev) {
+ if (crr->prev->next != crr)
+ crr->prev->next = crr;
+ }
+ else if (cl->first != crr)
+ cl->first = crr;
+}
+
+static ERTS_INLINE void
+unlink_carrier(CarrierList_t *cl, Carrier_t *crr)
+{
+ ASSERT(crr->next != crr);
+ ASSERT(crr->prev != crr);
+
+ if (cl->first == crr) {
+ ASSERT(!crr->prev);
+ cl->first = crr->next;
+ }
+ else {
+ ASSERT(crr->prev);
+ crr->prev->next = crr->next;
+ }
+
+ if (cl->last == crr) {
+ ASSERT(!crr->next);
+ cl->last = crr->prev;
+ }
+ else {
+ ASSERT(crr->next);
+ crr->next->prev = crr->prev;
+ }
+}
+
+
+static Block_t *create_carrier(Allctr_t *, Uint, Uint);
+static void destroy_carrier(Allctr_t *, Block_t *);
+
+/* Multi block carrier alloc/realloc/free ... */
+
+/* NOTE! In case of memory shortage, mbc_alloc() may place the requested
+ * block in an SBC.
+ */
+static ERTS_INLINE void *
+mbc_alloc_block(Allctr_t *allctr, Uint size, Uint *blk_szp)
+{
+ Block_t *blk;
+
+ ASSERT(size);
+ ASSERT(size < allctr->sbc_threshold);
+
+ *blk_szp = UMEMSZ2BLKSZ(allctr, size);
+
+ blk = (*allctr->get_free_block)(allctr, *blk_szp, NULL, 0);
+
+ if (!blk) {
+ blk = create_carrier(allctr, *blk_szp, CFLG_MBC);
+ if (!blk) {
+ /* Emergency! We couldn't create the carrier as we wanted.
+               Try to place the block in a sys_alloc'ed SBC. */
+ blk = create_carrier(allctr,
+ size,
+ CFLG_SBC|CFLG_FORCE_SIZE|CFLG_FORCE_SYS_ALLOC);
+ }
+ }
+
+#ifdef ERTS_ALLOC_UTIL_HARD_DEBUG
+ if (IS_MBC_BLK(blk)) {
+ (*allctr->link_free_block)(allctr, blk);
+ HARD_CHECK_BLK_CARRIER(allctr, blk);
+ (*allctr->unlink_free_block)(allctr, blk);
+ }
+#endif
+
+ return blk;
+}
+
+static ERTS_INLINE void
+mbc_alloc_finalize(Allctr_t *allctr,
+ Block_t *blk,
+ Uint org_blk_sz,
+ Uint flags,
+ Uint want_blk_sz,
+ int valid_blk_info)
+{
+ Uint blk_sz;
+ Uint nxt_blk_sz;
+ Block_t *nxt_blk;
+ Uint prev_free_flg = flags & PREV_FREE_BLK_HDR_FLG;
+
+ ASSERT(org_blk_sz >= want_blk_sz);
+ ASSERT(blk);
+
+#ifdef DEBUG
+ nxt_blk = NULL;
+#endif
+
+ if (org_blk_sz - allctr->min_block_size >= want_blk_sz) {
+ /* Shrink block... */
+ blk_sz = want_blk_sz;
+ nxt_blk_sz = org_blk_sz - blk_sz;
+ SET_BLK_HDR(blk,
+ blk_sz,
+ SBH_THIS_ALLOCED|SBH_NOT_LAST_BLK|prev_free_flg);
+
+ nxt_blk = NXT_BLK(blk);
+ SET_BLK_HDR(nxt_blk,
+ nxt_blk_sz,
+ (SBH_THIS_FREE
+ | SBH_PREV_ALLOCED
+ | (flags & LAST_BLK_HDR_FLG)));
+
+ if (!(flags & LAST_BLK_HDR_FLG)) {
+ SET_BLK_SZ_FTR(nxt_blk, nxt_blk_sz);
+ if (!valid_blk_info) {
+ Block_t *nxt_nxt_blk = NXT_BLK(nxt_blk);
+ SET_PREV_BLK_FREE(nxt_nxt_blk);
+ }
+ }
+ (*allctr->link_free_block)(allctr, nxt_blk);
+
+ ASSERT(IS_NOT_LAST_BLK(blk));
+ ASSERT(IS_FREE_BLK(nxt_blk));
+ ASSERT((flags & LAST_BLK_HDR_FLG)
+ ? IS_LAST_BLK(nxt_blk)
+ : IS_NOT_LAST_BLK(nxt_blk));
+ ASSERT((flags & LAST_BLK_HDR_FLG)
+ || nxt_blk == PREV_BLK(NXT_BLK(nxt_blk)));
+ ASSERT((flags & LAST_BLK_HDR_FLG)
+ || IS_PREV_BLK_FREE(NXT_BLK(nxt_blk)));
+ ASSERT(nxt_blk_sz == BLK_SZ(nxt_blk));
+ ASSERT(nxt_blk_sz % sizeof(Unit_t) == 0);
+ ASSERT(nxt_blk_sz >= allctr->min_block_size);
+ }
+ else {
+ blk_sz = org_blk_sz;
+ if (flags & LAST_BLK_HDR_FLG) {
+ if (valid_blk_info)
+ SET_BLK_ALLOCED(blk);
+ else
+ SET_BLK_HDR(blk,
+ blk_sz,
+ SBH_THIS_ALLOCED|SBH_LAST_BLK|prev_free_flg);
+ }
+ else {
+ if (valid_blk_info)
+ SET_BLK_ALLOCED(blk);
+ else
+ SET_BLK_HDR(blk,
+ blk_sz,
+ SBH_THIS_ALLOCED|SBH_NOT_LAST_BLK|prev_free_flg);
+ nxt_blk = NXT_BLK(blk);
+ SET_PREV_BLK_ALLOCED(nxt_blk);
+ }
+
+ ASSERT((flags & LAST_BLK_HDR_FLG)
+ ? IS_LAST_BLK(blk)
+ : IS_NOT_LAST_BLK(blk));
+ }
+
+ STAT_MBC_BLK_ALLOC(allctr, blk_sz);
+
+ ASSERT(IS_ALLOCED_BLK(blk));
+ ASSERT(blk_sz == BLK_SZ(blk));
+ ASSERT(blk_sz % sizeof(Unit_t) == 0);
+ ASSERT(blk_sz >= allctr->min_block_size);
+ ASSERT(blk_sz >= want_blk_sz);
+ ASSERT(IS_MBC_BLK(blk));
+
+ ASSERT(!nxt_blk || IS_PREV_BLK_ALLOCED(nxt_blk));
+ ASSERT(!nxt_blk || IS_MBC_BLK(nxt_blk));
+
+ HARD_CHECK_BLK_CARRIER(allctr, blk);
+}
+
+static void *
+mbc_alloc(Allctr_t *allctr, Uint size)
+{
+ Block_t *blk;
+ Uint blk_sz;
+ blk = mbc_alloc_block(allctr, size, &blk_sz);
+ if (!blk)
+ return NULL;
+ if (IS_MBC_BLK(blk))
+ mbc_alloc_finalize(allctr,
+ blk,
+ BLK_SZ(blk),
+ GET_BLK_HDR_FLGS(blk),
+ blk_sz,
+ 1);
+ return BLK2UMEM(blk);
+}
+
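+/*
+ * mbc_free() coalesces the freed block with free neighbours: the size of
+ * a free predecessor is read from the footer stored in its last word
+ * (see SET_BLK_SZ_FTR() and PREV_BLK_SZ()). When the freed block becomes
+ * the only block of a carrier other than the main carrier, the carrier
+ * itself is destroyed.
+ */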
+static void
+mbc_free(Allctr_t *allctr, void *p)
+{
+ Uint is_first_blk;
+ Uint is_last_blk;
+ Uint blk_sz;
+ Block_t *blk;
+ Block_t *nxt_blk;
+
+
+ ASSERT(p);
+
+ blk = UMEM2BLK(p);
+ blk_sz = BLK_SZ(blk);
+
+ ASSERT(IS_MBC_BLK(blk));
+ ASSERT(blk_sz >= allctr->min_block_size);
+
+ HARD_CHECK_BLK_CARRIER(allctr, blk);
+
+ STAT_MBC_BLK_FREE(allctr, blk_sz);
+
+ is_first_blk = IS_FIRST_BLK(blk);
+ is_last_blk = IS_LAST_BLK(blk);
+
+ if (!is_first_blk && IS_PREV_BLK_FREE(blk)) {
+ /* Coalesce with previous block... */
+ blk = PREV_BLK(blk);
+ (*allctr->unlink_free_block)(allctr, blk);
+
+ blk_sz += BLK_SZ(blk);
+ is_first_blk = IS_FIRST_BLK(blk);
+ SET_BLK_SZ(blk, blk_sz);
+ }
+ else {
+ SET_BLK_FREE(blk);
+ }
+
+ if (is_last_blk)
+ SET_LAST_BLK(blk);
+ else {
+ nxt_blk = NXT_BLK(blk);
+ if (IS_FREE_BLK(nxt_blk)) {
+ /* Coalesce with next block... */
+ (*allctr->unlink_free_block)(allctr, nxt_blk);
+ blk_sz += BLK_SZ(nxt_blk);
+ SET_BLK_SZ(blk, blk_sz);
+
+ is_last_blk = IS_LAST_BLK(nxt_blk);
+ if (is_last_blk)
+ SET_LAST_BLK(blk);
+ else {
+ SET_NOT_LAST_BLK(blk);
+ SET_BLK_SZ_FTR(blk, blk_sz);
+ }
+ }
+ else {
+ SET_PREV_BLK_FREE(nxt_blk);
+ SET_NOT_LAST_BLK(blk);
+ SET_BLK_SZ_FTR(blk, blk_sz);
+ }
+
+ }
+
+ ASSERT(is_last_blk ? IS_LAST_BLK(blk) : IS_NOT_LAST_BLK(blk));
+ ASSERT(is_first_blk ? IS_FIRST_BLK(blk) : IS_NOT_FIRST_BLK(blk));
+ ASSERT(IS_FREE_BLK(blk));
+ ASSERT(is_first_blk || IS_PREV_BLK_ALLOCED(blk));
+ ASSERT(is_last_blk || IS_PREV_BLK_FREE(NXT_BLK(blk)));
+ ASSERT(blk_sz == BLK_SZ(blk));
+ ASSERT(is_last_blk || blk == PREV_BLK(NXT_BLK(blk)));
+ ASSERT(blk_sz % sizeof(Unit_t) == 0);
+ ASSERT(IS_MBC_BLK(blk));
+
+ if (is_first_blk
+ && is_last_blk
+ && allctr->main_carrier != FBLK2MBC(allctr, blk))
+ destroy_carrier(allctr, blk);
+ else {
+ (*allctr->link_free_block)(allctr, blk);
+ HARD_CHECK_BLK_CARRIER(allctr, blk);
+ }
+}
+
+static void *
+mbc_realloc(Allctr_t *allctr, void *p, Uint size, Uint flgs)
+{
+ void *new_p;
+ Uint old_blk_sz;
+ Block_t *blk;
+#ifndef MBC_REALLOC_ALWAYS_MOVES
+ Block_t *new_blk, *cand_blk;
+ Uint cand_blk_sz;
+ Uint blk_sz;
+ Block_t *nxt_blk;
+ Uint nxt_blk_sz;
+ Uint is_last_blk;
+#endif /* #ifndef MBC_REALLOC_ALWAYS_MOVES */
+
+ ASSERT(p);
+ ASSERT(size);
+ ASSERT(size < allctr->sbc_threshold);
+
+ blk = (Block_t *) UMEM2BLK(p);
+ old_blk_sz = BLK_SZ(blk);
+
+ ASSERT(old_blk_sz >= allctr->min_block_size);
+
+#ifdef MBC_REALLOC_ALWAYS_MOVES
+ if (flgs & ERTS_ALCU_FLG_FAIL_REALLOC_MOVE)
+ return NULL;
+#else /* !MBC_REALLOC_ALWAYS_MOVES */
+ blk_sz = UMEMSZ2BLKSZ(allctr, size);
+
+ ASSERT(IS_ALLOCED_BLK(blk));
+ ASSERT(IS_MBC_BLK(blk));
+
+ is_last_blk = IS_LAST_BLK(blk);
+
+ if (old_blk_sz == blk_sz)
+ return p;
+ else if (blk_sz < old_blk_sz) {
+ /* Shrink block... */
+ Block_t *nxt_nxt_blk;
+ Uint diff_sz_val = old_blk_sz - blk_sz;
+ Uint old_blk_sz_val = old_blk_sz;
+
+ if (diff_sz_val >= (~((Uint) 0) / 100)) {
+ /* div both by 128 */
+ old_blk_sz_val >>= 7;
+ diff_sz_val >>= 7;
+ }
+
+	/* Avoid fragmentation by moving the block if it shrinks by more
+	   than mbc_move_threshold percent of its old size */
+ if (100*diff_sz_val > allctr->mbc_move_threshold*old_blk_sz_val) {
+ if (flgs & ERTS_ALCU_FLG_FAIL_REALLOC_MOVE)
+ return NULL;
+
+ cand_blk_sz = old_blk_sz;
+ if (!IS_PREV_BLK_FREE(blk) || IS_FIRST_BLK(blk))
+ cand_blk = blk;
+ else {
+ cand_blk = PREV_BLK(blk);
+ cand_blk_sz += PREV_BLK_SZ(blk);
+ }
+ if (!is_last_blk) {
+ nxt_blk = NXT_BLK(blk);
+ if (IS_FREE_BLK(nxt_blk))
+ cand_blk_sz += BLK_SZ(nxt_blk);
+ }
+
+ new_blk = (*allctr->get_free_block)(allctr,
+ blk_sz,
+ cand_blk,
+ cand_blk_sz);
+
+ if (new_blk || cand_blk != blk)
+ goto move_into_new_blk;
+ }
+
+ /* Shrink at current location */
+
+ nxt_blk_sz = old_blk_sz - blk_sz;
+
+ if ((is_last_blk || IS_ALLOCED_BLK(NXT_BLK(blk)))
+ && (nxt_blk_sz < allctr->min_block_size))
+ return p;
+
+ HARD_CHECK_BLK_CARRIER(allctr, blk);
+
+ SET_BLK_SZ(blk, blk_sz);
+ SET_NOT_LAST_BLK(blk);
+
+ nxt_blk = NXT_BLK(blk);
+ SET_BLK_HDR(nxt_blk,
+ nxt_blk_sz,
+ SBH_THIS_FREE|SBH_PREV_ALLOCED|SBH_NOT_LAST_BLK);
+
+ STAT_MBC_BLK_FREE(allctr, old_blk_sz);
+ STAT_MBC_BLK_ALLOC(allctr, blk_sz);
+
+ ASSERT(BLK_SZ(blk) >= allctr->min_block_size);
+
+ if (is_last_blk)
+ SET_LAST_BLK(nxt_blk);
+ else {
+ nxt_nxt_blk = NXT_BLK(nxt_blk);
+ if (IS_FREE_BLK(nxt_nxt_blk)) {
+ /* Coalesce with next free block... */
+ nxt_blk_sz += BLK_SZ(nxt_nxt_blk);
+ (*allctr->unlink_free_block)(allctr, nxt_nxt_blk);
+ SET_BLK_SZ(nxt_blk, nxt_blk_sz);
+
+ is_last_blk = IS_LAST_BLK(nxt_nxt_blk);
+ if (is_last_blk)
+ SET_LAST_BLK(nxt_blk);
+ else
+ SET_BLK_SZ_FTR(nxt_blk, nxt_blk_sz);
+ }
+ else {
+ SET_BLK_SZ_FTR(nxt_blk, nxt_blk_sz);
+ SET_PREV_BLK_FREE(nxt_nxt_blk);
+ }
+ }
+
+ (*allctr->link_free_block)(allctr, nxt_blk);
+
+
+ ASSERT(IS_ALLOCED_BLK(blk));
+ ASSERT(blk_sz == BLK_SZ(blk));
+ ASSERT(blk_sz % sizeof(Unit_t) == 0);
+ ASSERT(blk_sz >= allctr->min_block_size);
+ ASSERT(blk_sz >= size + ABLK_HDR_SZ);
+ ASSERT(IS_MBC_BLK(blk));
+
+ ASSERT(IS_FREE_BLK(nxt_blk));
+ ASSERT(IS_PREV_BLK_ALLOCED(nxt_blk));
+ ASSERT(nxt_blk_sz == BLK_SZ(nxt_blk));
+ ASSERT(nxt_blk_sz % sizeof(Unit_t) == 0);
+ ASSERT(nxt_blk_sz >= allctr->min_block_size);
+ ASSERT(IS_MBC_BLK(nxt_blk));
+ ASSERT(is_last_blk ? IS_LAST_BLK(nxt_blk) : IS_NOT_LAST_BLK(nxt_blk));
+ ASSERT(is_last_blk || nxt_blk == PREV_BLK(NXT_BLK(nxt_blk)));
+ ASSERT(is_last_blk || IS_PREV_BLK_FREE(NXT_BLK(nxt_blk)));
+
+ HARD_CHECK_BLK_CARRIER(allctr, blk);
+
+ return p;
+ }
+
+ /* Need larger block... */
+
+ if (!is_last_blk) {
+ nxt_blk = NXT_BLK(blk);
+ nxt_blk_sz = BLK_SZ(nxt_blk);
+ if (IS_FREE_BLK(nxt_blk) && blk_sz <= old_blk_sz + nxt_blk_sz) {
+ /* Grow into next block... */
+
+ HARD_CHECK_BLK_CARRIER(allctr, blk);
+
+ (*allctr->unlink_free_block)(allctr, nxt_blk);
+ nxt_blk_sz -= blk_sz - old_blk_sz;
+
+ is_last_blk = IS_LAST_BLK(nxt_blk);
+ if (nxt_blk_sz < allctr->min_block_size) {
+ blk_sz += nxt_blk_sz;
+
+ SET_BLK_SZ(blk, blk_sz);
+
+ if (is_last_blk) {
+ SET_LAST_BLK(blk);
+#ifdef DEBUG
+ nxt_blk = NULL;
+#endif
+ }
+ else {
+ nxt_blk = NXT_BLK(blk);
+ SET_PREV_BLK_ALLOCED(nxt_blk);
+#ifdef DEBUG
+ is_last_blk = IS_LAST_BLK(nxt_blk);
+ nxt_blk_sz = BLK_SZ(nxt_blk);
+#endif
+ }
+ }
+ else {
+ SET_BLK_SZ(blk, blk_sz);
+
+ nxt_blk = NXT_BLK(blk);
+ SET_BLK_HDR(nxt_blk,
+ nxt_blk_sz,
+ SBH_THIS_FREE|SBH_PREV_ALLOCED|SBH_NOT_LAST_BLK);
+
+ if (is_last_blk)
+ SET_LAST_BLK(nxt_blk);
+ else
+ SET_BLK_SZ_FTR(nxt_blk, nxt_blk_sz);
+
+ (*allctr->link_free_block)(allctr, nxt_blk);
+
+ ASSERT(IS_FREE_BLK(nxt_blk));
+ }
+
+ STAT_MBC_BLK_FREE(allctr, old_blk_sz);
+ STAT_MBC_BLK_ALLOC(allctr, blk_sz);
+
+
+ ASSERT(IS_ALLOCED_BLK(blk));
+ ASSERT(blk_sz == BLK_SZ(blk));
+ ASSERT(blk_sz % sizeof(Unit_t) == 0);
+ ASSERT(blk_sz >= allctr->min_block_size);
+ ASSERT(blk_sz >= size + ABLK_HDR_SZ);
+ ASSERT(IS_MBC_BLK(blk));
+
+ ASSERT(!nxt_blk || IS_PREV_BLK_ALLOCED(nxt_blk));
+ ASSERT(!nxt_blk || nxt_blk_sz == BLK_SZ(nxt_blk));
+ ASSERT(!nxt_blk || nxt_blk_sz % sizeof(Unit_t) == 0);
+ ASSERT(!nxt_blk || nxt_blk_sz >= allctr->min_block_size);
+ ASSERT(!nxt_blk || IS_MBC_BLK(nxt_blk));
+ ASSERT(!nxt_blk || (is_last_blk
+ ? IS_LAST_BLK(nxt_blk)
+ : IS_NOT_LAST_BLK(nxt_blk)));
+ ASSERT(!nxt_blk || is_last_blk
+ || IS_ALLOCED_BLK(nxt_blk)
+ || nxt_blk == PREV_BLK(NXT_BLK(nxt_blk)));
+ ASSERT(!nxt_blk || is_last_blk
+ || IS_ALLOCED_BLK(nxt_blk)
+ || IS_PREV_BLK_FREE(NXT_BLK(nxt_blk)));
+
+ HARD_CHECK_BLK_CARRIER(allctr, blk);
+
+ return p;
+ }
+ }
+
+ if (flgs & ERTS_ALCU_FLG_FAIL_REALLOC_MOVE)
+ return NULL;
+
+ /* Need to grow in another block */
+
+ if (!IS_PREV_BLK_FREE(blk) || IS_FIRST_BLK(blk)) {
+ cand_blk = NULL;
+ cand_blk_sz = 0;
+ }
+ else {
+ cand_blk = PREV_BLK(blk);
+ cand_blk_sz = old_blk_sz + PREV_BLK_SZ(blk);
+
+ if (!is_last_blk) {
+ nxt_blk = NXT_BLK(blk);
+ if (IS_FREE_BLK(nxt_blk))
+ cand_blk_sz += BLK_SZ(nxt_blk);
+ }
+ }
+
+ if (cand_blk_sz < blk_sz) {
+	/* We won't fit in cand_blk; get a new one */
+#endif /* !MBC_REALLOC_ALWAYS_MOVES */
+
+ new_p = mbc_alloc(allctr, size);
+ if (!new_p)
+ return NULL;
+ sys_memcpy(new_p, p, MIN(size, old_blk_sz - ABLK_HDR_SZ));
+ mbc_free(allctr, p);
+
+ return new_p;
+
+#ifndef MBC_REALLOC_ALWAYS_MOVES
+
+ }
+ else {
+ /* We will at least fit in cand_blk */
+
+ new_blk = (*allctr->get_free_block)(allctr,
+ blk_sz,
+ cand_blk,
+ cand_blk_sz);
+ move_into_new_blk:
+ /*
+	 * new_blk and cand_blk have to be correctly set
+ * when jumping to this label.
+ */
+
+ if (new_blk) {
+ mbc_alloc_finalize(allctr,
+ new_blk,
+ BLK_SZ(new_blk),
+ GET_BLK_HDR_FLGS(new_blk),
+ blk_sz,
+ 1);
+ new_p = BLK2UMEM(new_blk);
+ sys_memcpy(new_p, p, MIN(size, old_blk_sz - ABLK_HDR_SZ));
+ mbc_free(allctr, p);
+ return new_p;
+ }
+ else {
+ Uint new_blk_sz;
+ Uint new_blk_flgs;
+ Uint prev_blk_sz;
+ Uint blk_cpy_sz;
+
+ ASSERT(IS_PREV_BLK_FREE(blk));
+ ASSERT(cand_blk == PREV_BLK(blk));
+
+ prev_blk_sz = PREV_BLK_SZ(blk);
+ new_blk = cand_blk;
+ new_blk_sz = prev_blk_sz + old_blk_sz;
+ new_blk_flgs = GET_BLK_HDR_FLGS(new_blk);
+
+ HARD_CHECK_BLK_CARRIER(allctr, blk);
+
+ (*allctr->unlink_free_block)(allctr, new_blk); /* prev */
+
+ if (is_last_blk)
+ new_blk_flgs |= LAST_BLK_HDR_FLG;
+ else {
+ nxt_blk = NXT_BLK(blk);
+ if (IS_FREE_BLK(nxt_blk)) {
+ new_blk_flgs |= GET_LAST_BLK_HDR_FLG(nxt_blk);
+ new_blk_sz += BLK_SZ(nxt_blk);
+ (*allctr->unlink_free_block)(allctr, nxt_blk);
+ }
+ }
+
+ /*
+	     * Copy the user data first, then update the new blocks in
+	     * mbc_alloc_finalize(); mbc_alloc_finalize() may write headers
+	     * at the old location of the user data, so the order is important.
+ */
+
+ new_p = BLK2UMEM(new_blk);
+ blk_cpy_sz = MIN(blk_sz, old_blk_sz);
+
+ if (prev_blk_sz >= blk_cpy_sz)
+ sys_memcpy(new_p, p, blk_cpy_sz - ABLK_HDR_SZ);
+ else
+ sys_memmove(new_p, p, blk_cpy_sz - ABLK_HDR_SZ);
+
+ mbc_alloc_finalize(allctr,
+ new_blk,
+ new_blk_sz,
+ new_blk_flgs,
+ blk_sz,
+ 0);
+
+ STAT_MBC_BLK_FREE(allctr, old_blk_sz);
+
+ return new_p;
+ }
+ }
+#endif /* !MBC_REALLOC_ALWAYS_MOVES */
+}
+
+#ifdef DEBUG
+
+#if HAVE_ERTS_MSEG
+#define ASSERT_MSEG_UNIT_SIZE_MULTIPLE(CSZ) ASSERT((CSZ) % mseg_unit_size == 0)
+#else
+#define ASSERT_MSEG_UNIT_SIZE_MULTIPLE(CSZ)
+#endif
+
+#define CHECK_1BLK_CARRIER(A, SBC, MSEGED, C, CSZ, B, BSZ) \
+do { \
+ ASSERT(IS_FIRST_BLK((B))); \
+ ASSERT(IS_LAST_BLK((B))); \
+ ASSERT((CSZ) == CARRIER_SZ((C))); \
+ ASSERT((BSZ) == BLK_SZ((B))); \
+ ASSERT((BSZ) % sizeof(Unit_t) == 0); \
+ if ((SBC)) { \
+ ASSERT(IS_SBC_BLK((B))); \
+ ASSERT(IS_SB_CARRIER((C))); \
+ } \
+ else { \
+ ASSERT(IS_MBC_BLK((B))); \
+ ASSERT(IS_MB_CARRIER((C))); \
+ } \
+ if ((MSEGED)) { \
+ ASSERT(IS_MSEG_CARRIER((C))); \
+ ASSERT_MSEG_UNIT_SIZE_MULTIPLE((CSZ)); \
+ } \
+ else { \
+ ASSERT(IS_SYS_ALLOC_CARRIER((C))); \
+ ASSERT((CSZ) % sizeof(Unit_t) == 0); \
+ } \
+} while (0)
+
+#else
+#define CHECK_1BLK_CARRIER(A, SBC, MSEGED, C, CSZ, B, BSZ)
+#endif
+
+
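+/*
+ * create_carrier() prefers mseg carriers when HAVE_ERTS_MSEG and the
+ * configured limits (max_mseg_carriers, max_mseg_sbcs, max_mseg_mbcs)
+ * permit, and otherwise uses sys_alloc. If the preferred method fails,
+ * the other one is tried, unless a CFLG_FORCE_* flag forbids it.
+ */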
+static Block_t *
+create_carrier(Allctr_t *allctr, Uint umem_sz, Uint flags)
+{
+ Block_t *blk;
+ Carrier_t *crr;
+ Uint blk_sz, bcrr_sz, crr_sz;
+#if HAVE_ERTS_MSEG
+ int have_tried_sys_alloc = 0, have_tried_mseg = 0;
+#endif
+#ifdef DEBUG
+ int is_mseg = 0;
+#endif
+
+ ASSERT((flags & CFLG_SBC && !(flags & CFLG_MBC))
+ || (flags & CFLG_MBC && !(flags & CFLG_SBC)));
+
+ blk_sz = UMEMSZ2BLKSZ(allctr, umem_sz);
+
+#if HAVE_ERTS_MSEG
+
+ if (flags & CFLG_FORCE_SYS_ALLOC)
+ goto try_sys_alloc;
+ if (flags & CFLG_FORCE_MSEG)
+ goto try_mseg;
+ if (erts_mseg_no() >= max_mseg_carriers)
+ goto try_sys_alloc;
+ if (flags & CFLG_SBC) {
+ if (allctr->sbcs.curr_mseg.no >= allctr->max_mseg_sbcs)
+ goto try_sys_alloc;
+ }
+ else {
+ if (allctr->mbcs.curr_mseg.no >= allctr->max_mseg_mbcs)
+ goto try_sys_alloc;
+ }
+
+ try_mseg:
+
+ if (flags & CFLG_SBC) {
+ crr_sz = blk_sz + allctr->sbc_header_size;
+ }
+ else {
+ crr_sz = (*allctr->get_next_mbc_size)(allctr);
+ if (crr_sz < allctr->mbc_header_size + blk_sz)
+ crr_sz = allctr->mbc_header_size + blk_sz;
+#ifdef ERTS_ALLOC_UTIL_HARD_DEBUG
+ if (sizeof(Unit_t) == sizeof(Uint))
+ crr_sz += sizeof(Uint);
+#endif
+ }
+ crr_sz = MSEG_UNIT_CEILING(crr_sz);
+ ASSERT(crr_sz % mseg_unit_size == 0);
+
+ crr = (Carrier_t *) alcu_mseg_alloc(allctr, &crr_sz);
+ if (!crr) {
+ have_tried_mseg = 1;
+ if (!(have_tried_sys_alloc || flags & CFLG_FORCE_MSEG))
+ goto try_sys_alloc;
+ return NULL;
+ }
+
+#ifdef DEBUG
+ is_mseg = 1;
+#endif
+ if (flags & CFLG_SBC) {
+ SET_CARRIER_HDR(crr, crr_sz, SCH_MSEG|SCH_SBC);
+ STAT_MSEG_SBC_ALLOC(allctr, crr_sz, blk_sz);
+ goto sbc_final_touch;
+ }
+ else {
+ SET_CARRIER_HDR(crr, crr_sz, SCH_MSEG|SCH_MBC);
+ STAT_MSEG_MBC_ALLOC(allctr, crr_sz);
+ goto mbc_final_touch;
+ }
+
+ try_sys_alloc:
+#endif /* #if HAVE_ERTS_MSEG */
+
+ if (flags & CFLG_SBC) {
+ bcrr_sz = blk_sz + allctr->sbc_header_size;
+ }
+ else {
+ bcrr_sz = allctr->mbc_header_size + blk_sz;
+ if (!(flags & CFLG_MAIN_CARRIER)
+ && bcrr_sz < allctr->smallest_mbc_size)
+ bcrr_sz = allctr->smallest_mbc_size;
+#ifdef ERTS_ALLOC_UTIL_HARD_DEBUG
+ if (sizeof(Unit_t) == sizeof(Uint))
+ bcrr_sz += sizeof(Uint);
+#endif
+
+ }
+
+ crr_sz = (flags & CFLG_FORCE_SIZE
+ ? UNIT_CEILING(bcrr_sz)
+ : SYS_ALLOC_CARRIER_CEILING(bcrr_sz));
+
+ crr = (Carrier_t *) alcu_sys_alloc(allctr, crr_sz);
+
+ if (!crr) {
+ if (crr_sz > UNIT_CEILING(bcrr_sz)) {
+ crr_sz = UNIT_CEILING(bcrr_sz);
+ crr = (Carrier_t *) alcu_sys_alloc(allctr, crr_sz);
+ }
+ if (!crr) {
+#if HAVE_ERTS_MSEG
+ have_tried_sys_alloc = 1;
+ if (!(have_tried_mseg || flags & CFLG_FORCE_SYS_ALLOC))
+ goto try_mseg;
+#endif
+ return NULL;
+ }
+ }
+ if (flags & CFLG_SBC) {
+ SET_CARRIER_HDR(crr, crr_sz, SCH_SYS_ALLOC|SCH_SBC);
+ STAT_SYS_ALLOC_SBC_ALLOC(allctr, crr_sz, blk_sz);
+
+#if HAVE_ERTS_MSEG
+ sbc_final_touch:
+#endif
+
+ blk = SBC2BLK(allctr, crr);
+
+ SET_SBC_BLK_FTR(((Uint *) blk)[-1]);
+ SET_BLK_HDR(blk, blk_sz, SBH_THIS_ALLOCED|SBH_PREV_FREE|SBH_LAST_BLK);
+
+ link_carrier(&allctr->sbc_list, crr);
+
+ CHECK_1BLK_CARRIER(allctr, 1, is_mseg, crr, crr_sz, blk, blk_sz);
+
+ }
+ else {
+ SET_CARRIER_HDR(crr, crr_sz, SCH_SYS_ALLOC|SCH_MBC);
+ STAT_SYS_ALLOC_MBC_ALLOC(allctr, crr_sz);
+
+#if HAVE_ERTS_MSEG
+ mbc_final_touch:
+#endif
+
+ blk = MBC2FBLK(allctr, crr);
+
+#ifdef ERTS_ALLOC_UTIL_HARD_DEBUG
+ if (sizeof(Unit_t) == sizeof(Uint))
+ crr_sz -= sizeof(Uint);
+#endif
+
+ blk_sz = UNIT_FLOOR(crr_sz - allctr->mbc_header_size);
+
+ SET_MBC_BLK_FTR(((Uint *) blk)[-1]);
+ SET_BLK_HDR(blk, blk_sz, SBH_THIS_FREE|SBH_PREV_FREE|SBH_LAST_BLK);
+
+#ifdef ERTS_ALLOC_UTIL_HARD_DEBUG
+ *((Carrier_t **) NXT_BLK(blk)) = crr;
+#endif
+
+ if (flags & CFLG_MAIN_CARRIER) {
+ ASSERT(!allctr->main_carrier);
+ allctr->main_carrier = crr;
+ }
+
+ link_carrier(&allctr->mbc_list, crr);
+
+#ifdef ERTS_ALLOC_UTIL_HARD_DEBUG
+ if (sizeof(Unit_t) == sizeof(Uint))
+ crr_sz += sizeof(Uint);
+#endif
+ CHECK_1BLK_CARRIER(allctr, 0, is_mseg, crr, crr_sz, blk, blk_sz);
+#ifdef ERTS_ALLOC_UTIL_HARD_DEBUG
+ if (sizeof(Unit_t) == sizeof(Uint))
+ crr_sz -= sizeof(Uint);
+#endif
+ if (allctr->creating_mbc)
+ (*allctr->creating_mbc)(allctr, crr);
+
+ }
+
+ DEBUG_SAVE_ALIGNMENT(crr);
+ return blk;
+}
+
+static Block_t *
+resize_carrier(Allctr_t *allctr, Block_t *old_blk, Uint umem_sz, Uint flags)
+{
+ Block_t *new_blk;
+ Carrier_t *new_crr, *old_crr;
+ Uint create_flags, old_crr_sz, old_blk_sz, new_blk_sz, new_crr_sz;
+ Uint new_bcrr_sz;
+
+ if (flags & CFLG_MBC) {
+ ASSERT(0);
+ return NULL;
+ }
+
+ ASSERT(flags & CFLG_SBC);
+ create_flags = flags|CFLG_SBC;
+
+ HARD_CHECK_BLK_CARRIER(allctr, old_blk);
+
+ old_blk_sz = BLK_SZ(old_blk);
+ old_crr = BLK2SBC(allctr, old_blk);
+ old_crr_sz = CARRIER_SZ(old_crr);
+ ASSERT(IS_SB_CARRIER(old_crr));
+ ASSERT(IS_SBC_BLK(old_blk));
+
+ new_blk_sz = UMEMSZ2BLKSZ(allctr, umem_sz);
+
+#if HAVE_ERTS_MSEG
+
+ if (IS_MSEG_CARRIER(old_crr)) {
+ STAT_MSEG_SBC_FREE(allctr, old_crr_sz, old_blk_sz);
+
+ if (!(flags & CFLG_FORCE_SYS_ALLOC)) {
+
+ new_crr_sz = new_blk_sz + allctr->sbc_header_size;
+ new_crr_sz = MSEG_UNIT_CEILING(new_crr_sz);
+ new_crr = (Carrier_t *) alcu_mseg_realloc(allctr,
+ old_crr,
+ old_crr_sz,
+ &new_crr_sz);
+ if (new_crr) {
+ SET_CARRIER_SZ(new_crr, new_crr_sz);
+ new_blk = SBC2BLK(allctr, new_crr);
+ SET_BLK_SZ(new_blk, new_blk_sz);
+ STAT_MSEG_SBC_ALLOC(allctr, new_crr_sz, new_blk_sz);
+ relink_carrier(&allctr->sbc_list, new_crr);
+ CHECK_1BLK_CARRIER(allctr, 1, 1, new_crr, new_crr_sz,
+ new_blk, new_blk_sz);
+ DEBUG_SAVE_ALIGNMENT(new_crr);
+ return new_blk;
+ }
+ create_flags |= CFLG_FORCE_SYS_ALLOC; /* since mseg_realloc()
+ failed */
+ }
+
+ new_blk = create_carrier(allctr, umem_sz, create_flags);
+ if (new_blk) {
+ sys_memcpy((void *) BLK2UMEM(new_blk),
+ (void *) BLK2UMEM(old_blk),
+ MIN(new_blk_sz, old_blk_sz) - ABLK_HDR_SZ);
+ unlink_carrier(&allctr->sbc_list, old_crr);
+ alcu_mseg_dealloc(allctr, old_crr, old_crr_sz);
+ }
+ else {
+ /* Old carrier unchanged; restore stat */
+ STAT_MSEG_SBC_ALLOC(allctr, old_crr_sz, old_blk_sz);
+ }
+
+ return new_blk;
+ }
+ else {
+ if (!(flags & CFLG_FORCE_MSEG)) {
+#endif /* #if HAVE_ERTS_MSEG */
+ new_bcrr_sz = new_blk_sz + allctr->sbc_header_size;
+ new_crr_sz = (flags & CFLG_FORCE_SIZE
+ ? UNIT_CEILING(new_bcrr_sz)
+ : SYS_ALLOC_CARRIER_CEILING(new_bcrr_sz));
+
+ new_crr = (Carrier_t *) alcu_sys_realloc(allctr,
+ (void *) old_crr,
+ new_crr_sz);
+ if (new_crr) {
+ sys_realloc_success:
+ SET_CARRIER_SZ(new_crr, new_crr_sz);
+ new_blk = SBC2BLK(allctr, new_crr);
+ SET_BLK_SZ(new_blk, new_blk_sz);
+ STAT_SYS_ALLOC_SBC_FREE(allctr, old_crr_sz, old_blk_sz);
+ STAT_SYS_ALLOC_SBC_ALLOC(allctr, new_crr_sz, new_blk_sz);
+ relink_carrier(&allctr->sbc_list, new_crr);
+ CHECK_1BLK_CARRIER(allctr, 1, 0, new_crr, new_crr_sz,
+ new_blk, new_blk_sz);
+ DEBUG_SAVE_ALIGNMENT(new_crr);
+ return new_blk;
+ }
+ else if (new_crr_sz > UNIT_CEILING(new_bcrr_sz)) {
+ new_crr_sz = new_blk_sz + allctr->sbc_header_size;
+ new_crr_sz = UNIT_CEILING(new_crr_sz);
+ new_crr = (Carrier_t *) alcu_sys_realloc(allctr,
+ (void *) old_crr,
+ new_crr_sz);
+ if (new_crr)
+ goto sys_realloc_success;
+ }
+
+#if !HAVE_ERTS_MSEG
+ return NULL;
+#else
+ create_flags |= CFLG_FORCE_MSEG; /* Since sys_realloc() failed */
+ }
+
+ STAT_SYS_ALLOC_SBC_FREE(allctr, old_crr_sz, old_blk_sz);
+
+ new_blk = create_carrier(allctr, umem_sz, create_flags);
+ if (new_blk) {
+ sys_memcpy((void *) BLK2UMEM(new_blk),
+ (void *) BLK2UMEM(old_blk),
+ MIN(new_blk_sz, old_blk_sz) - ABLK_HDR_SZ);
+ unlink_carrier(&allctr->sbc_list, old_crr);
+ alcu_sys_free(allctr, old_crr);
+ }
+ else {
+ /* Old carrier unchanged; restore... */
+ STAT_SYS_ALLOC_SBC_ALLOC(allctr, old_crr_sz, old_blk_sz);
+ }
+ DEBUG_SAVE_ALIGNMENT(new_crr);
+ return new_blk;
+ }
+#endif
+}
+
+static void
+destroy_carrier(Allctr_t *allctr, Block_t *blk)
+{
+ Uint crr_sz;
+ Carrier_t *crr;
+#if HAVE_ERTS_MSEG
+ Uint is_mseg = 0;
+#endif
+
+ ASSERT(IS_FIRST_BLK(blk));
+
+ if (IS_SBC_BLK(blk)) {
+ Uint blk_sz = BLK_SZ(blk);
+ crr = BLK2SBC(allctr, blk);
+ crr_sz = CARRIER_SZ(crr);
+
+ ASSERT(IS_LAST_BLK(blk));
+
+ HARD_CHECK_BLK_CARRIER(allctr, blk);
+
+#if HAVE_ERTS_MSEG
+ if (IS_MSEG_CARRIER(crr)) {
+ is_mseg++;
+ ASSERT(crr_sz % mseg_unit_size == 0);
+ STAT_MSEG_SBC_FREE(allctr, crr_sz, blk_sz);
+ }
+ else
+#endif
+ STAT_SYS_ALLOC_SBC_FREE(allctr, crr_sz, blk_sz);
+
+ unlink_carrier(&allctr->sbc_list, crr);
+
+ }
+ else {
+ crr = FBLK2MBC(allctr, blk);
+ crr_sz = CARRIER_SZ(crr);
+
+#ifdef DEBUG
+ if (!allctr->stopped) {
+ ASSERT(IS_LAST_BLK(blk));
+
+#ifdef ERTS_ALLOC_UTIL_HARD_DEBUG
+ (*allctr->link_free_block)(allctr, blk);
+ HARD_CHECK_BLK_CARRIER(allctr, blk);
+ (*allctr->unlink_free_block)(allctr, blk);
+#endif
+ }
+#endif
+
+#if HAVE_ERTS_MSEG
+ if (IS_MSEG_CARRIER(crr)) {
+ is_mseg++;
+ ASSERT(crr_sz % mseg_unit_size == 0);
+ STAT_MSEG_MBC_FREE(allctr, crr_sz);
+ }
+ else
+#endif
+ STAT_SYS_ALLOC_MBC_FREE(allctr, crr_sz);
+
+ unlink_carrier(&allctr->mbc_list, crr);
+ if (allctr->destroying_mbc)
+ (*allctr->destroying_mbc)(allctr, crr);
+ }
+
+
+#if HAVE_ERTS_MSEG
+ if (is_mseg) {
+ alcu_mseg_dealloc(allctr, crr, crr_sz);
+ }
+ else
+#endif
+ alcu_sys_free(allctr, crr);
+}
+
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\
+ * Info stuff *
+\* */
+
+static struct {
+ Eterm versions;
+
+ Eterm options;
+ Eterm e;
+ Eterm t;
+ Eterm ramv;
+ Eterm sbct;
+#if HAVE_ERTS_MSEG
+ Eterm asbcst;
+ Eterm rsbcst;
+#endif
+ Eterm rsbcmt;
+ Eterm rmbcmt;
+ Eterm mmbcs;
+ Eterm msbclt;
+#if HAVE_ERTS_MSEG
+ Eterm mmsbc;
+ Eterm mmmbc;
+#endif
+ Eterm lmbcs;
+ Eterm smbcs;
+ Eterm mbcgs;
+
+#if HAVE_ERTS_MSEG
+ Eterm mmc;
+#endif
+ Eterm ycs;
+
+ Eterm mbcs;
+ Eterm sbcs;
+ Eterm sys_alloc_carriers_size;
+#if HAVE_ERTS_MSEG
+ Eterm mseg_alloc_carriers_size;
+#endif
+ Eterm carriers_size;
+ Eterm sys_alloc_carriers;
+#if HAVE_ERTS_MSEG
+ Eterm mseg_alloc_carriers;
+#endif
+ Eterm carriers;
+ Eterm blocks_size;
+ Eterm blocks;
+
+ Eterm calls;
+ Eterm sys_alloc;
+ Eterm sys_free;
+ Eterm sys_realloc;
+#if HAVE_ERTS_MSEG
+ Eterm mseg_alloc;
+ Eterm mseg_dealloc;
+ Eterm mseg_realloc;
+#endif
+#ifdef DEBUG
+ Eterm end_of_atoms;
+#endif
+} am;
+
+static ERTS_INLINE void atom_init(Eterm *atom, char *name)
+{
+ *atom = am_atom_put(name, strlen(name));
+}
+#define AM_INIT(AM) atom_init(&am.AM, #AM)
+
+static erts_mtx_t init_atoms_mtx;
+
+static void
+init_atoms(Allctr_t *allctr)
+{
+
+#ifdef USE_THREADS
+ if (allctr && allctr->thread_safe)
+ erts_mtx_unlock(&allctr->mutex);
+#endif
+
+ erts_mtx_lock(&init_atoms_mtx);
+
+ if (!atoms_initialized) {
+#ifdef DEBUG
+ Eterm *atom;
+
+ for (atom = (Eterm *) &am; atom <= &am.end_of_atoms; atom++) {
+ *atom = THE_NON_VALUE;
+ }
+#endif
+
+ AM_INIT(versions);
+
+ AM_INIT(options);
+ AM_INIT(e);
+ AM_INIT(t);
+ AM_INIT(ramv);
+ AM_INIT(sbct);
+#if HAVE_ERTS_MSEG
+ AM_INIT(asbcst);
+ AM_INIT(rsbcst);
+#endif
+ AM_INIT(rsbcmt);
+ AM_INIT(rmbcmt);
+ AM_INIT(mmbcs);
+ AM_INIT(msbclt);
+#if HAVE_ERTS_MSEG
+ AM_INIT(mmsbc);
+ AM_INIT(mmmbc);
+#endif
+ AM_INIT(lmbcs);
+ AM_INIT(smbcs);
+ AM_INIT(mbcgs);
+
+#if HAVE_ERTS_MSEG
+ AM_INIT(mmc);
+#endif
+ AM_INIT(ycs);
+
+ AM_INIT(mbcs);
+ AM_INIT(sbcs);
+ AM_INIT(sys_alloc_carriers_size);
+#if HAVE_ERTS_MSEG
+ AM_INIT(mseg_alloc_carriers_size);
+#endif
+ AM_INIT(carriers_size);
+ AM_INIT(sys_alloc_carriers);
+#if HAVE_ERTS_MSEG
+ AM_INIT(mseg_alloc_carriers);
+#endif
+ AM_INIT(carriers);
+ AM_INIT(blocks_size);
+ AM_INIT(blocks);
+
+ AM_INIT(calls);
+ AM_INIT(sys_alloc);
+ AM_INIT(sys_free);
+ AM_INIT(sys_realloc);
+#if HAVE_ERTS_MSEG
+ AM_INIT(mseg_alloc);
+ AM_INIT(mseg_dealloc);
+ AM_INIT(mseg_realloc);
+#endif
+
+#ifdef DEBUG
+ for (atom = (Eterm *) &am; atom < &am.end_of_atoms; atom++) {
+ ASSERT(*atom != THE_NON_VALUE);
+ }
+#endif
+ }
+
+
+ if (allctr) {
+
+ make_name_atoms(allctr);
+
+ (*allctr->init_atoms)();
+
+#ifdef USE_THREADS
+ if (allctr->thread_safe)
+ erts_mtx_lock(&allctr->mutex);
+#endif
+ allctr->atoms_initialized = 1;
+ }
+
+ atoms_initialized = 1;
+ erts_mtx_unlock(&init_atoms_mtx);
+
+}
+
+static ERTS_INLINE void
+ensure_atoms_initialized(Allctr_t *allctr)
+{
+ if (!allctr || !allctr->atoms_initialized)
+ init_atoms(allctr);
+}
+
+#define bld_uint erts_bld_uint
+#define bld_cons erts_bld_cons
+#define bld_tuple erts_bld_tuple
+#define bld_string erts_bld_string
+
+/*
+ * bld_unstable_uint() (instead of bld_uint()) is used when values may
+ * change between the size check and the actual build. This is because
+ * a value that would fit in a small when the size check is done may
+ * need to be built as a big when the actual build is performed. The
+ * caller is required to HRelease after the build.
+ */
+static ERTS_INLINE Eterm
+bld_unstable_uint(Uint **hpp, Uint *szp, Uint ui)
+{
+ Eterm res = THE_NON_VALUE;
+ if (szp)
+ *szp += BIG_UINT_HEAP_SIZE;
+ if (hpp) {
+ if (IS_USMALL(0, ui))
+ res = make_small(ui);
+ else {
+ res = uint_to_big(ui, *hpp);
+ *hpp += BIG_UINT_HEAP_SIZE;
+ }
+ }
+ return res;
+}
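+
+/*
+ * Illustrative note (hypothetical local names): the hpp/szp pair gives
+ * these builders two passes. A first call with hpp == NULL accumulates
+ * the worst-case heap need in *szp; the caller allocates that many
+ * words and calls again with szp == NULL to build the term:
+ *
+ *     Uint sz = 0;
+ *     (void) bld_unstable_uint(NULL, &sz, ui);       -- size pass
+ *     Eterm *hp = HAlloc(p, sz), *hp_end = hp + sz;
+ *     Eterm res = bld_unstable_uint(&hp, NULL, ui);  -- build pass
+ *     HRelease(p, hp_end, hp);          -- return the unused words
+ *
+ * Since an unstable value may shrink from big to small between the
+ * passes, the build pass can use fewer words than reserved; hence the
+ * HRelease requirement noted above.
+ */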
+
+static ERTS_INLINE void
+add_2tup(Uint **hpp, Uint *szp, Eterm *lp, Eterm el1, Eterm el2)
+{
+ *lp = bld_cons(hpp, szp, bld_tuple(hpp, szp, 2, el1, el2), *lp);
+}
+
+static ERTS_INLINE void
+add_3tup(Uint **hpp, Uint *szp, Eterm *lp, Eterm el1, Eterm el2, Eterm el3)
+{
+ *lp = bld_cons(hpp, szp, bld_tuple(hpp, szp, 3, el1, el2, el3), *lp);
+}
+
+static ERTS_INLINE void
+add_4tup(Uint **hpp, Uint *szp, Eterm *lp,
+ Eterm el1, Eterm el2, Eterm el3, Eterm el4)
+{
+ *lp =
+ bld_cons(hpp, szp, bld_tuple(hpp, szp, 4, el1, el2, el3, el4), *lp);
+}
+
+static Eterm
+sz_info_carriers(Allctr_t *allctr,
+ CarriersStats_t *cs,
+ char *prefix,
+ int *print_to_p,
+ void *print_to_arg,
+ Uint **hpp,
+ Uint *szp)
+{
+ Eterm res = THE_NON_VALUE;
+ Uint curr_size = cs->curr_mseg.size + cs->curr_sys_alloc.size;
+
+ if (print_to_p) {
+ int to = *print_to_p;
+ void *arg = print_to_arg;
+ erts_print(to,
+ arg,
+ "%sblocks size: %bpu %bpu %bpu\n",
+ prefix,
+ cs->blocks.curr.size,
+ cs->blocks.max.size,
+ cs->blocks.max_ever.size);
+ erts_print(to,
+ arg,
+ "%scarriers size: %bpu %bpu %bpu\n",
+ prefix,
+ curr_size,
+ cs->max.size,
+ cs->max_ever.size);
+ }
+
+ if (hpp || szp) {
+ res = NIL;
+ add_4tup(hpp, szp, &res,
+ am.carriers_size,
+ bld_unstable_uint(hpp, szp, curr_size),
+ bld_unstable_uint(hpp, szp, cs->max.size),
+ bld_unstable_uint(hpp, szp, cs->max_ever.size));
+ add_4tup(hpp, szp, &res,
+ am.blocks_size,
+ bld_unstable_uint(hpp, szp, cs->blocks.curr.size),
+ bld_unstable_uint(hpp, szp, cs->blocks.max.size),
+ bld_unstable_uint(hpp, szp, cs->blocks.max_ever.size));
+ }
+
+ return res;
+}
+
+static Eterm
+info_carriers(Allctr_t *allctr,
+ CarriersStats_t *cs,
+ char *prefix,
+ int *print_to_p,
+ void *print_to_arg,
+ Uint **hpp,
+ Uint *szp)
+{
+ Eterm res = THE_NON_VALUE;
+ Uint curr_no = cs->curr_mseg.no + cs->curr_sys_alloc.no;
+ Uint curr_size = cs->curr_mseg.size + cs->curr_sys_alloc.size;
+
+ if (print_to_p) {
+ int to = *print_to_p;
+ void *arg = print_to_arg;
+ erts_print(to,
+ arg,
+ "%sblocks: %bpu %bpu %bpu\n",
+ prefix,
+ cs->blocks.curr.no,
+ cs->blocks.max.no,
+ cs->blocks.max_ever.no);
+ erts_print(to,
+ arg,
+ "%sblocks size: %bpu %bpu %bpu\n",
+ prefix,
+ cs->blocks.curr.size,
+ cs->blocks.max.size,
+ cs->blocks.max_ever.size);
+ erts_print(to,
+ arg,
+ "%scarriers: %bpu %bpu %bpu\n",
+ prefix,
+ curr_no,
+ cs->max.no,
+ cs->max_ever.no);
+#if HAVE_ERTS_MSEG
+ erts_print(to,
+ arg,
+ "%smseg carriers: %bpu\n",
+ prefix,
+ cs->curr_mseg.no);
+#endif
+ erts_print(to,
+ arg,
+ "%ssys_alloc carriers: %bpu\n",
+ prefix,
+ cs->curr_sys_alloc.no);
+ erts_print(to,
+ arg,
+ "%scarriers size: %bpu %bpu %bpu\n",
+ prefix,
+ curr_size,
+ cs->max.size,
+ cs->max_ever.size);
+#if HAVE_ERTS_MSEG
+ erts_print(to,
+ arg,
+ "%smseg carriers size: %bpu\n",
+ prefix,
+ cs->curr_mseg.size);
+#endif
+ erts_print(to,
+ arg,
+ "%ssys_alloc carriers size: %bpu\n",
+ prefix,
+ cs->curr_sys_alloc.size);
+ }
+
+ if (hpp || szp) {
+ res = NIL;
+ add_2tup(hpp, szp, &res,
+ am.sys_alloc_carriers_size,
+ bld_unstable_uint(hpp, szp, cs->curr_sys_alloc.size));
+#if HAVE_ERTS_MSEG
+ add_2tup(hpp, szp, &res,
+ am.mseg_alloc_carriers_size,
+ bld_unstable_uint(hpp, szp, cs->curr_mseg.size));
+#endif
+ add_4tup(hpp, szp, &res,
+ am.carriers_size,
+ bld_unstable_uint(hpp, szp, curr_size),
+ bld_unstable_uint(hpp, szp, cs->max.size),
+ bld_unstable_uint(hpp, szp, cs->max_ever.size));
+ add_2tup(hpp, szp, &res,
+ am.sys_alloc_carriers,
+ bld_unstable_uint(hpp, szp, cs->curr_sys_alloc.no));
+#if HAVE_ERTS_MSEG
+ add_2tup(hpp, szp, &res,
+ am.mseg_alloc_carriers,
+ bld_unstable_uint(hpp, szp, cs->curr_mseg.no));
+#endif
+ add_4tup(hpp, szp, &res,
+ am.carriers,
+ bld_unstable_uint(hpp, szp, curr_no),
+ bld_unstable_uint(hpp, szp, cs->max.no),
+ bld_unstable_uint(hpp, szp, cs->max_ever.no));
+ add_4tup(hpp, szp, &res,
+ am.blocks_size,
+ bld_unstable_uint(hpp, szp, cs->blocks.curr.size),
+ bld_unstable_uint(hpp, szp, cs->blocks.max.size),
+ bld_unstable_uint(hpp, szp, cs->blocks.max_ever.size));
+ add_4tup(hpp, szp, &res,
+ am.blocks,
+ bld_unstable_uint(hpp, szp, cs->blocks.curr.no),
+ bld_unstable_uint(hpp, szp, cs->blocks.max.no),
+ bld_unstable_uint(hpp, szp, cs->blocks.max_ever.no));
+ }
+
+ return res;
+}
+
+static void
+make_name_atoms(Allctr_t *allctr)
+{
+ char alloc[] = "alloc";
+ char realloc[] = "realloc";
+ char free[] = "free";
+ char buf[MAX_ATOM_LENGTH];
+ size_t prefix_len = strlen(allctr->name_prefix);
+
+ if (prefix_len > MAX_ATOM_LENGTH - (sizeof(realloc) - 1))
+ erl_exit(1,"Too long allocator name: %salloc\n",allctr->name_prefix);
+
+ memcpy((void *) buf, (void *) allctr->name_prefix, prefix_len);
+
+ memcpy((void *) &buf[prefix_len], (void *) alloc, sizeof(alloc) - 1);
+ allctr->name.alloc = am_atom_put(buf, prefix_len + sizeof(alloc) - 1);
+
+ memcpy((void *) &buf[prefix_len], (void *) realloc, sizeof(realloc) - 1);
+ allctr->name.realloc = am_atom_put(buf, prefix_len + sizeof(realloc) - 1);
+
+ memcpy((void *) &buf[prefix_len], (void *) free, sizeof(free) - 1);
+ allctr->name.free = am_atom_put(buf, prefix_len + sizeof(free) - 1);
+
+}
+
+static Eterm
+info_calls(Allctr_t *allctr,
+ int *print_to_p,
+ void *print_to_arg,
+ Uint **hpp,
+ Uint *szp)
+{
+ Eterm res = THE_NON_VALUE;
+
+
+ if (print_to_p) {
+
+#define PRINT_CC_4(TO, TOA, NAME, CC) \
+ if ((CC).giga_no == 0) \
+ erts_print(TO, TOA, "%s calls: %bpu\n", NAME, CC.no); \
+ else \
+ erts_print(TO, TOA, "%s calls: %bpu%09lu\n", NAME, CC.giga_no, CC.no)
+
+#define PRINT_CC_5(TO, TOA, PRFX, NAME, CC) \
+ if ((CC).giga_no == 0) \
+ erts_print(TO, TOA, "%s%s calls: %bpu\n",PRFX,NAME,CC.no); \
+ else \
+ erts_print(TO, TOA, "%s%s calls: %bpu%09lu\n",PRFX,NAME,CC.giga_no,CC.no)
+
+ char *prefix = allctr->name_prefix;
+ int to = *print_to_p;
+ void *arg = print_to_arg;
+
+ PRINT_CC_5(to, arg, prefix, "alloc", allctr->calls.this_alloc);
+ PRINT_CC_5(to, arg, prefix, "free", allctr->calls.this_free);
+ PRINT_CC_5(to, arg, prefix, "realloc", allctr->calls.this_realloc);
+
+#if HAVE_ERTS_MSEG
+ PRINT_CC_4(to, arg, "mseg_alloc", allctr->calls.mseg_alloc);
+ PRINT_CC_4(to, arg, "mseg_dealloc", allctr->calls.mseg_dealloc);
+ PRINT_CC_4(to, arg, "mseg_realloc", allctr->calls.mseg_realloc);
+#endif
+
+ PRINT_CC_4(to, arg, "sys_alloc", allctr->calls.sys_alloc);
+ PRINT_CC_4(to, arg, "sys_free", allctr->calls.sys_free);
+ PRINT_CC_4(to, arg, "sys_realloc", allctr->calls.sys_realloc);
+
+#undef PRINT_CC_4
+#undef PRINT_CC_5
+
+ }
+
+
+ if (hpp || szp) {
+
+ ASSERT(allctr->name.alloc != THE_NON_VALUE);
+ ASSERT(allctr->name.realloc != THE_NON_VALUE);
+ ASSERT(allctr->name.free != THE_NON_VALUE);
+
+ res = NIL;
+
+ add_3tup(hpp, szp, &res,
+ am.sys_realloc,
+ bld_unstable_uint(hpp, szp, allctr->calls.sys_realloc.giga_no),
+ bld_unstable_uint(hpp, szp, allctr->calls.sys_realloc.no));
+ add_3tup(hpp, szp, &res,
+ am.sys_free,
+ bld_unstable_uint(hpp, szp, allctr->calls.sys_free.giga_no),
+ bld_unstable_uint(hpp, szp, allctr->calls.sys_free.no));
+ add_3tup(hpp, szp, &res,
+ am.sys_alloc,
+ bld_unstable_uint(hpp, szp, allctr->calls.sys_alloc.giga_no),
+ bld_unstable_uint(hpp, szp, allctr->calls.sys_alloc.no));
+#if HAVE_ERTS_MSEG
+ add_3tup(hpp, szp, &res,
+ am.mseg_realloc,
+ bld_unstable_uint(hpp, szp, allctr->calls.mseg_realloc.giga_no),
+ bld_unstable_uint(hpp, szp, allctr->calls.mseg_realloc.no));
+ add_3tup(hpp, szp, &res,
+ am.mseg_dealloc,
+ bld_unstable_uint(hpp, szp, allctr->calls.mseg_dealloc.giga_no),
+ bld_unstable_uint(hpp, szp, allctr->calls.mseg_dealloc.no));
+ add_3tup(hpp, szp, &res,
+ am.mseg_alloc,
+ bld_unstable_uint(hpp, szp, allctr->calls.mseg_alloc.giga_no),
+ bld_unstable_uint(hpp, szp, allctr->calls.mseg_alloc.no));
+#endif
+ add_3tup(hpp, szp, &res,
+ allctr->name.realloc,
+ bld_unstable_uint(hpp, szp, allctr->calls.this_realloc.giga_no),
+ bld_unstable_uint(hpp, szp, allctr->calls.this_realloc.no));
+ add_3tup(hpp, szp, &res,
+ allctr->name.free,
+ bld_unstable_uint(hpp, szp, allctr->calls.this_free.giga_no),
+ bld_unstable_uint(hpp, szp, allctr->calls.this_free.no));
+ add_3tup(hpp, szp, &res,
+ allctr->name.alloc,
+ bld_unstable_uint(hpp, szp, allctr->calls.this_alloc.giga_no),
+ bld_unstable_uint(hpp, szp, allctr->calls.this_alloc.no));
+ }
+
+ return res;
+}
+
+static Eterm
+info_options(Allctr_t *allctr,
+ int *print_to_p,
+ void *print_to_arg,
+ Uint **hpp,
+ Uint *szp)
+{
+ Eterm res = THE_NON_VALUE;
+
+ if (!allctr) {
+ if (print_to_p)
+ erts_print(*print_to_p, print_to_arg, "option e: false\n");
+ if (hpp || szp) {
+ res = NIL;
+ add_2tup(hpp, szp, &res, am.e, am_false);
+ }
+ return res;
+ }
+
+ if (print_to_p) {
+ char topt[21]; /* Enough for any 64-bit integer */
+ if (allctr->t)
+ erts_snprintf(&topt[0], sizeof(topt), "%d", allctr->t);
+ else
+ erts_snprintf(&topt[0], sizeof(topt), "false");
+ erts_print(*print_to_p,
+ print_to_arg,
+ "option e: true\n"
+ "option t: %s\n"
+ "option ramv: %s\n"
+ "option sbct: %bpu\n"
+#if HAVE_ERTS_MSEG
+ "option asbcst: %bpu\n"
+ "option rsbcst: %bpu\n"
+#endif
+ "option rsbcmt: %bpu\n"
+ "option rmbcmt: %bpu\n"
+ "option mmbcs: %bpu\n"
+#if HAVE_ERTS_MSEG
+ "option mmsbc: %bpu\n"
+ "option mmmbc: %bpu\n"
+#endif
+ "option lmbcs: %bpu\n"
+ "option smbcs: %bpu\n"
+ "option mbcgs: %bpu\n",
+ topt,
+ allctr->ramv ? "true" : "false",
+ allctr->sbc_threshold,
+#if HAVE_ERTS_MSEG
+ allctr->mseg_opt.abs_shrink_th,
+ allctr->mseg_opt.rel_shrink_th,
+#endif
+ allctr->sbc_move_threshold,
+ allctr->mbc_move_threshold,
+ allctr->main_carrier_size,
+#if HAVE_ERTS_MSEG
+ allctr->max_mseg_sbcs,
+ allctr->max_mseg_mbcs,
+#endif
+ allctr->largest_mbc_size,
+ allctr->smallest_mbc_size,
+ allctr->mbc_growth_stages);
+ }
+
+ res = (*allctr->info_options)(allctr, "option ", print_to_p, print_to_arg,
+ hpp, szp);
+
+ if (hpp || szp) {
+ add_2tup(hpp, szp, &res,
+ am.mbcgs,
+ bld_uint(hpp, szp, allctr->mbc_growth_stages));
+ add_2tup(hpp, szp, &res,
+ am.smbcs,
+ bld_uint(hpp, szp, allctr->smallest_mbc_size));
+ add_2tup(hpp, szp, &res,
+ am.lmbcs,
+ bld_uint(hpp, szp, allctr->largest_mbc_size));
+#if HAVE_ERTS_MSEG
+ add_2tup(hpp, szp, &res,
+ am.mmsbc,
+ bld_uint(hpp, szp, allctr->max_mseg_sbcs));
+ add_2tup(hpp, szp, &res,
+ am.mmmbc,
+ bld_uint(hpp, szp, allctr->max_mseg_mbcs));
+#endif
+ add_2tup(hpp, szp, &res,
+ am.mmbcs,
+ bld_uint(hpp, szp, allctr->main_carrier_size));
+ add_2tup(hpp, szp, &res,
+ am.rmbcmt,
+ bld_uint(hpp, szp, allctr->mbc_move_threshold));
+ add_2tup(hpp, szp, &res,
+ am.rsbcmt,
+ bld_uint(hpp, szp, allctr->sbc_move_threshold));
+#if HAVE_ERTS_MSEG
+ add_2tup(hpp, szp, &res,
+ am.rsbcst,
+ bld_uint(hpp, szp, allctr->mseg_opt.rel_shrink_th));
+ add_2tup(hpp, szp, &res,
+ am.asbcst,
+ bld_uint(hpp, szp, allctr->mseg_opt.abs_shrink_th));
+#endif
+ add_2tup(hpp, szp, &res,
+ am.sbct,
+ bld_uint(hpp, szp, allctr->sbc_threshold));
+ add_2tup(hpp, szp, &res, am.ramv, allctr->ramv ? am_true : am_false);
+ add_2tup(hpp, szp, &res, am.t, (allctr->t
+ ? bld_uint(hpp, szp, (Uint) allctr->t)
+ : am_false));
+ add_2tup(hpp, szp, &res, am.e, am_true);
+ }
+
+ return res;
+}
+
+
+static ERTS_INLINE void
+update_max_ever_values(CarriersStats_t *cs)
+{
+ if (cs->max_ever.no < cs->max.no)
+ cs->max_ever.no = cs->max.no;
+ if (cs->max_ever.size < cs->max.size)
+ cs->max_ever.size = cs->max.size;
+ if (cs->blocks.max_ever.no < cs->blocks.max.no)
+ cs->blocks.max_ever.no = cs->blocks.max.no;
+ if (cs->blocks.max_ever.size < cs->blocks.max.size)
+ cs->blocks.max_ever.size = cs->blocks.max.size;
+}
+
+static ERTS_INLINE void
+reset_max_values(CarriersStats_t *cs)
+{
+ cs->max.no = cs->curr_mseg.no + cs->curr_sys_alloc.no;
+ cs->max.size = cs->curr_mseg.size + cs->curr_sys_alloc.size;
+ cs->blocks.max.no = cs->blocks.curr.no;
+ cs->blocks.max.size = cs->blocks.curr.size;
+}
+
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\
+ * Exported functions *
+\* */
+
+Eterm
+erts_alcu_au_info_options(int *print_to_p, void *print_to_arg,
+ Uint **hpp, Uint *szp)
+{
+ Eterm res = THE_NON_VALUE;
+
+ if (print_to_p) {
+
+ erts_print(*print_to_p,
+ print_to_arg,
+#if HAVE_ERTS_MSEG
+ "option mmc: %bpu\n"
+#endif
+ "option ycs: %bpu\n",
+#if HAVE_ERTS_MSEG
+ max_mseg_carriers,
+#endif
+ sys_alloc_carrier_size);
+ }
+
+ if (hpp || szp) {
+ res = NIL;
+ ensure_atoms_initialized(NULL);
+ add_2tup(hpp, szp, &res,
+ am.ycs,
+ bld_uint(hpp, szp, sys_alloc_carrier_size));
+#if HAVE_ERTS_MSEG
+ add_2tup(hpp, szp, &res,
+ am.mmc,
+ bld_uint(hpp, szp, max_mseg_carriers));
+#endif
+ }
+
+ return res;
+}
+
+
+Eterm
+erts_alcu_info_options(Allctr_t *allctr,
+ int *print_to_p,
+ void *print_to_arg,
+ Uint **hpp,
+ Uint *szp)
+{
+ Eterm res;
+
+
+#ifdef USE_THREADS
+ if (allctr->thread_safe)
+ erts_mtx_lock(&allctr->mutex);
+#endif
+ if (hpp || szp)
+ ensure_atoms_initialized(allctr);
+ res = info_options(allctr, print_to_p, print_to_arg, hpp, szp);
+#ifdef USE_THREADS
+ if (allctr->thread_safe)
+ erts_mtx_unlock(&allctr->mutex);
+#endif
+ return res;
+}
+
+/* ----------------------------------------------------------------------- */
+
+Eterm
+erts_alcu_sz_info(Allctr_t *allctr,
+ int begin_max_period,
+ int *print_to_p,
+ void *print_to_arg,
+ Uint **hpp,
+ Uint *szp)
+{
+ Eterm res, mbcs, sbcs;
+
+ res = THE_NON_VALUE;
+
+ if (!allctr) {
+ if (print_to_p)
+ erts_print(*print_to_p, print_to_arg, "false\n");
+ if (szp)
+ *szp = 0;
+ return am_false;
+ }
+
+#ifdef USE_THREADS
+ if (allctr->thread_safe)
+ erts_mtx_lock(&allctr->mutex);
+#endif
+
+ if (hpp || szp)
+ ensure_atoms_initialized(allctr);
+
+ /* Update sbc values not continuously updated */
+ allctr->sbcs.blocks.curr.no
+ = allctr->sbcs.curr_mseg.no + allctr->sbcs.curr_sys_alloc.no;
+ allctr->sbcs.blocks.max.no = allctr->sbcs.max.no;
+
+ update_max_ever_values(&allctr->mbcs);
+ update_max_ever_values(&allctr->sbcs);
+
+ mbcs = sz_info_carriers(allctr, &allctr->mbcs, "mbcs ", print_to_p,
+ print_to_arg, hpp, szp);
+ sbcs = sz_info_carriers(allctr, &allctr->sbcs, "sbcs ", print_to_p,
+ print_to_arg, hpp, szp);
+
+ if (hpp || szp) {
+ res = NIL;
+ add_2tup(hpp, szp, &res, am.sbcs, sbcs);
+ add_2tup(hpp, szp, &res, am.mbcs, mbcs);
+ }
+
+ if (begin_max_period) {
+ reset_max_values(&allctr->mbcs);
+ reset_max_values(&allctr->sbcs);
+ }
+
+
+#ifdef USE_THREADS
+ if (allctr->thread_safe)
+ erts_mtx_unlock(&allctr->mutex);
+#endif
+
+ return res;
+}
+
+Eterm
+erts_alcu_info(Allctr_t *allctr,
+ int begin_max_period,
+ int *print_to_p,
+ void *print_to_arg,
+ Uint **hpp,
+ Uint *szp)
+{
+ Eterm res, sett, mbcs, sbcs, calls;
+
+ res = THE_NON_VALUE;
+
+ if (!allctr) {
+ if (print_to_p)
+ erts_print(*print_to_p, print_to_arg, "false\n");
+ if (szp)
+ *szp = 0;
+ return am_false;
+ }
+
+#ifdef USE_THREADS
+ if (allctr->thread_safe)
+ erts_mtx_lock(&allctr->mutex);
+#endif
+
+ if (hpp || szp)
+ ensure_atoms_initialized(allctr);
+
+ /* Update sbc values not continuously updated */
+ allctr->sbcs.blocks.curr.no
+ = allctr->sbcs.curr_mseg.no + allctr->sbcs.curr_sys_alloc.no;
+ allctr->sbcs.blocks.max.no = allctr->sbcs.max.no;
+
+ update_max_ever_values(&allctr->mbcs);
+ update_max_ever_values(&allctr->sbcs);
+
+ if (print_to_p) {
+ erts_print(*print_to_p,
+ print_to_arg,
+ "versions: %s %s\n",
+ allctr->vsn_str,
+ ERTS_ALCU_VSN_STR);
+ }
+
+ sett = info_options(allctr, print_to_p, print_to_arg, hpp, szp);
+ mbcs = info_carriers(allctr, &allctr->mbcs, "mbcs ", print_to_p,
+ print_to_arg, hpp, szp);
+ sbcs = info_carriers(allctr, &allctr->sbcs, "sbcs ", print_to_p,
+ print_to_arg, hpp, szp);
+ calls = info_calls(allctr, print_to_p, print_to_arg, hpp, szp);
+
+ if (hpp || szp) {
+ res = NIL;
+
+ add_2tup(hpp, szp, &res, am.calls, calls);
+ add_2tup(hpp, szp, &res, am.sbcs, sbcs);
+ add_2tup(hpp, szp, &res, am.mbcs, mbcs);
+ add_2tup(hpp, szp, &res, am.options, sett);
+ add_3tup(hpp, szp, &res,
+ am.versions,
+ bld_string(hpp, szp, allctr->vsn_str),
+ bld_string(hpp, szp, ERTS_ALCU_VSN_STR));
+ }
+
+ if (begin_max_period) {
+ reset_max_values(&allctr->mbcs);
+ reset_max_values(&allctr->sbcs);
+ }
+
+
+#ifdef USE_THREADS
+ if (allctr->thread_safe)
+ erts_mtx_unlock(&allctr->mutex);
+#endif
+
+ return res;
+}
+
+
+void
+erts_alcu_current_size(Allctr_t *allctr, AllctrSize_t *size)
+{
+
+#ifdef USE_THREADS
+ if (allctr->thread_safe)
+ erts_mtx_lock(&allctr->mutex);
+#endif
+
+ size->carriers = allctr->mbcs.curr_mseg.size;
+ size->carriers += allctr->mbcs.curr_sys_alloc.size;
+ size->carriers += allctr->sbcs.curr_mseg.size;
+ size->carriers += allctr->sbcs.curr_sys_alloc.size;
+
+ size->blocks = allctr->mbcs.blocks.curr.size;
+ size->blocks += allctr->sbcs.blocks.curr.size;
+
+#ifdef USE_THREADS
+ if (allctr->thread_safe)
+ erts_mtx_unlock(&allctr->mutex);
+#endif
+}
+
+/* ----------------------------------------------------------------------- */
+
+static ERTS_INLINE void *
+do_erts_alcu_alloc(ErtsAlcType_t type, void *extra, Uint size)
+{
+ Allctr_t *allctr = (Allctr_t *) extra;
+ void *res;
+
+ ASSERT(initialized);
+
+ ASSERT(allctr);
+
+ ERTS_ALCU_DBG_CHK_THR_SPEC(allctr);
+
+#if ALLOC_ZERO_EQ_NULL
+ if (!size)
+ return NULL;
+#endif
+
+ INC_CC(allctr->calls.this_alloc);
+
+ if (size >= allctr->sbc_threshold) {
+ Block_t *blk = create_carrier(allctr, size, CFLG_SBC);
+ res = blk ? BLK2UMEM(blk) : NULL;
+ }
+ else
+ res = mbc_alloc(allctr, size);
+
+ return res;
+}
+
+void *erts_alcu_alloc(ErtsAlcType_t type, void *extra, Uint size)
+{
+ void *res;
+ res = do_erts_alcu_alloc(type, extra, size);
+ DEBUG_CHECK_ALIGNMENT(res);
+ return res;
+}
+
+
+#ifdef USE_THREADS
+
+void *
+erts_alcu_alloc_ts(ErtsAlcType_t type, void *extra, Uint size)
+{
+ Allctr_t *allctr = (Allctr_t *) extra;
+ void *res;
+ erts_mtx_lock(&allctr->mutex);
+ res = do_erts_alcu_alloc(type, extra, size);
+
+ DEBUG_CHECK_ALIGNMENT(res);
+
+ erts_mtx_unlock(&allctr->mutex);
+ return res;
+}
+
+void *
+erts_alcu_alloc_thr_spec(ErtsAlcType_t type, void *extra, Uint size)
+{
+ ErtsAllocatorThrSpec_t *tspec = (ErtsAllocatorThrSpec_t *) extra;
+ int ix = erts_alc_get_thr_ix();
+ Allctr_t *allctr;
+ int unlock;
+ void *res;
+
+ ASSERT(ix > 0);
+ if (ix < tspec->size) {
+ allctr = tspec->allctr[ix];
+ unlock = 0;
+ }
+ else {
+ allctr = tspec->allctr[0];
+ unlock = 1;
+ erts_mtx_lock(&allctr->mutex);
+ }
+
+ res = do_erts_alcu_alloc(type, allctr, size);
+
+ if (unlock)
+ erts_mtx_unlock(&allctr->mutex);
+
+ DEBUG_CHECK_ALIGNMENT(res);
+
+ return res;
+}
+
+void *
+erts_alcu_alloc_thr_pref(ErtsAlcType_t type, void *extra, Uint size)
+{
+ ErtsAllocatorThrSpec_t *tspec = (ErtsAllocatorThrSpec_t *) extra;
+ int ix = erts_alc_get_thr_ix();
+ Allctr_t *allctr;
+ void *res;
+
+ ASSERT(sizeof(Uint) == sizeof(Allctr_t *));
+ ASSERT(ix > 0);
+ if (ix >= tspec->size)
+ ix = (ix % (tspec->size - 1)) + 1;
+ allctr = tspec->allctr[ix];
+ erts_mtx_lock(&allctr->mutex);
+ res = do_erts_alcu_alloc(type, allctr, size + sizeof(Uint));
+ if (res) {
+ *((Allctr_t **) res) = allctr;
+ res = (void *) (((char *) res) + sizeof(Uint));
+ }
+ erts_mtx_unlock(&allctr->mutex);
+ DEBUG_CHECK_ALIGNMENT(res);
+ return res;
+}
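+
+/*
+ * Note: the thread-preferred scheme hides the owning allocator in a
+ * word in front of the user memory:
+ *
+ *     [Allctr_t *owner][user data ...]
+ *                      ^- pointer returned to the caller
+ *
+ * free and realloc, on any thread, step back sizeof(Uint) bytes to
+ * recover the owner and lock the right instance (see
+ * erts_alcu_free_thr_pref() and erts_alcu_realloc_thr_pref() below).
+ */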
+
+#endif
+
+/* ------------------------------------------------------------------------- */
+
+static ERTS_INLINE void
+do_erts_alcu_free(ErtsAlcType_t type, void *extra, void *p)
+{
+ Allctr_t *allctr = (Allctr_t *) extra;
+ ASSERT(initialized);
+
+ ASSERT(allctr);
+
+ ERTS_ALCU_DBG_CHK_THR_SPEC(allctr);
+
+ if (p) {
+ Block_t *blk;
+
+ INC_CC(allctr->calls.this_free);
+
+ blk = UMEM2BLK(p);
+ if (IS_SBC_BLK(blk))
+ destroy_carrier(allctr, blk);
+ else
+ mbc_free(allctr, p);
+ }
+}
+
+void erts_alcu_free(ErtsAlcType_t type, void *extra, void *p)
+{
+ do_erts_alcu_free(type, extra, p);
+}
+
+#ifdef USE_THREADS
+
+void
+erts_alcu_free_ts(ErtsAlcType_t type, void *extra, void *p)
+{
+ Allctr_t *allctr = (Allctr_t *) extra;
+ erts_mtx_lock(&allctr->mutex);
+ do_erts_alcu_free(type, extra, p);
+ erts_mtx_unlock(&allctr->mutex);
+}
+
+void
+erts_alcu_free_thr_spec(ErtsAlcType_t type, void *extra, void *p)
+{
+ ErtsAllocatorThrSpec_t *tspec = (ErtsAllocatorThrSpec_t *) extra;
+ int ix = erts_alc_get_thr_ix();
+ int unlock;
+ Allctr_t *allctr;
+
+ ASSERT(ix > 0);
+ if (ix < tspec->size) {
+ allctr = tspec->allctr[ix];
+ unlock = 0;
+ }
+ else {
+ allctr = tspec->allctr[0];
+ unlock = 1;
+ erts_mtx_lock(&allctr->mutex);
+ }
+
+ do_erts_alcu_free(type, allctr, p);
+ if (unlock)
+ erts_mtx_unlock(&allctr->mutex);
+}
+
+void
+erts_alcu_free_thr_pref(ErtsAlcType_t type, void *unused, void *p)
+{
+ if (p) {
+ void *ptr = (void *) (((char *) p) - sizeof(Uint));
+ Allctr_t *allctr = *((Allctr_t **) ptr);
+ erts_mtx_lock(&allctr->mutex);
+ do_erts_alcu_free(type, allctr, ptr);
+ erts_mtx_unlock(&allctr->mutex);
+ }
+}
+
+#endif
+
+/* ------------------------------------------------------------------------- */
+
+static ERTS_INLINE void *
+do_erts_alcu_realloc(ErtsAlcType_t type,
+ void *extra,
+ void *p,
+ Uint size,
+ Uint flgs)
+{
+ Allctr_t *allctr = (Allctr_t *) extra;
+ Block_t *blk;
+ void *res;
+
+ ASSERT(initialized);
+
+ ASSERT(allctr);
+
+ ERTS_ALCU_DBG_CHK_THR_SPEC(allctr);
+
+ if (!p) {
+ res = do_erts_alcu_alloc(type, extra, size);
+ INC_CC(allctr->calls.this_realloc);
+ DEC_CC(allctr->calls.this_alloc);
+ return res;
+ }
+
+#if ALLOC_ZERO_EQ_NULL
+ if (!size) {
+ ASSERT(p);
+ do_erts_alcu_free(type, extra, p);
+ INC_CC(allctr->calls.this_realloc);
+ DEC_CC(allctr->calls.this_free);
+ return NULL;
+ }
+#endif
+
+ INC_CC(allctr->calls.this_realloc);
+
+ blk = UMEM2BLK(p);
+
+ if (size < allctr->sbc_threshold) {
+ if (IS_MBC_BLK(blk))
+ res = mbc_realloc(allctr, p, size, flgs);
+ else {
+ Uint used_sz = allctr->sbc_header_size + ABLK_HDR_SZ + size;
+ Uint crr_sz;
+ Uint diff_sz_val;
+ Uint crr_sz_val;
+
+#if HAVE_ERTS_MSEG
+ if (IS_SYS_ALLOC_CARRIER(BLK2SBC(allctr, blk)))
+#endif
+ crr_sz = SYS_ALLOC_CARRIER_CEILING(used_sz);
+#if HAVE_ERTS_MSEG
+ else
+ crr_sz = MSEG_UNIT_CEILING(used_sz);
+#endif
+ diff_sz_val = crr_sz - used_sz;
+ if (diff_sz_val < (~((Uint) 0) / 100))
+ crr_sz_val = crr_sz;
+ else {
+ /* div both by 128 */
+ crr_sz_val = crr_sz >> 7;
+ /* A sys_alloc carrier could potentially be
+ smaller than 128 bytes (but not likely) */
+ if (crr_sz_val == 0)
+ goto do_carrier_resize;
+ diff_sz_val >>= 7;
+ }
+
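+	    /* Illustrative numbers: a 64 KB carrier with used_sz 40 KB
+	       gives diff_sz_val 24 KB; with sbc_move_threshold = 80 we
+	       have 100*24K < 80*64K, so the carrier is resized in
+	       place rather than copied into a multiblock carrier. */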
+ if (100*diff_sz_val < allctr->sbc_move_threshold*crr_sz_val)
+ /* Data won't be copied into a new carrier... */
+ goto do_carrier_resize;
+ else if (flgs & ERTS_ALCU_FLG_FAIL_REALLOC_MOVE)
+ return NULL;
+
+ res = mbc_alloc(allctr, size);
+ if (res) {
+ sys_memcpy((void*) res,
+ (void*) p,
+ MIN(BLK_SZ(blk) - ABLK_HDR_SZ, size));
+ destroy_carrier(allctr, blk);
+ }
+ }
+ }
+ else {
+ Block_t *new_blk;
+ if(IS_SBC_BLK(blk)) {
+ do_carrier_resize:
+ new_blk = resize_carrier(allctr, blk, size, CFLG_SBC);
+ res = new_blk ? BLK2UMEM(new_blk) : NULL;
+ }
+ else if (flgs & ERTS_ALCU_FLG_FAIL_REALLOC_MOVE)
+ return NULL;
+ else {
+ new_blk = create_carrier(allctr, size, CFLG_SBC);
+ if (new_blk) {
+ res = BLK2UMEM(new_blk);
+ sys_memcpy((void *) res,
+ (void *) p,
+ MIN(BLK_SZ(blk) - ABLK_HDR_SZ, size));
+ mbc_free(allctr, p);
+ }
+ else
+ res = NULL;
+ }
+ }
+
+ return res;
+}
+
+void *
+erts_alcu_realloc(ErtsAlcType_t type, void *extra, void *p, Uint size)
+{
+ void *res;
+ res = do_erts_alcu_realloc(type, extra, p, size, 0);
+ DEBUG_CHECK_ALIGNMENT(res);
+ return res;
+}
+
+void *
+erts_alcu_realloc_mv(ErtsAlcType_t type, void *extra, void *p, Uint size)
+{
+ void *res;
+ res = do_erts_alcu_alloc(type, extra, size);
+ if (!res)
+ res = erts_alcu_realloc(type, extra, p, size);
+ else {
+ Block_t *blk;
+ size_t cpy_size;
+
+ blk = UMEM2BLK(p);
+ cpy_size = BLK_SZ(blk) - ABLK_HDR_SZ;
+ if (cpy_size > size)
+ cpy_size = size;
+ sys_memcpy(res, p, cpy_size);
+ do_erts_alcu_free(type, extra, p);
+ }
+ DEBUG_CHECK_ALIGNMENT(res);
+ return res;
+}
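+
+/*
+ * Note: the _mv ("move") variants first try a fresh allocation plus
+ * copy, and only fall back to an ordinary realloc if that allocation
+ * fails (cf. the ramv "realloc always moves" option).
+ */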
+
+
+#ifdef USE_THREADS
+
+void *
+erts_alcu_realloc_ts(ErtsAlcType_t type, void *extra, void *ptr, Uint size)
+{
+ Allctr_t *allctr = (Allctr_t *) extra;
+ void *res;
+ erts_mtx_lock(&allctr->mutex);
+ res = do_erts_alcu_realloc(type, extra, ptr, size, 0);
+ erts_mtx_unlock(&allctr->mutex);
+ DEBUG_CHECK_ALIGNMENT(res);
+ return res;
+}
+
+void *
+erts_alcu_realloc_mv_ts(ErtsAlcType_t type, void *extra, void *p, Uint size)
+{
+ Allctr_t *allctr = (Allctr_t *) extra;
+ void *res;
+ erts_mtx_lock(&allctr->mutex);
+ res = do_erts_alcu_alloc(type, extra, size);
+ if (!res)
+ res = erts_alcu_realloc_ts(type, extra, p, size);
+ else {
+ Block_t *blk;
+ size_t cpy_size;
+
+ blk = UMEM2BLK(p);
+ cpy_size = BLK_SZ(blk) - ABLK_HDR_SZ;
+ if (cpy_size > size)
+ cpy_size = size;
+ sys_memcpy(res, p, cpy_size);
+ do_erts_alcu_free(type, extra, p);
+ }
+ erts_mtx_unlock(&allctr->mutex);
+ DEBUG_CHECK_ALIGNMENT(res);
+ return res;
+}
+
+void *
+erts_alcu_realloc_thr_spec(ErtsAlcType_t type, void *extra,
+ void *ptr, Uint size)
+{
+ ErtsAllocatorThrSpec_t *tspec = (ErtsAllocatorThrSpec_t *) extra;
+ int ix = erts_alc_get_thr_ix();
+ Allctr_t *allctr;
+ int unlock;
+ void *res;
+
+ ASSERT(ix > 0);
+ if (ix < tspec->size) {
+ allctr = tspec->allctr[ix];
+ unlock = 0;
+ }
+ else {
+ allctr = tspec->allctr[0];
+ unlock = 1;
+ erts_mtx_lock(&allctr->mutex);
+ }
+
+ res = do_erts_alcu_realloc(type, allctr, ptr, size, 0);
+
+ if (unlock)
+ erts_mtx_unlock(&allctr->mutex);
+
+ DEBUG_CHECK_ALIGNMENT(res);
+
+ return res;
+}
+
+void *
+erts_alcu_realloc_mv_thr_spec(ErtsAlcType_t type, void *extra,
+ void *ptr, Uint size)
+{
+ ErtsAllocatorThrSpec_t *tspec = (ErtsAllocatorThrSpec_t *) extra;
+ int ix = erts_alc_get_thr_ix();
+ Allctr_t *allctr;
+ int unlock;
+ void *res;
+
+ ASSERT(ix > 0);
+ if (ix < tspec->size) {
+ allctr = tspec->allctr[ix];
+ unlock = 0;
+ }
+ else {
+ allctr = tspec->allctr[0];
+ unlock = 1;
+ erts_mtx_lock(&allctr->mutex);
+ }
+
+
+ res = do_erts_alcu_alloc(type, allctr, size);
+ if (!res) {
+ if (unlock)
+ erts_mtx_unlock(&allctr->mutex);
+ res = erts_alcu_realloc_thr_spec(type, allctr, ptr, size);
+ }
+ else {
+ Block_t *blk;
+ size_t cpy_size;
+
+ blk = UMEM2BLK(ptr);
+ cpy_size = BLK_SZ(blk) - ABLK_HDR_SZ;
+ if (cpy_size > size)
+ cpy_size = size;
+ sys_memcpy(res, ptr, cpy_size);
+ do_erts_alcu_free(type, allctr, ptr);
+ if (unlock)
+ erts_mtx_unlock(&allctr->mutex);
+ }
+
+ DEBUG_CHECK_ALIGNMENT(res);
+
+ return res;
+}
+
+void *
+erts_alcu_realloc_thr_pref(ErtsAlcType_t type, void *extra, void *p, Uint size)
+{
+ ErtsAllocatorThrSpec_t *tspec = (ErtsAllocatorThrSpec_t *) extra;
+ int ix;
+ void *ptr, *res;
+ Allctr_t *pref_allctr, *used_allctr;
+
+ if (!p)
+ return erts_alcu_alloc_thr_pref(type, extra, size);
+
+ ptr = (void *) (((char *) p) - sizeof(Uint));
+ used_allctr = *((Allctr_t **) ptr);
+
+ ix = erts_alc_get_thr_ix();
+ ASSERT(ix > 0);
+ if (ix >= tspec->size)
+ ix = (ix % (tspec->size - 1)) + 1;
+ pref_allctr = tspec->allctr[ix];
+ ASSERT(used_allctr && pref_allctr);
+
+ erts_mtx_lock(&used_allctr->mutex);
+ res = do_erts_alcu_realloc(type,
+ used_allctr,
+ ptr,
+ size + sizeof(Uint),
+ (pref_allctr != used_allctr
+ ? ERTS_ALCU_FLG_FAIL_REALLOC_MOVE
+ : 0));
+ erts_mtx_unlock(&used_allctr->mutex);
+ if (res) {
+ ASSERT(used_allctr == *((Allctr_t **) res));
+ res = (void *) (((char *) res) + sizeof(Uint));
+ DEBUG_CHECK_ALIGNMENT(res);
+ }
+ else {
+ erts_mtx_lock(&pref_allctr->mutex);
+ res = do_erts_alcu_alloc(type, pref_allctr, size + sizeof(Uint));
+ erts_mtx_unlock(&pref_allctr->mutex);
+ if (res) {
+ Block_t *blk;
+ size_t cpy_size;
+
+ *((Allctr_t **) res) = pref_allctr;
+ res = (void *) (((char *) res) + sizeof(Uint));
+
+ DEBUG_CHECK_ALIGNMENT(res);
+
+ erts_mtx_lock(&used_allctr->mutex);
+ blk = UMEM2BLK(ptr);
+ cpy_size = BLK_SZ(blk) - ABLK_HDR_SZ - sizeof(Uint);
+ if (cpy_size > size)
+ cpy_size = size;
+ sys_memcpy(res, p, cpy_size);
+ do_erts_alcu_free(type, used_allctr, ptr);
+ erts_mtx_unlock(&used_allctr->mutex);
+ }
+ }
+
+ return res;
+}
+
+
+void *
+erts_alcu_realloc_mv_thr_pref(ErtsAlcType_t type, void *extra,
+ void *p, Uint size)
+{
+ ErtsAllocatorThrSpec_t *tspec = (ErtsAllocatorThrSpec_t *) extra;
+ int ix;
+ void *ptr, *res;
+ Allctr_t *pref_allctr, *used_allctr;
+
+ if (!p)
+ return erts_alcu_alloc_thr_pref(type, extra, size);
+
+ ptr = (void *) (((char *) p) - sizeof(Uint));
+ used_allctr = *((Allctr_t **) ptr);
+
+ ix = erts_alc_get_thr_ix();
+ ASSERT(ix > 0);
+ if (ix >= tspec->size)
+ ix = (ix % (tspec->size - 1)) + 1;
+ pref_allctr = tspec->allctr[ix];
+ ASSERT(used_allctr && pref_allctr);
+
+ erts_mtx_lock(&pref_allctr->mutex);
+ res = do_erts_alcu_alloc(type, pref_allctr, size + sizeof(Uint));
+ if (!res) {
+ erts_mtx_unlock(&pref_allctr->mutex);
+ res = erts_alcu_realloc_thr_pref(type, extra, p, size);
+ }
+ else {
+ Block_t *blk;
+ size_t cpy_size;
+ Allctr_t *allctr;
+
+ *((Allctr_t **) res) = pref_allctr;
+ res = (void *) (((char *) res) + sizeof(Uint));
+
+ DEBUG_CHECK_ALIGNMENT(res);
+
+ if (used_allctr == pref_allctr)
+ allctr = pref_allctr;
+ else {
+ erts_mtx_unlock(&pref_allctr->mutex);
+ allctr = used_allctr;
+ erts_mtx_lock(&allctr->mutex);
+ }
+
+ blk = UMEM2BLK(ptr);
+ cpy_size = BLK_SZ(blk) - ABLK_HDR_SZ - sizeof(Uint);
+ if (cpy_size > size)
+ cpy_size = size;
+ sys_memcpy(res, p, cpy_size);
+ do_erts_alcu_free(type, allctr, ptr);
+ erts_mtx_unlock(&allctr->mutex);
+ }
+
+ return res;
+}
+
+#endif
+
+/* ------------------------------------------------------------------------- */
+
+int
+erts_alcu_start(Allctr_t *allctr, AllctrInit_t *init)
+{
+ /* erts_alcu_start assumes that allctr has been zeroed */
+
+ if (!initialized)
+ goto error;
+
+#if HAVE_ERTS_MSEG
+ {
+ ErtsMsegOpt_t mseg_opt = ERTS_MSEG_DEFAULT_OPT_INITIALIZER;
+
+ sys_memcpy((void *) &allctr->mseg_opt,
+ (void *) &mseg_opt,
+ sizeof(ErtsMsegOpt_t));
+ }
+#endif
+
+ allctr->name_prefix = init->name_prefix;
+ if (!allctr->name_prefix)
+ goto error;
+
+ allctr->alloc_no = init->alloc_no;
+ if (allctr->alloc_no < ERTS_ALC_A_MIN
+ || ERTS_ALC_A_MAX < allctr->alloc_no)
+ allctr->alloc_no = ERTS_ALC_A_INVALID;
+
+ if (!allctr->vsn_str)
+ goto error;
+
+ allctr->name.alloc = THE_NON_VALUE;
+ allctr->name.realloc = THE_NON_VALUE;
+ allctr->name.free = THE_NON_VALUE;
+
+ if (init->tspec)
+ allctr->t = init->tspec;
+ else if (init->tpref)
+ allctr->t = init->tpref;
+ else
+ allctr->t = 0;
+
+ allctr->ramv = init->ramv;
+ allctr->main_carrier_size = init->mmbcs;
+ allctr->sbc_threshold = init->sbct;
+#if HAVE_ERTS_MSEG
+ allctr->mseg_opt.abs_shrink_th = init->asbcst;
+ allctr->mseg_opt.rel_shrink_th = init->rsbcst;
+#endif
+ allctr->sbc_move_threshold = init->rsbcmt;
+ allctr->mbc_move_threshold = init->rmbcmt;
+#if HAVE_ERTS_MSEG
+ allctr->max_mseg_sbcs = init->mmsbc;
+ allctr->max_mseg_mbcs = init->mmmbc;
+#endif
+
+ allctr->largest_mbc_size = MAX(init->lmbcs, init->smbcs);
+ allctr->smallest_mbc_size = init->smbcs;
+ allctr->mbc_growth_stages = MAX(1, init->mbcgs);
+
+ if (allctr->min_block_size < ABLK_HDR_SZ)
+ goto error;
+ allctr->min_block_size = UNIT_CEILING(allctr->min_block_size
+ + sizeof(Uint));
+
+#if HAVE_ERTS_MSEG
+ if (allctr->mseg_opt.abs_shrink_th > ~((Uint) 0) / 100)
+ allctr->mseg_opt.abs_shrink_th = ~((Uint) 0) / 100;
+#endif
+
+#ifdef USE_THREADS
+ if (init->ts) {
+ allctr->thread_safe = 1;
+
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ erts_mtx_init_x_opt(&allctr->mutex,
+ "alcu_allocator",
+ make_small(allctr->alloc_no),
+ ERTS_LCNT_LT_ALLOC);
+#else
+ erts_mtx_init_x(&allctr->mutex,
+ "alcu_allocator",
+ make_small(allctr->alloc_no));
+#endif /*ERTS_ENABLE_LOCK_COUNT*/
+
+#ifdef DEBUG
+ allctr->debug.saved_tid = 0;
+#endif
+ }
+#endif
+
+ if(!allctr->get_free_block
+ || !allctr->link_free_block
+ || !allctr->unlink_free_block
+ || !allctr->info_options)
+ goto error;
+
+ if (!allctr->get_next_mbc_size)
+ allctr->get_next_mbc_size = get_next_mbc_size;
+
+ if (allctr->mbc_header_size < sizeof(Carrier_t))
+ goto error;
+#ifdef USE_THREADS
+ if (init->tpref) {
+ allctr->mbc_header_size = (UNIT_CEILING(allctr->mbc_header_size
+ + FBLK_FTR_SZ
+ + ABLK_HDR_SZ
+ + sizeof(Uint))
+ - ABLK_HDR_SZ
+ - sizeof(Uint));
+ allctr->sbc_header_size = (UNIT_CEILING(sizeof(Carrier_t)
+ + FBLK_FTR_SZ
+ + ABLK_HDR_SZ
+ + sizeof(Uint))
+ - ABLK_HDR_SZ
+ - sizeof(Uint));
+ }
+ else
+#endif
+ {
+ allctr->mbc_header_size = (UNIT_CEILING(allctr->mbc_header_size
+ + FBLK_FTR_SZ
+ + ABLK_HDR_SZ)
+ - ABLK_HDR_SZ);
+ allctr->sbc_header_size = (UNIT_CEILING(sizeof(Carrier_t)
+ + FBLK_FTR_SZ
+ + ABLK_HDR_SZ)
+ - ABLK_HDR_SZ);
+ }
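+
+    /* Note: with tpref, an extra Uint per block is reserved in front
+       of the user area for the owner-allocator pointer used by the
+       thr_pref alloc/free/realloc functions above. */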
+
+ if (allctr->main_carrier_size) {
+ Block_t *blk;
+
+ blk = create_carrier(allctr,
+ allctr->main_carrier_size,
+ CFLG_MBC
+ | CFLG_FORCE_SIZE
+ | CFLG_FORCE_SYS_ALLOC
+ | CFLG_MAIN_CARRIER);
+ if (!blk)
+ goto error;
+
+ (*allctr->link_free_block)(allctr, blk);
+
+ HARD_CHECK_BLK_CARRIER(allctr, blk);
+
+ }
+
+ return 1;
+
+ error:
+
+#ifdef USE_THREADS
+ if (allctr->thread_safe)
+ erts_mtx_destroy(&allctr->mutex);
+#endif
+
+ return 0;
+
+}
+
+/* ------------------------------------------------------------------------- */
+
+void
+erts_alcu_stop(Allctr_t *allctr)
+{
+ allctr->stopped = 1;
+
+ while (allctr->sbc_list.first)
+ destroy_carrier(allctr, SBC2BLK(allctr, allctr->sbc_list.first));
+ while (allctr->mbc_list.first)
+ destroy_carrier(allctr, MBC2FBLK(allctr, allctr->mbc_list.first));
+
+#ifdef USE_THREADS
+ if (allctr->thread_safe)
+ erts_mtx_destroy(&allctr->mutex);
+#endif
+
+}
+
+/* ------------------------------------------------------------------------- */
+
+void
+erts_alcu_init(AlcUInit_t *init)
+{
+
+#if HAVE_ERTS_MSEG
+ mseg_unit_size = erts_mseg_unit_size();
+
+ if (mseg_unit_size % sizeof(Unit_t)) /* A little paranoid... */
+ erl_exit(-1,
+ "Mseg unit size (%d) not evenly divideble by "
+ "internal unit size of alloc_util (%d)\n",
+ mseg_unit_size,
+ sizeof(Unit_t));
+
+ max_mseg_carriers = init->mmc;
+ sys_alloc_carrier_size = MSEG_UNIT_CEILING(init->ycs);
+#else /* #if HAVE_ERTS_MSEG */
+ sys_alloc_carrier_size = ((init->ycs + 4095) / 4096) * 4096;
+#endif
+
+#ifdef DEBUG
+ carrier_alignment = sizeof(Unit_t);
+#endif
+
+ erts_mtx_init(&init_atoms_mtx, "alcu_init_atoms");
+
+ atoms_initialized = 0;
+ initialized = 1;
+}
+
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\
+ * NOTE: erts_alcu_test() is only supposed to be used for testing. *
+ * *
+ * Keep alloc_SUITE_data/allocator_test.h updated if changes are made *
+ * to erts_alcu_test() *
+\* */
+
+unsigned long
+erts_alcu_test(unsigned long op, unsigned long a1, unsigned long a2)
+{
+ switch (op) {
+ case 0x000: return (unsigned long) BLK_SZ((Block_t *) a1);
+ case 0x001: return (unsigned long) BLK_UMEM_SZ((Block_t *) a1);
+ case 0x002: return (unsigned long) IS_PREV_BLK_FREE((Block_t *) a1);
+ case 0x003: return (unsigned long) IS_FREE_BLK((Block_t *) a1);
+ case 0x004: return (unsigned long) IS_LAST_BLK((Block_t *) a1);
+ case 0x005: return (unsigned long) UMEM2BLK((void *) a1);
+ case 0x006: return (unsigned long) BLK2UMEM((Block_t *) a1);
+ case 0x007: return (unsigned long) IS_SB_CARRIER((Carrier_t *) a1);
+ case 0x008: return (unsigned long) IS_SBC_BLK((Block_t *) a1);
+ case 0x009: return (unsigned long) IS_MB_CARRIER((Carrier_t *) a1);
+ case 0x00a: return (unsigned long) IS_MSEG_CARRIER((Carrier_t *) a1);
+ case 0x00b: return (unsigned long) CARRIER_SZ((Carrier_t *) a1);
+ case 0x00c: return (unsigned long) SBC2BLK((Allctr_t *) a1,
+ (Carrier_t *) a2);
+ case 0x00d: return (unsigned long) BLK2SBC((Allctr_t *) a1,
+ (Block_t *) a2);
+ case 0x00e: return (unsigned long) MBC2FBLK((Allctr_t *) a1,
+ (Carrier_t *) a2);
+ case 0x00f: return (unsigned long) FBLK2MBC((Allctr_t *) a1,
+ (Block_t *) a2);
+ case 0x010: return (unsigned long) ((Allctr_t *) a1)->mbc_list.first;
+ case 0x011: return (unsigned long) ((Allctr_t *) a1)->mbc_list.last;
+ case 0x012: return (unsigned long) ((Allctr_t *) a1)->sbc_list.first;
+ case 0x013: return (unsigned long) ((Allctr_t *) a1)->sbc_list.last;
+ case 0x014: return (unsigned long) ((Carrier_t *) a1)->next;
+ case 0x015: return (unsigned long) ((Carrier_t *) a1)->prev;
+ case 0x016: return (unsigned long) ABLK_HDR_SZ;
+ case 0x017: return (unsigned long) ((Allctr_t *) a1)->min_block_size;
+ case 0x018: return (unsigned long) NXT_BLK((Block_t *) a1);
+ case 0x019: return (unsigned long) PREV_BLK((Block_t *) a1);
+ case 0x01a: return (unsigned long) IS_FIRST_BLK((Block_t *) a1);
+ case 0x01b: return (unsigned long) sizeof(Unit_t);
+ default: ASSERT(0); return ~((unsigned long) 0);
+ }
+}
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\
+ * Debug functions *
+\* */
+
+#ifdef ERTS_ALLOC_UTIL_HARD_DEBUG
+
+static void
+check_blk_carrier(Allctr_t *allctr, Block_t *iblk)
+{
+ Carrier_t *crr;
+ CarrierList_t *cl;
+
+ if (IS_SBC_BLK(iblk)) {
+ Carrier_t *sbc = BLK2SBC(allctr, iblk);
+
+ ASSERT(SBC2BLK(allctr, sbc) == iblk);
+ ASSERT(IS_ALLOCED_BLK(iblk));
+ ASSERT(IS_FIRST_BLK(iblk));
+ ASSERT(IS_LAST_BLK(iblk));
+ ASSERT(CARRIER_SZ(sbc) - allctr->sbc_header_size >= BLK_SZ(iblk));
+#if HAVE_ERTS_MSEG
+ if (IS_MSEG_CARRIER(sbc)) {
+ ASSERT(CARRIER_SZ(sbc) % mseg_unit_size == 0);
+ }
+#endif
+ crr = sbc;
+ cl = &allctr->sbc_list;
+ }
+ else {
+ Carrier_t *mbc = NULL;
+ Block_t *prev_blk = NULL;
+ Block_t *blk;
+ char *carrier_end;
+ Uint is_free_blk;
+ Uint tot_blk_sz;
+ Uint blk_sz;
+
+ blk = iblk;
+ tot_blk_sz = 0;
+
+ while (1) {
+
+ if (prev_blk) {
+ ASSERT(NXT_BLK(prev_blk) == blk);
+ if (IS_FREE_BLK(prev_blk)) {
+ ASSERT(IS_PREV_BLK_FREE(blk));
+ ASSERT(prev_blk == PREV_BLK(blk));
+ }
+ else {
+ ASSERT(IS_PREV_BLK_ALLOCED(blk));
+ }
+ }
+
+ if (mbc) {
+ if (blk == iblk)
+ break;
+ ASSERT(((Block_t *) mbc) < blk && blk < iblk);
+ }
+ else
+ ASSERT(blk >= iblk);
+
+
+ ASSERT(IS_MBC_BLK(blk));
+
+ blk_sz = BLK_SZ(blk);
+
+ ASSERT(blk_sz % sizeof(Unit_t) == 0);
+ ASSERT(blk_sz >= allctr->min_block_size);
+
+ tot_blk_sz += blk_sz;
+
+ is_free_blk = (int) IS_FREE_BLK(blk);
+ if(is_free_blk) {
+ if (IS_NOT_LAST_BLK(blk))
+ ASSERT(*((Uint *) (((char *) blk)+blk_sz-sizeof(Uint)))
+ == blk_sz);
+ }
+
+ if (allctr->check_block)
+ (*allctr->check_block)(allctr, blk, (int) is_free_blk);
+
+ if (IS_LAST_BLK(blk)) {
+ carrier_end = ((char *) NXT_BLK(blk)) + sizeof(Uint);
+ mbc = *((Carrier_t **) NXT_BLK(blk));
+ prev_blk = NULL;
+ blk = MBC2FBLK(allctr, mbc);
+ ASSERT(IS_FIRST_BLK(blk));
+ }
+ else {
+ prev_blk = blk;
+ blk = NXT_BLK(blk);
+ }
+ }
+
+ ASSERT(IS_MB_CARRIER(mbc));
+ ASSERT((((char *) mbc)
+ + allctr->mbc_header_size
+ + tot_blk_sz
+ + sizeof(Uint)) == carrier_end);
+ ASSERT(((char *) mbc) + CARRIER_SZ(mbc) == carrier_end);
+
+ if (allctr->check_mbc)
+ (*allctr->check_mbc)(allctr, mbc);
+
+#if HAVE_ERTS_MSEG
+ if (IS_MSEG_CARRIER(mbc)) {
+ ASSERT(CARRIER_SZ(mbc) % mseg_unit_size == 0);
+ }
+#endif
+ crr = mbc;
+ cl = &allctr->mbc_list;
+ }
+
+ if (cl->first == crr) {
+ ASSERT(!crr->prev);
+ }
+ else {
+ ASSERT(crr->prev);
+ ASSERT(crr->prev->next == crr);
+ }
+ if (cl->last == crr) {
+ ASSERT(!crr->next);
+ }
+ else {
+ ASSERT(crr->next);
+ ASSERT(crr->next->prev == crr);
+ }
+}
+
+#endif
diff --git a/erts/emulator/beam/erl_alloc_util.h b/erts/emulator/beam/erl_alloc_util.h
new file mode 100644
index 0000000000..10b11661e6
--- /dev/null
+++ b/erts/emulator/beam/erl_alloc_util.h
@@ -0,0 +1,342 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2002-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+#ifndef ERL_ALLOC_UTIL__
+#define ERL_ALLOC_UTIL__
+
+#define ERTS_ALCU_VSN_STR "2.2"
+
+#include "erl_alloc_types.h"
+
+typedef struct Allctr_t_ Allctr_t;
+
+typedef struct {
+ Uint ycs;
+ Uint mmc;
+} AlcUInit_t;
+
+typedef struct {
+ char *name_prefix;
+ ErtsAlcType_t alloc_no;
+ int ts;
+ int tspec;
+ int tpref;
+ int ramv;
+ Uint sbct;
+ Uint asbcst;
+ Uint rsbcst;
+ Uint rsbcmt;
+ Uint rmbcmt;
+ Uint mmbcs;
+ Uint mmsbc;
+ Uint mmmbc;
+ Uint lmbcs;
+ Uint smbcs;
+ Uint mbcgs;
+} AllctrInit_t;
+
+typedef struct {
+ Uint blocks;
+ Uint carriers;
+} AllctrSize_t;
+
+#ifndef SMALL_MEMORY
+
+#define ERTS_DEFAULT_ALCU_INIT { \
+ 1024*1024, /* (bytes) ycs: sys_alloc carrier size */\
+ 1024 /* (amount) mmc: max mseg carriers */\
+}
+
+#define ERTS_DEFAULT_ALLCTR_INIT { \
+ NULL, \
+ ERTS_ALC_A_INVALID, /* (number) alloc_no: allocator number */\
+ 1, /* (bool) ts: thread safe */\
+ 0, /* (bool) tspec: thread specific */\
+ 0, /* (bool) tpref: thread preferred */\
+ 0, /* (bool) ramv: realloc always moves */\
+ 512*1024, /* (bytes) sbct: sbc threshold */\
+ 2*1024*1024, /* (bytes) asbcst: abs sbc shrink threshold */\
+ 20, /* (%) rsbcst: rel sbc shrink threshold */\
+ 80, /* (%) rsbcmt: rel sbc move threshold */\
+ 50, /* (%) rmbcmt: rel mbc move threshold */\
+ 1024*1024, /* (bytes) mmbcs: main multiblock carrier size */\
+ 256, /* (amount) mmsbc: max mseg sbcs */\
+ 10, /* (amount) mmmbc: max mseg mbcs */\
+ 10*1024*1024, /* (bytes) lmbcs: largest mbc size */\
+ 1024*1024, /* (bytes) smbcs: smallest mbc size */\
+ 10 /* (amount) mbcgs: mbc growth stages */\
+}
+
+#else /* if SMALL_MEMORY */
+
+#define ERTS_DEFAULT_ALCU_INIT { \
+ 128*1024, /* (bytes) ycs: sys_alloc carrier size */\
+ 1024 /* (amount) mmc: max mseg carriers */\
+}
+
+#define ERTS_DEFAULT_ALLCTR_INIT { \
+ NULL, \
+ ERTS_ALC_A_INVALID, /* (number) alloc_no: allocator number */\
+ 1, /* (bool) ts: thread safe */\
+ 0, /* (bool) tspec: thread specific */\
+ 0, /* (bool) tpref: thread preferred */\
+ 0, /* (bool) ramv: realloc always moves */\
+ 64*1024, /* (bytes) sbct: sbc threshold */\
+ 2*1024*1024, /* (bytes) asbcst: abs sbc shrink threshold */\
+ 20, /* (%) rsbcst: rel sbc shrink threshold */\
+ 80, /* (%) rsbcmt: rel sbc move threshold */\
+ 50, /* (%) rmbcmt: rel mbc move threshold */\
+ 128*1024, /* (bytes) mmbcs: main multiblock carrier size */\
+ 256, /* (amount) mmsbc: max mseg sbcs */\
+ 10, /* (amount) mmmbc: max mseg mbcs */\
+ 1024*1024, /* (bytes) lmbcs: largest mbc size */\
+ 128*1024, /* (bytes) smbcs: smallest mbc size */\
+ 10 /* (amount) mbcgs: mbc growth stages */\
+}
+
+#endif
+
+void * erts_alcu_alloc(ErtsAlcType_t, void *, Uint);
+void * erts_alcu_realloc(ErtsAlcType_t, void *, void *, Uint);
+void * erts_alcu_realloc_mv(ErtsAlcType_t, void *, void *, Uint);
+void erts_alcu_free(ErtsAlcType_t, void *, void *);
+#ifdef USE_THREADS
+void * erts_alcu_alloc_ts(ErtsAlcType_t, void *, Uint);
+void * erts_alcu_realloc_ts(ErtsAlcType_t, void *, void *, Uint);
+void * erts_alcu_realloc_mv_ts(ErtsAlcType_t, void *, void *, Uint);
+void erts_alcu_free_ts(ErtsAlcType_t, void *, void *);
+void * erts_alcu_alloc_thr_spec(ErtsAlcType_t, void *, Uint);
+void * erts_alcu_realloc_thr_spec(ErtsAlcType_t, void *, void *, Uint);
+void * erts_alcu_realloc_mv_thr_spec(ErtsAlcType_t, void *, void *, Uint);
+void erts_alcu_free_thr_spec(ErtsAlcType_t, void *, void *);
+void * erts_alcu_alloc_thr_pref(ErtsAlcType_t, void *, Uint);
+void * erts_alcu_realloc_thr_pref(ErtsAlcType_t, void *, void *, Uint);
+void * erts_alcu_realloc_mv_thr_pref(ErtsAlcType_t, void *, void *, Uint);
+void erts_alcu_free_thr_pref(ErtsAlcType_t, void *, void *);
+#endif
+Eterm erts_alcu_au_info_options(int *, void *, Uint **, Uint *);
+Eterm erts_alcu_info_options(Allctr_t *, int *, void *, Uint **, Uint *);
+Eterm erts_alcu_sz_info(Allctr_t *, int, int *, void *, Uint **, Uint *);
+Eterm erts_alcu_info(Allctr_t *, int, int *, void *, Uint **, Uint *);
+void erts_alcu_init(AlcUInit_t *);
+void erts_alcu_current_size(Allctr_t *, AllctrSize_t *);
+
+#endif
+
+#if defined(GET_ERL_ALLOC_UTIL_IMPL) && !defined(ERL_ALLOC_UTIL_IMPL__)
+#define ERL_ALLOC_UTIL_IMPL__
+
+#ifdef USE_THREADS
+#define ERL_THREADS_EMU_INTERNAL__
+#include "erl_threads.h"
+#endif
+
+#include "erl_mseg.h"
+
+#undef ERTS_ALLOC_UTIL_HARD_DEBUG
+#ifdef DEBUG
+# if 0
+# define ERTS_ALLOC_UTIL_HARD_DEBUG
+# endif
+#endif
+
+#undef MIN
+#undef MAX
+#define MIN(X, Y) ((X) < (Y) ? (X) : (Y))
+#define MAX(X, Y) ((X) > (Y) ? (X) : (Y))
+#define FLOOR(X, I) (((X)/(I))*(I))
+#define CEILING(X, I) ((((X) - 1)/(I) + 1)*(I))
+
+#undef WORD_MASK
+#define INV_WORD_MASK ((Uint) (sizeof(Uint) - 1))
+#define WORD_MASK (~INV_WORD_MASK)
+#define WORD_FLOOR(X) ((X) & WORD_MASK)
+#define WORD_CEILING(X) WORD_FLOOR((X) + INV_WORD_MASK)
+
+#undef UNIT_MASK
+#define INV_UNIT_MASK ((Uint) (sizeof(Unit_t) - 1))
+#define UNIT_MASK (~INV_UNIT_MASK)
+#define UNIT_FLOOR(X) ((X) & UNIT_MASK)
+#define UNIT_CEILING(X) UNIT_FLOOR((X) + INV_UNIT_MASK)
+
+
+#define SZ_MASK (~((Uint) 0) << 3)
+#define FLG_MASK (~(SZ_MASK))
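+
+/* Sizes are unit-aligned (at least 8 bytes), so the low three bits of
+   a block or carrier header word are free for flag bits (e.g. the
+   this-free, prev-free and last-block flags on blocks). */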
+
+
+#define BLK_SZ(B) \
+ (*((Block_t *) (B)) & SZ_MASK)
+
+#define CARRIER_SZ(C) \
+ ((C)->chdr & SZ_MASK)
+
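+/* Unit_t defines the allocator's alignment unit: a union of char[8],
+   long and double approximating the strictest fundamental alignment.
+   UNIT_CEILING/UNIT_FLOOR above round sizes to multiples of it. */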
+typedef union {char c[8]; long l; double d;} Unit_t;
+
+typedef struct Carrier_t_ Carrier_t;
+struct Carrier_t_ {
+ Uint chdr;
+ Carrier_t *next;
+ Carrier_t *prev;
+};
+
+typedef struct {
+ Carrier_t *first;
+ Carrier_t *last;
+} CarrierList_t;
+
+typedef Uint Block_t;
+typedef Uint FreeBlkFtr_t;
+
+typedef struct {
+ Uint giga_no;
+ Uint no;
+} CallCounter_t;
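+/* Call counts are split as giga_no*10^9 + no so that they do not wrap
+   a 32-bit Uint; cf. the "%bpu%09lu" formats in info_calls(). */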
+
+typedef struct {
+ Uint no;
+ Uint size;
+} StatValues_t;
+
+typedef struct {
+ StatValues_t curr_mseg;
+ StatValues_t curr_sys_alloc;
+ StatValues_t max;
+ StatValues_t max_ever;
+ struct {
+ StatValues_t curr;
+ StatValues_t max;
+ StatValues_t max_ever;
+ } blocks;
+} CarriersStats_t;
+
+struct Allctr_t_ {
+
+ /* Allocator name prefix */
+ char * name_prefix;
+
+ /* Allocator number */
+ ErtsAlcType_t alloc_no;
+
+ /* Alloc, realloc and free names as atoms */
+ struct {
+ Eterm alloc;
+ Eterm realloc;
+ Eterm free;
+ } name;
+
+ /* Version string */
+ char * vsn_str;
+
+ /* Options */
+ int t;
+ int ramv;
+ Uint sbc_threshold;
+ Uint sbc_move_threshold;
+ Uint mbc_move_threshold;
+ Uint main_carrier_size;
+ Uint max_mseg_sbcs;
+ Uint max_mseg_mbcs;
+ Uint largest_mbc_size;
+ Uint smallest_mbc_size;
+ Uint mbc_growth_stages;
+#if HAVE_ERTS_MSEG
+ ErtsMsegOpt_t mseg_opt;
+#endif
+
+ /* */
+ Uint mbc_header_size;
+ Uint sbc_header_size;
+ Uint min_mbc_size;
+ Uint min_mbc_first_free_size;
+ Uint min_block_size;
+
+ /* Carriers */
+ CarrierList_t mbc_list;
+ CarrierList_t sbc_list;
+
+ /* Main carrier (if there is one) */
+ Carrier_t * main_carrier;
+
+ /* Callback functions (first 4 are mandatory) */
+ Block_t * (*get_free_block) (Allctr_t *, Uint,
+ Block_t *, Uint);
+ void (*link_free_block) (Allctr_t *, Block_t *);
+ void (*unlink_free_block) (Allctr_t *, Block_t *);
+ Eterm (*info_options) (Allctr_t *, char *, int *,
+ void *, Uint **, Uint *);
+
+ Uint (*get_next_mbc_size) (Allctr_t *);
+ void (*creating_mbc) (Allctr_t *, Carrier_t *);
+ void (*destroying_mbc) (Allctr_t *, Carrier_t *);
+ void (*init_atoms) (void);
+
+#ifdef ERTS_ALLOC_UTIL_HARD_DEBUG
+ void (*check_block) (Allctr_t *, Block_t *, int);
+ void (*check_mbc) (Allctr_t *, Carrier_t *);
+#endif
+
+#ifdef USE_THREADS
+ /* Mutex for this allocator */
+ erts_mtx_t mutex;
+ int thread_safe;
+ struct {
+ Allctr_t *prev;
+ Allctr_t *next;
+ } ts_list;
+#endif
+
+ int atoms_initialized;
+
+ int stopped;
+
+ /* Some statistics ... */
+ struct {
+ CallCounter_t this_alloc;
+ CallCounter_t this_free;
+ CallCounter_t this_realloc;
+ CallCounter_t mseg_alloc;
+ CallCounter_t mseg_dealloc;
+ CallCounter_t mseg_realloc;
+ CallCounter_t sys_alloc;
+ CallCounter_t sys_free;
+ CallCounter_t sys_realloc;
+ } calls;
+
+ CarriersStats_t sbcs;
+ CarriersStats_t mbcs;
+
+#ifdef DEBUG
+#ifdef USE_THREADS
+ struct {
+ int saved_tid;
+ erts_tid_t tid;
+ } debug;
+#endif
+#endif
+};
+
+int erts_alcu_start(Allctr_t *, AllctrInit_t *);
+void erts_alcu_stop(Allctr_t *);
+
+unsigned long erts_alcu_test(unsigned long, unsigned long, unsigned long);
+
+
+
+#endif /* #if defined(GET_ERL_ALLOC_UTIL_IMPL)
+ && !defined(ERL_ALLOC_UTIL_IMPL__) */
+
diff --git a/erts/emulator/beam/erl_arith.c b/erts/emulator/beam/erl_arith.c
new file mode 100644
index 0000000000..b692832677
--- /dev/null
+++ b/erts/emulator/beam/erl_arith.c
@@ -0,0 +1,2040 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 1999-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Arithmetic functions formerly found in beam_emu.c, now available
+ * as BIFs since erl_db_util and db_match_compile need them.
+ */
+
+
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+
+#include "sys.h"
+#include "erl_vm.h"
+#include "global.h"
+#include "erl_process.h"
+#include "error.h"
+#include "bif.h"
+#include "big.h"
+#include "atom.h"
+
+#ifndef MAX
+# define MAX(x, y) (((x) > (y)) ? (x) : (y))
+#endif
+
+static Eterm shift(Process* p, Eterm arg1, Eterm arg2, int right);
+
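+/*
+ * maybe_shrink() hands back heap words that an arithmetic operation
+ * over-allocated: if the result turned out to be an immediate, or a
+ * bignum shorter than the reserved space, the heap top is moved back
+ * (or erts_arith_shrink() is used when the words are not topmost).
+ */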
+static ERTS_INLINE void maybe_shrink(Process* p, Eterm* hp, Eterm res, Uint alloc)
+{
+ Uint actual;
+
+ if (is_immed(res)) {
+ if (p->heap <= hp && hp < p->htop) {
+ p->htop = hp;
+#if defined(CHECK_FOR_HOLES)
+ } else {
+ erts_arith_shrink(p, hp);
+#endif
+ }
+ } else if ((actual = bignum_header_arity(*hp)+1) < alloc) {
+ if (p->heap <= hp && hp < p->htop) {
+ p->htop = hp+actual;
+#if defined(CHECK_FOR_HOLES)
+ } else {
+ erts_arith_shrink(p, hp+actual);
+#endif
+ }
+ }
+}
+
+/*
+ * BIF interfaces. They will only be called from match specs and
+ * when a BIF is applied.
+ */
+
+BIF_RETTYPE splus_1(BIF_ALIST_1)
+{
+ if (is_number(BIF_ARG_1)) {
+ BIF_RET(BIF_ARG_1);
+ } else {
+ BIF_ERROR(BIF_P, BADARITH);
+ }
+}
+
+BIF_RETTYPE splus_2(BIF_ALIST_2)
+{
+ BIF_RET(erts_mixed_plus(BIF_P, BIF_ARG_1, BIF_ARG_2));
+}
+
+BIF_RETTYPE sminus_1(BIF_ALIST_1)
+{
+ BIF_RET(erts_mixed_minus(BIF_P, make_small(0), BIF_ARG_1));
+}
+
+BIF_RETTYPE sminus_2(BIF_ALIST_2)
+{
+ BIF_RET(erts_mixed_minus(BIF_P, BIF_ARG_1, BIF_ARG_2));
+}
+
+BIF_RETTYPE stimes_2(BIF_ALIST_2)
+{
+ BIF_RET(erts_mixed_times(BIF_P, BIF_ARG_1, BIF_ARG_2));
+}
+
+BIF_RETTYPE div_2(BIF_ALIST_2)
+{
+ BIF_RET(erts_mixed_div(BIF_P, BIF_ARG_1, BIF_ARG_2));
+}
+
+BIF_RETTYPE intdiv_2(BIF_ALIST_2)
+{
+ if (BIF_ARG_2 == SMALL_ZERO) {
+ BIF_ERROR(BIF_P, BADARITH);
+ }
+ if (is_both_small(BIF_ARG_1,BIF_ARG_2)){
+ Sint ires = signed_val(BIF_ARG_1) / signed_val(BIF_ARG_2);
+ if (MY_IS_SSMALL(ires))
+ BIF_RET(make_small(ires));
+ }
+ BIF_RET(erts_int_div(BIF_P, BIF_ARG_1, BIF_ARG_2));
+}
+
+BIF_RETTYPE rem_2(BIF_ALIST_2)
+{
+ if (BIF_ARG_2 == SMALL_ZERO) {
+ BIF_ERROR(BIF_P, BADARITH);
+ }
+ if (is_both_small(BIF_ARG_1,BIF_ARG_2)){
+ /* C89 left the sign of % with a negative operand implementation-
+ defined; C99 pins division to truncation toward zero, giving %
+ the sign of the dividend, which matches Erlang's rem. It is also
+ the way it's done in beam_emu. */
+ BIF_RET(make_small(signed_val(BIF_ARG_1) % signed_val(BIF_ARG_2)));
+ }
+ BIF_RET(erts_int_rem(BIF_P, BIF_ARG_1, BIF_ARG_2));
+}
+
+BIF_RETTYPE band_2(BIF_ALIST_2)
+{
+ if (is_both_small(BIF_ARG_1,BIF_ARG_2)){
+ BIF_RET(BIF_ARG_1 & BIF_ARG_2);
+ }
+ BIF_RET(erts_band(BIF_P, BIF_ARG_1, BIF_ARG_2));
+}
+
+BIF_RETTYPE bor_2(BIF_ALIST_2)
+{
+ if (is_both_small(BIF_ARG_1,BIF_ARG_2)){
+ BIF_RET(BIF_ARG_1 | BIF_ARG_2);
+ }
+ BIF_RET(erts_bor(BIF_P, BIF_ARG_1, BIF_ARG_2));
+}
+
+BIF_RETTYPE bxor_2(BIF_ALIST_2)
+{
+ if (is_both_small(BIF_ARG_1,BIF_ARG_2)){
+ BIF_RET(make_small(signed_val(BIF_ARG_1) ^ signed_val(BIF_ARG_2)));
+ }
+ BIF_RET(erts_bxor(BIF_P, BIF_ARG_1, BIF_ARG_2));
+}
+
+BIF_RETTYPE bsl_2(Process* p, Eterm arg1, Eterm arg2)
+{
+ BIF_RET(shift(p, arg1, arg2, 0));
+}
+
+BIF_RETTYPE bsr_2(Process* p, Eterm arg1, Eterm arg2)
+{
+ BIF_RET(shift(p, arg1, arg2, 1));
+}
+
+static Eterm
+shift(Process* p, Eterm arg1, Eterm arg2, int right)
+{
+ Sint i;
+ Sint ires;
+ Eterm tmp_big1[2];
+ Eterm* bigp;
+ Uint need;
+
+ if (right) {
+ if (is_small(arg2)) {
+ i = -signed_val(arg2);
+ if (is_small(arg1)) {
+ goto small_shift;
+ } else if (is_big(arg1)) {
+ if (i == 0) {
+ BIF_RET(arg1);
+ }
+ goto big_shift;
+ }
+ } else if (is_big(arg2)) {
+ /*
+ * N bsr NegativeBigNum == N bsl MAX_SMALL
+ * N bsr PositiveBigNum == N bsl MIN_SMALL
+ */
+ arg2 = make_small(bignum_header_is_neg(*big_val(arg2)) ?
+ MAX_SMALL : MIN_SMALL);
+ goto do_bsl;
+ }
+ } else {
+ do_bsl:
+ if (is_small(arg2)) {
+ i = signed_val(arg2);
+
+ if (is_small(arg1)) {
+ small_shift:
+ ires = signed_val(arg1);
+
+ if (i == 0 || ires == 0) {
+ BIF_RET(arg1);
+ } else if (i < 0) { /* Right shift */
+ i = -i;
+ if (i >= SMALL_BITS-1) {
+ arg1 = (ires < 0) ? SMALL_MINUS_ONE : SMALL_ZERO;
+ } else {
+ arg1 = make_small(ires >> i);
+ }
+ BIF_RET(arg1);
+ } else if (i < SMALL_BITS-1) { /* Left shift */
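+ /*
+ * The shift is safe iff the bits that would be shifted
+ * out of the small range, together with the resulting
+ * sign bit, all equal the original sign bit: all zero
+ * for a non-negative value, all one for a negative one.
+ */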
+ if ((ires > 0 && ((~(Uint)0 << ((SMALL_BITS-1)-i)) & ires) == 0) ||
+ ((~(Uint)0 << ((SMALL_BITS-1)-i)) & ~ires) == 0) {
+ arg1 = make_small(ires << i);
+ BIF_RET(arg1);
+ }
+ }
+ arg1 = small_to_big(ires, tmp_big1);
+
+ big_shift:
+ if (i > 0) { /* Left shift. */
+ ires = big_size(arg1) + (i / D_EXP);
+ } else { /* Right shift. */
+ ires = big_size(arg1);
+ if (ires <= (-i / D_EXP))
+ ires = 3;
+ else
+ ires -= (-i / D_EXP);
+ }
+
+ /*
+ * Slightly conservative check of the size to avoid
+ * allocating huge amounts of memory for bignums that
+ * would clearly overflow the arity field in the
+ * header word.
+ */
+ if (ires-8 > BIG_ARITY_MAX) {
+ BIF_ERROR(p, SYSTEM_LIMIT);
+ }
+ need = BIG_NEED_SIZE(ires+1);
+ bigp = HAlloc(p, need);
+ arg1 = big_lshift(arg1, i, bigp);
+ maybe_shrink(p, bigp, arg1, need);
+ if (is_nil(arg1)) {
+ /*
+ * This result must have been only slightly larger
+ * than allowed, since it wasn't caught by the
+ * previous test.
+ */
+ BIF_ERROR(p, SYSTEM_LIMIT);
+ }
+ BIF_RET(arg1);
+ } else if (is_big(arg1)) {
+ if (i == 0) {
+ BIF_RET(arg1);
+ }
+ goto big_shift;
+ }
+ } else if (is_big(arg2)) {
+ if (bignum_header_is_neg(*big_val(arg2))) {
+ /*
+ * N bsl NegativeBigNum is either 0 or -1, depending on
+ * the sign of N. Since we don't believe this case
+ * is common, do the calculation with the minimum
+ * amount of code.
+ */
+ arg2 = make_small(MIN_SMALL);
+ goto do_bsl;
+ } else if (is_small(arg1) || is_big(arg1)) {
+ /*
+ * N bsl PositiveBigNum is too large to represent.
+ */
+ BIF_ERROR(p, SYSTEM_LIMIT);
+ }
+ /* Fall through if the left argument is not an integer. */
+ }
+ }
+ BIF_ERROR(p, BADARITH);
+}
+
+BIF_RETTYPE bnot_1(BIF_ALIST_1)
+{
+ Eterm ret;
+
+ if (is_small(BIF_ARG_1)) {
+ ret = make_small(~signed_val(BIF_ARG_1));
+ } else if (is_big(BIF_ARG_1)) {
+ Uint need = BIG_NEED_SIZE(big_size(BIF_ARG_1)+1);
+ Eterm* bigp = HAlloc(BIF_P, need);
+
+ ret = big_bnot(BIF_ARG_1, bigp);
+ maybe_shrink(BIF_P, bigp, ret, need);
+ if (is_nil(ret)) {
+ BIF_ERROR(BIF_P, SYSTEM_LIMIT);
+ }
+ } else {
+ BIF_ERROR(BIF_P, BADARITH);
+ }
+ BIF_RET(ret);
+}
+
+/*
+ * Implementation and interfaces for the rest of the runtime system.
+ * The functions that follow are only used in match specs and when
+ * arithmetic functions are applied.
+ */
+
+Eterm
+erts_mixed_plus(Process* p, Eterm arg1, Eterm arg2)
+{
+ Eterm tmp_big1[2];
+ Eterm tmp_big2[2];
+ Eterm res;
+ Eterm hdr;
+ FloatDef f1, f2;
+ dsize_t sz1, sz2, sz;
+ int need_heap;
+ Eterm* hp;
+ Sint ires;
+
+ ERTS_FP_CHECK_INIT(p);
+ switch (arg1 & _TAG_PRIMARY_MASK) {
+ case TAG_PRIMARY_IMMED1:
+ switch ((arg1 & _TAG_IMMED1_MASK) >> _TAG_PRIMARY_SIZE) {
+ case (_TAG_IMMED1_SMALL >> _TAG_PRIMARY_SIZE):
+ switch (arg2 & _TAG_PRIMARY_MASK) {
+ case TAG_PRIMARY_IMMED1:
+ switch ((arg2 & _TAG_IMMED1_MASK) >> _TAG_PRIMARY_SIZE) {
+ case (_TAG_IMMED1_SMALL >> _TAG_PRIMARY_SIZE):
+ ires = signed_val(arg1) + signed_val(arg2);
+ ASSERT(MY_IS_SSMALL(ires) == IS_SSMALL(ires));
+ if (MY_IS_SSMALL(ires)) {
+ return make_small(ires);
+ } else {
+ hp = HAlloc(p, 2);
+ res = small_to_big(ires, hp);
+ return res;
+ }
+ default:
+ badarith:
+ p->freason = BADARITH;
+ return THE_NON_VALUE;
+ }
+ case TAG_PRIMARY_BOXED:
+ hdr = *boxed_val(arg2);
+ switch ((hdr & _TAG_HEADER_MASK) >> _TAG_PRIMARY_SIZE) {
+ case (_TAG_HEADER_POS_BIG >> _TAG_PRIMARY_SIZE):
+ case (_TAG_HEADER_NEG_BIG >> _TAG_PRIMARY_SIZE):
+ if (arg1 == SMALL_ZERO) {
+ return arg2;
+ }
+ arg1 = small_to_big(signed_val(arg1), tmp_big1);
+ goto do_big;
+ case (_TAG_HEADER_FLOAT >> _TAG_PRIMARY_SIZE):
+ f1.fd = signed_val(arg1);
+ GET_DOUBLE(arg2, f2);
+ goto do_float;
+ default:
+ goto badarith;
+ }
+ }
+ default:
+ goto badarith;
+ }
+ case TAG_PRIMARY_BOXED:
+ hdr = *boxed_val(arg1);
+ switch ((hdr & _TAG_HEADER_MASK) >> _TAG_PRIMARY_SIZE) {
+ case (_TAG_HEADER_POS_BIG >> _TAG_PRIMARY_SIZE):
+ case (_TAG_HEADER_NEG_BIG >> _TAG_PRIMARY_SIZE):
+ switch (arg2 & _TAG_PRIMARY_MASK) {
+ case (_TAG_IMMED1_SMALL >> _TAG_PRIMARY_SIZE):
+ switch ((arg2 & _TAG_IMMED1_MASK) >> _TAG_PRIMARY_SIZE) {
+ case (_TAG_IMMED1_SMALL >> _TAG_PRIMARY_SIZE):
+ if (arg2 == SMALL_ZERO) {
+ return arg1;
+ }
+ arg2 = small_to_big(signed_val(arg2), tmp_big2);
+ goto do_big;
+ default:
+ goto badarith;
+ }
+ case TAG_PRIMARY_BOXED:
+ hdr = *boxed_val(arg2);
+ switch ((hdr & _TAG_HEADER_MASK) >> _TAG_PRIMARY_SIZE) {
+ case (_TAG_HEADER_POS_BIG >> _TAG_PRIMARY_SIZE):
+ case (_TAG_HEADER_NEG_BIG >> _TAG_PRIMARY_SIZE):
+ do_big:
+ sz1 = big_size(arg1);
+ sz2 = big_size(arg2);
+ sz = MAX(sz1, sz2)+1;
+ need_heap = BIG_NEED_SIZE(sz);
+ hp = HAlloc(p, need_heap);
+ res = big_plus(arg1, arg2, hp);
+ if (is_nil(res)) {
+ erts_arith_shrink(p, hp);
+ p->freason = SYSTEM_LIMIT;
+ return THE_NON_VALUE;
+ }
+ maybe_shrink(p, hp, res, need_heap);
+ return res;
+ case (_TAG_HEADER_FLOAT >> _TAG_PRIMARY_SIZE):
+ if (big_to_double(arg1, &f1.fd) < 0) {
+ goto badarith;
+ }
+ GET_DOUBLE(arg2, f2);
+ goto do_float;
+ default:
+ goto badarith;
+ }
+ }
+ case (_TAG_HEADER_FLOAT >> _TAG_PRIMARY_SIZE):
+ switch (arg2 & _TAG_PRIMARY_MASK) {
+ case TAG_PRIMARY_IMMED1:
+ switch ((arg2 & _TAG_IMMED1_MASK) >> _TAG_PRIMARY_SIZE) {
+ case (_TAG_IMMED1_SMALL >> _TAG_PRIMARY_SIZE):
+ GET_DOUBLE(arg1, f1);
+ f2.fd = signed_val(arg2);
+ goto do_float;
+ default:
+ goto badarith;
+ }
+ case TAG_PRIMARY_BOXED:
+ hdr = *boxed_val(arg2);
+ switch ((hdr & _TAG_HEADER_MASK) >> _TAG_PRIMARY_SIZE) {
+ case (_TAG_HEADER_POS_BIG >> _TAG_PRIMARY_SIZE):
+ case (_TAG_HEADER_NEG_BIG >> _TAG_PRIMARY_SIZE):
+ GET_DOUBLE(arg1, f1);
+ if (big_to_double(arg2, &f2.fd) < 0) {
+ goto badarith;
+ }
+ goto do_float;
+ case (_TAG_HEADER_FLOAT >> _TAG_PRIMARY_SIZE):
+ GET_DOUBLE(arg1, f1);
+ GET_DOUBLE(arg2, f2);
+
+ do_float:
+ f1.fd = f1.fd + f2.fd;
+ ERTS_FP_ERROR(p, f1.fd, goto badarith);
+ hp = HAlloc(p, FLOAT_SIZE_OBJECT);
+ res = make_float(hp);
+ PUT_DOUBLE(f1, hp);
+ return res;
+ default:
+ goto badarith;
+ }
+ default:
+ goto badarith;
+ }
+ }
+ default:
+ goto badarith;
+ }
+}
+
+Eterm
+erts_mixed_minus(Process* p, Eterm arg1, Eterm arg2)
+{
+ Eterm tmp_big1[2];
+ Eterm tmp_big2[2];
+ Eterm hdr;
+ Eterm res;
+ FloatDef f1, f2;
+ dsize_t sz1, sz2, sz;
+ int need_heap;
+ Eterm* hp;
+ Sint ires;
+
+ ERTS_FP_CHECK_INIT(p);
+ switch (arg1 & _TAG_PRIMARY_MASK) {
+ case TAG_PRIMARY_IMMED1:
+ switch ((arg1 & _TAG_IMMED1_MASK) >> _TAG_PRIMARY_SIZE) {
+ case (_TAG_IMMED1_SMALL >> _TAG_PRIMARY_SIZE):
+ switch (arg2 & _TAG_PRIMARY_MASK) {
+ case TAG_PRIMARY_IMMED1:
+ switch ((arg2 & _TAG_IMMED1_MASK) >> _TAG_PRIMARY_SIZE) {
+ case (_TAG_IMMED1_SMALL >> _TAG_PRIMARY_SIZE):
+ ires = signed_val(arg1) - signed_val(arg2);
+ ASSERT(MY_IS_SSMALL(ires) == IS_SSMALL(ires));
+ if (MY_IS_SSMALL(ires)) {
+ return make_small(ires);
+ } else {
+ hp = HAlloc(p, 2);
+ res = small_to_big(ires, hp);
+ return res;
+ }
+ default:
+ badarith:
+ p->freason = BADARITH;
+ return THE_NON_VALUE;
+ }
+ case TAG_PRIMARY_BOXED:
+ hdr = *boxed_val(arg2);
+ switch ((hdr & _TAG_HEADER_MASK) >> _TAG_PRIMARY_SIZE) {
+ case (_TAG_HEADER_POS_BIG >> _TAG_PRIMARY_SIZE):
+ case (_TAG_HEADER_NEG_BIG >> _TAG_PRIMARY_SIZE):
+ arg1 = small_to_big(signed_val(arg1), tmp_big1);
+ goto do_big;
+ case (_TAG_HEADER_FLOAT >> _TAG_PRIMARY_SIZE):
+ f1.fd = signed_val(arg1);
+ GET_DOUBLE(arg2, f2);
+ goto do_float;
+ default:
+ goto badarith;
+ }
+ }
+ default:
+ goto badarith;
+ }
+ case TAG_PRIMARY_BOXED:
+ hdr = *boxed_val(arg1);
+ switch ((hdr & _TAG_HEADER_MASK) >> _TAG_PRIMARY_SIZE) {
+ case (_TAG_HEADER_POS_BIG >> _TAG_PRIMARY_SIZE):
+ case (_TAG_HEADER_NEG_BIG >> _TAG_PRIMARY_SIZE):
+ switch (arg2 & _TAG_PRIMARY_MASK) {
+ case (_TAG_IMMED1_SMALL >> _TAG_PRIMARY_SIZE):
+ switch ((arg2 & _TAG_IMMED1_MASK) >> _TAG_PRIMARY_SIZE) {
+ case (_TAG_IMMED1_SMALL >> _TAG_PRIMARY_SIZE):
+ if (arg2 == SMALL_ZERO) {
+ return arg1;
+ }
+ arg2 = small_to_big(signed_val(arg2), tmp_big2);
+
+ do_big:
+ sz1 = big_size(arg1);
+ sz2 = big_size(arg2);
+ sz = MAX(sz1, sz2)+1;
+ need_heap = BIG_NEED_SIZE(sz);
+ hp = HAlloc(p, need_heap);
+ res = big_minus(arg1, arg2, hp);
+ if (is_nil(res)) {
+ erts_arith_shrink(p, hp);
+ p->freason = SYSTEM_LIMIT;
+ return THE_NON_VALUE;
+ }
+ maybe_shrink(p, hp, res, need_heap);
+ return res;
+ default:
+ goto badarith;
+ }
+ case TAG_PRIMARY_BOXED:
+ hdr = *boxed_val(arg2);
+ switch ((hdr & _TAG_HEADER_MASK) >> _TAG_PRIMARY_SIZE) {
+ case (_TAG_HEADER_POS_BIG >> _TAG_PRIMARY_SIZE):
+ case (_TAG_HEADER_NEG_BIG >> _TAG_PRIMARY_SIZE):
+ goto do_big;
+ case (_TAG_HEADER_FLOAT >> _TAG_PRIMARY_SIZE):
+ if (big_to_double(arg1, &f1.fd) < 0) {
+ goto badarith;
+ }
+ GET_DOUBLE(arg2, f2);
+ goto do_float;
+ default:
+ goto badarith;
+ }
+ }
+ case (_TAG_HEADER_FLOAT >> _TAG_PRIMARY_SIZE):
+ switch (arg2 & _TAG_PRIMARY_MASK) {
+ case TAG_PRIMARY_IMMED1:
+ switch ((arg2 & _TAG_IMMED1_MASK) >> _TAG_PRIMARY_SIZE) {
+ case (_TAG_IMMED1_SMALL >> _TAG_PRIMARY_SIZE):
+ GET_DOUBLE(arg1, f1);
+ f2.fd = signed_val(arg2);
+ goto do_float;
+ default:
+ goto badarith;
+ }
+ case TAG_PRIMARY_BOXED:
+ hdr = *boxed_val(arg2);
+ switch ((hdr & _TAG_HEADER_MASK) >> _TAG_PRIMARY_SIZE) {
+ case (_TAG_HEADER_POS_BIG >> _TAG_PRIMARY_SIZE):
+ case (_TAG_HEADER_NEG_BIG >> _TAG_PRIMARY_SIZE):
+ GET_DOUBLE(arg1, f1);
+ if (big_to_double(arg2, &f2.fd) < 0) {
+ goto badarith;
+ }
+ goto do_float;
+ case (_TAG_HEADER_FLOAT >> _TAG_PRIMARY_SIZE):
+ GET_DOUBLE(arg1, f1);
+ GET_DOUBLE(arg2, f2);
+
+ do_float:
+ f1.fd = f1.fd - f2.fd;
+ ERTS_FP_ERROR(p, f1.fd, goto badarith);
+ hp = HAlloc(p, FLOAT_SIZE_OBJECT);
+ res = make_float(hp);
+ PUT_DOUBLE(f1, hp);
+ return res;
+ default:
+ goto badarith;
+ }
+ default:
+ goto badarith;
+ }
+ }
+ default:
+ goto badarith;
+ }
+}
+
+Eterm
+erts_mixed_times(Process* p, Eterm arg1, Eterm arg2)
+{
+ Eterm tmp_big1[2];
+ Eterm tmp_big2[2];
+ Eterm hdr;
+ Eterm res;
+ FloatDef f1, f2;
+ dsize_t sz1, sz2, sz;
+ int need_heap;
+ Eterm* hp;
+
+ ERTS_FP_CHECK_INIT(p);
+ switch (arg1 & _TAG_PRIMARY_MASK) {
+ case TAG_PRIMARY_IMMED1:
+ switch ((arg1 & _TAG_IMMED1_MASK) >> _TAG_PRIMARY_SIZE) {
+ case (_TAG_IMMED1_SMALL >> _TAG_PRIMARY_SIZE):
+ switch (arg2 & _TAG_PRIMARY_MASK) {
+ case TAG_PRIMARY_IMMED1:
+ switch ((arg2 & _TAG_IMMED1_MASK) >> _TAG_PRIMARY_SIZE) {
+ case (_TAG_IMMED1_SMALL >> _TAG_PRIMARY_SIZE):
+ if ((arg1 == SMALL_ZERO) || (arg2 == SMALL_ZERO)) {
+ return(SMALL_ZERO);
+ } else if (arg1 == SMALL_ONE) {
+ return(arg2);
+ } else if (arg2 == SMALL_ONE) {
+ return(arg1);
+ } else {
+ Eterm big_res[3];
+
+ /*
+ * The following code is optimized for the case that
+ * result is small (which should be the most common case
+ * in practice).
+ */
+ res = small_times(signed_val(arg1), signed_val(arg2), big_res);
+ if (is_small(res)) {
+ return res;
+ } else {
+ /*
+ * The result is a big number.
+ * Allocate a heap fragment and copy the result.
+ * Be careful to allocate exactly what we need
+ * so as not to leave any holes.
+ */
+ Uint arity;
+
+ ASSERT(is_big(res));
+ hdr = big_res[0];
+ arity = bignum_header_arity(hdr);
+ ASSERT(arity == 1 || arity == 2);
+ hp = HAlloc(p, arity+1);
+ res = make_big(hp);
+ *hp++ = hdr;
+ *hp++ = big_res[1];
+ if (arity > 1) {
+ *hp = big_res[2];
+ }
+ return res;
+ }
+ }
+ default:
+ badarith:
+ p->freason = BADARITH;
+ return THE_NON_VALUE;
+ }
+ case TAG_PRIMARY_BOXED:
+ hdr = *boxed_val(arg2);
+ switch ((hdr & _TAG_HEADER_MASK) >> _TAG_PRIMARY_SIZE) {
+ case (_TAG_HEADER_POS_BIG >> _TAG_PRIMARY_SIZE):
+ case (_TAG_HEADER_NEG_BIG >> _TAG_PRIMARY_SIZE):
+ if (arg1 == SMALL_ZERO)
+ return(SMALL_ZERO);
+ if (arg1 == SMALL_ONE)
+ return(arg2);
+ arg1 = small_to_big(signed_val(arg1), tmp_big1);
+ sz = 2 + big_size(arg2);
+ goto do_big;
+ case (_TAG_HEADER_FLOAT >> _TAG_PRIMARY_SIZE):
+ f1.fd = signed_val(arg1);
+ GET_DOUBLE(arg2, f2);
+ goto do_float;
+ default:
+ goto badarith;
+ }
+ }
+ default:
+ goto badarith;
+ }
+ case TAG_PRIMARY_BOXED:
+ hdr = *boxed_val(arg1);
+ switch ((hdr & _TAG_HEADER_MASK) >> _TAG_PRIMARY_SIZE) {
+ case (_TAG_HEADER_POS_BIG >> _TAG_PRIMARY_SIZE):
+ case (_TAG_HEADER_NEG_BIG >> _TAG_PRIMARY_SIZE):
+ switch (arg2 & _TAG_PRIMARY_MASK) {
+ case (_TAG_IMMED1_SMALL >> _TAG_PRIMARY_SIZE):
+ switch ((arg2 & _TAG_IMMED1_MASK) >> _TAG_PRIMARY_SIZE) {
+ case (_TAG_IMMED1_SMALL >> _TAG_PRIMARY_SIZE):
+ if (arg2 == SMALL_ZERO)
+ return(SMALL_ZERO);
+ if (arg2 == SMALL_ONE)
+ return(arg1);
+ arg2 = small_to_big(signed_val(arg2), tmp_big2);
+ sz = 2 + big_size(arg1);
+ goto do_big;
+ default:
+ goto badarith;
+ }
+ case TAG_PRIMARY_BOXED:
+ hdr = *boxed_val(arg2);
+ switch ((hdr & _TAG_HEADER_MASK) >> _TAG_PRIMARY_SIZE) {
+ case (_TAG_HEADER_POS_BIG >> _TAG_PRIMARY_SIZE):
+ case (_TAG_HEADER_NEG_BIG >> _TAG_PRIMARY_SIZE):
+ sz1 = big_size(arg1);
+ sz2 = big_size(arg2);
+ sz = sz1 + sz2;
+
+ do_big:
+ need_heap = BIG_NEED_SIZE(sz);
+ hp = HAlloc(p, need_heap);
+ res = big_times(arg1, arg2, hp);
+
+ /*
+ * Note that the result must be big in this case, since
+ * at least one operand was big to begin with, and
+ * the absolute value of the other is > 1.
+ */
+
+ if (is_nil(res)) {
+ erts_arith_shrink(p, hp);
+ p->freason = SYSTEM_LIMIT;
+ return THE_NON_VALUE;
+ }
+ maybe_shrink(p, hp, res, need_heap);
+ return res;
+ case (_TAG_HEADER_FLOAT >> _TAG_PRIMARY_SIZE):
+ if (big_to_double(arg1, &f1.fd) < 0) {
+ goto badarith;
+ }
+ GET_DOUBLE(arg2, f2);
+ goto do_float;
+ default:
+ goto badarith;
+ }
+ }
+ case (_TAG_HEADER_FLOAT >> _TAG_PRIMARY_SIZE):
+ switch (arg2 & _TAG_PRIMARY_MASK) {
+ case TAG_PRIMARY_IMMED1:
+ switch ((arg2 & _TAG_IMMED1_MASK) >> _TAG_PRIMARY_SIZE) {
+ case (_TAG_IMMED1_SMALL >> _TAG_PRIMARY_SIZE):
+ GET_DOUBLE(arg1, f1);
+ f2.fd = signed_val(arg2);
+ goto do_float;
+ default:
+ goto badarith;
+ }
+ case TAG_PRIMARY_BOXED:
+ hdr = *boxed_val(arg2);
+ switch ((hdr & _TAG_HEADER_MASK) >> _TAG_PRIMARY_SIZE) {
+ case (_TAG_HEADER_POS_BIG >> _TAG_PRIMARY_SIZE):
+ case (_TAG_HEADER_NEG_BIG >> _TAG_PRIMARY_SIZE):
+ GET_DOUBLE(arg1, f1);
+ if (big_to_double(arg2, &f2.fd) < 0) {
+ goto badarith;
+ }
+ goto do_float;
+ case (_TAG_HEADER_FLOAT >> _TAG_PRIMARY_SIZE):
+ GET_DOUBLE(arg1, f1);
+ GET_DOUBLE(arg2, f2);
+
+ do_float:
+ f1.fd = f1.fd * f2.fd;
+ ERTS_FP_ERROR(p, f1.fd, goto badarith);
+ hp = HAlloc(p, FLOAT_SIZE_OBJECT);
+ res = make_float(hp);
+ PUT_DOUBLE(f1, hp);
+ return res;
+ default:
+ goto badarith;
+ }
+ default:
+ goto badarith;
+ }
+ }
+ default:
+ goto badarith;
+ }
+}
+
+Eterm
+erts_mixed_div(Process* p, Eterm arg1, Eterm arg2)
+{
+ FloatDef f1, f2;
+ Eterm* hp;
+ Eterm hdr;
+
+ ERTS_FP_CHECK_INIT(p);
+ switch (arg1 & _TAG_PRIMARY_MASK) {
+ case TAG_PRIMARY_IMMED1:
+ switch ((arg1 & _TAG_IMMED1_MASK) >> _TAG_PRIMARY_SIZE) {
+ case (_TAG_IMMED1_SMALL >> _TAG_PRIMARY_SIZE):
+ switch (arg2 & _TAG_PRIMARY_MASK) {
+ case TAG_PRIMARY_IMMED1:
+ switch ((arg2 & _TAG_IMMED1_MASK) >> _TAG_PRIMARY_SIZE) {
+ case (_TAG_IMMED1_SMALL >> _TAG_PRIMARY_SIZE):
+ f1.fd = signed_val(arg1);
+ f2.fd = signed_val(arg2);
+ goto do_float;
+ default:
+ badarith:
+ p->freason = BADARITH;
+ return THE_NON_VALUE;
+ }
+ case TAG_PRIMARY_BOXED:
+ hdr = *boxed_val(arg2);
+ switch ((hdr & _TAG_HEADER_MASK) >> _TAG_PRIMARY_SIZE) {
+ case (_TAG_HEADER_POS_BIG >> _TAG_PRIMARY_SIZE):
+ case (_TAG_HEADER_NEG_BIG >> _TAG_PRIMARY_SIZE):
+ f1.fd = signed_val(arg1);
+ if (big_to_double(arg2, &f2.fd) < 0) {
+ goto badarith;
+ }
+ goto do_float;
+ case (_TAG_HEADER_FLOAT >> _TAG_PRIMARY_SIZE):
+ f1.fd = signed_val(arg1);
+ GET_DOUBLE(arg2, f2);
+ goto do_float;
+ default:
+ goto badarith;
+ }
+ }
+ default:
+ goto badarith;
+ }
+ case TAG_PRIMARY_BOXED:
+ hdr = *boxed_val(arg1);
+ switch ((hdr & _TAG_HEADER_MASK) >> _TAG_PRIMARY_SIZE) {
+ case (_TAG_HEADER_POS_BIG >> _TAG_PRIMARY_SIZE):
+ case (_TAG_HEADER_NEG_BIG >> _TAG_PRIMARY_SIZE):
+ switch (arg2 & _TAG_PRIMARY_MASK) {
+ case (_TAG_IMMED1_SMALL >> _TAG_PRIMARY_SIZE):
+ switch ((arg2 & _TAG_IMMED1_MASK) >> _TAG_PRIMARY_SIZE) {
+ case (_TAG_IMMED1_SMALL >> _TAG_PRIMARY_SIZE):
+ if (big_to_double(arg1, &f1.fd) < 0) {
+ goto badarith;
+ }
+ f2.fd = signed_val(arg2);
+ goto do_float;
+ default:
+ goto badarith;
+ }
+ case TAG_PRIMARY_BOXED:
+ hdr = *boxed_val(arg2);
+ switch ((hdr & _TAG_HEADER_MASK) >> _TAG_PRIMARY_SIZE) {
+ case (_TAG_HEADER_POS_BIG >> _TAG_PRIMARY_SIZE):
+ case (_TAG_HEADER_NEG_BIG >> _TAG_PRIMARY_SIZE):
+ if (big_to_double(arg1, &f1.fd) < 0 ||
+ big_to_double(arg2, &f2.fd) < 0) {
+ goto badarith;
+ }
+ goto do_float;
+ case (_TAG_HEADER_FLOAT >> _TAG_PRIMARY_SIZE):
+ if (big_to_double(arg1, &f1.fd) < 0) {
+ goto badarith;
+ }
+ GET_DOUBLE(arg2, f2);
+ goto do_float;
+ default:
+ goto badarith;
+ }
+ }
+ case (_TAG_HEADER_FLOAT >> _TAG_PRIMARY_SIZE):
+ switch (arg2 & _TAG_PRIMARY_MASK) {
+ case TAG_PRIMARY_IMMED1:
+ switch ((arg2 & _TAG_IMMED1_MASK) >> _TAG_PRIMARY_SIZE) {
+ case (_TAG_IMMED1_SMALL >> _TAG_PRIMARY_SIZE):
+ GET_DOUBLE(arg1, f1);
+ f2.fd = signed_val(arg2);
+ goto do_float;
+ default:
+ goto badarith;
+ }
+ case TAG_PRIMARY_BOXED:
+ hdr = *boxed_val(arg2);
+ switch ((hdr & _TAG_HEADER_MASK) >> _TAG_PRIMARY_SIZE) {
+ case (_TAG_HEADER_POS_BIG >> _TAG_PRIMARY_SIZE):
+ case (_TAG_HEADER_NEG_BIG >> _TAG_PRIMARY_SIZE):
+ GET_DOUBLE(arg1, f1);
+ if (big_to_double(arg2, &f2.fd) < 0) {
+ goto badarith;
+ }
+ goto do_float;
+ case (_TAG_HEADER_FLOAT >> _TAG_PRIMARY_SIZE):
+ GET_DOUBLE(arg1, f1);
+ GET_DOUBLE(arg2, f2);
+
+ do_float:
+ f1.fd = f1.fd / f2.fd;
+ ERTS_FP_ERROR(p, f1.fd, goto badarith);
+ hp = HAlloc(p, FLOAT_SIZE_OBJECT);
+ PUT_DOUBLE(f1, hp);
+ return make_float(hp);
+ default:
+ goto badarith;
+ }
+ default:
+ goto badarith;
+ }
+ }
+ default:
+ goto badarith;
+ }
+}
+
+Eterm
+erts_int_div(Process* p, Eterm arg1, Eterm arg2)
+{
+ Eterm tmp_big1[2];
+ Eterm tmp_big2[2];
+ int ires;
+
+ switch (NUMBER_CODE(arg1, arg2)) {
+ case SMALL_SMALL:
+ /* This case occurs if the most negative fixnum is divided by -1. */
+ ASSERT(arg2 == make_small(-1));
+ arg1 = small_to_big(signed_val(arg1), tmp_big1);
+ /*FALLTHROUGH*/
+ case BIG_SMALL:
+ arg2 = small_to_big(signed_val(arg2), tmp_big2);
+ goto L_big_div;
+ case SMALL_BIG:
+ if (arg1 != make_small(MIN_SMALL)) {
+ return SMALL_ZERO;
+ }
+ arg1 = small_to_big(signed_val(arg1), tmp_big1);
+ /*FALLTHROUGH*/
+ case BIG_BIG:
+ L_big_div:
+ ires = big_ucomp(arg1, arg2);
+ if (ires < 0) {
+ arg1 = SMALL_ZERO;
+ } else if (ires == 0) {
+ arg1 = (big_sign(arg1) == big_sign(arg2)) ?
+ SMALL_ONE : SMALL_MINUS_ONE;
+ } else {
+ Eterm* hp;
+ int i = big_size(arg1);
+ Uint need;
+
+ ires = big_size(arg2);
+ need = BIG_NEED_SIZE(i-ires+1) + BIG_NEED_SIZE(i);
+ hp = HAlloc(p, need);
+ arg1 = big_div(arg1, arg2, hp);
+ if (is_nil(arg1)) {
+ erts_arith_shrink(p, hp);
+ p->freason = SYSTEM_LIMIT;
+ return THE_NON_VALUE;
+ }
+ maybe_shrink(p, hp, arg1, need);
+ }
+ return arg1;
+ default:
+ p->freason = BADARITH;
+ return THE_NON_VALUE;
+ }
+}
+
+Eterm
+erts_int_rem(Process* p, Eterm arg1, Eterm arg2)
+{
+ Eterm tmp_big1[2];
+ Eterm tmp_big2[2];
+ int ires;
+
+ switch (NUMBER_CODE(arg1, arg2)) {
+ case BIG_SMALL:
+ arg2 = small_to_big(signed_val(arg2), tmp_big2);
+ goto L_big_rem;
+ case SMALL_BIG:
+ if (arg1 != make_small(MIN_SMALL)) {
+ return arg1;
+ } else {
+ Eterm tmp = small_to_big(signed_val(arg1), tmp_big1);
+ if ((ires = big_ucomp(tmp, arg2)) == 0) {
+ return SMALL_ZERO;
+ } else {
+ ASSERT(ires < 0);
+ return arg1;
+ }
+ }
+ /* All paths returned */
+ case BIG_BIG:
+ L_big_rem:
+ ires = big_ucomp(arg1, arg2);
+ if (ires == 0) {
+ arg1 = SMALL_ZERO;
+ } else if (ires > 0) {
+ Uint need = BIG_NEED_SIZE(big_size(arg1));
+ Eterm* hp = HAlloc(p, need);
+
+ arg1 = big_rem(arg1, arg2, hp);
+ if (is_nil(arg1)) {
+ erts_arith_shrink(p, hp);
+ p->freason = SYSTEM_LIMIT;
+ return THE_NON_VALUE;
+ }
+ maybe_shrink(p, hp, arg1, need);
+ }
+ return arg1;
+ default:
+ p->freason = BADARITH;
+ return THE_NON_VALUE;
+ }
+}
+
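+/*
+ * The bitwise helpers below normalize any small operand to a bignum
+ * (in the two-word tmp buffers on this function's stack) and then
+ * run the corresponding bignum routine; maybe_shrink() returns the
+ * unused part of the allocation, and the result may again be a
+ * small.
+ */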
+Eterm erts_band(Process* p, Eterm arg1, Eterm arg2)
+{
+ Eterm tmp_big1[2];
+ Eterm tmp_big2[2];
+ Eterm* hp;
+ int need;
+
+ switch (NUMBER_CODE(arg1, arg2)) {
+ case SMALL_BIG:
+ arg1 = small_to_big(signed_val(arg1), tmp_big1);
+ break;
+ case BIG_SMALL:
+ arg2 = small_to_big(signed_val(arg2), tmp_big2);
+ break;
+ case BIG_BIG:
+ break;
+ default:
+ p->freason = BADARITH;
+ return THE_NON_VALUE;
+ }
+ need = BIG_NEED_SIZE(MAX(big_size(arg1), big_size(arg2)) + 1);
+ hp = HAlloc(p, need);
+ arg1 = big_band(arg1, arg2, hp);
+ ASSERT(is_not_nil(arg1));
+ maybe_shrink(p, hp, arg1, need);
+ return arg1;
+}
+
+Eterm erts_bor(Process* p, Eterm arg1, Eterm arg2)
+{
+ Eterm tmp_big1[2];
+ Eterm tmp_big2[2];
+ Eterm* hp;
+ int need;
+
+ switch (NUMBER_CODE(arg1, arg2)) {
+ case SMALL_BIG:
+ arg1 = small_to_big(signed_val(arg1), tmp_big1);
+ break;
+ case BIG_SMALL:
+ arg2 = small_to_big(signed_val(arg2), tmp_big2);
+ break;
+ case BIG_BIG:
+ break;
+ default:
+ p->freason = BADARITH;
+ return THE_NON_VALUE;
+ }
+ need = BIG_NEED_SIZE(MAX(big_size(arg1), big_size(arg2)) + 1);
+ hp = HAlloc(p, need);
+ arg1 = big_bor(arg1, arg2, hp);
+ ASSERT(is_not_nil(arg1));
+ maybe_shrink(p, hp, arg1, need);
+ return arg1;
+}
+
+Eterm erts_bxor(Process* p, Eterm arg1, Eterm arg2)
+{
+ Eterm tmp_big1[2];
+ Eterm tmp_big2[2];
+ Eterm* hp;
+ int need;
+
+ switch (NUMBER_CODE(arg1, arg2)) {
+ case SMALL_BIG:
+ arg1 = small_to_big(signed_val(arg1), tmp_big1);
+ break;
+ case BIG_SMALL:
+ arg2 = small_to_big(signed_val(arg2), tmp_big2);
+ break;
+ case BIG_BIG:
+ break;
+ default:
+ p->freason = BADARITH;
+ return THE_NON_VALUE;
+ }
+ need = BIG_NEED_SIZE(MAX(big_size(arg1), big_size(arg2)) + 1);
+ hp = HAlloc(p, need);
+ arg1 = big_bxor(arg1, arg2, hp);
+ ASSERT(is_not_nil(arg1));
+ maybe_shrink(p, hp, arg1, need);
+ return arg1;
+}
+
+Eterm erts_bnot(Process* p, Eterm arg)
+{
+ Eterm ret;
+
+ if (is_big(arg)) {
+ Uint need = BIG_NEED_SIZE(big_size(arg)+1);
+ Eterm* bigp = HAlloc(p, need);
+
+ ret = big_bnot(arg, bigp);
+ maybe_shrink(p, bigp, ret, need);
+ if (is_nil(ret)) {
+ p->freason = SYSTEM_LIMIT;
+ return NIL;
+ }
+ } else {
+ p->freason = BADARITH;
+ return NIL;
+ }
+ return ret;
+}
+
+#define ERTS_NEED_GC(p, need) ((HEAP_LIMIT((p)) - HEAP_TOP((p))) <= (need))
+
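+/*
+ * Trim the heap top after an in-place bignum operation: keep only
+ * the words actually used by the result (none if the result is an
+ * immediate) and give the rest back to the heap.
+ */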
+static ERTS_INLINE void
+trim_heap(Process* p, Eterm* hp, Eterm res)
+{
+ if (is_immed(res)) {
+ ASSERT(p->heap <= hp && hp <= p->htop);
+ p->htop = hp;
+ } else {
+ Eterm* new_htop;
+ ASSERT(is_big(res));
+ new_htop = hp + bignum_header_arity(*hp) + 1;
+ ASSERT(p->heap <= new_htop && new_htop <= p->htop);
+ p->htop = new_htop;
+ }
+ ASSERT(p->heap <= p->htop && p->htop <= p->stop);
+}
+
+/*
+ * The functions that follow are called from the emulator loop.
+ * They are not allowed to allocate heap fragments, but must do
+ * a garbage collection if there is insufficient heap space.
+ */
+
+#define erts_arith_shrink horrible error
+#define maybe_shrink horrible error
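+/*
+ * The two defines above poison the fragment-based helpers: the GC
+ * versions below must allocate directly on the process heap, so any
+ * accidental use of those helpers becomes a compile-time error.
+ */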
+
+Eterm
+erts_gc_mixed_plus(Process* p, Eterm* reg, Uint live)
+{
+ Eterm arg1;
+ Eterm arg2;
+ Eterm tmp_big1[2];
+ Eterm tmp_big2[2];
+ Eterm res;
+ Eterm hdr;
+ FloatDef f1, f2;
+ dsize_t sz1, sz2, sz;
+ int need_heap;
+ Eterm* hp;
+ Sint ires;
+
+ arg1 = reg[live];
+ arg2 = reg[live+1];
+ ERTS_FP_CHECK_INIT(p);
+ switch (arg1 & _TAG_PRIMARY_MASK) {
+ case TAG_PRIMARY_IMMED1:
+ switch ((arg1 & _TAG_IMMED1_MASK) >> _TAG_PRIMARY_SIZE) {
+ case (_TAG_IMMED1_SMALL >> _TAG_PRIMARY_SIZE):
+ switch (arg2 & _TAG_PRIMARY_MASK) {
+ case TAG_PRIMARY_IMMED1:
+ switch ((arg2 & _TAG_IMMED1_MASK) >> _TAG_PRIMARY_SIZE) {
+ case (_TAG_IMMED1_SMALL >> _TAG_PRIMARY_SIZE):
+ ires = signed_val(arg1) + signed_val(arg2);
+ ASSERT(MY_IS_SSMALL(ires) == IS_SSMALL(ires));
+ if (MY_IS_SSMALL(ires)) {
+ return make_small(ires);
+ } else {
+ if (ERTS_NEED_GC(p, 2)) {
+ erts_garbage_collect(p, 2, reg, live);
+ }
+ hp = p->htop;
+ p->htop += 2;
+ res = small_to_big(ires, hp);
+ return res;
+ }
+ default:
+ badarith:
+ p->freason = BADARITH;
+ return THE_NON_VALUE;
+ }
+ case TAG_PRIMARY_BOXED:
+ hdr = *boxed_val(arg2);
+ switch ((hdr & _TAG_HEADER_MASK) >> _TAG_PRIMARY_SIZE) {
+ case (_TAG_HEADER_POS_BIG >> _TAG_PRIMARY_SIZE):
+ case (_TAG_HEADER_NEG_BIG >> _TAG_PRIMARY_SIZE):
+ if (arg1 == SMALL_ZERO) {
+ return arg2;
+ }
+ arg1 = small_to_big(signed_val(arg1), tmp_big1);
+ goto do_big;
+ case (_TAG_HEADER_FLOAT >> _TAG_PRIMARY_SIZE):
+ f1.fd = signed_val(arg1);
+ GET_DOUBLE(arg2, f2);
+ goto do_float;
+ default:
+ goto badarith;
+ }
+ }
+ default:
+ goto badarith;
+ }
+ case TAG_PRIMARY_BOXED:
+ hdr = *boxed_val(arg1);
+ switch ((hdr & _TAG_HEADER_MASK) >> _TAG_PRIMARY_SIZE) {
+ case (_TAG_HEADER_POS_BIG >> _TAG_PRIMARY_SIZE):
+ case (_TAG_HEADER_NEG_BIG >> _TAG_PRIMARY_SIZE):
+ switch (arg2 & _TAG_PRIMARY_MASK) {
+ case (_TAG_IMMED1_SMALL >> _TAG_PRIMARY_SIZE):
+ switch ((arg2 & _TAG_IMMED1_MASK) >> _TAG_PRIMARY_SIZE) {
+ case (_TAG_IMMED1_SMALL >> _TAG_PRIMARY_SIZE):
+ if (arg2 == SMALL_ZERO) {
+ return arg1;
+ }
+ arg2 = small_to_big(signed_val(arg2), tmp_big2);
+ goto do_big;
+ default:
+ goto badarith;
+ }
+ case TAG_PRIMARY_BOXED:
+ hdr = *boxed_val(arg2);
+ switch ((hdr & _TAG_HEADER_MASK) >> _TAG_PRIMARY_SIZE) {
+ case (_TAG_HEADER_POS_BIG >> _TAG_PRIMARY_SIZE):
+ case (_TAG_HEADER_NEG_BIG >> _TAG_PRIMARY_SIZE):
+ do_big:
+ sz1 = big_size(arg1);
+ sz2 = big_size(arg2);
+ sz = MAX(sz1, sz2)+1;
+ need_heap = BIG_NEED_SIZE(sz);
+ if (ERTS_NEED_GC(p, need_heap)) {
+ erts_garbage_collect(p, need_heap, reg, live+2);
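+ /*
+ * The collection may have moved the operands; reload
+ * them from the root set unless they point into the
+ * on-stack tmp_big buffers, which the GC never touches.
+ */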
+ if (arg1 != make_big(tmp_big1)) {
+ arg1 = reg[live];
+ }
+ if (arg2 != make_big(tmp_big2)) {
+ arg2 = reg[live+1];
+ }
+ }
+ hp = p->htop;
+ p->htop += need_heap;
+ res = big_plus(arg1, arg2, hp);
+ trim_heap(p, hp, res);
+ if (is_nil(res)) {
+ p->freason = SYSTEM_LIMIT;
+ return THE_NON_VALUE;
+ }
+ return res;
+ case (_TAG_HEADER_FLOAT >> _TAG_PRIMARY_SIZE):
+ if (big_to_double(arg1, &f1.fd) < 0) {
+ goto badarith;
+ }
+ GET_DOUBLE(arg2, f2);
+ goto do_float;
+ default:
+ goto badarith;
+ }
+ }
+ case (_TAG_HEADER_FLOAT >> _TAG_PRIMARY_SIZE):
+ switch (arg2 & _TAG_PRIMARY_MASK) {
+ case TAG_PRIMARY_IMMED1:
+ switch ((arg2 & _TAG_IMMED1_MASK) >> _TAG_PRIMARY_SIZE) {
+ case (_TAG_IMMED1_SMALL >> _TAG_PRIMARY_SIZE):
+ GET_DOUBLE(arg1, f1);
+ f2.fd = signed_val(arg2);
+ goto do_float;
+ default:
+ goto badarith;
+ }
+ case TAG_PRIMARY_BOXED:
+ hdr = *boxed_val(arg2);
+ switch ((hdr & _TAG_HEADER_MASK) >> _TAG_PRIMARY_SIZE) {
+ case (_TAG_HEADER_POS_BIG >> _TAG_PRIMARY_SIZE):
+ case (_TAG_HEADER_NEG_BIG >> _TAG_PRIMARY_SIZE):
+ GET_DOUBLE(arg1, f1);
+ if (big_to_double(arg2, &f2.fd) < 0) {
+ goto badarith;
+ }
+ goto do_float;
+ case (_TAG_HEADER_FLOAT >> _TAG_PRIMARY_SIZE):
+ GET_DOUBLE(arg1, f1);
+ GET_DOUBLE(arg2, f2);
+
+ do_float:
+ f1.fd = f1.fd + f2.fd;
+ ERTS_FP_ERROR(p, f1.fd, goto badarith);
+ if (ERTS_NEED_GC(p, FLOAT_SIZE_OBJECT)) {
+ erts_garbage_collect(p, FLOAT_SIZE_OBJECT, reg, live);
+ }
+ hp = p->htop;
+ p->htop += FLOAT_SIZE_OBJECT;
+ res = make_float(hp);
+ PUT_DOUBLE(f1, hp);
+ return res;
+ default:
+ goto badarith;
+ }
+ default:
+ goto badarith;
+ }
+ }
+ default:
+ goto badarith;
+ }
+}
+
+Eterm
+erts_gc_mixed_minus(Process* p, Eterm* reg, Uint live)
+{
+ Eterm arg1;
+ Eterm arg2;
+ Eterm tmp_big1[2];
+ Eterm tmp_big2[2];
+ Eterm hdr;
+ Eterm res;
+ FloatDef f1, f2;
+ dsize_t sz1, sz2, sz;
+ int need_heap;
+ Eterm* hp;
+ Sint ires;
+
+ arg1 = reg[live];
+ arg2 = reg[live+1];
+ ERTS_FP_CHECK_INIT(p);
+ switch (arg1 & _TAG_PRIMARY_MASK) {
+ case TAG_PRIMARY_IMMED1:
+ switch ((arg1 & _TAG_IMMED1_MASK) >> _TAG_PRIMARY_SIZE) {
+ case (_TAG_IMMED1_SMALL >> _TAG_PRIMARY_SIZE):
+ switch (arg2 & _TAG_PRIMARY_MASK) {
+ case TAG_PRIMARY_IMMED1:
+ switch ((arg2 & _TAG_IMMED1_MASK) >> _TAG_PRIMARY_SIZE) {
+ case (_TAG_IMMED1_SMALL >> _TAG_PRIMARY_SIZE):
+ ires = signed_val(arg1) - signed_val(arg2);
+ ASSERT(MY_IS_SSMALL(ires) == IS_SSMALL(ires));
+ if (MY_IS_SSMALL(ires)) {
+ return make_small(ires);
+ } else {
+ if (ERTS_NEED_GC(p, 2)) {
+ erts_garbage_collect(p, 2, reg, live);
+ }
+ hp = p->htop;
+ p->htop += 2;
+ res = small_to_big(ires, hp);
+ return res;
+ }
+ default:
+ badarith:
+ p->freason = BADARITH;
+ return THE_NON_VALUE;
+ }
+ case TAG_PRIMARY_BOXED:
+ hdr = *boxed_val(arg2);
+ switch ((hdr & _TAG_HEADER_MASK) >> _TAG_PRIMARY_SIZE) {
+ case (_TAG_HEADER_POS_BIG >> _TAG_PRIMARY_SIZE):
+ case (_TAG_HEADER_NEG_BIG >> _TAG_PRIMARY_SIZE):
+ arg1 = small_to_big(signed_val(arg1), tmp_big1);
+ goto do_big;
+ case (_TAG_HEADER_FLOAT >> _TAG_PRIMARY_SIZE):
+ f1.fd = signed_val(arg1);
+ GET_DOUBLE(arg2, f2);
+ goto do_float;
+ default:
+ goto badarith;
+ }
+ }
+ default:
+ goto badarith;
+ }
+ case TAG_PRIMARY_BOXED:
+ hdr = *boxed_val(arg1);
+ switch ((hdr & _TAG_HEADER_MASK) >> _TAG_PRIMARY_SIZE) {
+ case (_TAG_HEADER_POS_BIG >> _TAG_PRIMARY_SIZE):
+ case (_TAG_HEADER_NEG_BIG >> _TAG_PRIMARY_SIZE):
+ switch (arg2 & _TAG_PRIMARY_MASK) {
+ case (_TAG_IMMED1_SMALL >> _TAG_PRIMARY_SIZE):
+ switch ((arg2 & _TAG_IMMED1_MASK) >> _TAG_PRIMARY_SIZE) {
+ case (_TAG_IMMED1_SMALL >> _TAG_PRIMARY_SIZE):
+ if (arg2 == SMALL_ZERO) {
+ return arg1;
+ }
+ arg2 = small_to_big(signed_val(arg2), tmp_big2);
+
+ do_big:
+ sz1 = big_size(arg1);
+ sz2 = big_size(arg2);
+ sz = MAX(sz1, sz2)+1;
+ need_heap = BIG_NEED_SIZE(sz);
+ if (ERTS_NEED_GC(p, need_heap)) {
+ erts_garbage_collect(p, need_heap, reg, live+2);
+ if (arg1 != make_big(tmp_big1)) {
+ arg1 = reg[live];
+ }
+ if (arg2 != make_big(tmp_big2)) {
+ arg2 = reg[live+1];
+ }
+ }
+ hp = p->htop;
+ p->htop += need_heap;
+ res = big_minus(arg1, arg2, hp);
+ trim_heap(p, hp, res);
+ if (is_nil(res)) {
+ p->freason = SYSTEM_LIMIT;
+ return THE_NON_VALUE;
+ }
+ return res;
+ default:
+ goto badarith;
+ }
+ case TAG_PRIMARY_BOXED:
+ hdr = *boxed_val(arg2);
+ switch ((hdr & _TAG_HEADER_MASK) >> _TAG_PRIMARY_SIZE) {
+ case (_TAG_HEADER_POS_BIG >> _TAG_PRIMARY_SIZE):
+ case (_TAG_HEADER_NEG_BIG >> _TAG_PRIMARY_SIZE):
+ goto do_big;
+ case (_TAG_HEADER_FLOAT >> _TAG_PRIMARY_SIZE):
+ if (big_to_double(arg1, &f1.fd) < 0) {
+ goto badarith;
+ }
+ GET_DOUBLE(arg2, f2);
+ goto do_float;
+ default:
+ goto badarith;
+ }
+ }
+ case (_TAG_HEADER_FLOAT >> _TAG_PRIMARY_SIZE):
+ switch (arg2 & _TAG_PRIMARY_MASK) {
+ case TAG_PRIMARY_IMMED1:
+ switch ((arg2 & _TAG_IMMED1_MASK) >> _TAG_PRIMARY_SIZE) {
+ case (_TAG_IMMED1_SMALL >> _TAG_PRIMARY_SIZE):
+ GET_DOUBLE(arg1, f1);
+ f2.fd = signed_val(arg2);
+ goto do_float;
+ default:
+ goto badarith;
+ }
+ case TAG_PRIMARY_BOXED:
+ hdr = *boxed_val(arg2);
+ switch ((hdr & _TAG_HEADER_MASK) >> _TAG_PRIMARY_SIZE) {
+ case (_TAG_HEADER_POS_BIG >> _TAG_PRIMARY_SIZE):
+ case (_TAG_HEADER_NEG_BIG >> _TAG_PRIMARY_SIZE):
+ GET_DOUBLE(arg1, f1);
+ if (big_to_double(arg2, &f2.fd) < 0) {
+ goto badarith;
+ }
+ goto do_float;
+ case (_TAG_HEADER_FLOAT >> _TAG_PRIMARY_SIZE):
+ GET_DOUBLE(arg1, f1);
+ GET_DOUBLE(arg2, f2);
+
+ do_float:
+ f1.fd = f1.fd - f2.fd;
+ ERTS_FP_ERROR(p, f1.fd, goto badarith);
+ if (ERTS_NEED_GC(p, FLOAT_SIZE_OBJECT)) {
+ erts_garbage_collect(p, FLOAT_SIZE_OBJECT, reg, live);
+ }
+ hp = p->htop;
+ p->htop += FLOAT_SIZE_OBJECT;
+ res = make_float(hp);
+ PUT_DOUBLE(f1, hp);
+ return res;
+ default:
+ goto badarith;
+ }
+ default:
+ goto badarith;
+ }
+ }
+ default:
+ goto badarith;
+ }
+}
+
+Eterm
+erts_gc_mixed_times(Process* p, Eterm* reg, Uint live)
+{
+ Eterm arg1;
+ Eterm arg2;
+ Eterm tmp_big1[2];
+ Eterm tmp_big2[2];
+ Eterm hdr;
+ Eterm res;
+ FloatDef f1, f2;
+ dsize_t sz1, sz2, sz;
+ int need_heap;
+ Eterm* hp;
+
+ arg1 = reg[live];
+ arg2 = reg[live+1];
+ ERTS_FP_CHECK_INIT(p);
+ switch (arg1 & _TAG_PRIMARY_MASK) {
+ case TAG_PRIMARY_IMMED1:
+ switch ((arg1 & _TAG_IMMED1_MASK) >> _TAG_PRIMARY_SIZE) {
+ case (_TAG_IMMED1_SMALL >> _TAG_PRIMARY_SIZE):
+ switch (arg2 & _TAG_PRIMARY_MASK) {
+ case TAG_PRIMARY_IMMED1:
+ switch ((arg2 & _TAG_IMMED1_MASK) >> _TAG_PRIMARY_SIZE) {
+ case (_TAG_IMMED1_SMALL >> _TAG_PRIMARY_SIZE):
+ if ((arg1 == SMALL_ZERO) || (arg2 == SMALL_ZERO)) {
+ return(SMALL_ZERO);
+ } else if (arg1 == SMALL_ONE) {
+ return(arg2);
+ } else if (arg2 == SMALL_ONE) {
+ return(arg1);
+ } else {
+ Eterm big_res[3];
+
+ /*
+ * The following code is optimized for the case that
+ * result is small (which should be the most common case
+ * in practice).
+ */
+ res = small_times(signed_val(arg1), signed_val(arg2),
+ big_res);
+ if (is_small(res)) {
+ return res;
+ } else {
+ /*
+ * The result is a big number. Allocate exactly
+ * the amount of heap space needed (no heap
+ * fragments here) and copy the result, so as
+ * not to leave any holes.
+ */
+ Uint arity;
+ Uint need;
+
+ ASSERT(is_big(res));
+ hdr = big_res[0];
+ arity = bignum_header_arity(hdr);
+ ASSERT(arity == 1 || arity == 2);
+ need = arity + 1;
+ if (ERTS_NEED_GC(p, need)) {
+ erts_garbage_collect(p, need, reg, live);
+ }
+ hp = p->htop;
+ p->htop += need;
+ res = make_big(hp);
+ *hp++ = hdr;
+ *hp++ = big_res[1];
+ if (arity > 1) {
+ *hp = big_res[2];
+ }
+ return res;
+ }
+ }
+ default:
+ badarith:
+ p->freason = BADARITH;
+ return THE_NON_VALUE;
+ }
+ case TAG_PRIMARY_BOXED:
+ hdr = *boxed_val(arg2);
+ switch ((hdr & _TAG_HEADER_MASK) >> _TAG_PRIMARY_SIZE) {
+ case (_TAG_HEADER_POS_BIG >> _TAG_PRIMARY_SIZE):
+ case (_TAG_HEADER_NEG_BIG >> _TAG_PRIMARY_SIZE):
+ if (arg1 == SMALL_ZERO)
+ return(SMALL_ZERO);
+ if (arg1 == SMALL_ONE)
+ return(arg2);
+ arg1 = small_to_big(signed_val(arg1), tmp_big1);
+ sz = 2 + big_size(arg2);
+ goto do_big;
+ case (_TAG_HEADER_FLOAT >> _TAG_PRIMARY_SIZE):
+ f1.fd = signed_val(arg1);
+ GET_DOUBLE(arg2, f2);
+ goto do_float;
+ default:
+ goto badarith;
+ }
+ }
+ default:
+ goto badarith;
+ }
+ case TAG_PRIMARY_BOXED:
+ hdr = *boxed_val(arg1);
+ switch ((hdr & _TAG_HEADER_MASK) >> _TAG_PRIMARY_SIZE) {
+ case (_TAG_HEADER_POS_BIG >> _TAG_PRIMARY_SIZE):
+ case (_TAG_HEADER_NEG_BIG >> _TAG_PRIMARY_SIZE):
+ switch (arg2 & _TAG_PRIMARY_MASK) {
+ case (_TAG_IMMED1_SMALL >> _TAG_PRIMARY_SIZE):
+ switch ((arg2 & _TAG_IMMED1_MASK) >> _TAG_PRIMARY_SIZE) {
+ case (_TAG_IMMED1_SMALL >> _TAG_PRIMARY_SIZE):
+ if (arg2 == SMALL_ZERO)
+ return(SMALL_ZERO);
+ if (arg2 == SMALL_ONE)
+ return(arg1);
+ arg2 = small_to_big(signed_val(arg2), tmp_big2);
+ sz = 2 + big_size(arg1);
+ goto do_big;
+ default:
+ goto badarith;
+ }
+ case TAG_PRIMARY_BOXED:
+ hdr = *boxed_val(arg2);
+ switch ((hdr & _TAG_HEADER_MASK) >> _TAG_PRIMARY_SIZE) {
+ case (_TAG_HEADER_POS_BIG >> _TAG_PRIMARY_SIZE):
+ case (_TAG_HEADER_NEG_BIG >> _TAG_PRIMARY_SIZE):
+ sz1 = big_size(arg1);
+ sz2 = big_size(arg2);
+ sz = sz1 + sz2;
+
+ do_big:
+ need_heap = BIG_NEED_SIZE(sz);
+ if (ERTS_NEED_GC(p, need_heap)) {
+ erts_garbage_collect(p, need_heap, reg, live+2);
+ if (arg1 != make_big(tmp_big1)) {
+ arg1 = reg[live];
+ }
+ if (arg2 != make_big(tmp_big2)) {
+ arg2 = reg[live+1];
+ }
+ }
+ hp = p->htop;
+ p->htop += need_heap;
+ res = big_times(arg1, arg2, hp);
+ trim_heap(p, hp, res);
+
+ /*
+ * Note that the result must be big in this case, since
+ * at least one operand was big to begin with, and
+ * the absolute value of the other is > 1.
+ */
+
+ if (is_nil(res)) {
+ p->freason = SYSTEM_LIMIT;
+ return THE_NON_VALUE;
+ }
+ return res;
+ case (_TAG_HEADER_FLOAT >> _TAG_PRIMARY_SIZE):
+ if (big_to_double(arg1, &f1.fd) < 0) {
+ goto badarith;
+ }
+ GET_DOUBLE(arg2, f2);
+ goto do_float;
+ default:
+ goto badarith;
+ }
+ }
+ case (_TAG_HEADER_FLOAT >> _TAG_PRIMARY_SIZE):
+ switch (arg2 & _TAG_PRIMARY_MASK) {
+ case TAG_PRIMARY_IMMED1:
+ switch ((arg2 & _TAG_IMMED1_MASK) >> _TAG_PRIMARY_SIZE) {
+ case (_TAG_IMMED1_SMALL >> _TAG_PRIMARY_SIZE):
+ GET_DOUBLE(arg1, f1);
+ f2.fd = signed_val(arg2);
+ goto do_float;
+ default:
+ goto badarith;
+ }
+ case TAG_PRIMARY_BOXED:
+ hdr = *boxed_val(arg2);
+ switch ((hdr & _TAG_HEADER_MASK) >> _TAG_PRIMARY_SIZE) {
+ case (_TAG_HEADER_POS_BIG >> _TAG_PRIMARY_SIZE):
+ case (_TAG_HEADER_NEG_BIG >> _TAG_PRIMARY_SIZE):
+ GET_DOUBLE(arg1, f1);
+ if (big_to_double(arg2, &f2.fd) < 0) {
+ goto badarith;
+ }
+ goto do_float;
+ case (_TAG_HEADER_FLOAT >> _TAG_PRIMARY_SIZE):
+ GET_DOUBLE(arg1, f1);
+ GET_DOUBLE(arg2, f2);
+
+ do_float:
+ f1.fd = f1.fd * f2.fd;
+ ERTS_FP_ERROR(p, f1.fd, goto badarith);
+ if (ERTS_NEED_GC(p, FLOAT_SIZE_OBJECT)) {
+ erts_garbage_collect(p, FLOAT_SIZE_OBJECT, reg, live);
+ }
+ hp = p->htop;
+ p->htop += FLOAT_SIZE_OBJECT;
+ res = make_float(hp);
+ PUT_DOUBLE(f1, hp);
+ return res;
+ default:
+ goto badarith;
+ }
+ default:
+ goto badarith;
+ }
+ }
+ default:
+ goto badarith;
+ }
+}
+
+Eterm
+erts_gc_mixed_div(Process* p, Eterm* reg, Uint live)
+{
+ Eterm arg1;
+ Eterm arg2;
+ FloatDef f1, f2;
+ Eterm* hp;
+ Eterm hdr;
+
+ arg1 = reg[live];
+ arg2 = reg[live+1];
+ ERTS_FP_CHECK_INIT(p);
+ switch (arg1 & _TAG_PRIMARY_MASK) {
+ case TAG_PRIMARY_IMMED1:
+ switch ((arg1 & _TAG_IMMED1_MASK) >> _TAG_PRIMARY_SIZE) {
+ case (_TAG_IMMED1_SMALL >> _TAG_PRIMARY_SIZE):
+ switch (arg2 & _TAG_PRIMARY_MASK) {
+ case TAG_PRIMARY_IMMED1:
+ switch ((arg2 & _TAG_IMMED1_MASK) >> _TAG_PRIMARY_SIZE) {
+ case (_TAG_IMMED1_SMALL >> _TAG_PRIMARY_SIZE):
+ f1.fd = signed_val(arg1);
+ f2.fd = signed_val(arg2);
+ goto do_float;
+ default:
+ badarith:
+ p->freason = BADARITH;
+ return THE_NON_VALUE;
+ }
+ case TAG_PRIMARY_BOXED:
+ hdr = *boxed_val(arg2);
+ switch ((hdr & _TAG_HEADER_MASK) >> _TAG_PRIMARY_SIZE) {
+ case (_TAG_HEADER_POS_BIG >> _TAG_PRIMARY_SIZE):
+ case (_TAG_HEADER_NEG_BIG >> _TAG_PRIMARY_SIZE):
+ f1.fd = signed_val(arg1);
+ if (big_to_double(arg2, &f2.fd) < 0) {
+ goto badarith;
+ }
+ goto do_float;
+ case (_TAG_HEADER_FLOAT >> _TAG_PRIMARY_SIZE):
+ f1.fd = signed_val(arg1);
+ GET_DOUBLE(arg2, f2);
+ goto do_float;
+ default:
+ goto badarith;
+ }
+ }
+ default:
+ goto badarith;
+ }
+ case TAG_PRIMARY_BOXED:
+ hdr = *boxed_val(arg1);
+ switch ((hdr & _TAG_HEADER_MASK) >> _TAG_PRIMARY_SIZE) {
+ case (_TAG_HEADER_POS_BIG >> _TAG_PRIMARY_SIZE):
+ case (_TAG_HEADER_NEG_BIG >> _TAG_PRIMARY_SIZE):
+ switch (arg2 & _TAG_PRIMARY_MASK) {
+ case (_TAG_IMMED1_SMALL >> _TAG_PRIMARY_SIZE):
+ switch ((arg2 & _TAG_IMMED1_MASK) >> _TAG_PRIMARY_SIZE) {
+ case (_TAG_IMMED1_SMALL >> _TAG_PRIMARY_SIZE):
+ if (big_to_double(arg1, &f1.fd) < 0) {
+ goto badarith;
+ }
+ f2.fd = signed_val(arg2);
+ goto do_float;
+ default:
+ goto badarith;
+ }
+ case TAG_PRIMARY_BOXED:
+ hdr = *boxed_val(arg2);
+ switch ((hdr & _TAG_HEADER_MASK) >> _TAG_PRIMARY_SIZE) {
+ case (_TAG_HEADER_POS_BIG >> _TAG_PRIMARY_SIZE):
+ case (_TAG_HEADER_NEG_BIG >> _TAG_PRIMARY_SIZE):
+ if (big_to_double(arg1, &f1.fd) < 0 ||
+ big_to_double(arg2, &f2.fd) < 0) {
+ goto badarith;
+ }
+ goto do_float;
+ case (_TAG_HEADER_FLOAT >> _TAG_PRIMARY_SIZE):
+ if (big_to_double(arg1, &f1.fd) < 0) {
+ goto badarith;
+ }
+ GET_DOUBLE(arg2, f2);
+ goto do_float;
+ default:
+ goto badarith;
+ }
+ }
+ case (_TAG_HEADER_FLOAT >> _TAG_PRIMARY_SIZE):
+ switch (arg2 & _TAG_PRIMARY_MASK) {
+ case TAG_PRIMARY_IMMED1:
+ switch ((arg2 & _TAG_IMMED1_MASK) >> _TAG_PRIMARY_SIZE) {
+ case (_TAG_IMMED1_SMALL >> _TAG_PRIMARY_SIZE):
+ GET_DOUBLE(arg1, f1);
+ f2.fd = signed_val(arg2);
+ goto do_float;
+ default:
+ goto badarith;
+ }
+ case TAG_PRIMARY_BOXED:
+ hdr = *boxed_val(arg2);
+ switch ((hdr & _TAG_HEADER_MASK) >> _TAG_PRIMARY_SIZE) {
+ case (_TAG_HEADER_POS_BIG >> _TAG_PRIMARY_SIZE):
+ case (_TAG_HEADER_NEG_BIG >> _TAG_PRIMARY_SIZE):
+ GET_DOUBLE(arg1, f1);
+ if (big_to_double(arg2, &f2.fd) < 0) {
+ goto badarith;
+ }
+ goto do_float;
+ case (_TAG_HEADER_FLOAT >> _TAG_PRIMARY_SIZE):
+ GET_DOUBLE(arg1, f1);
+ GET_DOUBLE(arg2, f2);
+
+ do_float:
+ f1.fd = f1.fd / f2.fd;
+ ERTS_FP_ERROR(p, f1.fd, goto badarith);
+ if (ERTS_NEED_GC(p, FLOAT_SIZE_OBJECT)) {
+ erts_garbage_collect(p, FLOAT_SIZE_OBJECT, reg, live);
+ }
+ hp = p->htop;
+ p->htop += FLOAT_SIZE_OBJECT;
+ PUT_DOUBLE(f1, hp);
+ return make_float(hp);
+ default:
+ goto badarith;
+ }
+ default:
+ goto badarith;
+ }
+ }
+ default:
+ goto badarith;
+ }
+}
+
+Eterm
+erts_gc_int_div(Process* p, Eterm* reg, Uint live)
+{
+ Eterm arg1;
+ Eterm arg2;
+ Eterm tmp_big1[2];
+ Eterm tmp_big2[2];
+ int ires;
+
+ arg1 = reg[live];
+ arg2 = reg[live+1];
+ switch (NUMBER_CODE(arg1, arg2)) {
+ case SMALL_SMALL:
+ /* This case occurs if the most negative fixnum is divided by -1. */
+ ASSERT(arg2 == make_small(-1));
+ arg1 = small_to_big(signed_val(arg1), tmp_big1);
+ /*FALLTHROUGH*/
+ case BIG_SMALL:
+ arg2 = small_to_big(signed_val(arg2), tmp_big2);
+ goto L_big_div;
+ case SMALL_BIG:
+ if (arg1 != make_small(MIN_SMALL)) {
+ return SMALL_ZERO;
+ }
+ arg1 = small_to_big(signed_val(arg1), tmp_big1);
+ /*FALLTHROUGH*/
+ case BIG_BIG:
+ L_big_div:
+ ires = big_ucomp(arg1, arg2);
+ if (ires < 0) {
+ arg1 = SMALL_ZERO;
+ } else if (ires == 0) {
+ arg1 = (big_sign(arg1) == big_sign(arg2)) ?
+ SMALL_ONE : SMALL_MINUS_ONE;
+ } else {
+ Eterm* hp;
+ int i = big_size(arg1);
+ Uint need;
+
+ ires = big_size(arg2);
+ need = BIG_NEED_SIZE(i-ires+1) + BIG_NEED_SIZE(i);
+ if (ERTS_NEED_GC(p, need)) {
+ erts_garbage_collect(p, need, reg, live+2);
+ if (arg1 != make_big(tmp_big1)) {
+ arg1 = reg[live];
+ }
+ if (arg2 != make_big(tmp_big2)) {
+ arg2 = reg[live+1];
+ }
+ }
+ hp = p->htop;
+ p->htop += need;
+ arg1 = big_div(arg1, arg2, hp);
+ trim_heap(p, hp, arg1);
+ if (is_nil(arg1)) {
+ p->freason = SYSTEM_LIMIT;
+ return THE_NON_VALUE;
+ }
+ }
+ return arg1;
+ default:
+ p->freason = BADARITH;
+ return THE_NON_VALUE;
+ }
+}
+
+Eterm
+erts_gc_int_rem(Process* p, Eterm* reg, Uint live)
+{
+ Eterm arg1;
+ Eterm arg2;
+ Eterm tmp_big1[2];
+ Eterm tmp_big2[2];
+ int ires;
+
+ arg1 = reg[live];
+ arg2 = reg[live+1];
+ switch (NUMBER_CODE(arg1, arg2)) {
+ case BIG_SMALL:
+ arg2 = small_to_big(signed_val(arg2), tmp_big2);
+ goto L_big_rem;
+ case SMALL_BIG:
+ if (arg1 != make_small(MIN_SMALL)) {
+ return arg1;
+ } else {
+ Eterm tmp = small_to_big(signed_val(arg1), tmp_big1);
+ if ((ires = big_ucomp(tmp, arg2)) == 0) {
+ return SMALL_ZERO;
+ } else {
+ ASSERT(ires < 0);
+ return arg1;
+ }
+ }
+ /* All paths returned */
+ case BIG_BIG:
+ L_big_rem:
+ ires = big_ucomp(arg1, arg2);
+ if (ires == 0) {
+ arg1 = SMALL_ZERO;
+ } else if (ires > 0) {
+ Eterm* hp;
+ Uint need = BIG_NEED_SIZE(big_size(arg1));
+
+ if (ERTS_NEED_GC(p, need)) {
+ erts_garbage_collect(p, need, reg, live+2);
+ if (arg1 != make_big(tmp_big1)) {
+ arg1 = reg[live];
+ }
+ if (arg2 != make_big(tmp_big2)) {
+ arg2 = reg[live+1];
+ }
+ }
+ hp = p->htop;
+ p->htop += need;
+ arg1 = big_rem(arg1, arg2, hp);
+ trim_heap(p, hp, arg1);
+ if (is_nil(arg1)) {
+ p->freason = SYSTEM_LIMIT;
+ return THE_NON_VALUE;
+ }
+ }
+ return arg1;
+ default:
+ p->freason = BADARITH;
+ return THE_NON_VALUE;
+ }
+}
+
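+/*
+ * erts_gc_band(), erts_gc_bor() and erts_gc_bxor() share identical
+ * control flow, so a macro generates all three; big_##func expands
+ * to the matching bignum routine (big_band() and friends).
+ */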
+#define DEFINE_GC_LOGIC_FUNC(func) \
+Eterm erts_gc_##func(Process* p, Eterm* reg, Uint live) \
+{ \
+ Eterm arg1; \
+ Eterm arg2; \
+ Eterm tmp_big1[2]; \
+ Eterm tmp_big2[2]; \
+ Eterm* hp; \
+ int need; \
+ \
+ arg1 = reg[live]; \
+ arg2 = reg[live+1]; \
+ switch (NUMBER_CODE(arg1, arg2)) { \
+ case SMALL_BIG: \
+ arg1 = small_to_big(signed_val(arg1), tmp_big1); \
+ need = BIG_NEED_SIZE(big_size(arg2) + 1); \
+ if (ERTS_NEED_GC(p, need)) { \
+ erts_garbage_collect(p, need, reg, live+2); \
+ arg2 = reg[live+1]; \
+ } \
+ break; \
+ case BIG_SMALL: \
+ arg2 = small_to_big(signed_val(arg2), tmp_big2); \
+ need = BIG_NEED_SIZE(big_size(arg1) + 1); \
+ if (ERTS_NEED_GC(p, need)) { \
+ erts_garbage_collect(p, need, reg, live+2); \
+ arg1 = reg[live]; \
+ } \
+ break; \
+ case BIG_BIG: \
+ need = BIG_NEED_SIZE(MAX(big_size(arg1), big_size(arg2)) + 1); \
+ if (ERTS_NEED_GC(p, need)) { \
+ erts_garbage_collect(p, need, reg, live+2); \
+ arg1 = reg[live]; \
+ arg2 = reg[live+1]; \
+ } \
+ break; \
+ default: \
+ p->freason = BADARITH; \
+ return THE_NON_VALUE; \
+ } \
+ hp = p->htop; \
+ p->htop += need; \
+ arg1 = big_##func(arg1, arg2, hp); \
+ trim_heap(p, hp, arg1); \
+ return arg1; \
+}
+
+DEFINE_GC_LOGIC_FUNC(band)
+DEFINE_GC_LOGIC_FUNC(bor)
+DEFINE_GC_LOGIC_FUNC(bxor)
+
+Eterm erts_gc_bnot(Process* p, Eterm* reg, Uint live)
+{
+ Eterm result;
+ Eterm arg;
+ Uint need;
+ Eterm* bigp;
+
+ arg = reg[live];
+ if (is_not_big(arg)) {
+ p->freason = BADARITH;
+ return NIL;
+ } else {
+ need = BIG_NEED_SIZE(big_size(arg)+1);
+ if (ERTS_NEED_GC(p, need)) {
+ erts_garbage_collect(p, need, reg, live+1);
+ arg = reg[live];
+ }
+ bigp = p->htop;
+ p->htop += need;
+ result = big_bnot(arg, bigp);
+ trim_heap(p, bigp, result);
+ if (is_nil(result)) {
+ p->freason = SYSTEM_LIMIT;
+ return NIL;
+ }
+ }
+ return result;
+}
diff --git a/erts/emulator/beam/erl_async.c b/erts/emulator/beam/erl_async.c
new file mode 100644
index 0000000000..b090564649
--- /dev/null
+++ b/erts/emulator/beam/erl_async.c
@@ -0,0 +1,469 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2000-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+
+#include "sys.h"
+#include "erl_sys_driver.h"
+#include "global.h"
+#include "erl_threads.h"
+
+typedef struct _erl_async {
+ struct _erl_async* next;
+ struct _erl_async* prev;
+ DE_Handle* hndl; /* The DE_Handle is needed when the port is gone */
+ Eterm port;
+ long async_id;
+ void* async_data;
+ ErlDrvPDL pdl;
+ void (*async_invoke)(void*);
+ void (*async_free)(void*);
+} ErlAsync;
+
+typedef struct {
+ erts_mtx_t mtx;
+ erts_cnd_t cv;
+ erts_tid_t thr;
+ int len;
+#ifndef ERTS_SMP
+ int hndl;
+#endif
+ ErlAsync* head;
+ ErlAsync* tail;
+#ifdef ERTS_ENABLE_LOCK_CHECK
+ int no;
+#endif
+} AsyncQueue;
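+/*
+ * Each async thread owns one AsyncQueue. Jobs are linked in at the
+ * head and taken from the tail, so every queue is served in FIFO
+ * order by its dedicated worker thread.
+ */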
+
+static erts_smp_spinlock_t async_id_lock;
+static long async_id = 0;
+
+
+#ifndef ERTS_SMP
+
+erts_mtx_t async_ready_mtx;
+static ErlAsync* async_ready_list = NULL;
+
+#endif
+
+/*
+** Initialize worker threads (if supported)
+*/
+
+/* Detach from driver */
+static void async_detach(DE_Handle* dh)
+{
+ /* XXX:PaN what should happen here? we want to unload the driver or??? */
+ return;
+}
+
+
+#ifdef USE_THREADS
+
+static AsyncQueue* async_q;
+
+static void* async_main(void*);
+static void async_add(ErlAsync*, AsyncQueue*);
+
+#ifndef ERTS_SMP
+typedef struct ErtsAsyncReadyCallback_ ErtsAsyncReadyCallback;
+struct ErtsAsyncReadyCallback_ {
+ struct ErtsAsyncReadyCallback_ *next;
+ void (*callback)(void);
+};
+
+static ErtsAsyncReadyCallback *callbacks;
+static int async_handle;
+
+int erts_register_async_ready_callback(void (*funcp)(void))
+{
+ ErtsAsyncReadyCallback *cb = erts_alloc(ERTS_ALC_T_ARCALLBACK,
+ sizeof(ErtsAsyncReadyCallback));
+ cb->next = callbacks;
+ cb->callback = funcp;
+ erts_mtx_lock(&async_ready_mtx);
+ callbacks = cb;
+ erts_mtx_unlock(&async_ready_mtx);
+ return async_handle;
+}
+#endif
+
+int init_async(int hndl)
+{
+ erts_thr_opts_t thr_opts = ERTS_THR_OPTS_DEFAULT_INITER;
+ AsyncQueue* q;
+ int i;
+
+ thr_opts.detached = 0;
+ thr_opts.suggested_stack_size = erts_async_thread_suggested_stack_size;
+
+#ifndef ERTS_SMP
+ callbacks = NULL;
+ async_handle = hndl;
+ erts_mtx_init(&async_ready_mtx, "async_ready");
+ async_ready_list = NULL;
+#endif
+
+ async_id = 0;
+ erts_smp_spinlock_init(&async_id_lock, "async_id");
+
+ async_q = q = (AsyncQueue*)
+ (erts_async_max_threads
+ ? erts_alloc(ERTS_ALC_T_ASYNC_Q,
+ erts_async_max_threads * sizeof(AsyncQueue))
+ : NULL);
+ for (i = 0; i < erts_async_max_threads; i++) {
+ q->head = NULL;
+ q->tail = NULL;
+ q->len = 0;
+#ifndef ERTS_SMP
+ q->hndl = hndl;
+#endif
+#ifdef ERTS_ENABLE_LOCK_CHECK
+ q->no = i;
+#endif
+ erts_mtx_init(&q->mtx, "asyncq");
+ erts_cnd_init(&q->cv);
+ erts_thr_create(&q->thr, async_main, (void*)q, &thr_opts);
+ q++;
+ }
+ return 0;
+}
+
+
+int exit_async()
+{
+ int i;
+
+ /* terminate threads */
+ for (i = 0; i < erts_async_max_threads; i++) {
+ ErlAsync* a = (ErlAsync*) erts_alloc(ERTS_ALC_T_ASYNC,
+ sizeof(ErlAsync));
+ a->port = NIL;
+ async_add(a, &async_q[i]);
+ }
+
+ for (i = 0; i < erts_async_max_threads; i++) {
+ erts_thr_join(async_q[i].thr, NULL);
+ erts_mtx_destroy(&async_q[i].mtx);
+ erts_cnd_destroy(&async_q[i].cv);
+ }
+#ifndef ERTS_SMP
+ erts_mtx_destroy(&async_ready_mtx);
+#endif
+ if (async_q)
+ erts_free(ERTS_ALC_T_ASYNC_Q, (void *) async_q);
+ return 0;
+}
+
+
+static void async_add(ErlAsync* a, AsyncQueue* q)
+{
+ /* XXX:PaN Is this still necessary when ports lock drivers? */
+ if (is_internal_port(a->port)) {
+ ERTS_LC_ASSERT(erts_drvportid2port(a->port));
+ /* make sure the driver will stay around */
+ driver_lock_driver(internal_port_index(a->port));
+ }
+
+ erts_mtx_lock(&q->mtx);
+
+ if (q->len == 0) {
+ q->head = a;
+ q->tail = a;
+ q->len = 1;
+ erts_cnd_signal(&q->cv);
+ }
+ else { /* no need to signal (since the worker is working) */
+ a->next = q->head;
+ q->head->prev = a;
+ q->head = a;
+ q->len++;
+ }
+ erts_mtx_unlock(&q->mtx);
+}
+
+static ErlAsync* async_get(AsyncQueue* q)
+{
+ ErlAsync* a;
+
+ erts_mtx_lock(&q->mtx);
+ while((a = q->tail) == NULL) {
+ erts_cnd_wait(&q->cv, &q->mtx);
+ }
+#ifdef ERTS_SMP
+ ASSERT(a && q->tail == a);
+#endif
+ if (q->head == q->tail) {
+ q->head = q->tail = NULL;
+ q->len = 0;
+ }
+ else {
+ q->tail->prev->next = NULL;
+ q->tail = q->tail->prev;
+ q->len--;
+ }
+ erts_mtx_unlock(&q->mtx);
+ return a;
+}
+
+
+static int async_del(long id)
+{
+ int i;
+ /* scan all queues for an entry with async_id == 'id' */
+
+ for (i = 0; i < erts_async_max_threads; i++) {
+ ErlAsync* a;
+ erts_mtx_lock(&async_q[i].mtx);
+
+ a = async_q[i].head;
+ while(a != NULL) {
+ if (a->async_id == id) {
+ if (a->prev != NULL)
+ a->prev->next = a->next;
+ else
+ async_q[i].head = a->next;
+ if (a->next != NULL)
+ a->next->prev = a->prev;
+ else
+ async_q[i].tail = a->prev;
+ async_q[i].len--;
+ erts_mtx_unlock(&async_q[i].mtx);
+ if (a->async_free != NULL)
+ a->async_free(a->async_data);
+ async_detach(a->hndl);
+ erts_free(ERTS_ALC_T_ASYNC, a);
+ return 1;
+ }
+ a = a->next;
+ }
+ erts_mtx_unlock(&async_q[i].mtx);
+ }
+ return 0;
+}
+
+static void* async_main(void* arg)
+{
+ AsyncQueue* q = (AsyncQueue*) arg;
+
+#ifdef ERTS_ENABLE_LOCK_CHECK
+ {
+ char buf[27];
+ erts_snprintf(&buf[0], 27, "async %d", q->no);
+ erts_lc_set_thread_name(&buf[0]);
+ }
+#endif
+
+ while(1) {
+ ErlAsync* a = async_get(q);
+
+ if (a->port == NIL) { /* TIME TO DIE SIGNAL */
+ erts_free(ERTS_ALC_T_ASYNC, (void *) a);
+ break;
+ }
+ else {
+ (*a->async_invoke)(a->async_data);
+ /* Major problem if the code for async_invoke
+ or async_free is removed during a blocking operation */
+#ifdef ERTS_SMP
+ {
+ Port *p;
+ p = erts_id2port_sflgs(a->port,
+ NULL,
+ 0,
+ ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP);
+ if (!p) {
+ if (a->async_free)
+ (*a->async_free)(a->async_data);
+ }
+ else {
+ if (async_ready(p, a->async_data)) {
+ if (a->async_free)
+ (*a->async_free)(a->async_data);
+ }
+ async_detach(a->hndl);
+ erts_port_release(p);
+ }
+ if (a->pdl) {
+ driver_pdl_dec_refc(a->pdl);
+ }
+ erts_free(ERTS_ALC_T_ASYNC, (void *) a);
+ }
+#else
+ if (a->pdl) {
+ driver_pdl_dec_refc(a->pdl);
+ }
+ erts_mtx_lock(&async_ready_mtx);
+ a->next = async_ready_list;
+ async_ready_list = a;
+ erts_mtx_unlock(&async_ready_mtx);
+ sys_async_ready(q->hndl);
+#endif
+ }
+ }
+
+ return NULL;
+}
+
+
+#endif
+
+#ifndef ERTS_SMP
+
+int check_async_ready(void)
+{
+#ifdef USE_THREADS
+ ErtsAsyncReadyCallback *cbs;
+#endif
+ ErlAsync* a;
+ int count = 0;
+
+ erts_mtx_lock(&async_ready_mtx);
+ a = async_ready_list;
+ async_ready_list = NULL;
+#ifdef USE_THREADS
+ cbs = callbacks;
+#endif
+ erts_mtx_unlock(&async_ready_mtx);
+
+ while(a != NULL) {
+ ErlAsync* a_next = a->next;
+ /* The port may have died while the job was in flight */
+ Port *p = erts_id2port_sflgs(a->port,
+ NULL,
+ 0,
+ ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP);
+ if (!p) {
+ if (a->async_free)
+ (*a->async_free)(a->async_data);
+ }
+ else {
+ count++;
+ if (async_ready(p, a->async_data)) {
+ if (a->async_free != NULL)
+ (*a->async_free)(a->async_data);
+ }
+ async_detach(a->hndl);
+ erts_port_release(p);
+ }
+ erts_free(ERTS_ALC_T_ASYNC, (void *) a);
+ a = a_next;
+ }
+#ifdef USE_THREADS
+ for (; cbs; cbs = cbs->next)
+ (*cbs->callback)();
+#endif
+ return count;
+}
+
+#endif
+
+
+/*
+** Schedule async_invoke on a worker thread
+** NOTE: will be synchronous when threads are unsupported
+** return values:
+** 0 completed
+** -1 error
+** N handle value (used with async_cancel)
+** arguments:
+** ix driver index
+** key pointer to scheduling queue (NULL means round robin)
+** async_invoke function to run in thread
+** async_data data to pass to invoke function
+** async_free function for releasing async_data in case of failure
+*/
+long driver_async(ErlDrvPort ix, unsigned int* key,
+ void (*async_invoke)(void*), void* async_data,
+ void (*async_free)(void*))
+{
+ ErlAsync* a = (ErlAsync*) erts_alloc(ERTS_ALC_T_ASYNC, sizeof(ErlAsync));
+ Port* prt = erts_drvport2port(ix);
+ long id;
+ unsigned int qix;
+
+
+ if (!prt)
+ return -1;
+
+ ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
+
+ a->next = NULL;
+ a->prev = NULL;
+ a->hndl = (DE_Handle*)prt->drv_ptr->handle;
+ a->port = prt->id;
+ a->pdl = NULL;
+ a->async_data = async_data;
+ a->async_invoke = async_invoke;
+ a->async_free = async_free;
+
+ erts_smp_spin_lock(&async_id_lock);
+ async_id = (async_id + 1) & 0x7fffffff;
+ if (async_id == 0)
+ async_id++;
+ id = async_id;
+ erts_smp_spin_unlock(&async_id_lock);
+
+ a->async_id = id;
+
+ if (key == NULL) {
+ qix = (erts_async_max_threads > 0)
+ ? (id % erts_async_max_threads) : 0;
+ }
+ else {
+ qix = (erts_async_max_threads > 0) ?
+ (*key % erts_async_max_threads) : 0;
+ *key = qix;
+ }
+#ifdef USE_THREADS
+ if (erts_async_max_threads > 0) {
+ if (prt->port_data_lock) {
+ driver_pdl_inc_refc(prt->port_data_lock);
+ a->pdl = prt->port_data_lock;
+ }
+ async_add(a, &async_q[qix]);
+ return id;
+ }
+#endif
+
+ (*a->async_invoke)(a->async_data);
+
+ if (async_ready(prt, a->async_data)) {
+ if (a->async_free != NULL)
+ (*a->async_free)(a->async_data);
+ }
+ erts_free(ERTS_ALC_T_ASYNC, (void *) a);
+
+ return id;
+}
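+/*
+ * A minimal usage sketch (hypothetical driver code, not part of this
+ * file): an output callback hands blocking work to an async thread,
+ * and my_free() reclaims the job if the port dies meanwhile. The
+ * MyDrvData/MyJob types and fill_job() are assumed helpers; d->port
+ * is the ErlDrvPort saved in the driver's start callback.
+ *
+ * static void my_invoke(void* data) { do_blocking_work(data); }
+ * static void my_free(void* data) { driver_free(data); }
+ *
+ * static void my_output(ErlDrvData h, char* buf, int len)
+ * {
+ * MyDrvData* d = (MyDrvData*) h;
+ * MyJob* job = driver_alloc(sizeof(MyJob));
+ * fill_job(job, buf, len);
+ * (void) driver_async(d->port, NULL, my_invoke, job, my_free);
+ * }
+ */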
+
+int driver_async_cancel(unsigned int id)
+{
+#ifdef USE_THREADS
+ if (erts_async_max_threads > 0)
+ return async_del(id);
+#endif
+ return 0;
+}
+
+
+
+
+
diff --git a/erts/emulator/beam/erl_bestfit_alloc.c b/erts/emulator/beam/erl_bestfit_alloc.c
new file mode 100644
index 0000000000..3035e5df16
--- /dev/null
+++ b/erts/emulator/beam/erl_bestfit_alloc.c
@@ -0,0 +1,1161 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2003-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+
+/*
+ * Description: A combined "address order best fit"/"best fit" allocator
+ * based on a Red-Black (binary search) Tree. The search,
+ * insert, and delete operations are all O(log n) operations
+ * on a Red-Black Tree. In the "address order best fit" case
+ * n equals number of free blocks, and in the "best fit" case
+ * n equals number of distinct sizes of free blocks. Red-Black
+ * Trees are described in "Introduction to Algorithms", by
+ * Thomas H. Cormen, Charles E. Leiserson, and
+ *              Ronald L. Rivest.
+ *
+ * This module is a callback-module for erl_alloc_util.c
+ *
+ * Author: Rickard Green
+ */
+
+
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+#include "global.h"
+#define GET_ERL_BF_ALLOC_IMPL
+#include "erl_bestfit_alloc.h"
+
+#ifdef DEBUG
+#if 0
+#define HARD_DEBUG
+#endif
+#else
+#undef HARD_DEBUG
+#endif
+
+#define MIN_MBC_SZ (16*1024)
+#define MIN_MBC_FIRST_FREE_SZ (4*1024)
+
+#define TREE_NODE_FLG (((Uint) 1) << 0)
+#define RED_FLG (((Uint) 1) << 1)
+#ifdef HARD_DEBUG
+# define LEFT_VISITED_FLG (((Uint) 1) << 2)
+# define RIGHT_VISITED_FLG (((Uint) 1) << 3)
+#endif
+
+#define IS_TREE_NODE(N) (((RBTree_t *) (N))->flags & TREE_NODE_FLG)
+#define IS_LIST_ELEM(N) (!IS_TREE_NODE(((RBTree_t *) (N))))
+
+#define SET_TREE_NODE(N) (((RBTree_t *) (N))->flags |= TREE_NODE_FLG)
+#define SET_LIST_ELEM(N) (((RBTree_t *) (N))->flags &= ~TREE_NODE_FLG)
+
+#define IS_RED(N) (((RBTree_t *) (N)) \
+ && ((RBTree_t *) (N))->flags & RED_FLG)
+#define IS_BLACK(N) (!IS_RED(((RBTree_t *) (N))))
+
+#define SET_RED(N) (((RBTree_t *) (N))->flags |= RED_FLG)
+#define SET_BLACK(N) (((RBTree_t *) (N))->flags &= ~RED_FLG)
+
+#undef ASSERT
+#define ASSERT ASSERT_EXPR
+
+#if 1
+#define RBT_ASSERT ASSERT
+#else
+#define RBT_ASSERT(x)
+#endif
+
+
+#ifdef HARD_DEBUG
+static RBTree_t * check_tree(BFAllctr_t *, Uint);
+#endif
+
+static void tree_delete(Allctr_t *allctr, Block_t *del);
+
+/* Prototypes of callback functions */
+
+/* "address order best fit" specific callback functions */
+static Block_t * aobf_get_free_block (Allctr_t *, Uint,
+ Block_t *, Uint);
+static void aobf_link_free_block (Allctr_t *, Block_t *);
+#define aobf_unlink_free_block tree_delete
+
+/* "best fit" specific callback functions */
+static Block_t * bf_get_free_block (Allctr_t *, Uint,
+ Block_t *, Uint);
+static void bf_link_free_block (Allctr_t *, Block_t *);
+static ERTS_INLINE void bf_unlink_free_block (Allctr_t *, Block_t *);
+
+
+static Eterm info_options (Allctr_t *, char *, int *,
+ void *, Uint **, Uint *);
+static void init_atoms (void);
+
+/* Types... */
+struct RBTree_t_ {
+ Block_t hdr;
+ Uint flags;
+ RBTree_t *parent;
+ RBTree_t *left;
+ RBTree_t *right;
+};
+
+typedef struct {
+ RBTree_t t;
+ RBTree_t *next;
+} RBTreeList_t;
+
+#define LIST_NEXT(N) (((RBTreeList_t *) (N))->next)
+#define LIST_PREV(N) (((RBTreeList_t *) (N))->t.parent)
+
+
+#ifdef DEBUG
+
+/* Destroy all tree fields */
+#define DESTROY_TREE_NODE(N) \
+ sys_memset((void *) (((Block_t *) (N)) + 1), \
+ 0xff, \
+ (sizeof(RBTree_t) - sizeof(Block_t)))
+
+/* Destroy all tree and list fields */
+#define DESTROY_LIST_ELEM(N) \
+ sys_memset((void *) (((Block_t *) (N)) + 1), \
+ 0xff, \
+ (sizeof(RBTreeList_t) - sizeof(Block_t)))
+
+#else
+
+#define DESTROY_TREE_NODE(N)
+#define DESTROY_LIST_ELEM(N)
+
+#endif
+
+
+static int atoms_initialized = 0;
+
+void
+erts_bfalc_init(void)
+{
+ atoms_initialized = 0;
+}
+
+Allctr_t *
+erts_bfalc_start(BFAllctr_t *bfallctr,
+ BFAllctrInit_t *bfinit,
+ AllctrInit_t *init)
+{
+ BFAllctr_t nulled_state = {{0}};
+ /* {{0}} is used instead of {0}, in order to avoid (an incorrect) gcc
+ warning. gcc warns if {0} is used as initializer of a struct when
+ the first member is a struct (not if, for example, the third member
+ is a struct). */
+ Allctr_t *allctr = (Allctr_t *) bfallctr;
+
+ sys_memcpy((void *) bfallctr, (void *) &nulled_state, sizeof(BFAllctr_t));
+
+ bfallctr->address_order = bfinit->ao;
+
+
+ allctr->mbc_header_size = sizeof(Carrier_t);
+ allctr->min_mbc_size = MIN_MBC_SZ;
+ allctr->min_mbc_first_free_size = MIN_MBC_FIRST_FREE_SZ;
+ allctr->min_block_size = (bfinit->ao
+ ? sizeof(RBTree_t)
+ : sizeof(RBTreeList_t));
+
+ allctr->vsn_str = (bfinit->ao
+ ? ERTS_ALC_AOBF_ALLOC_VSN_STR
+ : ERTS_ALC_BF_ALLOC_VSN_STR);
+
+
+ /* Callback functions */
+
+ if (bfinit->ao) {
+ allctr->get_free_block = aobf_get_free_block;
+ allctr->link_free_block = aobf_link_free_block;
+ allctr->unlink_free_block = aobf_unlink_free_block;
+ }
+ else {
+ allctr->get_free_block = bf_get_free_block;
+ allctr->link_free_block = bf_link_free_block;
+ allctr->unlink_free_block = bf_unlink_free_block;
+ }
+ allctr->info_options = info_options;
+
+ allctr->get_next_mbc_size = NULL;
+ allctr->creating_mbc = NULL;
+ allctr->destroying_mbc = NULL;
+ allctr->init_atoms = init_atoms;
+
+#ifdef ERTS_ALLOC_UTIL_HARD_DEBUG
+ allctr->check_block = NULL;
+ allctr->check_mbc = NULL;
+#endif
+
+ allctr->atoms_initialized = 0;
+
+ if (!erts_alcu_start(allctr, init))
+ return NULL;
+
+ return allctr;
+}
+
+/*
+ * Red-Black Tree operations needed
+ */
+
+static ERTS_INLINE void
+left_rotate(RBTree_t **root, RBTree_t *x)
+{
+ RBTree_t *y = x->right;
+ x->right = y->left;
+ if (y->left)
+ y->left->parent = x;
+ y->parent = x->parent;
+ if (!y->parent) {
+ RBT_ASSERT(*root == x);
+ *root = y;
+ }
+ else if (x == x->parent->left)
+ x->parent->left = y;
+ else {
+ RBT_ASSERT(x == x->parent->right);
+ x->parent->right = y;
+ }
+ y->left = x;
+ x->parent = y;
+}
+
+static ERTS_INLINE void
+right_rotate(RBTree_t **root, RBTree_t *x)
+{
+ RBTree_t *y = x->left;
+ x->left = y->right;
+ if (y->right)
+ y->right->parent = x;
+ y->parent = x->parent;
+ if (!y->parent) {
+ RBT_ASSERT(*root == x);
+ *root = y;
+ }
+ else if (x == x->parent->right)
+ x->parent->right = y;
+ else {
+ RBT_ASSERT(x == x->parent->left);
+ x->parent->left = y;
+ }
+ y->right = x;
+ x->parent = y;
+}
+
+
+/*
+ * Replace node x with node y
+ * NOTE: block header of y is not changed
+ */
+static ERTS_INLINE void
+replace(RBTree_t **root, RBTree_t *x, RBTree_t *y)
+{
+
+ if (!x->parent) {
+ RBT_ASSERT(*root == x);
+ *root = y;
+ }
+ else if (x == x->parent->left)
+ x->parent->left = y;
+ else {
+ RBT_ASSERT(x == x->parent->right);
+ x->parent->right = y;
+ }
+ if (x->left) {
+ RBT_ASSERT(x->left->parent == x);
+ x->left->parent = y;
+ }
+ if (x->right) {
+ RBT_ASSERT(x->right->parent == x);
+ x->right->parent = y;
+ }
+
+ y->flags = x->flags;
+ y->parent = x->parent;
+ y->right = x->right;
+ y->left = x->left;
+
+ DESTROY_TREE_NODE(x);
+
+}
+
+static void
+tree_insert_fixup(BFAllctr_t *bfallctr, RBTree_t *blk)
+{
+ RBTree_t *x = blk, *y;
+
+ /*
+ * Rearrange the tree so that it satisfies the Red-Black Tree properties
+ */
+
+ RBT_ASSERT(x != bfallctr->root && IS_RED(x->parent));
+ do {
+
+ /*
+ * x and its parent are both red. Move the red pair up the tree
+ * until we get to the root or until we can separate them.
+ */
+
+ RBT_ASSERT(IS_RED(x));
+ RBT_ASSERT(IS_BLACK(x->parent->parent));
+ RBT_ASSERT(x->parent->parent);
+
+ if (x->parent == x->parent->parent->left) {
+ y = x->parent->parent->right;
+ if (IS_RED(y)) {
+ SET_BLACK(y);
+ x = x->parent;
+ SET_BLACK(x);
+ x = x->parent;
+ SET_RED(x);
+ }
+ else {
+
+ if (x == x->parent->right) {
+ x = x->parent;
+ left_rotate(&bfallctr->root, x);
+ }
+
+ RBT_ASSERT(x == x->parent->parent->left->left);
+ RBT_ASSERT(IS_RED(x));
+ RBT_ASSERT(IS_RED(x->parent));
+ RBT_ASSERT(IS_BLACK(x->parent->parent));
+ RBT_ASSERT(IS_BLACK(y));
+
+ SET_BLACK(x->parent);
+ SET_RED(x->parent->parent);
+ right_rotate(&bfallctr->root, x->parent->parent);
+
+ RBT_ASSERT(x == x->parent->left);
+ RBT_ASSERT(IS_RED(x));
+ RBT_ASSERT(IS_RED(x->parent->right));
+ RBT_ASSERT(IS_BLACK(x->parent));
+ break;
+ }
+ }
+ else {
+ RBT_ASSERT(x->parent == x->parent->parent->right);
+ y = x->parent->parent->left;
+ if (IS_RED(y)) {
+ SET_BLACK(y);
+ x = x->parent;
+ SET_BLACK(x);
+ x = x->parent;
+ SET_RED(x);
+ }
+ else {
+
+ if (x == x->parent->left) {
+ x = x->parent;
+ right_rotate(&bfallctr->root, x);
+ }
+
+ RBT_ASSERT(x == x->parent->parent->right->right);
+ RBT_ASSERT(IS_RED(x));
+ RBT_ASSERT(IS_RED(x->parent));
+ RBT_ASSERT(IS_BLACK(x->parent->parent));
+ RBT_ASSERT(IS_BLACK(y));
+
+ SET_BLACK(x->parent);
+ SET_RED(x->parent->parent);
+ left_rotate(&bfallctr->root, x->parent->parent);
+
+ RBT_ASSERT(x == x->parent->right);
+ RBT_ASSERT(IS_RED(x));
+ RBT_ASSERT(IS_RED(x->parent->left));
+ RBT_ASSERT(IS_BLACK(x->parent));
+ break;
+ }
+ }
+ } while (x != bfallctr->root && IS_RED(x->parent));
+
+ SET_BLACK(bfallctr->root);
+
+}
+
+/*
+ * The argument types of "Allctr_t *" and "Block_t *" have been
+ * chosen since we then can use tree_delete() as unlink_free_block
+ * callback function in the address order case.
+ */
+static void
+tree_delete(Allctr_t *allctr, Block_t *del)
+{
+ BFAllctr_t *bfallctr = (BFAllctr_t *) allctr;
+ Uint spliced_is_black;
+ RBTree_t *x, *y, *z = (RBTree_t *) del;
+ RBTree_t null_x; /* null_x is used to get the fixup started when we
+ splice out a node without children. */
+
+ null_x.parent = NULL;
+
+#ifdef HARD_DEBUG
+ check_tree(bfallctr, 0);
+#endif
+
+ /* Remove node from tree... */
+
+ /* Find node to splice out */
+ if (!z->left || !z->right)
+ y = z;
+ else
+	/* Set y to z's successor */
+ for(y = z->right; y->left; y = y->left);
+ /* splice out y */
+ x = y->left ? y->left : y->right;
+ spliced_is_black = IS_BLACK(y);
+ if (x) {
+ x->parent = y->parent;
+ }
+ else if (!x && spliced_is_black) {
+ x = &null_x;
+ x->flags = 0;
+ SET_BLACK(x);
+ x->right = x->left = NULL;
+ x->parent = y->parent;
+ y->left = x;
+ }
+
+ if (!y->parent) {
+ RBT_ASSERT(bfallctr->root == y);
+ bfallctr->root = x;
+ }
+ else if (y == y->parent->left)
+ y->parent->left = x;
+ else {
+ RBT_ASSERT(y == y->parent->right);
+ y->parent->right = x;
+ }
+ if (y != z) {
+ /* We spliced out the successor of z; replace z by the successor */
+ replace(&bfallctr->root, z, y);
+ }
+
+ if (spliced_is_black) {
+ /* We removed a black node which makes the resulting tree
+ violate the Red-Black Tree properties. Fixup tree... */
+
+ while (IS_BLACK(x) && x->parent) {
+
+ /*
+ * x has an "extra black" which we move up the tree
+ * until we reach the root or until we can get rid of it.
+ *
+	     * y is the sibling of x
+ */
+
+ if (x == x->parent->left) {
+ y = x->parent->right;
+ RBT_ASSERT(y);
+ if (IS_RED(y)) {
+ RBT_ASSERT(y->right);
+ RBT_ASSERT(y->left);
+ SET_BLACK(y);
+ RBT_ASSERT(IS_BLACK(x->parent));
+ SET_RED(x->parent);
+ left_rotate(&bfallctr->root, x->parent);
+ y = x->parent->right;
+ }
+ RBT_ASSERT(y);
+ RBT_ASSERT(IS_BLACK(y));
+ if (IS_BLACK(y->left) && IS_BLACK(y->right)) {
+ SET_RED(y);
+ x = x->parent;
+ }
+ else {
+ if (IS_BLACK(y->right)) {
+ SET_BLACK(y->left);
+ SET_RED(y);
+ right_rotate(&bfallctr->root, y);
+ y = x->parent->right;
+ }
+ RBT_ASSERT(y);
+ if (IS_RED(x->parent)) {
+
+ SET_BLACK(x->parent);
+ SET_RED(y);
+ }
+ RBT_ASSERT(y->right);
+ SET_BLACK(y->right);
+ left_rotate(&bfallctr->root, x->parent);
+ x = bfallctr->root;
+ break;
+ }
+ }
+ else {
+ RBT_ASSERT(x == x->parent->right);
+ y = x->parent->left;
+ RBT_ASSERT(y);
+ if (IS_RED(y)) {
+ RBT_ASSERT(y->right);
+ RBT_ASSERT(y->left);
+ SET_BLACK(y);
+ RBT_ASSERT(IS_BLACK(x->parent));
+ SET_RED(x->parent);
+ right_rotate(&bfallctr->root, x->parent);
+ y = x->parent->left;
+ }
+ RBT_ASSERT(y);
+ RBT_ASSERT(IS_BLACK(y));
+ if (IS_BLACK(y->right) && IS_BLACK(y->left)) {
+ SET_RED(y);
+ x = x->parent;
+ }
+ else {
+ if (IS_BLACK(y->left)) {
+ SET_BLACK(y->right);
+ SET_RED(y);
+ left_rotate(&bfallctr->root, y);
+ y = x->parent->left;
+ }
+ RBT_ASSERT(y);
+ if (IS_RED(x->parent)) {
+ SET_BLACK(x->parent);
+ SET_RED(y);
+ }
+ RBT_ASSERT(y->left);
+ SET_BLACK(y->left);
+ right_rotate(&bfallctr->root, x->parent);
+ x = bfallctr->root;
+ break;
+ }
+ }
+ }
+ SET_BLACK(x);
+
+ if (null_x.parent) {
+ if (null_x.parent->left == &null_x)
+ null_x.parent->left = NULL;
+ else {
+ RBT_ASSERT(null_x.parent->right == &null_x);
+ null_x.parent->right = NULL;
+ }
+ RBT_ASSERT(!null_x.left);
+ RBT_ASSERT(!null_x.right);
+ }
+ else if (bfallctr->root == &null_x) {
+ bfallctr->root = NULL;
+ RBT_ASSERT(!null_x.left);
+ RBT_ASSERT(!null_x.right);
+ }
+ }
+
+
+ DESTROY_TREE_NODE(del);
+
+#ifdef HARD_DEBUG
+ check_tree(bfallctr, 0);
+#endif
+
+}
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\
+ * "Address order best fit" specific callbacks. *
+\* */
+
+static void
+aobf_link_free_block(Allctr_t *allctr, Block_t *block)
+{
+ BFAllctr_t *bfallctr = (BFAllctr_t *) allctr;
+ RBTree_t *blk = (RBTree_t *) block;
+ Uint blk_sz = BLK_SZ(blk);
+
+ blk->flags = 0;
+ blk->left = NULL;
+ blk->right = NULL;
+
+ if (!bfallctr->root) {
+ blk->parent = NULL;
+ SET_BLACK(blk);
+ bfallctr->root = blk;
+ }
+ else {
+ RBTree_t *x = bfallctr->root;
+ while (1) {
+ Uint size;
+
+ size = BLK_SZ(x);
+
+ if (blk_sz < size || (blk_sz == size && blk < x)) {
+ if (!x->left) {
+ blk->parent = x;
+ x->left = blk;
+ break;
+ }
+ x = x->left;
+ }
+ else {
+ if (!x->right) {
+ blk->parent = x;
+ x->right = blk;
+ break;
+ }
+ x = x->right;
+ }
+
+ }
+
+ /* Insert block into size tree */
+ RBT_ASSERT(blk->parent);
+
+ SET_RED(blk);
+ if (IS_RED(blk->parent))
+ tree_insert_fixup(bfallctr, blk);
+ }
+
+#ifdef HARD_DEBUG
+ check_tree(bfallctr, 0);
+#endif
+}
+
+#if 0 /* tree_delete() is directly used instead */
+static void
+aobf_unlink_free_block(Allctr_t *allctr, Block_t *block)
+{
+ tree_delete(allctr, block);
+}
+#endif
+
+static Block_t *
+aobf_get_free_block(Allctr_t *allctr, Uint size,
+ Block_t *cand_blk, Uint cand_size)
+{
+ BFAllctr_t *bfallctr = (BFAllctr_t *) allctr;
+ RBTree_t *x = bfallctr->root;
+ RBTree_t *blk = NULL;
+ Uint blk_sz;
+
+ ASSERT(!cand_blk || cand_size >= size);
+
+ while (x) {
+ blk_sz = BLK_SZ(x);
+ if (blk_sz < size) {
+ x = x->right;
+ }
+ else {
+ blk = x;
+ x = x->left;
+ }
+ }
+
+ if (!blk)
+ return NULL;
+
+#ifdef HARD_DEBUG
+ ASSERT(blk == check_tree(bfallctr, size));
+#endif
+
+ if (cand_blk) {
+ blk_sz = BLK_SZ(blk);
+ if (cand_size < blk_sz)
+ return NULL; /* cand_blk was better */
+ if (cand_size == blk_sz && ((void *) cand_blk) < ((void *) blk))
+ return NULL; /* cand_blk was better */
+ }
+
+ aobf_unlink_free_block(allctr, (Block_t *) blk);
+
+ return (Block_t *) blk;
+}
+
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\
+ * "Best fit" specific callbacks. *
+\* */
+
+static void
+bf_link_free_block(Allctr_t *allctr, Block_t *block)
+{
+ BFAllctr_t *bfallctr = (BFAllctr_t *) allctr;
+ RBTree_t *blk = (RBTree_t *) block;
+ Uint blk_sz = BLK_SZ(blk);
+
+ SET_TREE_NODE(blk);
+
+
+ blk->flags = 0;
+ blk->left = NULL;
+ blk->right = NULL;
+
+ if (!bfallctr->root) {
+ blk->parent = NULL;
+ SET_BLACK(blk);
+ bfallctr->root = blk;
+ }
+ else {
+ RBTree_t *x = bfallctr->root;
+ while (1) {
+ Uint size;
+
+ size = BLK_SZ(x);
+
+ if (blk_sz == size) {
+
+ SET_LIST_ELEM(blk);
+ LIST_NEXT(blk) = LIST_NEXT(x);
+ LIST_PREV(blk) = x;
+ if (LIST_NEXT(x))
+ LIST_PREV(LIST_NEXT(x)) = blk;
+ LIST_NEXT(x) = blk;
+
+		return; /* Finished */
+ }
+ else if (blk_sz < size) {
+ if (!x->left) {
+ blk->parent = x;
+ x->left = blk;
+ break;
+ }
+ x = x->left;
+ }
+ else {
+ if (!x->right) {
+ blk->parent = x;
+ x->right = blk;
+ break;
+ }
+ x = x->right;
+ }
+ }
+
+ RBT_ASSERT(blk->parent);
+
+ SET_RED(blk);
+ if (IS_RED(blk->parent))
+ tree_insert_fixup(bfallctr, blk);
+
+ }
+
+ SET_TREE_NODE(blk);
+ LIST_NEXT(blk) = NULL;
+
+#ifdef HARD_DEBUG
+ check_tree(bfallctr, 0);
+#endif
+}
+
+static ERTS_INLINE void
+bf_unlink_free_block(Allctr_t *allctr, Block_t *block)
+{
+ BFAllctr_t *bfallctr = (BFAllctr_t *) allctr;
+ RBTree_t *x = (RBTree_t *) block;
+
+ if (IS_LIST_ELEM(x)) {
+ /* Remove from list */
+ ASSERT(LIST_PREV(x));
+ LIST_NEXT(LIST_PREV(x)) = LIST_NEXT(x);
+ if (LIST_NEXT(x))
+ LIST_PREV(LIST_NEXT(x)) = LIST_PREV(x);
+ }
+ else if (LIST_NEXT(x)) {
+ /* Replace tree node by next element in list... */
+
+ ASSERT(BLK_SZ(LIST_NEXT(x)) == BLK_SZ(x));
+ ASSERT(IS_TREE_NODE(x));
+ ASSERT(IS_LIST_ELEM(LIST_NEXT(x)));
+
+#ifdef HARD_DEBUG
+ check_tree(bfallctr, 0);
+#endif
+ replace(&bfallctr->root, x, LIST_NEXT(x));
+
+#ifdef HARD_DEBUG
+ check_tree(bfallctr, 0);
+#endif
+ }
+ else {
+ /* Remove from tree */
+ tree_delete(allctr, block);
+ }
+
+ DESTROY_LIST_ELEM(x);
+}
+
+
+static Block_t *
+bf_get_free_block(Allctr_t *allctr, Uint size,
+ Block_t *cand_blk, Uint cand_size)
+{
+ BFAllctr_t *bfallctr = (BFAllctr_t *) allctr;
+ RBTree_t *x = bfallctr->root;
+ RBTree_t *blk = NULL;
+ Uint blk_sz;
+
+ ASSERT(!cand_blk || cand_size >= size);
+
+ while (x) {
+ blk_sz = BLK_SZ(x);
+ if (blk_sz < size) {
+ x = x->right;
+ }
+ else {
+ blk = x;
+ if (blk_sz == size)
+ break;
+ x = x->left;
+ }
+ }
+
+ if (!blk)
+ return NULL;
+
+ ASSERT(IS_TREE_NODE(blk));
+
+
+#ifdef HARD_DEBUG
+ {
+ RBTree_t *ct_blk = check_tree(bfallctr, size);
+ ASSERT(BLK_SZ(ct_blk) == BLK_SZ(blk));
+ }
+#endif
+
+ if (cand_blk && cand_size <= BLK_SZ(blk))
+ return NULL; /* cand_blk was better */
+
+    /* Use the next block if one exists, in order to avoid replacing
+       the tree node */
+ blk = LIST_NEXT(blk) ? LIST_NEXT(blk) : blk;
+
+ bf_unlink_free_block(allctr, (Block_t *) blk);
+ return (Block_t *) blk;
+}
+
+
+/*
+ * info_options()
+ */
+
+static struct {
+ Eterm as;
+ Eterm aobf;
+ Eterm bf;
+#ifdef DEBUG
+ Eterm end_of_atoms;
+#endif
+} am;
+
+static void ERTS_INLINE atom_init(Eterm *atom, char *name)
+{
+ *atom = am_atom_put(name, strlen(name));
+}
+#define AM_INIT(AM) atom_init(&am.AM, #AM)
+
+static void
+init_atoms(void)
+{
+#ifdef DEBUG
+ Eterm *atom;
+#endif
+
+ if (atoms_initialized)
+ return;
+
+#ifdef DEBUG
+ for (atom = (Eterm *) &am; atom <= &am.end_of_atoms; atom++) {
+ *atom = THE_NON_VALUE;
+ }
+#endif
+ AM_INIT(as);
+ AM_INIT(aobf);
+ AM_INIT(bf);
+
+#ifdef DEBUG
+ for (atom = (Eterm *) &am; atom < &am.end_of_atoms; atom++) {
+ ASSERT(*atom != THE_NON_VALUE);
+ }
+#endif
+
+ atoms_initialized = 1;
+}
+
+
+#define bld_uint erts_bld_uint
+#define bld_cons erts_bld_cons
+#define bld_tuple erts_bld_tuple
+
+static ERTS_INLINE void
+add_2tup(Uint **hpp, Uint *szp, Eterm *lp, Eterm el1, Eterm el2)
+{
+ *lp = bld_cons(hpp, szp, bld_tuple(hpp, szp, 2, el1, el2), *lp);
+}
+
+static Eterm
+info_options(Allctr_t *allctr,
+ char *prefix,
+ int *print_to_p,
+ void *print_to_arg,
+ Uint **hpp,
+ Uint *szp)
+{
+ BFAllctr_t *bfallctr = (BFAllctr_t *) allctr;
+ Eterm res = THE_NON_VALUE;
+
+ if (print_to_p) {
+ erts_print(*print_to_p,
+ print_to_arg,
+ "%sas: %s\n",
+ prefix,
+ bfallctr->address_order ? "aobf" : "bf");
+ }
+
+ if (hpp || szp) {
+
+ if (!atoms_initialized)
+ erl_exit(1, "%s:%d: Internal error: Atoms not initialized",
+		     __FILE__, __LINE__);
+
+ res = NIL;
+ add_2tup(hpp, szp, &res,
+ am.as,
+ bfallctr->address_order ? am.aobf : am.bf);
+ }
+
+ return res;
+}
+
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\
+ * NOTE: erts_bfalc_test() is only supposed to be used for testing. *
+ * *
+ * Keep alloc_SUITE_data/allocator_test.h updated if changes are made *
+ * to erts_bfalc_test() *
+\* */
+
+unsigned long
+erts_bfalc_test(unsigned long op, unsigned long a1, unsigned long a2)
+{
+ switch (op) {
+ case 0x200: return (unsigned long) ((BFAllctr_t *) a1)->address_order;
+ case 0x201: return (unsigned long) ((BFAllctr_t *) a1)->root;
+ case 0x202: return (unsigned long) ((RBTree_t *) a1)->parent;
+ case 0x203: return (unsigned long) ((RBTree_t *) a1)->left;
+ case 0x204: return (unsigned long) ((RBTree_t *) a1)->right;
+ case 0x205: return (unsigned long) ((RBTreeList_t *) a1)->next;
+ case 0x206: return (unsigned long) IS_BLACK((RBTree_t *) a1);
+ case 0x207: return (unsigned long) IS_TREE_NODE((RBTree_t *) a1);
+ default: ASSERT(0); return ~((unsigned long) 0);
+ }
+}
+
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\
+ * Debug functions *
+\* */
+
+
+#ifdef HARD_DEBUG
+
+#define IS_LEFT_VISITED(FB) ((FB)->flags & LEFT_VISITED_FLG)
+#define IS_RIGHT_VISITED(FB) ((FB)->flags & RIGHT_VISITED_FLG)
+
+#define SET_LEFT_VISITED(FB) ((FB)->flags |= LEFT_VISITED_FLG)
+#define SET_RIGHT_VISITED(FB) ((FB)->flags |= RIGHT_VISITED_FLG)
+
+#define UNSET_LEFT_VISITED(FB) ((FB)->flags &= ~LEFT_VISITED_FLG)
+#define UNSET_RIGHT_VISITED(FB) ((FB)->flags &= ~RIGHT_VISITED_FLG)
+
+
+#if 0
+# define PRINT_TREE
+#else
+# undef PRINT_TREE
+#endif
+
+#ifdef PRINT_TREE
+static void print_tree(BFAllctr_t *);
+#endif
+
+/*
+ * Checks that the order between parent and children is correct,
+ * and that the Red-Black Tree properties are satisfied. If size > 0,
+ * check_tree() returns a node that satisfies "best fit" or, in the
+ * address order case, "address order best fit".
+ *
+ * The Red-Black Tree properties are:
+ * 1. Every node is either red or black.
+ * 2. Every leaf (NIL) is black.
+ * 3. If a node is red, then both its children are black.
+ * 4. Every simple path from a node to a descendant leaf
+ * contains the same number of black nodes.
+ */
+
+static RBTree_t *
+check_tree(BFAllctr_t *bfallctr, Uint size)
+{
+ RBTree_t *res = NULL;
+ Sint blacks;
+ Sint curr_blacks;
+ RBTree_t *x;
+
+#ifdef PRINT_TREE
+ print_tree(bfallctr);
+#endif
+
+ if (!bfallctr->root)
+ return res;
+
+ x = bfallctr->root;
+ ASSERT(IS_BLACK(x));
+ ASSERT(!x->parent);
+ curr_blacks = 1;
+ blacks = -1;
+
+ while (x) {
+ if (!IS_LEFT_VISITED(x)) {
+ SET_LEFT_VISITED(x);
+ if (x->left) {
+ x = x->left;
+ if (IS_BLACK(x))
+ curr_blacks++;
+ continue;
+ }
+ else {
+ if (blacks < 0)
+ blacks = curr_blacks;
+ ASSERT(blacks == curr_blacks);
+ }
+ }
+
+ if (!IS_RIGHT_VISITED(x)) {
+ SET_RIGHT_VISITED(x);
+ if (x->right) {
+ x = x->right;
+ if (IS_BLACK(x))
+ curr_blacks++;
+ continue;
+ }
+ else {
+ if (blacks < 0)
+ blacks = curr_blacks;
+ ASSERT(blacks == curr_blacks);
+ }
+ }
+
+
+ if (IS_RED(x)) {
+ ASSERT(IS_BLACK(x->right));
+ ASSERT(IS_BLACK(x->left));
+ }
+
+ ASSERT(x->parent || x == bfallctr->root);
+
+ if (x->left) {
+ ASSERT(x->left->parent == x);
+ if (bfallctr->address_order) {
+ ASSERT(BLK_SZ(x->left) < BLK_SZ(x)
+ || (BLK_SZ(x->left) == BLK_SZ(x) && x->left < x));
+ }
+ else {
+ ASSERT(IS_TREE_NODE(x->left));
+ ASSERT(BLK_SZ(x->left) < BLK_SZ(x));
+ }
+ }
+
+ if (x->right) {
+ ASSERT(x->right->parent == x);
+ if (bfallctr->address_order) {
+ ASSERT(BLK_SZ(x->right) > BLK_SZ(x)
+ || (BLK_SZ(x->right) == BLK_SZ(x) && x->right > x));
+ }
+ else {
+ ASSERT(IS_TREE_NODE(x->right));
+ ASSERT(BLK_SZ(x->right) > BLK_SZ(x));
+ }
+ }
+
+ if (size && BLK_SZ(x) >= size) {
+ if (bfallctr->address_order) {
+ if (!res
+ || BLK_SZ(x) < BLK_SZ(res)
+ || (BLK_SZ(x) == BLK_SZ(res) && x < res))
+ res = x;
+ }
+ else {
+ if (!res || BLK_SZ(x) < BLK_SZ(res))
+ res = x;
+ }
+ }
+
+ UNSET_LEFT_VISITED(x);
+ UNSET_RIGHT_VISITED(x);
+ if (IS_BLACK(x))
+ curr_blacks--;
+ x = x->parent;
+
+ }
+
+ ASSERT(curr_blacks == 0);
+
+ UNSET_LEFT_VISITED(bfallctr->root);
+ UNSET_RIGHT_VISITED(bfallctr->root);
+
+ return res;
+
+}
+
+
+#ifdef PRINT_TREE
+#define INDENT_STEP 2
+
+#include <stdio.h>
+
+static void
+print_tree_aux(RBTree_t *x, int indent)
+{
+ int i;
+
+ if (!x) {
+ for (i = 0; i < indent; i++) {
+ putc(' ', stderr);
+ }
+ fprintf(stderr, "BLACK: nil\r\n");
+ }
+ else {
+ print_tree_aux(x->right, indent + INDENT_STEP);
+ for (i = 0; i < indent; i++) {
+ putc(' ', stderr);
+ }
+ fprintf(stderr, "%s: sz=%lu addr=0x%lx\r\n",
+ IS_BLACK(x) ? "BLACK" : "RED",
+ BLK_SZ(x),
+ (Uint) x);
+ print_tree_aux(x->left, indent + INDENT_STEP);
+ }
+}
+
+
+static void
+print_tree(BFAllctr_t *bfallctr)
+{
+    char *type = bfallctr->address_order ? "Size-Address" : "Size";
+ fprintf(stderr, " --- %s tree begin ---\r\n", type);
+ print_tree_aux(bfallctr->root, 0);
+ fprintf(stderr, " --- %s tree end ---\r\n", type);
+}
+
+#endif
+
+#endif
diff --git a/erts/emulator/beam/erl_bestfit_alloc.h b/erts/emulator/beam/erl_bestfit_alloc.h
new file mode 100644
index 0000000000..cb35e21e57
--- /dev/null
+++ b/erts/emulator/beam/erl_bestfit_alloc.h
@@ -0,0 +1,64 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2003-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+
+#ifndef ERL_BESTFIT_ALLOC__
+#define ERL_BESTFIT_ALLOC__
+
+#include "erl_alloc_util.h"
+
+#define ERTS_ALC_BF_ALLOC_VSN_STR "0.9"
+#define ERTS_ALC_AOBF_ALLOC_VSN_STR "0.9"
+
+typedef struct BFAllctr_t_ BFAllctr_t;
+
+typedef struct {
+ int ao;
+} BFAllctrInit_t;
+
+#define ERTS_DEFAULT_BF_ALLCTR_INIT { \
+ 0 /* (bool) ao: address order */\
+}
+
+void erts_bfalc_init(void);
+Allctr_t *erts_bfalc_start(BFAllctr_t *, BFAllctrInit_t *, AllctrInit_t *);
+
+#endif /* #ifndef ERL_BESTFIT_ALLOC__ */
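+
+/* Illustrative sketch only: starting a plain "best fit" instance. The
+ * AllctrInit_t value (`init` below) is assumed to be prepared by the
+ * generic allocator framework (erl_alloc.c); setting ao to 1 in the
+ * BFAllctrInit_t selects "address order best fit" instead.
+ *
+ *     BFAllctr_t bf_state;
+ *     BFAllctrInit_t bfinit = ERTS_DEFAULT_BF_ALLCTR_INIT;
+ *     Allctr_t *a = erts_bfalc_start(&bf_state, &bfinit, &init);
+ */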
+
+
+
+#if defined(GET_ERL_BF_ALLOC_IMPL) && !defined(ERL_BF_ALLOC_IMPL__)
+#define ERL_BF_ALLOC_IMPL__
+
+#define GET_ERL_ALLOC_UTIL_IMPL
+#include "erl_alloc_util.h"
+
+typedef struct RBTree_t_ RBTree_t;
+
+struct BFAllctr_t_ {
+ Allctr_t allctr; /* Has to be first! */
+
+ RBTree_t * root;
+ int address_order;
+};
+
+unsigned long erts_bfalc_test(unsigned long, unsigned long, unsigned long);
+
+#endif /* #if defined(GET_ERL_BF_ALLOC_IMPL)
+ && !defined(ERL_BF_ALLOC_IMPL__) */
diff --git a/erts/emulator/beam/erl_bif_chksum.c b/erts/emulator/beam/erl_bif_chksum.c
new file mode 100644
index 0000000000..445ba00ca7
--- /dev/null
+++ b/erts/emulator/beam/erl_bif_chksum.c
@@ -0,0 +1,612 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2008-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+#include "sys.h"
+#include "erl_vm.h"
+#include "global.h"
+#include "erl_process.h"
+#include "error.h"
+#include "bif.h"
+#include "erl_binary.h"
+#include "big.h"
+#include "zlib.h"
+
+
+typedef void (*ChksumFun)(void *sum_in_out, unsigned char *buf,
+ unsigned buflen);
+
+/* Hidden trap target */
+static BIF_RETTYPE md5_2(BIF_ALIST_2);
+
+static Export chksum_md5_2_exp;
+
+void erts_init_bif_chksum(void)
+{
+    /* Invisible BIF to trap to. */
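+    /* The slots follow the standard Export-entry layout:
+       code[0] = module atom, code[1] = function atom, code[2] = arity,
+       code[3] = instruction to execute (apply_bif),
+       code[4] = the C function implementing the BIF. */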
+ memset(&chksum_md5_2_exp, 0, sizeof(Export));
+ chksum_md5_2_exp.address =
+ &chksum_md5_2_exp.code[3];
+ chksum_md5_2_exp.code[0] = am_erlang;
+ chksum_md5_2_exp.code[1] = am_atom_put("md5_trap",8);
+ chksum_md5_2_exp.code[2] = 2;
+ chksum_md5_2_exp.code[3] =
+ (Eterm) em_apply_bif;
+ chksum_md5_2_exp.code[4] =
+ (Eterm) &md5_2;
+}
+
+
+static Eterm do_chksum(ChksumFun sumfun, Process *p, Eterm ioterm, int left,
+ void *sum, int *res, int *err)
+{
+ Eterm *objp;
+ Eterm obj;
+ int c;
+ DECLARE_ESTACK(stack);
+ unsigned char *bytes = NULL;
+ int numbytes = 0;
+
+ *err = 0;
+ if (left <= 0 || is_nil(ioterm)) {
+ DESTROY_ESTACK(stack);
+ *res = 0;
+ return ioterm;
+ }
+ if(is_binary(ioterm)) {
+ Uint bitoffs;
+ Uint bitsize;
+ Uint size;
+ Eterm res_term = NIL;
+ unsigned char *bytes;
+ byte *temp_alloc = NULL;
+
+ ERTS_GET_BINARY_BYTES(ioterm, bytes, bitoffs, bitsize);
+ if (bitsize != 0) {
+ *res = 0;
+ *err = 1;
+ DESTROY_ESTACK(stack);
+ return NIL;
+ }
+ if (bitoffs != 0) {
+ bytes = erts_get_aligned_binary_bytes(ioterm, &temp_alloc);
+ /* The call to erts_get_aligned_binary_bytes cannot fail as
+	       we've already checked bitsize and that this is a binary */
+ }
+
+ size = binary_size(ioterm);
+
+
+ if (size > left) {
+ Eterm *hp;
+ ErlSubBin *sb;
+ Eterm orig;
+ Uint offset;
+ /* Split the binary in two parts, of which we
+ only process the first */
+ hp = HAlloc(p, ERL_SUB_BIN_SIZE);
+ sb = (ErlSubBin *) hp;
+ ERTS_GET_REAL_BIN(ioterm, orig, offset, bitoffs, bitsize);
+ sb->thing_word = HEADER_SUB_BIN;
+ sb->size = size - left;
+ sb->offs = offset + left;
+ sb->orig = orig;
+ sb->bitoffs = bitoffs;
+ sb->bitsize = bitsize;
+ sb->is_writable = 0;
+ res_term = make_binary(sb);
+ size = left;
+ }
+ (*sumfun)(sum, bytes, size);
+ *res = size;
+ DESTROY_ESTACK(stack);
+ erts_free_aligned_binary_bytes(temp_alloc);
+ return res_term;
+ }
+
+ if (!is_list(ioterm)) {
+ *res = 0;
+ *err = 1;
+ DESTROY_ESTACK(stack);
+ return NIL;
+ }
+
+    /* OK, a list; it needs to be processed in order, handling each flat
+       list level as it occurs, just like io_list_to_binary would */
+ *res = 0;
+ ESTACK_PUSH(stack,ioterm);
+ while (!ESTACK_ISEMPTY(stack) && left) {
+ ioterm = ESTACK_POP(stack);
+ if (is_nil(ioterm)) {
+ /* ignore empty lists */
+ continue;
+ }
+ if(is_list(ioterm)) {
+L_Again: /* Restart with sublist, old listend was pushed on stack */
+ objp = list_val(ioterm);
+ obj = CAR(objp);
+ for(;;) { /* loop over one flat list of bytes and binaries
+ until sublist or list end is encountered */
+ if (is_byte(obj)) {
+ int bsize = 0;
+ for(;;) {
+ if (bsize >= numbytes) {
+ if (!bytes) {
+ bytes = erts_alloc(ERTS_ALC_T_TMP,
+ numbytes = 500);
+ } else {
+ if (numbytes > left) {
+ numbytes += left;
+ } else {
+ numbytes *= 2;
+ }
+ bytes = erts_realloc(ERTS_ALC_T_TMP, bytes,
+ numbytes);
+ }
+ }
+ bytes[bsize++] = (unsigned char) unsigned_val(obj);
+ --left;
+ ioterm = CDR(objp);
+ if (!is_list(ioterm)) {
+ break;
+ }
+ objp = list_val(ioterm);
+ obj = CAR(objp);
+ if (!is_byte(obj))
+ break;
+ if (!left) {
+ break;
+ }
+ }
+ (*sumfun)(sum, bytes, bsize);
+ *res += bsize;
+ } else if (is_nil(obj)) {
+ ioterm = CDR(objp);
+ if (!is_list(ioterm)) {
+ break;
+ }
+ objp = list_val(ioterm);
+ obj = CAR(objp);
+ } else if (is_list(obj)) {
+ /* push rest of list for later processing, start
+ again with sublist */
+ ESTACK_PUSH(stack,CDR(objp));
+ ioterm = obj;
+ goto L_Again;
+ } else if (is_binary(obj)) {
+ int sres, serr;
+ Eterm rest_term;
+ rest_term = do_chksum(sumfun, p, obj, left, sum, &sres,
+ &serr);
+ *res += sres;
+ if (serr != 0) {
+ *err = 1;
+ DESTROY_ESTACK(stack);
+ if (bytes != NULL)
+ erts_free(ERTS_ALC_T_TMP, bytes);
+ return NIL;
+ }
+ left -= sres;
+ if (rest_term != NIL) {
+ Eterm *hp;
+ hp = HAlloc(p, 2);
+ obj = CDR(objp);
+ ioterm = CONS(hp, rest_term, obj);
+ left = 0;
+ break;
+ }
+ ioterm = CDR(objp);
+ if (is_list(ioterm)) {
+ /* objp and obj need to be updated if
+ loop is to continue */
+ objp = list_val(ioterm);
+ obj = CAR(objp);
+ }
+ } else {
+ *err = 1;
+ DESTROY_ESTACK(stack);
+ if (bytes != NULL)
+ erts_free(ERTS_ALC_T_TMP, bytes);
+ return NIL;
+ }
+ if (!left || is_nil(ioterm) || !is_list(ioterm)) {
+ break;
+ }
+ } /* for(;;) */
+ } /* is_list(ioterm) */
+
+ if (!left) {
+#ifdef ALLOW_BYTE_TAIL
+ if (is_byte(ioterm)) {
+	    /* improper list with byte tail */
+ Eterm *hp;
+ hp = HAlloc(p, 2);
+ ioterm = CONS(hp, ioterm, NIL);
+ }
+#else
+ ;
+#endif
+ } else if (!is_list(ioterm) && !is_nil(ioterm)) {
+	/* improper list end */
+#ifdef ALLOW_BYTE_TAIL
+ if (is_byte(ioterm)) {
+ unsigned char b[1];
+ b[0] = (unsigned char) unsigned_val(ioterm);
+ (*sumfun)(sum, b, 1);
+ ++(*res);
+ --left;
+ ioterm = NIL;
+ } else
+#endif
+	if (is_binary(ioterm)) {
+ int sres, serr;
+ ioterm = do_chksum(sumfun, p, ioterm, left, sum, &sres, &serr);
+ *res +=sres;
+ if (serr != 0) {
+ *err = 1;
+ DESTROY_ESTACK(stack);
+ if (bytes != NULL)
+ erts_free(ERTS_ALC_T_TMP, bytes);
+ return NIL;
+ }
+ left -= sres;
+ } else {
+ *err = 1;
+ DESTROY_ESTACK(stack);
+ if (bytes != NULL)
+ erts_free(ERTS_ALC_T_TMP, bytes);
+ return NIL;
+ }
+ }
+ } /* while left and not estack empty */
+ c = ESTACK_COUNT(stack);
+ if (c > 0) {
+ Eterm *hp = HAlloc(p,2*c);
+ while(!ESTACK_ISEMPTY(stack)) {
+ Eterm st = ESTACK_POP(stack);
+ ioterm = CONS(hp, ioterm, st);
+ hp += 2;
+ }
+ }
+ DESTROY_ESTACK(stack);
+ if (bytes != NULL)
+ erts_free(ERTS_ALC_T_TMP, bytes);
+ return ioterm;
+}
+
+static void adler32_wrap(void *vsum, unsigned char *buf, unsigned buflen)
+{
+ unsigned long sum = *((unsigned long *) vsum);
+ sum = adler32(sum,buf,buflen);
+ *((unsigned long *) vsum) = sum;
+}
+
+static void crc32_wrap(void *vsum, unsigned char *buf, unsigned buflen)
+{
+ unsigned long sum = *((unsigned long *) vsum);
+ sum = crc32(sum,buf,buflen);
+ *((unsigned long *) vsum) = sum;
+}
+
+static void md5_wrap(void *vsum, unsigned char *buf, unsigned buflen)
+{
+ MD5_CTX *ctx = ((MD5_CTX *) vsum);
+ MD5Update(ctx,buf,buflen);
+}
+
+#define BYTES_PER_REDUCTION 10
+#define CHUNK_PER_SCHEDULE (BYTES_PER_REDUCTION * CONTEXT_REDS)
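+/* Each checksum BIF below processes at most CHUNK_PER_SCHEDULE bytes
+   per invocation (CONTEXT_REDS reductions' worth at 10 bytes each,
+   i.e. 20000 bytes if CONTEXT_REDS is 2000), bumps reductions in
+   proportion, and traps to itself with the unprocessed rest. */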
+
+BIF_RETTYPE
+crc32_1(BIF_ALIST_1)
+{
+ unsigned long chksum;
+ int res, err;
+ Eterm rest,res_sum;
+ chksum = crc32(0,NULL,0);
+
+ rest = do_chksum(&crc32_wrap,BIF_P,BIF_ARG_1,CHUNK_PER_SCHEDULE,
+ (void *) &chksum,&res,
+ &err);
+ BUMP_REDS(BIF_P,res / BYTES_PER_REDUCTION);
+ if (err != 0) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+ res_sum = erts_make_integer(chksum,BIF_P);
+ if (rest != NIL) {
+ BUMP_ALL_REDS(BIF_P);
+ BIF_TRAP2(bif_export[BIF_crc32_2], BIF_P, res_sum, rest);
+ }
+ BIF_RET(res_sum);
+}
+
+BIF_RETTYPE
+crc32_2(BIF_ALIST_2)
+{
+ unsigned long chksum;
+ int res, err;
+ Eterm rest,res_sum;
+ Uint u;
+ if (!term_to_Uint(BIF_ARG_1, &u) || ((u >> 16) >> 16) != 0) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+ chksum = (unsigned long) u;
+
+ rest = do_chksum(&crc32_wrap,BIF_P,BIF_ARG_2,CHUNK_PER_SCHEDULE,
+ (void *) &chksum,&res,
+ &err);
+ BUMP_REDS(BIF_P,res / BYTES_PER_REDUCTION);
+ if (err != 0) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+ res_sum = erts_make_integer(chksum,BIF_P);
+ if (rest != NIL) {
+ BUMP_ALL_REDS(BIF_P);
+ BIF_TRAP2(bif_export[BIF_crc32_2], BIF_P, res_sum, rest);
+ }
+ BIF_RET(res_sum);
+}
+
+BIF_RETTYPE
+crc32_combine_3(BIF_ALIST_3)
+{
+ unsigned long chksum1,chksum2;
+ z_off_t length;
+ Uint32 res;
+ Eterm res_sum;
+ Uint u;
+
+ if (!term_to_Uint(BIF_ARG_1, &u) || ((u >> 16) >> 16) != 0) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+ chksum1 = (unsigned long) u;
+
+ if (!term_to_Uint(BIF_ARG_2, &u) || ((u >> 16) >> 16) != 0) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+ chksum2 = (unsigned long) u;
+
+ if (!term_to_Uint(BIF_ARG_3, &u) || ((u >> 16) >> 16) != 0) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+ length = (z_off_t) u;
+
+ res = (Uint32) crc32_combine(chksum1,chksum2,length);
+
+ res_sum = erts_make_integer(res,BIF_P);
+ BIF_RET(res_sum);
+}
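+
+/* Illustrative note: the BIFs above mirror zlib's incremental
+ * interface, where a checksum over concatenated data can be computed
+ * piecewise and then combined. Assuming two plain C buffers
+ * buf_a/buf_b of lengths len_a/len_b:
+ *
+ *     uLong c1 = crc32(crc32(0L, Z_NULL, 0), buf_a, len_a);
+ *     uLong c2 = crc32(crc32(0L, Z_NULL, 0), buf_b, len_b);
+ *     assert(crc32_combine(c1, c2, len_b) == crc32(c1, buf_b, len_b));
+ */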
+
+BIF_RETTYPE
+adler32_1(BIF_ALIST_1)
+{
+ unsigned long chksum;
+ int res, err;
+ Eterm rest,res_sum;
+ chksum = adler32(0,NULL,0);
+
+ rest = do_chksum(&adler32_wrap,BIF_P,BIF_ARG_1,CHUNK_PER_SCHEDULE,
+ (void *) &chksum,&res,
+ &err);
+ BUMP_REDS(BIF_P,res / BYTES_PER_REDUCTION);
+ if (err != 0) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+ res_sum = erts_make_integer(chksum,BIF_P);
+ if (rest != NIL) {
+ BUMP_ALL_REDS(BIF_P);
+ BIF_TRAP2(bif_export[BIF_adler32_2], BIF_P, res_sum, rest);
+ }
+ BIF_RET(res_sum);
+}
+
+BIF_RETTYPE
+adler32_2(BIF_ALIST_2)
+{
+ unsigned long chksum;
+ int res, err;
+ Eterm rest,res_sum;
+ Uint u;
+ if (!term_to_Uint(BIF_ARG_1, &u) || ((u >> 16) >> 16) != 0) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+ chksum = (unsigned long) u;
+
+ rest = do_chksum(&adler32_wrap,BIF_P,BIF_ARG_2,CHUNK_PER_SCHEDULE,
+ (void *) &chksum,&res,
+ &err);
+ BUMP_REDS(BIF_P,res / BYTES_PER_REDUCTION);
+ if (err != 0) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+ res_sum = erts_make_integer(chksum,BIF_P);
+ if (rest != NIL) {
+ BUMP_ALL_REDS(BIF_P);
+ BIF_TRAP2(bif_export[BIF_adler32_2], BIF_P, res_sum, rest);
+ }
+ BIF_RET(res_sum);
+}
+
+BIF_RETTYPE
+adler32_combine_3(BIF_ALIST_3)
+{
+ unsigned long chksum1,chksum2;
+ z_off_t length;
+ Uint32 res;
+ Eterm res_sum;
+ Uint u;
+
+ if (!term_to_Uint(BIF_ARG_1, &u) || ((u >> 16) >> 16) != 0) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+ chksum1 = (unsigned long) u;
+
+ if (!term_to_Uint(BIF_ARG_2, &u) || ((u >> 16) >> 16) != 0) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+ chksum2 = (unsigned long) u;
+
+ if (!term_to_Uint(BIF_ARG_3, &u) || ((u >> 16) >> 16) != 0) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+ length = (z_off_t) u;
+
+ if (length == 0) { /* Workaround for unexpected behaviour in zlib. */
+ res = (Uint32) chksum1;
+ } else {
+ res = (Uint32) adler32_combine(chksum1,chksum2,length);
+ }
+
+ res_sum = erts_make_integer(res,BIF_P);
+ BIF_RET(res_sum);
+}
+
+
+BIF_RETTYPE
+md5_1(BIF_ALIST_1)
+{
+ Eterm bin;
+ byte* bytes;
+ Eterm rest;
+ int res, err;
+
+ MD5_CTX context;
+ MD5Init(&context);
+
+ rest = do_chksum(&md5_wrap,BIF_P,BIF_ARG_1,100,(void *) &context,&res,
+ &err);
+ if (err != 0) {
+ BUMP_REDS(BIF_P,res);
+ BIF_ERROR(BIF_P, BADARG);
+ }
+ if (rest != NIL) {
+ BUMP_ALL_REDS(BIF_P);
+ bin = new_binary(BIF_P, (byte *) &context, sizeof(MD5_CTX));
+ BIF_TRAP2(&chksum_md5_2_exp, BIF_P, bin, rest);
+ }
+ BUMP_REDS(BIF_P,res);
+ bin = new_binary(BIF_P, (byte *)NULL, 16);
+ bytes = binary_bytes(bin);
+ MD5Final(bytes, &context);
+ BIF_RET(bin);
+}
+
+/* Hidden trap target */
+static BIF_RETTYPE
+md5_2(BIF_ALIST_2)
+{
+ byte *bytes;
+ MD5_CTX context;
+ Eterm rest;
+ Eterm bin;
+ int res, err;
+
+    /* No need to check the context; this function cannot be called with an
+       unaligned or badly sized context, as it is only reached by trapping. */
+ bytes = binary_bytes(BIF_ARG_1);
+ memcpy(&context,bytes,sizeof(MD5_CTX));
+ rest = do_chksum(&md5_wrap,BIF_P,BIF_ARG_2,100,(void *) &context,&res,
+ &err);
+ if (err != 0) {
+ BUMP_REDS(BIF_P,res);
+ BIF_ERROR(BIF_P, BADARG);
+ }
+ if (rest != NIL) {
+ BUMP_ALL_REDS(BIF_P);
+ bin = new_binary(BIF_P, (byte *) &context, sizeof(MD5_CTX));
+ BIF_TRAP2(&chksum_md5_2_exp, BIF_P, bin, rest);
+ }
+ BUMP_REDS(BIF_P,res);
+ bin = new_binary(BIF_P, (byte *)NULL, 16);
+ bytes = binary_bytes(bin);
+ MD5Final(bytes, &context);
+ BIF_RET(bin);
+}
+
+BIF_RETTYPE
+md5_init_0(BIF_ALIST_0)
+{
+ Eterm bin;
+ byte* bytes;
+
+ bin = erts_new_heap_binary(BIF_P, (byte *)NULL, sizeof(MD5_CTX), &bytes);
+ MD5Init((MD5_CTX *)bytes);
+ BIF_RET(bin);
+}
+
+BIF_RETTYPE
+md5_update_2(BIF_ALIST_2)
+{
+ byte *bytes;
+ MD5_CTX context;
+ Eterm rest;
+ Eterm bin;
+ int res, err;
+ byte *temp_alloc = NULL;
+
+ if ((bytes = erts_get_aligned_binary_bytes(BIF_ARG_1, &temp_alloc)) == NULL) {
+ erts_free_aligned_binary_bytes(temp_alloc);
+ BIF_ERROR(BIF_P, BADARG);
+ }
+ if (binary_size(BIF_ARG_1) != sizeof(MD5_CTX)) {
+ erts_free_aligned_binary_bytes(temp_alloc);
+ BIF_ERROR(BIF_P, BADARG);
+ }
+ memcpy(&context,bytes,sizeof(MD5_CTX));
+ erts_free_aligned_binary_bytes(temp_alloc);
+ rest = do_chksum(&md5_wrap,BIF_P,BIF_ARG_2,100,(void *) &context,&res,
+ &err);
+ if (err != 0) {
+ BUMP_REDS(BIF_P,res);
+ BIF_ERROR(BIF_P, BADARG);
+ }
+ bin = new_binary(BIF_P, (byte *) &context, sizeof(MD5_CTX));
+ if (rest != NIL) {
+ BUMP_ALL_REDS(BIF_P);
+ BIF_TRAP2(bif_export[BIF_md5_update_2], BIF_P, bin, rest);
+ }
+ BUMP_REDS(BIF_P,res);
+ BIF_RET(bin);
+}
+
+BIF_RETTYPE
+md5_final_1(BIF_ALIST_1)
+{
+ Eterm bin;
+ byte* context;
+ byte* result;
+ MD5_CTX ctx_copy;
+ byte* temp_alloc = NULL;
+
+ if ((context = erts_get_aligned_binary_bytes(BIF_ARG_1, &temp_alloc)) == NULL) {
+ error:
+ erts_free_aligned_binary_bytes(temp_alloc);
+ BIF_ERROR(BIF_P, BADARG);
+ }
+ if (binary_size(BIF_ARG_1) != sizeof(MD5_CTX)) {
+ goto error;
+ }
+ bin = erts_new_heap_binary(BIF_P, (byte *)NULL, 16, &result);
+ memcpy(&ctx_copy, context, sizeof(MD5_CTX));
+ erts_free_aligned_binary_bytes(temp_alloc);
+ MD5Final(result, &ctx_copy);
+ BIF_RET(bin);
+}
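+
+/* Typical Erlang-side use of the streaming MD5 interface implemented
+ * by the three BIFs above (illustrative only):
+ *
+ *     Ctx0 = erlang:md5_init(),
+ *     Ctx1 = erlang:md5_update(Ctx0, "first chunk"),
+ *     Ctx2 = erlang:md5_update(Ctx1, "second chunk"),
+ *     Digest = erlang:md5_final(Ctx2),
+ *     true = (Digest =:= erlang:md5(["first chunk", "second chunk"])).
+ */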
diff --git a/erts/emulator/beam/erl_bif_ddll.c b/erts/emulator/beam/erl_bif_ddll.c
new file mode 100644
index 0000000000..9d5f0d9c02
--- /dev/null
+++ b/erts/emulator/beam/erl_bif_ddll.c
@@ -0,0 +1,1964 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2006-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * BIFs belonging to the 'erl_ddll' module together with utility
+ * functions for dynamic loading. The actual loading is done in
+ * erl_sys_ddll.c in respective system dependent directory. The
+ * driver structure contains a handle to the actual loaded "module" as
+ * well as record keeping information about processes having loaded
+ * the driver and processes monitoring the driver. A process in any
+ * way involved in ddll-drivers, get a special flag, which triggers
+ * cleenup at process exit.
+ */
+
+
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+
+#define ERL_SYS_DRV
+
+#include "sys.h"
+#include "erl_vm.h"
+#include "global.h"
+#include "erl_process.h"
+#include "error.h"
+#include "erl_driver.h"
+#include "bif.h"
+#include "big.h"
+#include "dist.h"
+#include "erl_version.h"
+
+#ifdef ERTS_SMP
+#define DDLL_SMP 1
+#else
+#define DDLL_SMP 0
+#endif
+
+
+/*
+ * Local types
+ */
+
+typedef struct {
+ Eterm pid;
+ Process *proc;
+ Uint status;
+ Uint count;
+} ProcEntryInfo;
+
+/*
+ * Forward
+ */
+static char *pick_list_or_atom(Eterm name_term);
+static erts_driver_t *lookup_driver(char *name);
+static Eterm mkatom(char *str);
+static void add_proc_loaded(DE_Handle *dh, Process *proc);
+static void add_proc_loaded_deref(DE_Handle *dh, Process *proc);
+static void set_driver_reloading(DE_Handle *dh, Process *proc, char *path, char *name, Uint flags);
+static int load_driver_entry(DE_Handle **dhp, char *path, char *name);
+static int do_unload_driver_entry(DE_Handle *dh, Eterm *save_name);
+static int do_load_driver_entry(DE_Handle *dh, char *path, char *name);
+#if 0
+static void unload_driver_entry(DE_Handle *dh);
+#endif
+static int reload_driver_entry(DE_Handle *dh);
+static int build_proc_info(DE_Handle *dh, ProcEntryInfo **out_pei, Uint filter);
+static int is_last_user(DE_Handle *dh, Process *proc);
+static DE_ProcEntry *find_proc_entry(DE_Handle *dh, Process *proc, Uint status);
+static void remove_proc_entry(DE_Handle *dh, DE_ProcEntry *pe);
+static int num_procs(DE_Handle *dh, Uint status);
+/*static int num_entries(DE_Handle *dh, Process *proc, Uint status);*/
+static void notify_proc(Process *proc, Eterm ref, Eterm driver_name,
+ Eterm type, Eterm tag, int errcode);
+static void notify_all(DE_Handle *dh, char *name, Uint awaiting, Eterm type, Eterm tag);
+static int load_error_need(int code);
+static Eterm build_load_error_hp(Eterm *hp, int code);
+static Eterm build_load_error(Process *p, int code);
+static int errdesc_to_code(Eterm errdesc, int *code /* out */);
+static Eterm add_monitor(Process *p, DE_Handle *dh, Uint status);
+static Eterm notify_when_loaded(Process *p, Eterm name_term, char *name,
+ ErtsProcLocks plocks);
+static Eterm notify_when_unloaded(Process *p, Eterm name_term, char *name,
+ ErtsProcLocks plocks, Uint flag);
+static void first_ddll_reference(DE_Handle *dh);
+static void dereference_all_processes(DE_Handle *dh);
+static void restore_process_references(DE_Handle *dh);
+static void ddll_no_more_references(void *vdh);
+
+#define lock_drv_list() erts_smp_mtx_lock(&erts_driver_list_lock)
+#define unlock_drv_list() erts_smp_mtx_unlock(&erts_driver_list_lock)
+#define assert_drv_list_locked() \
+ ERTS_SMP_LC_ASSERT(erts_smp_lc_mtx_is_locked(&erts_driver_list_lock))
+#define assert_drv_list_not_locked() \
+ ERTS_SMP_LC_ASSERT(!erts_smp_lc_mtx_is_locked(&erts_driver_list_lock))
+
+
+#define FREE_PORT_FLAGS (ERTS_PORT_SFLGS_DEAD & (~ERTS_PORT_SFLG_INITIALIZING))
+
+/*
+ * try_load(Path, Name, OptionList) -> {ok,Status} |
+ * {ok, PendingStatus, Ref} |
+ * {error, ErrorDesc}
+ * Path = Name = string() | atom()
+ * OptionList = [ Option ]
+ * Option = {driver_options, DriverOptionList} |
+ * {monitor,MonitorOption} |
+ * {reload, ReloadOption}
+ * DriverOptionList = [ DriverOption ]
+ * DriverOption = kill_ports
+ * MonitorOption = pending_driver | pending
+ * ReloadOption = pending_driver | pending
+ * Status = loaded | already_loaded | PendingStatus
+ * PendingStatus = pending_driver | pending_process
+ * Ref = ref()
+ * ErrorDesc = ErrorAtom | OpaqueError
+ * ErrorAtom = linked_in_driver | inconsistent |
+ * permanent | pending
+ */
+/*
+ * Try to load. If the driver is OK, add as LOADED. If the driver is
+ * UNLOAD, possibly change to reload and add as LOADED; there should
+ * be no other LOADED-tagged pids. If the driver is RELOAD then
+ * add/increment as LOADED (there should be some LOADED pid). If the
+ * driver is not present, really load and add as LOADED.
+ * Returns {ok,loaded}, {ok,pending_driver},
+ * {error, permanent} or {error,load_error()}.
+ */
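+/* Example call from Erlang (illustrative; the path and driver name are
+ * hypothetical):
+ *
+ *     {ok, loaded} = erl_ddll:try_load("/some/priv/lib", my_drv,
+ *                                      [{driver_options, [kill_ports]}]).
+ */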
+BIF_RETTYPE erl_ddll_try_load_3(Process *p, Eterm path_term,
+ Eterm name_term, Eterm options)
+{
+ char *path = NULL;
+ int path_len;
+ char *name = NULL;
+ DE_Handle *dh;
+ erts_driver_t *drv;
+ int res;
+ Eterm soft_error_term = NIL;
+ Eterm ok_term = NIL;
+ Eterm *hp;
+ Eterm t;
+ int monitor = 0;
+ int reload = 0;
+ Eterm l;
+ Uint flags = 0;
+ int kill_ports = 0;
+ int do_build_load_error = 0;
+ int build_this_load_error = 0;
+
+ for(l = options; is_list(l); l = CDR(list_val(l))) {
+ Eterm opt = CAR(list_val(l));
+ Eterm *tp;
+ if (is_not_tuple(opt)) {
+ goto error;
+ }
+ tp = tuple_val(opt);
+ if (*tp != make_arityval(2) || is_not_atom(tp[1])) {
+ goto error;
+ }
+ switch (tp[1]) {
+ case am_driver_options:
+ {
+ Eterm ll;
+ for(ll = tp[2]; is_list(ll); ll = CDR(list_val(ll))) {
+ Eterm dopt = CAR(list_val(ll));
+ if (dopt == am_kill_ports) {
+ flags |= ERL_DE_FL_KILL_PORTS;
+ } else {
+ goto error;
+ }
+ }
+ if (is_not_nil(ll)) {
+ goto error;
+ }
+ }
+ break;
+ case am_monitor:
+ if (tp[2] == am_pending_driver) {
+ monitor = 1;
+ } else if (tp[2] == am_pending ) {
+ monitor = 2;
+ } else {
+ goto error;
+ }
+ break;
+ case am_reload:
+ if (tp[2] == am_pending_driver) {
+ reload = 1;
+ } else if (tp[2] == am_pending ) {
+ reload = 2;
+ } else {
+ goto error;
+ }
+ break;
+ default:
+ goto error;
+ }
+ }
+ if (is_not_nil(l)) {
+ goto error;
+ }
+
+
+ if ((name = pick_list_or_atom(name_term)) == NULL) {
+ goto error;
+ }
+
+ path_len = io_list_len(path_term);
+
+ if (path_len <= 0) {
+ goto error;
+ }
+ path = erts_alloc(ERTS_ALC_T_DDLL_TMP_BUF, path_len + 1 /* might need path separator */ + sys_strlen(name) + 1);
+ if (io_list_to_buf(path_term, path, path_len) != 0) {
+ goto error;
+ }
+ while (path_len > 0 && (path[path_len-1] == '\\' || path[path_len-1] == '/')) {
+ --path_len;
+ }
+ path[path_len++] = '/';
+ /*path[path_len] = '\0';*/
+ sys_strcpy(path+path_len,name);
+
+#if DDLL_SMP
+ erts_smp_proc_unlock(p, ERTS_PROC_LOCK_MAIN);
+ lock_drv_list();
+#endif
+ if ((drv = lookup_driver(name)) != NULL) {
+ if (drv->handle == NULL) {
+ /* static_driver */
+ soft_error_term = am_linked_in_driver;
+ goto soft_error;
+ } else {
+ dh = drv->handle;
+ if (dh->status == ERL_DE_OK) {
+ int is_last = is_last_user(dh,p);
+ if (reload == 1 && !is_last) {
+		    /* Want a reload only if there are no other users,
+		       but there are others... */
+ soft_error_term = am_pending_process;
+ goto soft_error;
+ }
+ if (reload != 0) {
+ DE_ProcEntry *old;
+ if ((dh->flags & ERL_FL_CONSISTENT_MASK) !=
+ (flags & ERL_FL_CONSISTENT_MASK)) {
+ soft_error_term = am_inconsistent;
+ goto soft_error;
+ }
+ if ((old = find_proc_entry(dh, p, ERL_DE_PROC_LOADED)) ==
+ NULL) {
+ soft_error_term = am_not_loaded_by_this_process;
+ goto soft_error;
+ } else {
+ remove_proc_entry(dh, old);
+ erts_ddll_dereference_driver(dh);
+ erts_free(ERTS_ALC_T_DDLL_PROCESS, old);
+ }
+ /* Reload requested and granted */
+ dereference_all_processes(dh);
+ set_driver_reloading(dh, p, path, name, flags);
+ if (dh->flags & ERL_DE_FL_KILL_PORTS) {
+ kill_ports = 1;
+ }
+ ok_term = (reload == 1) ? am_pending_driver :
+ am_pending_process;
+ } else {
+ /* Already loaded and healthy (might be by me) */
+ if (sys_strcmp(dh->full_path, path) ||
+ (dh->flags & ERL_FL_CONSISTENT_MASK) !=
+ (flags & ERL_FL_CONSISTENT_MASK)) {
+ soft_error_term = am_inconsistent;
+ goto soft_error;
+ }
+ add_proc_loaded(dh,p);
+ erts_ddll_reference_driver(dh);
+ monitor = 0;
+ ok_term = mkatom("already_loaded");
+ }
+ } else if (dh->status == ERL_DE_UNLOAD ||
+ dh->status == ERL_DE_FORCE_UNLOAD) {
+ /* pending driver */
+ if (reload != 0) {
+ soft_error_term = am_not_loaded_by_this_process;
+ goto soft_error;
+ }
+ if (sys_strcmp(dh->full_path, path) ||
+ (dh->flags & ERL_FL_CONSISTENT_MASK) !=
+ (flags & ERL_FL_CONSISTENT_MASK)) {
+ soft_error_term = am_inconsistent;
+ goto soft_error;
+ }
+ dh->status = ERL_DE_OK;
+ notify_all(dh, drv->name,
+ ERL_DE_PROC_AWAIT_UNLOAD, am_UP,
+ am_unload_cancelled);
+ add_proc_loaded(dh,p);
+ erts_ddll_reference_driver(dh);
+ monitor = 0;
+ ok_term = mkatom("already_loaded");
+ } else if (dh->status == ERL_DE_RELOAD ||
+ dh->status == ERL_DE_FORCE_RELOAD) {
+ if (reload != 0) {
+ soft_error_term = am_pending_reload;
+ goto soft_error;
+ }
+ if (sys_strcmp(dh->reload_full_path, path) ||
+ (dh->reload_flags & ERL_FL_CONSISTENT_MASK) !=
+ (flags & ERL_FL_CONSISTENT_MASK)) {
+ soft_error_term = am_inconsistent;
+ goto soft_error;
+ }
+ /* Load of granted unload... */
+		add_proc_loaded_deref(dh,p); /* Don't reference; that happens after reload */
+ ++monitor;
+ ok_term = am_pending_driver;
+ } else { /* ERL_DE_PERMANENT */
+ soft_error_term = am_permanent;
+ goto soft_error;
+ }
+ }
+ } else { /* driver non-existing */
+ if (reload != 0) {
+ soft_error_term = am_not_loaded;
+ goto soft_error;
+ }
+ if ((res = load_driver_entry(&dh, path, name)) != ERL_DE_NO_ERROR) {
+ build_this_load_error = res;
+ do_build_load_error = 1;
+ soft_error_term = am_undefined;
+ goto soft_error;
+ } else {
+ dh->flags = flags;
+ add_proc_loaded(dh,p);
+ first_ddll_reference(dh);
+ monitor = 0;
+ ok_term = mkatom("loaded");
+ }
+ }
+ assert_drv_list_locked();
+ if (kill_ports) {
+ int j;
+ /* Avoid closing the driver by referencing it */
+ erts_ddll_reference_driver(dh);
+ ASSERT(dh->status == ERL_DE_RELOAD);
+ dh->status = ERL_DE_FORCE_RELOAD;
+#if DDLL_SMP
+ unlock_drv_list();
+#endif
+ for (j = 0; j < erts_max_ports; j++) {
+ Port* prt = &erts_port[j];
+#ifdef DDLL_SMP
+ erts_smp_port_state_lock(prt);
+#endif
+ if (!(prt->status & FREE_PORT_FLAGS) &&
+ prt->drv_ptr->handle == dh) {
+#if DDLL_SMP
+ erts_smp_atomic_inc(&prt->refc);
+ /* Extremely rare spinlock */
+ while(prt->status & ERTS_PORT_SFLG_INITIALIZING) {
+ erts_smp_port_state_unlock(prt);
+ erts_smp_port_state_lock(prt);
+ }
+ erts_smp_port_state_unlock(prt);
+ erts_smp_mtx_lock(prt->lock);
+ if (!(prt->status & ERTS_PORT_SFLGS_DEAD)) {
+ driver_failure_atom(j, "driver_unloaded");
+ }
+#else
+ driver_failure_atom(j, "driver_unloaded");
+#endif
+ erts_port_release(prt);
+ }
+ else erts_smp_port_state_unlock(prt);
+ }
+ /* Dereference, eventually causing driver destruction */
+#if DDLL_SMP
+ lock_drv_list();
+#endif
+ erts_ddll_dereference_driver(dh);
+ }
+
+#if DDLL_SMP
+ erts_ddll_reference_driver(dh);
+ unlock_drv_list();
+ erts_smp_proc_lock(p, ERTS_PROC_LOCK_MAIN);
+ lock_drv_list();
+ erts_ddll_dereference_driver(dh);
+#endif
+
+ p->flags |= F_USING_DDLL;
+ if (monitor) {
+ Eterm mref = add_monitor(p, dh, ERL_DE_PROC_AWAIT_LOAD);
+ hp = HAlloc(p,4);
+ t = TUPLE3(hp, am_ok, ok_term, mref);
+ } else {
+ hp = HAlloc(p,3);
+ t = TUPLE2(hp, am_ok, ok_term);
+ }
+#if DDLL_SMP
+ unlock_drv_list();
+#endif
+ erts_free(ERTS_ALC_T_DDLL_TMP_BUF, (void *) path);
+ erts_free(ERTS_ALC_T_DDLL_TMP_BUF, (void *) name);
+ ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN & erts_proc_lc_my_proc_locks(p));
+ BIF_RET(t);
+ soft_error:
+#if DDLL_SMP
+ unlock_drv_list();
+ erts_smp_proc_lock(p, ERTS_PROC_LOCK_MAIN);
+#endif
+ if (do_build_load_error) {
+ soft_error_term = build_load_error(p, build_this_load_error);
+ }
+
+ hp = HAlloc(p,3);
+ t = TUPLE2(hp, am_error, soft_error_term);
+ erts_free(ERTS_ALC_T_DDLL_TMP_BUF, (void *) path);
+ erts_free(ERTS_ALC_T_DDLL_TMP_BUF, (void *) name);
+ ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN & erts_proc_lc_my_proc_locks(p));
+ BIF_RET(t);
+ error:
+ assert_drv_list_not_locked();
+ ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN & erts_proc_lc_my_proc_locks(p));
+ if (path != NULL) {
+ erts_free(ERTS_ALC_T_DDLL_TMP_BUF, (void *) path);
+ }
+ if (name != NULL) {
+ erts_free(ERTS_ALC_T_DDLL_TMP_BUF, (void *) name);
+ }
+ BIF_ERROR(p,BADARG);
+}
+
+/*
+ * try_unload(Name, OptionList) -> {ok,Status} |
+ * {ok,PendingStatus, Ref} |
+ * {error, ErrorAtom}
+ * Name = string() | atom()
+ * OptionList = [ Option ]
+ * Option = {monitor,MonitorOption} | kill_ports
+ * MonitorOption = pending_driver | pending
+ * Status = unloaded | PendingStatus
+ * PendingStatus = pending_driver | pending_process
+ * Ref = ref()
+ * ErrorAtom = linked_in_driver | not_loaded |
+ * not_loaded_by_this_process | permanent
+ */
+
+/*
+   You have to have loaded the driver, and the pid state
+   is LOADED or AWAIT_LOAD. You will be removed from the list
+   regardless of driver state.
+   If the driver is loaded by someone else too, the return is
+   {ok, pending_process}
+   If the driver is loaded but locked by a port, the return is
+   {ok, pending_driver}
+   If the driver is loaded and free to unload (you're the last holding it),
+   {ok, unloaded}
+   If it's not loaded, or not loaded by you,
+   {error, not_loaded} or {error, not_loaded_by_this_process}
+
+   Internally, if it's in state UNLOADING, just return {ok, pending_driver} and
+   remove/decrement this pid (which should be a LOADED-tagged one).
+   If the state is RELOADING, this pid should be in the list as LOADED-tagged;
+   only AWAIT_LOAD would be possible, but that is not allowed for unloading.
+   Remove it and, if it was the last LOADED-tagged entry, change from RELOAD to
+   UNLOAD and notify any AWAIT_LOAD waiters with
+   {'DOWN', ref(), driver, name(), load_cancelled}.
+   If the driver made itself permanent: {'UP', ref(), driver, name(), permanent}.
+*/
+Eterm erl_ddll_try_unload_2(Process *p, Eterm name_term, Eterm options)
+{
+ char *name = NULL;
+ Eterm ok_term = NIL;
+ Eterm soft_error_term = NIL;
+ erts_driver_t *drv;
+ DE_Handle *dh;
+ DE_ProcEntry *pe;
+ Eterm *hp;
+ Eterm t;
+ int monitor = 0;
+ Eterm l;
+ int kill_ports = 0;
+
+ erts_smp_proc_unlock(p, ERTS_PROC_LOCK_MAIN);
+
+ for(l = options; is_list(l); l = CDR(list_val(l))) {
+ Eterm opt = CAR(list_val(l));
+ Eterm *tp;
+ if (is_not_tuple(opt)) {
+ if (opt == am_kill_ports) {
+ kill_ports = 1;
+ continue;
+ } else {
+ goto error;
+ }
+ }
+ tp = tuple_val(opt);
+ if (*tp != make_arityval(2) || tp[1] != am_monitor) {
+ goto error;
+ }
+ if (tp[2] == am_pending_driver) {
+ monitor = 1;
+ } else if (tp[2] == am_pending) {
+ monitor = 2;
+ } else {
+ goto error;
+ }
+ }
+ if (is_not_nil(l)) {
+ goto error;
+ }
+
+ if ((name = pick_list_or_atom(name_term)) == NULL) {
+ goto error;
+ }
+
+#if DDLL_SMP
+ lock_drv_list();
+#endif
+
+ if ((drv = lookup_driver(name)) == NULL) {
+ soft_error_term = am_not_loaded;
+ goto soft_error;
+ }
+
+ if (drv->handle == NULL) {
+ soft_error_term = am_linked_in_driver;
+ goto soft_error;
+ } else if (drv->handle->status == ERL_DE_PERMANENT) {
+ soft_error_term = am_permanent;
+ goto soft_error;
+ }
+ dh = drv->handle;
+ if (dh->flags & ERL_DE_FL_KILL_PORTS) {
+ kill_ports = 1;
+ }
+ if ((pe = find_proc_entry(dh, p, ERL_DE_PROC_LOADED)) == NULL) {
+ if (num_procs(dh, ERL_DE_PROC_LOADED) > 0) {
+ soft_error_term = am_not_loaded_by_this_process;
+ goto soft_error;
+ }
+ } else {
+ remove_proc_entry(dh, pe);
+ if (!(pe->flags & ERL_DE_FL_DEREFERENCED)) {
+ erts_ddll_dereference_driver(dh);
+ }
+ erts_free(ERTS_ALC_T_DDLL_PROCESS, pe);
+ }
+ if (num_procs(dh, ERL_DE_PROC_LOADED) > 0) {
+ ok_term = am_pending_process;
+ --monitor;
+ goto done;
+ }
+ if (dh->status == ERL_DE_RELOAD ||
+ dh->status == ERL_DE_FORCE_RELOAD) {
+ notify_all(dh, drv->name,
+ ERL_DE_PROC_AWAIT_LOAD, am_DOWN, am_load_cancelled);
+ erts_free(ERTS_ALC_T_DDLL_HANDLE,dh->reload_full_path);
+ erts_free(ERTS_ALC_T_DDLL_HANDLE,dh->reload_driver_name);
+ dh->reload_full_path = dh->reload_driver_name = NULL;
+ dh->reload_flags = 0;
+ }
+ if (dh->port_count > 0) {
+ ++kill_ports;
+ }
+ dh->status = ERL_DE_UNLOAD;
+ ok_term = am_pending_driver;
+done:
+ assert_drv_list_locked();
+ if (kill_ports > 1) {
+ int j;
+ /* Avoid closing the driver by referencing it */
+ erts_ddll_reference_driver(dh);
+ dh->status = ERL_DE_FORCE_UNLOAD;
+#if DDLL_SMP
+ unlock_drv_list();
+#endif
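+	/* Walk the port table and kill every port still driven by this
+	   driver: take a port reference and the port lock, then shut the
+	   port down with driver_failure_atom(). */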
+ for (j = 0; j < erts_max_ports; j++) {
+ Port* prt = &erts_port[j];
+#if DDLL_SMP
+ erts_smp_port_state_lock(prt);
+#endif
+ if (!(prt->status & FREE_PORT_FLAGS)
+ && prt->drv_ptr->handle == dh) {
+#if DDLL_SMP
+ erts_smp_atomic_inc(&prt->refc);
+ /* Extremely rare spinlock */
+ while(prt->status & ERTS_PORT_SFLG_INITIALIZING) {
+ erts_smp_port_state_unlock(prt);
+ erts_smp_port_state_lock(prt);
+ }
+ erts_smp_port_state_unlock(prt);
+ erts_smp_mtx_lock(prt->lock);
+ if (!(prt->status & ERTS_PORT_SFLGS_DEAD)) {
+ driver_failure_atom(j, "driver_unloaded");
+ }
+#else
+ driver_failure_atom(j, "driver_unloaded");
+#endif
+ erts_port_release(prt);
+ }
+ else erts_smp_port_state_unlock(prt);
+ }
+#if DDLL_SMP
+ lock_drv_list();
+#endif
+ erts_ddll_dereference_driver(dh);
+ }
+
+#if DDLL_SMP
+ erts_ddll_reference_driver(dh);
+ unlock_drv_list();
+ erts_smp_proc_lock(p, ERTS_PROC_LOCK_MAIN);
+ lock_drv_list();
+ erts_ddll_dereference_driver(dh);
+#endif
+ erts_free(ERTS_ALC_T_DDLL_TMP_BUF, (void *) name);
+ p->flags |= F_USING_DDLL;
+ if (monitor > 0) {
+ Eterm mref = add_monitor(p, dh, ERL_DE_PROC_AWAIT_UNLOAD);
+ hp = HAlloc(p,4);
+ t = TUPLE3(hp, am_ok, ok_term, mref);
+ } else {
+ hp = HAlloc(p,3);
+ t = TUPLE2(hp, am_ok, ok_term);
+ }
+ if (kill_ports > 1) {
+ ERTS_BIF_CHK_EXITED(p); /* May be exited by port killing */
+ }
+#if DDLL_SMP
+ unlock_drv_list();
+#endif
+ BIF_RET(t);
+
+soft_error:
+#if DDLL_SMP
+ unlock_drv_list();
+#endif
+ erts_free(ERTS_ALC_T_DDLL_TMP_BUF, (void *) name);
+ erts_smp_proc_lock(p, ERTS_PROC_LOCK_MAIN);
+ hp = HAlloc(p,3);
+ t = TUPLE2(hp, am_error, soft_error_term);
+ BIF_RET(t);
+
+ error: /* No lock fiddling before going here */
+ assert_drv_list_not_locked();
+ if (name != NULL) {
+ erts_free(ERTS_ALC_T_DDLL_TMP_BUF, (void *) name);
+ }
+ erts_smp_proc_lock(p, ERTS_PROC_LOCK_MAIN);
+ BIF_ERROR(p,BADARG);
+}
+
+
+/*
+ * A shadow of the "real" demonitor BIF
+ */
+BIF_RETTYPE erl_ddll_demonitor_1(Process *p, Eterm ref)
+{
+ if (is_not_internal_ref(ref)) {
+ BIF_ERROR(p, BADARG);
+ }
+ if (p->flags & F_USING_DDLL) {
+ erts_ddll_remove_monitor(p, ref, ERTS_PROC_LOCK_MAIN);
+ }
+ BIF_RET(am_true);
+}
+
+/*
+ * A shadow of the "real" monitor BIF
+ */
+BIF_RETTYPE erl_ddll_monitor_2(Process *p, Eterm dr, Eterm what)
+{
+ if (dr != am_driver) {
+ BIF_ERROR(p,BADARG);
+ }
+ return erts_ddll_monitor_driver(p, what, ERTS_PROC_LOCK_MAIN);
+}
+
+/*
+ * Return list of loaded drivers {ok,[string()]}
+ */
+Eterm erl_ddll_loaded_drivers_0(Process *p)
+{
+ Eterm *hp;
+ int need = 3;
+ Eterm res = NIL;
+ erts_driver_t *drv;
+#if DDLL_SMP
+ lock_drv_list();
+#endif
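+    /* Heap need: each character of a driver name takes one cons cell
+       (2 words) and each name needs one more cons cell to link it into
+       the result list; the initial 3 words hold the {ok, List} tuple. */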
+ for (drv = driver_list; drv; drv = drv->next) {
+ need += sys_strlen(drv->name)*2+2;
+ }
+ hp = HAlloc(p,need);
+ for (drv = driver_list; drv; drv = drv->next) {
+ Eterm l;
+ l = buf_to_intlist(&hp, drv->name, sys_strlen(drv->name), NIL);
+ res = CONS(hp,l,res);
+ hp += 2;
+ }
+ res = TUPLE2(hp,am_ok,res);
+ /* hp += 3 */
+#if DDLL_SMP
+ unlock_drv_list();
+#endif
+ BIF_RET(res);
+}
+
+/*
+ * More detailed info about loaded drivers:
+ * item is processes, driver_options, port_count, linked_in_driver,
+ * permanent, awaiting_load, awaiting_unload
+ */
+Eterm erl_ddll_info_2(Process *p, Eterm name_term, Eterm item)
+{
+ char *name = NULL;
+ Eterm res = NIL;
+ erts_driver_t *drv;
+ ProcEntryInfo *pei = NULL;
+ int num_pei;
+ Eterm *hp;
+ int i;
+ Uint filter;
+#if DDLL_SMP
+ int have_lock = 0;
+#endif
+
+ if ((name = pick_list_or_atom(name_term)) == NULL) {
+ goto error;
+ }
+
+ if (!is_atom(item)) {
+ goto error;
+ }
+
+#if DDLL_SMP
+ lock_drv_list();
+ have_lock = 1;
+#endif
+ if ((drv = lookup_driver(name)) == NULL) {
+ goto error;
+ }
+
+ switch (item) {
+ case am_processes:
+ filter = ERL_DE_PROC_LOADED;
+ break;
+ case am_driver_options:
+ if (drv->handle == NULL) {
+ res = am_linked_in_driver;
+ } else {
+ Uint start_flags = drv->handle->flags & ERL_FL_CONSISTENT_MASK;
+ /* Cheating, only one flag for now... */
+ if (start_flags & ERL_DE_FL_KILL_PORTS) {
+ Eterm *myhp;
+ myhp = HAlloc(p,2);
+ res = CONS(myhp,am_kill_ports,NIL);
+ } else {
+ res = NIL;
+ }
+ }
+ goto done;
+ case am_port_count:
+ if (drv->handle == NULL) {
+ res = am_linked_in_driver;
+ } else if (drv->handle->status == ERL_DE_PERMANENT) {
+ res = am_permanent;
+ } else {
+ res = make_small(drv->handle->port_count);
+ }
+ goto done;
+ case am_linked_in_driver:
+ if (drv->handle == NULL){
+ res = am_true;
+ } else {
+ res = am_false;
+ }
+ goto done;
+ case am_permanent:
+ if (drv->handle != NULL && drv->handle->status == ERL_DE_PERMANENT) {
+ res = am_true;
+ } else {
+ res = am_false;
+ }
+ goto done;
+ case am_awaiting_load:
+ filter = ERL_DE_PROC_AWAIT_LOAD;
+ break;
+ case am_awaiting_unload:
+ filter = ERL_DE_PROC_AWAIT_UNLOAD;
+ break;
+ default:
+ goto error;
+ }
+
+ if (drv->handle == NULL) {
+ res = am_linked_in_driver;
+ goto done;
+ } else if (drv->handle->status == ERL_DE_PERMANENT) {
+ res = am_permanent;
+ goto done;
+ }
+ num_pei = build_proc_info(drv->handle, &pei, filter);
+ if (!num_pei) {
+ goto done;
+ }
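+    /* Each entry needs one cons cell (2 words) plus one
+       {Pid, Count} tuple (3 words). */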
+ hp = HAlloc(p,num_pei * (2+3));
+ for (i = 0; i < num_pei; ++ i) {
+ Eterm tpl = TUPLE2(hp,pei[i].pid,make_small(pei[i].count));
+ hp += 3;
+ res = CONS(hp,tpl,res);
+ hp += 2;
+ }
+ done:
+#if DDLL_SMP
+ unlock_drv_list();
+#endif
+ if (pei)
+ erts_free(ERTS_ALC_T_DDLL_TMP_BUF, pei);
+ erts_free(ERTS_ALC_T_DDLL_TMP_BUF, (void *) name);
+ BIF_RET(res);
+ error:
+ if (name != NULL) {
+ erts_free(ERTS_ALC_T_DDLL_TMP_BUF, (void *) name);
+ }
+#if DDLL_SMP
+ if (have_lock) {
+ unlock_drv_list();
+ }
+#endif
+ BIF_ERROR(p,BADARG);
+}
+
+/*
+ * Backend for erl_ddll:format_error/1; handles all "soft" errors returned
+ * by the builtins above, possibly by calling the system-specific error handler.
+ */
+Eterm erl_ddll_format_error_int_1(Process *p, Eterm code_term)
+{
+ char *errstring = NULL;
+ int errint;
+ int len;
+ Eterm ret = NIL;
+ Eterm *hp;
+
+    /* These errors can only appear in the Erlang interface, not in the
+       interface provided to drivers... */
+ switch (code_term) {
+ case am_inconsistent:
+ errstring = "Driver name and/or driver options are inconsistent with "
+	    "the currently loaded driver";
+ break;
+ case am_linked_in_driver:
+ errstring = "Driver is statically linked and "
+ "cannot be loaded/unloaded";
+ break;
+ case am_permanent:
+	errstring = "DDLL driver is permanent and cannot be unloaded/loaded";
+ break;
+ case am_not_loaded:
+ errstring = "DDLL driver is not loaded";
+ break;
+ case am_not_loaded_by_this_process:
+ errstring = "DDLL driver was not loaded by this process";
+ break;
+ case am_not_pending:
+ errstring = "DDLL load not pending for this driver name";
+ break;
+ case am_already_loaded:
+ errstring = "DDLL driver is already loaded successfully";
+ break;
+ case am_pending_reload:
+ errstring = "Driver reloading is already pending";
+ break;
+ case am_pending_process:
+	errstring = "Driver is loaded by other processes when attempting "
+	    "option {reload, pending_driver}";
+ break;
+ default:
+ /* A "real" error, we translate the atom to a code and translate the code
+ to a string in the same manner as in the interface provided to drivers... */
+ if (errdesc_to_code(code_term,&errint) != 0) {
+ goto error;
+ }
+#if DDLL_SMP
+ lock_drv_list();
+#endif
+ errstring = erts_ddll_error(errint);
+#if DDLL_SMP
+ unlock_drv_list();
+#endif
+ break;
+ }
+ if (errstring == NULL) {
+ goto error;
+ }
+ len = sys_strlen(errstring);
+ hp = HAlloc(p, 2 * len);
+ ret = buf_to_intlist(&hp, errstring, len, NIL);
+ BIF_RET(ret);
+ error:
+ BIF_ERROR(p,BADARG);
+}
+
+void erts_ddll_init(void)
+{
+ erl_sys_ddll_init();
+}
+
+/* Return value as a bif, called by erlang:monitor */
+Eterm erts_ddll_monitor_driver(Process *p,
+ Eterm description,
+ ErtsProcLocks plocks)
+{
+ Eterm *tp;
+ Eterm ret;
+ char *name;
+
+ if (is_not_tuple(description)) {
+ BIF_ERROR(p,BADARG);
+ }
+ tp = tuple_val(description);
+ if (*tp != make_arityval(2)) {
+ BIF_ERROR(p,BADARG);
+ }
+ if ((name = pick_list_or_atom(tp[1])) == NULL) {
+ BIF_ERROR(p,BADARG);
+ }
+ switch (tp[2]) {
+ case am_loaded:
+ ERTS_BIF_PREP_RET(ret, notify_when_loaded(p,tp[1],name,plocks));
+ break;
+ case am_unloaded:
+ ERTS_BIF_PREP_RET(ret, notify_when_unloaded(p,tp[1],name,plocks,
+ ERL_DE_PROC_AWAIT_UNLOAD));
+ break;
+ case am_unloaded_only:
+ ERTS_BIF_PREP_RET(ret,
+ notify_when_unloaded(p,tp[1],name,plocks,
+ ERL_DE_PROC_AWAIT_UNLOAD_ONLY));
+ break;
+ default:
+ ERTS_BIF_PREP_ERROR(ret,p,BADARG);
+ break;
+ }
+
+ erts_free(ERTS_ALC_T_DDLL_TMP_BUF, (void *) name);
+ return ret;
+}
+
+void erts_ddll_remove_monitor(Process *p, Eterm ref, ErtsProcLocks plocks)
+{
+ erts_driver_t *drv;
+ erts_smp_proc_unlock(p, plocks);
+ lock_drv_list();
+ drv = driver_list;
+ while (drv != NULL) {
+ if (drv->handle != NULL && drv->handle->status != ERL_DE_PERMANENT) {
+ DE_ProcEntry **pe = &(drv->handle->procs);
+ while ((*pe) != NULL) {
+ if ((*pe)->proc == p &&
+ ((*pe)->awaiting_status == ERL_DE_PROC_AWAIT_LOAD ||
+ (*pe)->awaiting_status == ERL_DE_PROC_AWAIT_UNLOAD ||
+ (*pe)->awaiting_status ==
+ ERL_DE_PROC_AWAIT_UNLOAD_ONLY) &&
+ eq(make_internal_ref(&((*pe)->heap)),ref)) {
+ DE_ProcEntry *r = *pe;
+ *pe = r->next;
+ erts_free(ERTS_ALC_T_DDLL_PROCESS, (void *) r);
+ goto done;
+ }
+ pe = &((*pe)->next);
+ }
+ }
+ drv = drv->next;
+ }
+ done:
+ unlock_drv_list();
+ erts_smp_proc_lock(p, plocks);
+}
+
+/*
+ * Called from erl_process.c.
+ */
+void erts_ddll_proc_dead(Process *p, ErtsProcLocks plocks)
+{
+ erts_driver_t *drv;
+ erts_smp_proc_unlock(p, plocks);
+ lock_drv_list();
+ drv = driver_list;
+ while (drv != NULL) {
+ if (drv->handle != NULL && drv->handle->status != ERL_DE_PERMANENT) {
+ DE_ProcEntry **pe = &(drv->handle->procs);
+ int kill_ports = (drv->handle->flags & ERL_DE_FL_KILL_PORTS);
+ int left = 0;
+ while ((*pe) != NULL) {
+ if ((*pe)->proc == p) {
+ DE_ProcEntry *r = *pe;
+ *pe = r->next;
+ if (!(r->flags & ERL_DE_FL_DEREFERENCED) &&
+ r->awaiting_status == ERL_DE_PROC_LOADED) {
+ erts_ddll_dereference_driver(drv->handle);
+ }
+ erts_free(ERTS_ALC_T_DDLL_PROCESS, (void *) r);
+ } else {
+ if ((*pe)->awaiting_status == ERL_DE_PROC_LOADED) {
+ ++left;
+ }
+ pe = &((*pe)->next);
+ }
+ }
+ if (!left) {
+ DE_Handle *dh = drv->handle;
+ if (dh->status == ERL_DE_RELOAD ||
+ dh->status == ERL_DE_FORCE_RELOAD) {
+ notify_all(dh, drv->name,
+ ERL_DE_PROC_AWAIT_LOAD, am_DOWN, am_load_cancelled);
+ erts_free(ERTS_ALC_T_DDLL_HANDLE,dh->reload_full_path);
+ erts_free(ERTS_ALC_T_DDLL_HANDLE,dh->reload_driver_name);
+ dh->reload_full_path = dh->reload_driver_name = NULL;
+ dh->reload_flags = 0;
+ }
+ dh->status = ERL_DE_UNLOAD;
+ }
+ if (!left && drv->handle->port_count > 0) {
+ if (kill_ports) {
+ int j;
+ DE_Handle *dh = drv->handle;
+ erts_ddll_reference_driver(dh);
+ dh->status = ERL_DE_FORCE_UNLOAD;
+#if DDLL_SMP
+ unlock_drv_list();
+#endif
+ for (j = 0; j < erts_max_ports; j++) {
+ Port* prt = &erts_port[j];
+#if DDLL_SMP
+ erts_smp_port_state_lock(prt);
+#endif
+ if (!(prt->status & FREE_PORT_FLAGS) &&
+ prt->drv_ptr->handle == dh) {
+#if DDLL_SMP
+ erts_smp_atomic_inc(&prt->refc);
+ while(prt->status & ERTS_PORT_SFLG_INITIALIZING) {
+ erts_smp_port_state_unlock(prt);
+ erts_smp_port_state_lock(prt);
+ }
+ erts_smp_port_state_unlock(prt);
+ erts_smp_mtx_lock(prt->lock);
+ if (!(prt->status & ERTS_PORT_SFLGS_DEAD)) {
+ driver_failure_atom(j, "driver_unloaded");
+ }
+#else
+ driver_failure_atom(j, "driver_unloaded");
+#endif
+ erts_port_release(prt);
+ }
+ else erts_smp_port_state_unlock(prt);
+ }
+#if DDLL_SMP
+ lock_drv_list(); /* Needed for future list operations */
+#endif
+ drv = drv->next; /* before allowing destruction */
+ erts_ddll_dereference_driver(dh);
+ } else {
+ drv = drv->next;
+ }
+ } else {
+ drv = drv->next;
+ }
+ } else {
+ drv = drv->next;
+ }
+ }
+ unlock_drv_list();
+ erts_smp_proc_lock(p, plocks);
+}
+
+void erts_ddll_lock_driver(DE_Handle *dh, char *name)
+{
+ DE_ProcEntry *p,*q;
+ assert_drv_list_locked();
+ notify_all(dh, name,
+ ERL_DE_PROC_AWAIT_LOAD, am_UP, am_permanent);
+ notify_all(dh, name,
+ ERL_DE_PROC_AWAIT_UNLOAD, am_UP, am_permanent);
+ notify_all(dh, name,
+ ERL_DE_PROC_AWAIT_UNLOAD_ONLY, am_UP, am_permanent);
+
+ p = dh->procs;
+ while(p != NULL) {
+ q = p;
+ p = p->next;
+ erts_free(ERTS_ALC_T_DDLL_PROCESS, (void *) q);
+ }
+ dh->procs = NULL;
+ erts_ddll_reference_driver(dh);
+ dh->status = ERL_DE_PERMANENT;
+}
+
+
+void erts_ddll_increment_port_count(DE_Handle *dh)
+{
+ assert_drv_list_locked();
+ dh->port_count++;
+}
+
+void erts_ddll_decrement_port_count(DE_Handle *dh)
+{
+ assert_drv_list_locked();
+ ASSERT(dh->port_count > 0);
+ dh->port_count--;
+}
+
+static void first_ddll_reference(DE_Handle *dh)
+{
+ assert_drv_list_locked();
+ erts_refc_init(&(dh->refc),1);
+}
+
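+/* Take a reference on a driver handle. If the count was zero, a
+   deletion (ddll_no_more_references) has already been scheduled; an
+   extra reference is taken on its behalf, so that the scheduled
+   operation, which decrements the count once, leaves the caller's
+   reference intact. */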
+void erts_ddll_reference_driver(DE_Handle *dh)
+{
+ assert_drv_list_locked();
+ if (erts_refc_inctest(&(dh->refc),1) == 1) {
+ erts_refc_inc(&(dh->refc),2); /* add a reference for the scheduled operation */
+ }
+}
+
+void erts_ddll_reference_referenced_driver(DE_Handle *dh)
+{
+ erts_refc_inc(&(dh->refc),2);
+}
+
+void erts_ddll_dereference_driver(DE_Handle *dh)
+{
+ if (erts_refc_dectest(&(dh->refc),0) == 0) {
+ /* No lock here, but if the driver is referenced again,
+ the scheduled deletion is added as a reference too, see above */
+ erts_schedule_misc_op(ddll_no_more_references, (void *) dh);
+ }
+}
+
+static void dereference_all_processes(DE_Handle *dh)
+{
+ DE_ProcEntry *p;
+ assert_drv_list_locked();
+ for(p = dh->procs;p != NULL; p = p->next) {
+ if (p->awaiting_status == ERL_DE_PROC_LOADED) {
+ ASSERT(!(p->flags & ERL_DE_FL_DEREFERENCED));
+ erts_ddll_dereference_driver(dh);
+ p->flags |= ERL_DE_FL_DEREFERENCED;
+ }
+ }
+}
+
+static void restore_process_references(DE_Handle *dh)
+{
+ DE_ProcEntry *p;
+ assert_drv_list_locked();
+ ASSERT(erts_refc_read(&(dh->refc),0) == 0);
+ for(p = dh->procs;p != NULL; p = p->next) {
+ if (p->awaiting_status == ERL_DE_PROC_LOADED) {
+ ASSERT(p->flags & ERL_DE_FL_DEREFERENCED);
+ erts_refc_inc(&(dh->refc),1);
+ p->flags &= ~ERL_DE_FL_DEREFERENCED;
+ }
+ }
+}
+
+
+int erts_ddll_driver_ok(DE_Handle *dh)
+{
+ assert_drv_list_locked();
+ return ((dh == NULL) || (dh->status != ERL_DE_FORCE_UNLOAD &&
+ dh->status != ERL_DE_FORCE_RELOAD));
+}
+
+
+static void ddll_no_more_references(void *vdh)
+{
+ DE_Handle *dh = (DE_Handle *) vdh;
+ int x;
+
+ lock_drv_list();
+
+ x = erts_refc_read(&(dh->refc),0);
+ if (x > 0) {
+ x = erts_refc_dectest(&(dh->refc),0); /* delete the reference added for me */
+ }
+
+
+ if (x == 0) {
+ DE_ProcEntry **p = &(dh->procs);
+ Eterm save_driver_name = am_undefined;
+ ASSERT(dh->status != ERL_DE_OK);
+ do_unload_driver_entry(dh,&save_driver_name);
+ while (*p != NULL) {
+ DE_ProcEntry *q;
+ if ((*p)->awaiting_status == ERL_DE_PROC_AWAIT_UNLOAD ||
+ (*p)->awaiting_status == ERL_DE_PROC_AWAIT_UNLOAD_ONLY) {
+ notify_proc((*p)->proc,
+ make_internal_ref(&((*p)->heap)),
+ save_driver_name,am_DOWN,am_unloaded, 0);
+ q = *p;
+ *p = q->next;
+ erts_free(ERTS_ALC_T_DDLL_PROCESS, (void *) q);
+ } else {
+ ASSERT(dh->status == ERL_DE_RELOAD ||
+ dh->status == ERL_DE_FORCE_RELOAD);
+ p = &((*p)->next);
+ }
+ }
+
+ if (dh->status == ERL_DE_UNLOAD || dh->status == ERL_DE_FORCE_UNLOAD) {
+ ASSERT(dh->full_path != NULL);
+ erts_free(ERTS_ALC_T_DDLL_HANDLE, (void *) dh->full_path);
+ erts_free(ERTS_ALC_T_DDLL_HANDLE, (void *) dh);
+ } else { /* ERL_DE_RELOAD || ERL_DE_FORCE_RELOAD */
+ int reload_res =
+ reload_driver_entry(dh);
+ p = &(dh->procs);
+ while (*p != NULL) {
+ DE_ProcEntry *q;
+ if ((*p)->awaiting_status == ERL_DE_PROC_AWAIT_LOAD) {
+ if (reload_res == 0) {
+ notify_proc((*p)->proc,
+ make_internal_ref(&((*p)->heap)),
+ save_driver_name, am_UP, am_loaded, 0);
+ } else {
+ notify_proc((*p)->proc,
+ make_internal_ref(&((*p)->heap)),
+ save_driver_name, am_DOWN, am_load_failure, reload_res);
+ }
+ q = *p;
+ *p = q->next;
+ erts_free(ERTS_ALC_T_DDLL_PROCESS, (void *) q);
+ } else {
+ if (reload_res != 0) {
+ DE_ProcEntry *q = *p;
+ *p = q->next;
+ erts_free(ERTS_ALC_T_DDLL_PROCESS, (void *) q);
+ } else {
+ p = &((*p)->next);
+ }
+ }
+ }
+ if (reload_res != 0) {
+ ASSERT(dh->full_path == NULL);
+ erts_free(ERTS_ALC_T_DDLL_HANDLE, (void *) dh);
+ }
+ }
+ }
+ unlock_drv_list();
+}
+
+char *erts_ddll_error(int code) {
+ switch (code) {
+ case ERL_DE_NO_ERROR:
+ return "No error";
+ case ERL_DE_LOAD_ERROR_NO_INIT:
+ return "No driver init in dynamic library";
+ case ERL_DE_LOAD_ERROR_FAILED_INIT:
+ return "Driver init failed";
+ case ERL_DE_LOAD_ERROR_BAD_NAME:
+ return "Bad driver name";
+ case ERL_DE_LOAD_ERROR_NAME_TO_LONG:
+	return "Driver name too long";
+ case ERL_DE_LOAD_ERROR_INCORRECT_VERSION:
+ return "Driver compiled with incorrect version of erl_driver.h";
+ case ERL_DE_ERROR_NO_DDLL_FUNCTIONALITY:
+ return "DDLL functionality not available on this platform";
+ case ERL_DE_ERROR_UNSPECIFIED:
+ return "Unspecified dynamic library error";
+ case ERL_DE_LOOKUP_ERROR_NOT_FOUND:
+ return "Symbol not found in dynamic library";
+ default:
+ return erts_sys_ddll_error(code);
+ }
+}
+
+/*
+ * Utilities
+ */
+static Eterm notify_when_loaded(Process *p, Eterm name_term, char *name, ErtsProcLocks plocks)
+{
+ Eterm r = NIL;
+ Eterm immediate_tag = NIL;
+ Eterm immediate_type = NIL;
+ erts_driver_t *drv;
+
+ ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN & plocks);
+#if DDLL_SMP
+ lock_drv_list();
+#endif
+ if ((drv = lookup_driver(name)) == NULL) {
+ immediate_tag = am_unloaded;
+ immediate_type = am_DOWN;
+ goto immediate;
+ }
+ if (drv->handle == NULL || drv->handle->status == ERL_DE_PERMANENT) {
+ immediate_tag = am_permanent;
+ immediate_type = am_UP;
+ goto immediate;
+ }
+
+ switch (drv->handle->status) {
+ case ERL_DE_OK:
+ immediate_tag = am_loaded;
+ immediate_type = am_UP;
+ goto immediate;
+ case ERL_DE_UNLOAD:
+ case ERL_DE_FORCE_UNLOAD:
+ immediate_tag = am_load_cancelled;
+ immediate_type = am_DOWN;
+ goto immediate;
+ case ERL_DE_RELOAD:
+ case ERL_DE_FORCE_RELOAD:
+ break;
+ default:
+ erl_exit(1,"Internal error, unknown state %u in dynamic driver.", drv->handle->status);
+ }
+ p->flags |= F_USING_DDLL;
+ r = add_monitor(p, drv->handle, ERL_DE_PROC_AWAIT_LOAD);
+#if DDLL_SMP
+ unlock_drv_list();
+#endif
+ BIF_RET(r);
+ immediate:
+ r = erts_make_ref(p);
+#if DDLL_SMP
+ erts_smp_proc_unlock(p, plocks);
+#endif
+ notify_proc(p, r, name_term, immediate_type, immediate_tag, 0);
+#if DDLL_SMP
+ unlock_drv_list();
+ erts_smp_proc_lock(p, plocks);
+#endif
+ BIF_RET(r);
+}
+
+static Eterm notify_when_unloaded(Process *p, Eterm name_term, char *name, ErtsProcLocks plocks, Uint flag)
+{
+ Eterm r = NIL;
+ Eterm immediate_tag = NIL;
+ Eterm immediate_type = NIL;
+ erts_driver_t *drv;
+
+ ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN & plocks);
+#if DDLL_SMP
+ lock_drv_list();
+#endif
+ if ((drv = lookup_driver(name)) == NULL) {
+ immediate_tag = am_unloaded;
+ immediate_type = am_DOWN;
+ goto immediate;
+ }
+ if (drv->handle == NULL || drv->handle->status == ERL_DE_PERMANENT) {
+ immediate_tag = am_permanent;
+ immediate_type = am_UP;
+ goto immediate;
+ }
+
+ p->flags |= F_USING_DDLL;
+ r = add_monitor(p, drv->handle, flag);
+#if DDLL_SMP
+ unlock_drv_list();
+#endif
+ BIF_RET(r);
+ immediate:
+ r = erts_make_ref(p);
+#if DDLL_SMP
+ erts_smp_proc_unlock(p, plocks);
+#endif
+ notify_proc(p, r, name_term, immediate_type, immediate_tag, 0);
+#if DDLL_SMP
+ unlock_drv_list();
+ erts_smp_proc_lock(p, plocks);
+#endif
+ BIF_RET(r);
+}
+
+
+static int is_last_user(DE_Handle *dh, Process *proc) {
+ DE_ProcEntry *p = dh->procs;
+ int found = 0;
+
+ assert_drv_list_locked();
+
+ while (p != NULL) {
+ if (p->proc == proc && p->awaiting_status == ERL_DE_PROC_LOADED) {
+ if (found == 0) {
+ found = 1;
+ } else {
+ return 0;
+ }
+ } else if (p->awaiting_status == ERL_DE_PROC_LOADED) {
+ return 0;
+ }
+ p = p->next;
+ }
+ return found;
+}
+
+static DE_ProcEntry *find_proc_entry(DE_Handle *dh, Process *proc, Uint status)
+{
+ DE_ProcEntry *p = dh->procs;
+
+ assert_drv_list_locked();
+
+ while (p != NULL) {
+ if (p->proc == proc && p->awaiting_status == status) {
+ return p;
+ }
+ p = p->next;
+ }
+ return NULL;
+}
+
+static void remove_proc_entry(DE_Handle *dh, DE_ProcEntry *pe)
+{
+ DE_ProcEntry **p = &(dh->procs);
+
+ while (*p != NULL && *p != pe) {
+ p = &((*p)->next);
+ }
+ if ((*p) != NULL) {
+ *p = (*p)->next;
+ }
+}
+
+static int num_procs(DE_Handle *dh, Uint status) {
+ DE_ProcEntry *p = dh->procs;
+ int i = 0;
+
+ assert_drv_list_locked();
+
+ while (p != NULL) {
+ if (p->awaiting_status == status) {
+ ++i;
+ }
+ p = p->next;
+ }
+ return i;
+}
+
+/*
+static int num_entries(DE_Handle *dh, Process *proc, Uint status) {
+ DE_ProcEntry *p = dh->procs;
+ int i = 0;
+
+ assert_drv_list_locked();
+ while (p != NULL) {
+ if (p->awaiting_status == status && p->proc == proc) {
+ ++i;
+ }
+ p = p->next;
+ }
+ return i;
+}
+*/
+static void add_proc_loaded(DE_Handle *dh, Process *proc)
+{
+ DE_ProcEntry *p;
+ assert_drv_list_locked();
+ p = erts_alloc(ERTS_ALC_T_DDLL_PROCESS, sizeof(DE_ProcEntry));
+ p->proc = proc;
+ p->flags = 0;
+ p->awaiting_status = ERL_DE_PROC_LOADED;
+ p->next = dh->procs;
+ dh->procs = p;
+}
+
+static void add_proc_loaded_deref(DE_Handle *dh, Process *proc)
+{
+ DE_ProcEntry *p;
+ assert_drv_list_locked();
+ p = erts_alloc(ERTS_ALC_T_DDLL_PROCESS, sizeof(DE_ProcEntry));
+ p->proc = proc;
+ p->awaiting_status = ERL_DE_PROC_LOADED;
+ p->flags = ERL_DE_FL_DEREFERENCED;
+ p->next = dh->procs;
+ dh->procs = p;
+}
+
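+/* Copy the heap part of an internal ref to hp and return a tagged
+   internal ref pointing at the copy. */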
+static Eterm copy_ref(Eterm ref, Eterm *hp)
+{
+ RefThing *ptr = ref_thing_ptr(ref);
+ memcpy(hp, ptr, sizeof(RefThing));
+ return (make_internal_ref(hp));
+}
+
+static void add_proc_waiting(DE_Handle *dh, Process *proc,
+ Uint status, Eterm ref)
+{
+ DE_ProcEntry *p;
+ assert_drv_list_locked();
+ p = erts_alloc(ERTS_ALC_T_DDLL_PROCESS, sizeof(DE_ProcEntry));
+ p->proc = proc;
+ p->flags = 0;
+ p->awaiting_status = status;
+ copy_ref(ref, p->heap);
+ p->next = dh->procs;
+ dh->procs = p;
+}
+
+static Eterm add_monitor(Process *p, DE_Handle *dh, Uint status)
+{
+ Eterm r;
+
+ assert_drv_list_locked();
+ r = erts_make_ref(p);
+ add_proc_waiting(dh, p, status, r);
+ return r;
+}
+
+
+static void set_driver_reloading(DE_Handle *dh, Process *proc, char *path, char *name, Uint flags)
+{
+ DE_ProcEntry *p;
+
+ assert_drv_list_locked();
+ p = erts_alloc(ERTS_ALC_T_DDLL_PROCESS, sizeof(DE_ProcEntry));
+ p->proc = proc;
+ p->awaiting_status = ERL_DE_OK;
+ p->next = dh->procs;
+ p->flags = ERL_DE_FL_DEREFERENCED;
+ dh->procs = p;
+ dh->status = ERL_DE_RELOAD;
+ dh->reload_full_path = erts_alloc(ERTS_ALC_T_DDLL_HANDLE, sys_strlen(path) + 1);
+ strcpy(dh->reload_full_path,path);
+ dh->reload_driver_name = erts_alloc(ERTS_ALC_T_DDLL_HANDLE, sys_strlen(name) + 1);
+ strcpy(dh->reload_driver_name,name);
+ dh->reload_flags = flags;
+}
+
+static int do_load_driver_entry(DE_Handle *dh, char *path, char *name)
+{
+ void *init_handle;
+ int res;
+ ErlDrvEntry *dp;
+
+ assert_drv_list_locked();
+
+ if ((res = erts_sys_ddll_open(path, &(dh->handle))) != ERL_DE_NO_ERROR) {
+ return res;
+ }
+
+ if ((res = erts_sys_ddll_load_driver_init(dh->handle,
+ &init_handle)) != ERL_DE_NO_ERROR) {
+ erts_sys_ddll_close(dh->handle);
+ return ERL_DE_LOAD_ERROR_NO_INIT;
+ }
+
+ dp = erts_sys_ddll_call_init(init_handle);
+ if (dp == NULL) {
+ erts_sys_ddll_close(dh->handle);
+ return ERL_DE_LOAD_ERROR_FAILED_INIT;
+ }
+
+ switch (dp->extended_marker) {
+ case 0:
+ /*
+ * This may be an old driver that has been recompiled. If so,
+ * at least the fields that existed in extended driver version
+	 * 1.0 should be zero. If not, it is a bad driver. We cannot
+ * be completely certain that this is a valid driver but this is
+ * the best we can do with old drivers...
+ */
+ if (dp->major_version != 0
+ || dp->minor_version != 0
+ || dp->driver_flags != 0
+ || dp->handle2 != NULL
+ || dp->process_exit != NULL) {
+	    /* Old driver; needs to be recompiled... */
+	    erts_sys_ddll_close(dh->handle); /* do not leak the dl handle */
+	    return ERL_DE_LOAD_ERROR_INCORRECT_VERSION;
+ }
+ break;
+ case ERL_DRV_EXTENDED_MARKER:
+ if (ERL_DRV_EXTENDED_MAJOR_VERSION != dp->major_version
+ || ERL_DRV_EXTENDED_MINOR_VERSION < dp->minor_version) {
+	    /* Incompatible driver version */
+	    erts_sys_ddll_close(dh->handle); /* do not leak the dl handle */
+	    return ERL_DE_LOAD_ERROR_INCORRECT_VERSION;
+ }
+ break;
+ default:
+	/* Old driver; needs to be recompiled... */
+	erts_sys_ddll_close(dh->handle); /* do not leak the dl handle */
+	return ERL_DE_LOAD_ERROR_INCORRECT_VERSION;
+ }
+
+ if (strcmp(name, dp->driver_name) != 0) {
+ erts_sys_ddll_close(dh->handle);
+ return ERL_DE_LOAD_ERROR_BAD_NAME;
+ }
+ erts_smp_atomic_init(&(dh->refc), (long) 0);
+ dh->port_count = 0;
+ dh->full_path = erts_alloc(ERTS_ALC_T_DDLL_HANDLE, sys_strlen(path) + 1);
+ sys_strcpy(dh->full_path, path);
+ dh->flags = 0;
+ dh->status = ERL_DE_OK;
+
+ if (erts_add_driver_entry(dp, dh, 1) != 0 /* io.c */) {
+ /*
+ * The init in the driver struct did not return 0
+ */
+ erts_free(ERTS_ALC_T_DDLL_HANDLE, dh->full_path);
+ dh->full_path = NULL;
+ erts_sys_ddll_close(dh->handle);
+ return ERL_DE_LOAD_ERROR_FAILED_INIT;
+ }
+
+ return ERL_DE_NO_ERROR;
+}
+
+static int do_unload_driver_entry(DE_Handle *dh, Eterm *save_name)
+{
+ erts_driver_t *q, *p = driver_list;
+
+ assert_drv_list_locked();
+
+ while (p != NULL) {
+ if (p->handle == dh) {
+
+ q = p;
+ if (p->prev == NULL) {
+ driver_list = p->next;
+ } else {
+ p->prev->next = p->next;
+ }
+ if (p->next != NULL) {
+ p->next->prev = p->prev;
+ }
+
+ if (save_name != NULL) {
+ *save_name = mkatom(q->name);
+ }
+	    /* XXX:PaN Future locking problems? Don't dare to let go of the driver_list lock here! */
+ if (q->finish) {
+ int fpe_was_unmasked = erts_block_fpe();
+ (*(q->finish))();
+ erts_unblock_fpe(fpe_was_unmasked);
+ }
+ erts_sys_ddll_close(dh->handle);
+ erts_destroy_driver(q);
+ return 1;
+ }
+ p = p->next;
+ }
+ return 0;
+}
+
+static int load_driver_entry(DE_Handle **dhp, char *path, char *name)
+{
+ int res;
+ DE_Handle *dh = erts_alloc(ERTS_ALC_T_DDLL_HANDLE, sizeof(DE_Handle));
+
+ assert_drv_list_locked();
+
+ dh->handle = NULL;
+ dh->procs = NULL;
+ dh->port_count = 0;
+ erts_refc_init(&(dh->refc), (long) 0);
+ dh->status = -1;
+ dh->reload_full_path = NULL;
+ dh->reload_driver_name = NULL;
+ dh->reload_flags = 0;
+ dh->full_path = NULL;
+ dh->flags = 0;
+
+ if ((res = do_load_driver_entry(dh, path, name)) != ERL_DE_NO_ERROR) {
+ erts_free(ERTS_ALC_T_DDLL_HANDLE, (void *) dh);
+ dh = NULL;
+ }
+ *dhp = dh;
+ return res;
+}
+
+#if 0
+static void unload_driver_entry(DE_Handle *dh)
+{
+ do_unload_driver_entry(dh, NULL);
+ if (dh->full_path != NULL) {
+ erts_free(ERTS_ALC_T_DDLL_HANDLE, (void *) dh->full_path);
+ }
+ erts_free(ERTS_ALC_T_DDLL_HANDLE, (void *) dh);
+}
+#endif
+
+static int reload_driver_entry(DE_Handle *dh)
+{
+ char *path = dh->reload_full_path;
+ char *name = dh->reload_driver_name;
+ int loadres;
+ Uint flags = dh->reload_flags;
+
+ assert_drv_list_locked();
+
+ dh->reload_full_path = NULL;
+ dh->reload_driver_name = NULL;
+
+ ASSERT(erts_refc_read(&(dh->refc),0) == 0);
+ ASSERT(dh->full_path != NULL);
+ erts_free(ERTS_ALC_T_DDLL_HANDLE, (void *) dh->full_path);
+ dh->full_path = NULL;
+
+ loadres = do_load_driver_entry(dh, path, name);
+ erts_free(ERTS_ALC_T_DDLL_HANDLE, (void *) path);
+ erts_free(ERTS_ALC_T_DDLL_HANDLE, (void *) name);
+ if (loadres == ERL_DE_NO_ERROR) {
+ dh->status = ERL_DE_OK;
+ dh->flags = flags;
+ }
+ restore_process_references(dh);
+ return loadres;
+}
+
+/*
+ * Notification messages sent by notify_proc() have the form
+ *   {Type, Ref, driver, DriverName, Tag}
+ * or, when an error code is supplied,
+ *   {Type, Ref, driver, DriverName, {Tag, ErrorDesc}}
+ * where Type = 'UP' | 'DOWN', Ref = ref(), DriverName = atom() and
+ * ErrorDesc = atom() | {open_error, int()}.
+ */
+
+static void notify_proc(Process *proc, Eterm ref, Eterm driver_name, Eterm type,
+ Eterm tag, int errcode)
+{
+ Eterm mess;
+ Eterm r;
+ Eterm *hp;
+ ErlHeapFragment *bp;
+ ErlOffHeap *ohp;
+ ErtsProcLocks rp_locks = 0;
+ ERTS_SMP_CHK_NO_PROC_LOCKS;
+
+ assert_drv_list_locked();
+ if (errcode != 0) {
+ int need = load_error_need(errcode);
+ Eterm e;
+ hp = erts_alloc_message_heap(6 /* tuple */ + 3 /* Error tuple */ +
+ REF_THING_SIZE + need, &bp, &ohp,
+ proc, &rp_locks);
+ r = copy_ref(ref,hp);
+ hp += REF_THING_SIZE;
+ e = build_load_error_hp(hp, errcode);
+ hp += need;
+ mess = TUPLE2(hp,tag,e);
+ hp += 3;
+ mess = TUPLE5(hp,type,r,am_driver,driver_name,mess);
+ } else {
+ hp = erts_alloc_message_heap(6 /* tuple */ + REF_THING_SIZE, &bp, &ohp, proc, &rp_locks);
+ r = copy_ref(ref,hp);
+ hp += REF_THING_SIZE;
+ mess = TUPLE5(hp,type,r,am_driver,driver_name,tag);
+ }
+ erts_queue_message(proc, &rp_locks, bp, mess, am_undefined);
+ erts_smp_proc_unlock(proc, rp_locks);
+ ERTS_SMP_CHK_NO_PROC_LOCKS;
+}
+
+static void notify_all(DE_Handle *dh, char *name, Uint awaiting, Eterm type, Eterm tag)
+{
+ DE_ProcEntry **p;
+
+ assert_drv_list_locked();
+
+ p = &(dh->procs);
+ while (*p != NULL) {
+ if ((*p)->awaiting_status == awaiting) {
+ DE_ProcEntry *pe;
+ pe = *p;
+ *p = pe->next;
+ notify_proc(pe->proc, make_internal_ref(&(pe->heap)), mkatom(name), type, tag, 0);
+ erts_free(ERTS_ALC_T_DDLL_PROCESS, (void *) pe);
+ } else {
+ p = &((*p)->next);
+ }
+ }
+}
+
+
+
+typedef struct errcode_entry {
+ char *atm;
+ int code;
+} ErrcodeEntry;
+
+static ErrcodeEntry errcode_tab[] = {
+ {"no_error", ERL_DE_NO_ERROR},
+ {"no_driver_init", ERL_DE_LOAD_ERROR_NO_INIT},
+ {"driver_init_failed", ERL_DE_LOAD_ERROR_FAILED_INIT},
+ {"bad_driver_name", ERL_DE_LOAD_ERROR_BAD_NAME},
+ {"driver_name_to_long", ERL_DE_LOAD_ERROR_NAME_TO_LONG},
+ {"driver_incorrect_version", ERL_DE_LOAD_ERROR_INCORRECT_VERSION},
+ {"no_ddll_available", ERL_DE_ERROR_NO_DDLL_FUNCTIONALITY},
+ {"unspecified_error", ERL_DE_ERROR_UNSPECIFIED},
+ {"symbol_not_found", ERL_DE_LOOKUP_ERROR_NOT_FOUND},
+ {NULL,0}
+};
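+/* Note that the atom spellings above (including the misspelled
+   "driver_name_to_long") are presumably matched by Erlang code and are
+   therefore kept verbatim. */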
+
+static int errdesc_to_code(Eterm errdesc, int *code /* out */)
+{
+ int i;
+ if (is_atom(errdesc)) {
+ Atom *ap = atom_tab(atom_val(errdesc));
+ for (i = 0; errcode_tab[i].atm != NULL; ++i) {
+ int len = sys_strlen(errcode_tab[i].atm);
+ if (len == ap->len &&
+ !sys_strncmp(errcode_tab[i].atm,(char *) ap->name,len)) {
+ *code = errcode_tab[i].code;
+ return 0;
+ }
+ }
+ return -1;
+ } else if (is_tuple(errdesc)) {
+ Eterm *tp = tuple_val(errdesc);
+ if (*tp != make_arityval(2) || tp[1] != am_open_error || is_not_small(tp[2])) {
+ return -1;
+ }
+ *code = signed_val(tp[2]);
+ return 0;
+ }
+ return -1;
+}
+
+static Eterm build_load_error(Process *p, int code)
+{
+ int need = load_error_need(code);
+ Eterm *hp = NULL;
+ ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN & erts_proc_lc_my_proc_locks(p));
+ if (need) {
+ hp = HAlloc(p,need);
+ }
+ return build_load_error_hp(hp,code);
+}
+
+static int load_error_need(int code)
+{
+ ErrcodeEntry *ee = errcode_tab;
+ while (ee->atm != NULL) {
+ if (ee->code == code) {
+ return 0;
+ }
+ ++ee;
+ }
+ return 3;
+}
+
+static Eterm build_load_error_hp(Eterm *hp, int code)
+{
+ ErrcodeEntry *ee = errcode_tab;
+ while (ee->atm != NULL) {
+ if (ee->code == code) {
+ return mkatom(ee->atm);
+ }
+ ++ee;
+ }
+ return TUPLE2(hp,am_open_error, make_small(code));
+}
+
+
+
+static Eterm mkatom(char *str)
+{
+ return am_atom_put(str, sys_strlen(str));
+}
+
+static char *pick_list_or_atom(Eterm name_term)
+{
+ char *name = NULL;
+ int name_len;
+ if (is_atom(name_term)) {
+ Atom *ap = atom_tab(atom_val(name_term));
+ if (ap->len == 0) {
+	    /* Io-lists of length zero are not allowed,
+	       so the empty atom should not be either */
+ goto error;
+ }
+ name = erts_alloc(ERTS_ALC_T_DDLL_TMP_BUF, ap->len + 1);
+ memcpy(name,ap->name,ap->len);
+ name[ap->len] = '\0';
+ } else {
+ name_len = io_list_len(name_term);
+ if (name_len <= 0) {
+ goto error;
+ }
+ name = erts_alloc(ERTS_ALC_T_DDLL_TMP_BUF, name_len + 1);
+ if (io_list_to_buf(name_term, name, name_len) != 0) {
+ goto error;
+ }
+ name[name_len] = '\0';
+ }
+ return name;
+ error:
+ if (name != NULL) {
+ erts_free(ERTS_ALC_T_DDLL_TMP_BUF, (void *) name);
+ }
+ return NULL;
+}
+
+static int build_proc_info(DE_Handle *dh, ProcEntryInfo **out_pei, Uint filter)
+{
+ ProcEntryInfo *pei = NULL;
+ int num_pei = 0;
+ int num_pei_allocated = 0;
+ int i;
+ DE_ProcEntry *pe;
+
+ assert_drv_list_locked();
+
+ for (pe = dh->procs; pe != NULL; pe = pe->next) {
+ Eterm id = pe->proc->id;
+ Uint stat = pe->awaiting_status;
+ if (stat == ERL_DE_PROC_AWAIT_UNLOAD_ONLY) {
+ stat = ERL_DE_PROC_AWAIT_UNLOAD;
+ }
+ if (stat != filter) {
+ continue;
+ }
+ for (i = 0; i < num_pei; ++i) {
+ if (pei[i].pid == id && pei[i].status == stat) {
+ break;
+ }
+ }
+ if (i < num_pei) {
+ pei[i].count++;
+ } else {
+ if (num_pei >= num_pei_allocated) {
+ pei = (pei == NULL)
+ ? erts_alloc(ERTS_ALC_T_DDLL_TMP_BUF,
+ sizeof(ProcEntryInfo) * (num_pei_allocated = 10))
+ : erts_realloc(ERTS_ALC_T_DDLL_TMP_BUF, pei,
+ sizeof(ProcEntryInfo) * (num_pei_allocated += 10));
+ }
+ pei[num_pei].pid = id;
+ pei[num_pei].proc = pe->proc;
+ pei[num_pei].status = stat;
+ pei[num_pei].count = 1;
+ ++num_pei;
+ }
+ }
+ *out_pei = pei;
+ return num_pei;
+}
+
+
+
+static erts_driver_t *lookup_driver(char *name)
+{
+ erts_driver_t *drv;
+ assert_drv_list_locked();
+ for (drv = driver_list; drv != NULL && strcmp(drv->name, name); drv = drv->next)
+ ;
+ return drv;
+}
diff --git a/erts/emulator/beam/erl_bif_guard.c b/erts/emulator/beam/erl_bif_guard.c
new file mode 100644
index 0000000000..8b47db10dd
--- /dev/null
+++ b/erts/emulator/beam/erl_bif_guard.c
@@ -0,0 +1,628 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2006-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Numeric guard BIFs.
+ */
+
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+
+#include "sys.h"
+#include "erl_vm.h"
+#include "global.h"
+#include "erl_process.h"
+#include "error.h"
+#include "bif.h"
+#include "big.h"
+#include "erl_binary.h"
+
+static Eterm gc_double_to_integer(Process* p, double x, Eterm* reg, Uint live);
+
+static Eterm double_to_integer(Process* p, double x);
+
+/*
+ * Guard BIFs called using apply/3 and guard BIFs that never build
+ * anything on the heap.
+ */
+
+BIF_RETTYPE abs_1(BIF_ALIST_1)
+{
+ Eterm res;
+ Sint i0, i;
+ Eterm* hp;
+
+ /* integer arguments */
+ if (is_small(BIF_ARG_1)) {
+ i0 = signed_val(BIF_ARG_1);
+ i = labs(i0);
+ if (i0 == MIN_SMALL) {
+ hp = HAlloc(BIF_P, BIG_UINT_HEAP_SIZE);
+ BIF_RET(uint_to_big(i, hp));
+ } else {
+ BIF_RET(make_small(i));
+ }
+ } else if (is_big(BIF_ARG_1)) {
+ if (!big_sign(BIF_ARG_1)) {
+ BIF_RET(BIF_ARG_1);
+ } else {
+ int sz = big_arity(BIF_ARG_1) + 1;
+ Uint* x;
+
+ hp = HAlloc(BIF_P, sz); /* See note at beginning of file */
+ sz--;
+ res = make_big(hp);
+ x = big_val(BIF_ARG_1);
+ *hp++ = make_pos_bignum_header(sz);
+ x++; /* skip thing */
+ while(sz--)
+ *hp++ = *x++;
+ BIF_RET(res);
+ }
+ } else if (is_float(BIF_ARG_1)) {
+ FloatDef f;
+
+ GET_DOUBLE(BIF_ARG_1, f);
+ if (f.fd < 0.0) {
+ hp = HAlloc(BIF_P, FLOAT_SIZE_OBJECT);
+ f.fd = fabs(f.fd);
+ res = make_float(hp);
+ PUT_DOUBLE(f, hp);
+ BIF_RET(res);
+ }
+ else
+ BIF_RET(BIF_ARG_1);
+ }
+ BIF_ERROR(BIF_P, BADARG);
+}
+
+BIF_RETTYPE float_1(BIF_ALIST_1)
+{
+ Eterm res;
+ Eterm* hp;
+ FloatDef f;
+
+ /* check args */
+ if (is_not_integer(BIF_ARG_1)) {
+ if (is_float(BIF_ARG_1)) {
+ BIF_RET(BIF_ARG_1);
+ } else {
+ badarg:
+ BIF_ERROR(BIF_P, BADARG);
+ }
+ }
+ if (is_small(BIF_ARG_1)) {
+ Sint i = signed_val(BIF_ARG_1);
+ f.fd = i; /* use "C"'s auto casting */
+ } else if (big_to_double(BIF_ARG_1, &f.fd) < 0) {
+ goto badarg;
+ }
+ hp = HAlloc(BIF_P, FLOAT_SIZE_OBJECT);
+ res = make_float(hp);
+ PUT_DOUBLE(f, hp);
+ BIF_RET(res);
+}
+
+BIF_RETTYPE trunc_1(BIF_ALIST_1)
+{
+ Eterm res;
+ FloatDef f;
+
+ /* check arg */
+ if (is_not_float(BIF_ARG_1)) {
+ if (is_integer(BIF_ARG_1))
+ BIF_RET(BIF_ARG_1);
+ BIF_ERROR(BIF_P, BADARG);
+ }
+ /* get the float */
+ GET_DOUBLE(BIF_ARG_1, f);
+
+ /* truncate it and return the resultant integer */
+ res = double_to_integer(BIF_P, (f.fd >= 0.0) ? floor(f.fd) : ceil(f.fd));
+ BIF_RET(res);
+}
+
+BIF_RETTYPE round_1(BIF_ALIST_1)
+{
+ Eterm res;
+ FloatDef f;
+
+ /* check arg */
+ if (is_not_float(BIF_ARG_1)) {
+ if (is_integer(BIF_ARG_1))
+ BIF_RET(BIF_ARG_1);
+ BIF_ERROR(BIF_P, BADARG);
+ }
+
+ /* get the float */
+ GET_DOUBLE(BIF_ARG_1, f);
+
+ /* round it and return the resultant integer */
+ res = double_to_integer(BIF_P, (f.fd > 0.0) ? f.fd + 0.5 : f.fd - 0.5);
+ BIF_RET(res);
+}
+
+BIF_RETTYPE length_1(BIF_ALIST_1)
+{
+ Eterm list;
+ Uint i;
+
+ if (is_nil(BIF_ARG_1))
+ BIF_RET(SMALL_ZERO);
+ if (is_not_list(BIF_ARG_1)) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+ list = BIF_ARG_1;
+ i = 0;
+ while (is_list(list)) {
+ i++;
+ list = CDR(list_val(list));
+ }
+ if (is_not_nil(list)) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+ BIF_RET(make_small(i));
+}
+
+/* returns the size of a tuple or a binary */
+
+BIF_RETTYPE size_1(BIF_ALIST_1)
+{
+ if (is_tuple(BIF_ARG_1)) {
+ Eterm* tupleptr = tuple_val(BIF_ARG_1);
+
+ BIF_RET(make_small(arityval(*tupleptr)));
+ } else if (is_binary(BIF_ARG_1)) {
+ Uint sz = binary_size(BIF_ARG_1);
+ if (IS_USMALL(0, sz)) {
+ return make_small(sz);
+ } else {
+ Eterm* hp = HAlloc(BIF_P, BIG_UINT_HEAP_SIZE);
+ BIF_RET(uint_to_big(sz, hp));
+ }
+ }
+ BIF_ERROR(BIF_P, BADARG);
+}
+
+/**********************************************************************/
+/* returns the bitsize of a bitstring */
+
+BIF_RETTYPE bit_size_1(BIF_ALIST_1)
+{
+ Uint low_bits;
+ Uint bytesize;
+ Uint high_bits;
+ if (is_binary(BIF_ARG_1)) {
+ bytesize = binary_size(BIF_ARG_1);
+ high_bits = bytesize >> ((sizeof(Uint) * 8)-3);
+ low_bits = (bytesize << 3) + binary_bitsize(BIF_ARG_1);
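+	/* bytesize << 3 may overflow a Uint; the bits shifted out are
+	   captured in high_bits and, when non-zero, a two-digit bignum
+	   is built below. */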
+ if (high_bits == 0) {
+ if (IS_USMALL(0,low_bits)) {
+ BIF_RET(make_small(low_bits));
+ } else {
+ Eterm* hp = HAlloc(BIF_P, BIG_UINT_HEAP_SIZE);
+ BIF_RET(uint_to_big(low_bits, hp));
+ }
+ } else {
+ Uint sz = BIG_UINT_HEAP_SIZE+1;
+ Eterm* hp = HAlloc(BIF_P, sz);
+ hp[0] = make_pos_bignum_header(sz-1);
+ BIG_DIGIT(hp,0) = low_bits;
+ BIG_DIGIT(hp,1) = high_bits;
+ BIF_RET(make_big(hp));
+ }
+ } else {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+}
+
+/**********************************************************************/
+/* returns the number of bytes needed to store a bitstring */
+
+BIF_RETTYPE byte_size_1(BIF_ALIST_1)
+{
+ if (is_binary(BIF_ARG_1)) {
+ Uint bytesize = binary_size(BIF_ARG_1);
+ if (binary_bitsize(BIF_ARG_1) > 0) {
+ bytesize++;
+ }
+ if (IS_USMALL(0, bytesize)) {
+ BIF_RET(make_small(bytesize));
+ } else {
+ Eterm* hp = HAlloc(BIF_P, BIG_UINT_HEAP_SIZE);
+ BIF_RET(uint_to_big(bytesize, hp));
+ }
+ } else {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+}
+
+/*
+ * Generate the integer part from a double.
+ */
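+/*
+ * Sketch of the method (illustrative numbers only): divide x by the
+ * digit base until x < 1.0, counting the digits (ds); then multiply
+ * back up, truncating one ErtsDigit per step, most significant digit
+ * first. With a 16-bit digit base, 100000.0 gives ds = 2 and digits
+ * 1 (high) and 34464 (low), since 100000 = 1*65536 + 34464.
+ */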
+static Eterm
+double_to_integer(Process* p, double x)
+{
+ int is_negative;
+ int ds;
+ ErtsDigit* xp;
+ int i;
+ Eterm res;
+ size_t sz;
+ Eterm* hp;
+ double dbase;
+
+ if ((x < (double) (MAX_SMALL+1)) && (x > (double) (MIN_SMALL-1))) {
+ Sint xi = x;
+ return make_small(xi);
+ }
+
+ if (x >= 0) {
+ is_negative = 0;
+ } else {
+ is_negative = 1;
+ x = -x;
+ }
+
+ /* Unscale & (calculate exponent) */
+ ds = 0;
+ dbase = ((double)(D_MASK)+1);
+ while(x >= 1.0) {
+ x /= dbase; /* "shift" right */
+ ds++;
+ }
+ sz = BIG_NEED_SIZE(ds); /* number of words including arity */
+
+ hp = HAlloc(p, sz);
+ res = make_big(hp);
+ xp = (ErtsDigit*) (hp + 1);
+
+ for (i = ds-1; i >= 0; i--) {
+ ErtsDigit d;
+
+ x *= dbase; /* "shift" left */
+ d = x; /* trunc */
+ xp[i] = d; /* store digit */
+ x -= d; /* remove integer part */
+ }
+ while ((ds & (BIG_DIGITS_PER_WORD-1)) != 0) {
+ xp[ds++] = 0;
+ }
+
+ if (is_negative) {
+ *hp = make_neg_bignum_header(sz-1);
+ } else {
+ *hp = make_pos_bignum_header(sz-1);
+ }
+ return res;
+}
+
+/*
+ * The following code is used when a guard BIF that may build on the
+ * heap is called directly. These functions must not use HAlloc(), but
+ * must do a garbage collection if there is insufficient heap space.
+ */
+
+#define ERTS_NEED_GC(p, need) ((HEAP_LIMIT((p)) - HEAP_TOP((p))) <= (need))
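+
+/*
+ * Typical allocation pattern in the erts_gc_* functions below (sketch
+ * only): check that <need> words fit on the heap, garbage collect with
+ * all live registers as roots if they do not, then bump-allocate from
+ * the heap top:
+ *
+ *   if (ERTS_NEED_GC(p, need)) {
+ *       erts_garbage_collect(p, need, reg, live);
+ *   }
+ *   hp = p->htop;
+ *   p->htop += need;
+ */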
+
+Eterm erts_gc_length_1(Process* p, Eterm* reg, Uint live)
+{
+ Eterm list = reg[live];
+ int i;
+
+ if (is_nil(list))
+ return SMALL_ZERO;
+ i = 0;
+ while (is_list(list)) {
+ i++;
+ list = CDR(list_val(list));
+ }
+ if (is_not_nil(list)) {
+ BIF_ERROR(p, BADARG);
+ }
+ return make_small(i);
+}
+
+Eterm erts_gc_size_1(Process* p, Eterm* reg, Uint live)
+{
+ Eterm arg = reg[live];
+ if (is_tuple(arg)) {
+ Eterm* tupleptr = tuple_val(arg);
+ return make_small(arityval(*tupleptr));
+ } else if (is_binary(arg)) {
+ Uint sz = binary_size(arg);
+ if (IS_USMALL(0, sz)) {
+ return make_small(sz);
+ } else {
+ Eterm* hp;
+ if (ERTS_NEED_GC(p, BIG_UINT_HEAP_SIZE)) {
+ erts_garbage_collect(p, BIG_UINT_HEAP_SIZE, reg, live);
+ }
+ hp = p->htop;
+ p->htop += BIG_UINT_HEAP_SIZE;
+ return uint_to_big(sz, hp);
+ }
+ }
+ BIF_ERROR(p, BADARG);
+}
+
+Eterm erts_gc_bit_size_1(Process* p, Eterm* reg, Uint live)
+{
+ Eterm arg = reg[live];
+ if (is_binary(arg)) {
+ Uint low_bits;
+ Uint bytesize;
+ Uint high_bits;
+ bytesize = binary_size(arg);
+ high_bits = bytesize >> ((sizeof(Uint) * 8)-3);
+ low_bits = (bytesize << 3) + binary_bitsize(arg);
+ if (high_bits == 0) {
+ if (IS_USMALL(0,low_bits)) {
+ return make_small(low_bits);
+ } else {
+ Eterm* hp;
+ if (ERTS_NEED_GC(p, BIG_UINT_HEAP_SIZE)) {
+ erts_garbage_collect(p, BIG_UINT_HEAP_SIZE, reg, live);
+ }
+ hp = p->htop;
+ p->htop += BIG_UINT_HEAP_SIZE;
+ return uint_to_big(low_bits, hp);
+ }
+ } else {
+ Uint sz = BIG_UINT_HEAP_SIZE+1;
+ Eterm* hp;
+ if (ERTS_NEED_GC(p, sz)) {
+ erts_garbage_collect(p, sz, reg, live);
+ }
+ hp = p->htop;
+ p->htop += sz;
+ hp[0] = make_pos_bignum_header(sz-1);
+ BIG_DIGIT(hp,0) = low_bits;
+ BIG_DIGIT(hp,1) = high_bits;
+ return make_big(hp);
+ }
+ } else {
+ BIF_ERROR(p, BADARG);
+ }
+}
+
+Eterm erts_gc_byte_size_1(Process* p, Eterm* reg, Uint live)
+{
+ Eterm arg = reg[live];
+ if (is_binary(arg)) {
+ Uint bytesize = binary_size(arg);
+ if (binary_bitsize(arg) > 0) {
+ bytesize++;
+ }
+ if (IS_USMALL(0, bytesize)) {
+ return make_small(bytesize);
+ } else {
+ Eterm* hp;
+ if (ERTS_NEED_GC(p, BIG_UINT_HEAP_SIZE)) {
+ erts_garbage_collect(p, BIG_UINT_HEAP_SIZE, reg, live);
+ }
+ hp = p->htop;
+ p->htop += BIG_UINT_HEAP_SIZE;
+ return uint_to_big(bytesize, hp);
+ }
+ } else {
+ BIF_ERROR(p, BADARG);
+ }
+}
+
+Eterm erts_gc_abs_1(Process* p, Eterm* reg, Uint live)
+{
+ Eterm arg;
+ Eterm res;
+ Sint i0, i;
+ Eterm* hp;
+
+ arg = reg[live];
+
+ /* integer arguments */
+ if (is_small(arg)) {
+ i0 = signed_val(arg);
+ i = labs(i0);
+ if (i0 == MIN_SMALL) {
+ if (ERTS_NEED_GC(p, BIG_UINT_HEAP_SIZE)) {
+ erts_garbage_collect(p, BIG_UINT_HEAP_SIZE, reg, live+1);
+ arg = reg[live];
+ }
+ hp = p->htop;
+ p->htop += BIG_UINT_HEAP_SIZE;
+ return uint_to_big(i, hp);
+ } else {
+ return make_small(i);
+ }
+ } else if (is_big(arg)) {
+ if (!big_sign(arg)) {
+ return arg;
+ } else {
+ int sz = big_arity(arg) + 1;
+ Uint* x;
+
+ if (ERTS_NEED_GC(p, sz)) {
+ erts_garbage_collect(p, sz, reg, live+1);
+ arg = reg[live];
+ }
+ hp = p->htop;
+ p->htop += sz;
+ sz--;
+ res = make_big(hp);
+ x = big_val(arg);
+ *hp++ = make_pos_bignum_header(sz);
+ x++; /* skip thing */
+ while(sz--)
+ *hp++ = *x++;
+ return res;
+ }
+ } else if (is_float(arg)) {
+ FloatDef f;
+
+ GET_DOUBLE(arg, f);
+ if (f.fd < 0.0) {
+ if (ERTS_NEED_GC(p, FLOAT_SIZE_OBJECT)) {
+ erts_garbage_collect(p, FLOAT_SIZE_OBJECT, reg, live+1);
+ arg = reg[live];
+ }
+ hp = p->htop;
+ p->htop += FLOAT_SIZE_OBJECT;
+ f.fd = fabs(f.fd);
+ res = make_float(hp);
+ PUT_DOUBLE(f, hp);
+ return res;
+ }
+ else
+ return arg;
+ }
+ BIF_ERROR(p, BADARG);
+}
+
+Eterm erts_gc_float_1(Process* p, Eterm* reg, Uint live)
+{
+ Eterm arg;
+ Eterm res;
+ Eterm* hp;
+ FloatDef f;
+
+ /* check args */
+ arg = reg[live];
+ if (is_not_integer(arg)) {
+ if (is_float(arg)) {
+ return arg;
+ } else {
+ badarg:
+ BIF_ERROR(p, BADARG);
+ }
+ }
+ if (is_small(arg)) {
+ Sint i = signed_val(arg);
+ f.fd = i; /* use "C"'s auto casting */
+ } else if (big_to_double(arg, &f.fd) < 0) {
+ goto badarg;
+ }
+ if (ERTS_NEED_GC(p, FLOAT_SIZE_OBJECT)) {
+ erts_garbage_collect(p, FLOAT_SIZE_OBJECT, reg, live+1);
+ arg = reg[live];
+ }
+ hp = p->htop;
+ p->htop += FLOAT_SIZE_OBJECT;
+ res = make_float(hp);
+ PUT_DOUBLE(f, hp);
+ return res;
+}
+
+Eterm erts_gc_round_1(Process* p, Eterm* reg, Uint live)
+{
+ Eterm arg;
+ FloatDef f;
+
+ arg = reg[live];
+ if (is_not_float(arg)) {
+ if (is_integer(arg)) {
+ return arg;
+ }
+ BIF_ERROR(p, BADARG);
+ }
+ GET_DOUBLE(arg, f);
+
+ return gc_double_to_integer(p, (f.fd > 0.0) ? f.fd + 0.5 : f.fd - 0.5,
+ reg, live);
+}
+
+Eterm erts_gc_trunc_1(Process* p, Eterm* reg, Uint live)
+{
+ Eterm arg;
+ FloatDef f;
+
+ arg = reg[live];
+ if (is_not_float(arg)) {
+ if (is_integer(arg)) {
+ return arg;
+ }
+ BIF_ERROR(p, BADARG);
+ }
+ /* get the float */
+ GET_DOUBLE(arg, f);
+
+ /* truncate it and return the resultant integer */
+ return gc_double_to_integer(p, (f.fd >= 0.0) ? floor(f.fd) : ceil(f.fd),
+ reg, live);
+}
+
+static Eterm
+gc_double_to_integer(Process* p, double x, Eterm* reg, Uint live)
+{
+ int is_negative;
+ int ds;
+ ErtsDigit* xp;
+ int i;
+ Eterm res;
+ size_t sz;
+ Eterm* hp;
+ double dbase;
+
+ if ((x < (double) (MAX_SMALL+1)) && (x > (double) (MIN_SMALL-1))) {
+ Sint xi = x;
+ return make_small(xi);
+ }
+
+ if (x >= 0) {
+ is_negative = 0;
+ } else {
+ is_negative = 1;
+ x = -x;
+ }
+
+ /* Unscale & (calculate exponent) */
+ ds = 0;
+ dbase = ((double)(D_MASK)+1);
+ while(x >= 1.0) {
+ x /= dbase; /* "shift" right */
+ ds++;
+ }
+ sz = BIG_NEED_SIZE(ds); /* number of words including arity */
+ if (ERTS_NEED_GC(p, sz)) {
+ erts_garbage_collect(p, sz, reg, live);
+ }
+ hp = p->htop;
+ p->htop += sz;
+ res = make_big(hp);
+ xp = (ErtsDigit*) (hp + 1);
+
+ for (i = ds-1; i >= 0; i--) {
+ ErtsDigit d;
+
+ x *= dbase; /* "shift" left */
+ d = x; /* trunc */
+ xp[i] = d; /* store digit */
+ x -= d; /* remove integer part */
+ }
+ while ((ds & (BIG_DIGITS_PER_WORD-1)) != 0) {
+ xp[ds++] = 0;
+ }
+
+ if (is_negative) {
+ *hp = make_neg_bignum_header(sz-1);
+ } else {
+ *hp = make_pos_bignum_header(sz-1);
+ }
+ return res;
+}
diff --git a/erts/emulator/beam/erl_bif_info.c b/erts/emulator/beam/erl_bif_info.c
new file mode 100644
index 0000000000..60216aa8e4
--- /dev/null
+++ b/erts/emulator/beam/erl_bif_info.c
@@ -0,0 +1,3803 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 1999-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+
+#include "sys.h"
+#include "erl_vm.h"
+#include "global.h"
+#include "erl_process.h"
+#include "erl_nmgc.h"
+#include "error.h"
+#include "erl_driver.h"
+#include "bif.h"
+#include "big.h"
+#include "erl_version.h"
+#include "erl_db_util.h"
+#include "erl_message.h"
+#include "erl_binary.h"
+#include "erl_db.h"
+#include "erl_instrument.h"
+#include "dist.h"
+#include "erl_gc.h"
+#ifdef ELIB_ALLOC_IS_CLIB
+#include "elib_stat.h"
+#endif
+#ifdef HIPE
+#include "hipe_arch.h"
+#endif
+
+#ifdef ERTS_ENABLE_LOCK_COUNT
+#include "erl_lock_count.h"
+#endif
+
+#ifdef VALGRIND
+#include <valgrind/valgrind.h>
+#include <valgrind/memcheck.h>
+#endif
+
+#define DECL_AM(S) Eterm AM_ ## S = am_atom_put(#S, sizeof(#S) - 1)
+
+/* Keep erts_system_version as a global variable for easy access from a core dump */
+static char erts_system_version[] = ("Erlang " ERLANG_OTP_RELEASE
+ " (erts-" ERLANG_VERSION ")"
+#ifndef OTP_RELEASE
+ " [source]"
+#endif
+#ifdef ARCH_64
+ " [64-bit]"
+#endif
+#ifdef ERTS_SMP
+ " [smp:%bpu:%bpu]"
+#endif
+ " [rq:%bpu]"
+#ifdef USE_THREADS
+ " [async-threads:%d]"
+#endif
+#ifdef HIPE
+ " [hipe]"
+#endif
+#ifdef ERTS_ENABLE_KERNEL_POLL
+ " [kernel-poll:%s]"
+#endif
+#ifdef HYBRID
+ " [hybrid heap]"
+#endif
+#ifdef INCREMENTAL
+ " [incremental GC]"
+#endif
+#ifdef ET_DEBUG
+#if ET_DEBUG
+ " [type-assertions]"
+#endif
+#endif
+#ifdef DEBUG
+ " [debug-compiled]"
+#endif
+#ifdef ERTS_ENABLE_LOCK_CHECK
+ " [lock-checking]"
+#endif
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ " [lock-counting]"
+#endif
+#ifdef PURIFY
+ " [purify-compiled]"
+#endif
+#ifdef VALGRIND
+ " [valgrind-compiled]"
+#endif
+ "\n");
+
+#define ASIZE(a) (sizeof(a)/sizeof(a[0]))
+
+#if defined(HAVE_SOLARIS_SPARC_PERFMON)
+# include <sys/ioccom.h>
+# define PERFMON_SETPCR _IOW('P', 1, unsigned long long)
+# define PERFMON_GETPCR _IOR('P', 2, unsigned long long)
+#endif
+
+static Eterm
+bld_bin_list(Uint **hpp, Uint *szp, ProcBin* pb)
+{
+ Eterm res = NIL;
+ Eterm tuple;
+
+ for (; pb; pb = pb->next) {
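+	/* Each binary contributes a 3-tuple (4 words) and a cons cell
+	   (2 words); hence the 4+2 added to *szp below. */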
+ Eterm val = erts_bld_uint(hpp, szp, (Uint) pb->val);
+ Eterm orig_size = erts_bld_uint(hpp, szp, pb->val->orig_size);
+
+ if (szp)
+ *szp += 4+2;
+ if (hpp) {
+ Uint refc = (Uint) erts_smp_atomic_read(&pb->val->refc);
+ tuple = TUPLE3(*hpp, val, orig_size, make_small(refc));
+ res = CONS(*hpp + 4, tuple, res);
+ *hpp += 4+2;
+ }
+ }
+ return res;
+}
+
+
+/*
+ make_monitor_list:
+ returns a list of records..
+ -record(erl_monitor, {
+ type, % MON_ORIGIN or MON_TARGET (1 or 3)
+ ref,
+ pid, % Process or nodename
+ name % registered name or []
+ }).
+*/
+
+static void do_calc_mon_size(ErtsMonitor *mon, void *vpsz)
+{
+ Uint *psz = vpsz;
+ *psz += IS_CONST(mon->ref) ? 0 : NC_HEAP_SIZE(mon->ref);
+ *psz += IS_CONST(mon->pid) ? 0 : NC_HEAP_SIZE(mon->pid);
+ *psz += 8; /* CONS + 5-tuple */
+}
+
+typedef struct {
+ Process *p;
+ Eterm *hp;
+ Eterm res;
+ Eterm tag;
+} MonListContext;
+
+static void do_make_one_mon_element(ErtsMonitor *mon, void * vpmlc)
+{
+ MonListContext *pmlc = vpmlc;
+ Eterm tup;
+ Eterm r = (IS_CONST(mon->ref)
+ ? mon->ref
+ : STORE_NC(&(pmlc->hp), &MSO(pmlc->p).externals, mon->ref));
+ Eterm p = (IS_CONST(mon->pid)
+ ? mon->pid
+ : STORE_NC(&(pmlc->hp), &MSO(pmlc->p).externals, mon->pid));
+ tup = TUPLE5(pmlc->hp, pmlc->tag, make_small(mon->type), r, p, mon->name);
+ pmlc->hp += 6;
+ pmlc->res = CONS(pmlc->hp, tup, pmlc->res);
+ pmlc->hp += 2;
+}
+
+static Eterm
+make_monitor_list(Process *p, ErtsMonitor *root)
+{
+ DECL_AM(erl_monitor);
+ Uint sz = 0;
+ MonListContext mlc;
+
+ erts_doforall_monitors(root, &do_calc_mon_size, &sz);
+ if (sz == 0) {
+ return NIL;
+ }
+ mlc.p = p;
+ mlc.hp = HAlloc(p,sz);
+ mlc.res = NIL;
+ mlc.tag = AM_erl_monitor;
+ erts_doforall_monitors(root, &do_make_one_mon_element, &mlc);
+ return mlc.res;
+}
+
+/*
+ make_link_list:
+ returns a list of records..
+ -record(erl_link, {
+ type, % LINK_NODE or LINK_PID (1 or 3)
+ pid, % Process or nodename
+ targets % List of erl_link's or nil
+ }).
+*/
+
+static void do_calc_lnk_size(ErtsLink *lnk, void *vpsz)
+{
+ Uint *psz = vpsz;
+ *psz += IS_CONST(lnk->pid) ? 0 : NC_HEAP_SIZE(lnk->pid);
+ if (lnk->type != LINK_NODE && ERTS_LINK_ROOT(lnk) != NULL) {
+ /* Node links use this pointer as ref counter... */
+ erts_doforall_links(ERTS_LINK_ROOT(lnk),&do_calc_lnk_size,vpsz);
+ }
+ *psz += 7; /* CONS + 4-tuple */
+}
+
+typedef struct {
+ Process *p;
+ Eterm *hp;
+ Eterm res;
+ Eterm tag;
+} LnkListContext;
+
+static void do_make_one_lnk_element(ErtsLink *lnk, void * vpllc)
+{
+ LnkListContext *pllc = vpllc;
+ Eterm tup;
+ Eterm old_res, targets = NIL;
+ Eterm p = (IS_CONST(lnk->pid)
+ ? lnk->pid
+ : STORE_NC(&(pllc->hp), &MSO(pllc->p).externals, lnk->pid));
+ if (lnk->type == LINK_NODE) {
+ targets = make_small(ERTS_LINK_REFC(lnk));
+ } else if (ERTS_LINK_ROOT(lnk) != NULL) {
+ old_res = pllc->res;
+ pllc->res = NIL;
+ erts_doforall_links(ERTS_LINK_ROOT(lnk),&do_make_one_lnk_element, vpllc);
+ targets = pllc->res;
+ pllc->res = old_res;
+ }
+ tup = TUPLE4(pllc->hp, pllc->tag, make_small(lnk->type), p, targets);
+ pllc->hp += 5;
+ pllc->res = CONS(pllc->hp, tup, pllc->res);
+ pllc->hp += 2;
+}
+
+static Eterm
+make_link_list(Process *p, ErtsLink *root, Eterm tail)
+{
+ DECL_AM(erl_link);
+ Uint sz = 0;
+ LnkListContext llc;
+
+ erts_doforall_links(root, &do_calc_lnk_size, &sz);
+ if (sz == 0) {
+ return tail;
+ }
+ llc.p = p;
+ llc.hp = HAlloc(p,sz);
+ llc.res = tail;
+ llc.tag = AM_erl_link;
+ erts_doforall_links(root, &do_make_one_lnk_element, &llc);
+ return llc.res;
+}
+
+int
+erts_print_system_version(int to, void *arg, Process *c_p)
+{
+#ifdef ERTS_SMP
+ Uint total, online, active;
+ (void) erts_schedulers_state(&total, &online, &active, 0);
+#endif
+ return erts_print(to, arg, erts_system_version
+#ifdef ERTS_SMP
+ , total, online, erts_no_run_queues
+#else
+ , 1
+#endif
+#ifdef USE_THREADS
+ , erts_async_max_threads
+#endif
+#ifdef ERTS_ENABLE_KERNEL_POLL
+ , erts_use_kernel_poll ? "true" : "false"
+#endif
+ );
+}
+
+typedef struct {
+ Eterm entity;
+ Eterm node;
+} MonitorInfo;
+
+typedef struct {
+ MonitorInfo *mi;
+ Uint mi_i;
+ Uint mi_max;
+ int sz;
+} MonitorInfoCollection;
+
+#define INIT_MONITOR_INFOS(MIC) do { \
+ (MIC).mi = NULL; \
+ (MIC).mi_i = (MIC).mi_max = 0; \
+ (MIC).sz = 0; \
+} while(0)
+
+#define MI_INC 50
+#define EXTEND_MONITOR_INFOS(MICP) \
+do { \
+ if ((MICP)->mi_i >= (MICP)->mi_max) { \
+ (MICP)->mi = ((MICP)->mi ? erts_realloc(ERTS_ALC_T_TMP, \
+ (MICP)->mi, \
+ ((MICP)->mi_max+MI_INC) \
+ * sizeof(MonitorInfo)) \
+ : erts_alloc(ERTS_ALC_T_TMP, \
+ MI_INC*sizeof(MonitorInfo))); \
+ (MICP)->mi_max += MI_INC; \
+ } \
+ } while (0)
+#define DESTROY_MONITOR_INFOS(MIC) \
+do { \
+ if ((MIC).mi != NULL) { \
+ erts_free(ERTS_ALC_T_TMP, (void *) (MIC).mi); \
+ } \
+ } while (0)
+
+static void collect_one_link(ErtsLink *lnk, void *vmicp)
+{
+ MonitorInfoCollection *micp = vmicp;
+ EXTEND_MONITOR_INFOS(micp);
+    if (lnk->type != LINK_PID) {
+ return;
+ }
+ micp->mi[micp->mi_i].entity = lnk->pid;
+ micp->sz += 2 + NC_HEAP_SIZE(lnk->pid);
+ micp->mi_i++;
+}
+
+static void collect_one_origin_monitor(ErtsMonitor *mon, void *vmicp)
+{
+ MonitorInfoCollection *micp = vmicp;
+
+ if (mon->type != MON_ORIGIN) {
+ return;
+ }
+ EXTEND_MONITOR_INFOS(micp);
+ if (is_atom(mon->pid)) { /* external by name */
+ micp->mi[micp->mi_i].entity = mon->name;
+ micp->mi[micp->mi_i].node = mon->pid;
+ micp->sz += 3; /* need one 2-tuple */
+ } else if (is_external_pid(mon->pid)) { /* external by pid */
+ micp->mi[micp->mi_i].entity = mon->pid;
+ micp->mi[micp->mi_i].node = NIL;
+ micp->sz += NC_HEAP_SIZE(mon->pid);
+ } else if (!is_nil(mon->name)) { /* internal by name */
+ micp->mi[micp->mi_i].entity = mon->name;
+ micp->mi[micp->mi_i].node = erts_this_dist_entry->sysname;
+ micp->sz += 3; /* need one 2-tuple */
+ } else { /* internal by pid */
+ micp->mi[micp->mi_i].entity = mon->pid;
+ micp->mi[micp->mi_i].node = NIL;
+ /* no additional heap space needed */
+ }
+ micp->mi_i++;
+ micp->sz += 2 + 3; /* For a cons cell and a 2-tuple */
+}
+
+static void collect_one_target_monitor(ErtsMonitor *mon, void *vmicp)
+{
+ MonitorInfoCollection *micp = vmicp;
+
+ if (mon->type != MON_TARGET) {
+ return;
+ }
+
+ EXTEND_MONITOR_INFOS(micp);
+
+ micp->mi[micp->mi_i].node = NIL;
+ micp->mi[micp->mi_i].entity = mon->pid;
+ micp->sz += (NC_HEAP_SIZE(mon->pid) + 2 /* cons */);
+ micp->mi_i++;
+}
+
+typedef struct {
+ Process *c_p;
+ ErtsProcLocks c_p_locks;
+ ErtsSuspendMonitor **smi;
+ Uint smi_i;
+ Uint smi_max;
+ int sz;
+} ErtsSuspendMonitorInfoCollection;
+
+#define ERTS_INIT_SUSPEND_MONITOR_INFOS(SMIC, CP, CPL) do { \
+ (SMIC).c_p = (CP); \
+ (SMIC).c_p_locks = (CPL); \
+ (SMIC).smi = NULL; \
+ (SMIC).smi_i = (SMIC).smi_max = 0; \
+ (SMIC).sz = 0; \
+} while(0)
+
+#define ERTS_SMI_INC 50
+#define ERTS_EXTEND_SUSPEND_MONITOR_INFOS(SMICP) \
+do { \
+ if ((SMICP)->smi_i >= (SMICP)->smi_max) { \
+ (SMICP)->smi = ((SMICP)->smi \
+ ? erts_realloc(ERTS_ALC_T_TMP, \
+ (SMICP)->smi, \
+ ((SMICP)->smi_max \
+ + ERTS_SMI_INC) \
+ * sizeof(ErtsSuspendMonitor *)) \
+ : erts_alloc(ERTS_ALC_T_TMP, \
+ ERTS_SMI_INC \
+ * sizeof(ErtsSuspendMonitor *))); \
+ (SMICP)->smi_max += ERTS_SMI_INC; \
+ } \
+ } while (0)
+
+#define ERTS_DESTROY_SUSPEND_MONITOR_INFOS(SMIC) \
+do { \
+ if ((SMIC).smi != NULL) { \
+ erts_free(ERTS_ALC_T_TMP, (void *) (SMIC).smi); \
+ } \
+ } while (0)
+
+static void
+collect_one_suspend_monitor(ErtsSuspendMonitor *smon, void *vsmicp)
+{
+ ErtsSuspendMonitorInfoCollection *smicp = vsmicp;
+ Process *suspendee = erts_pid2proc(smicp->c_p,
+ smicp->c_p_locks,
+ smon->pid,
+ 0);
+ if (suspendee) { /* suspendee is alive */
+ Sint a, p;
+ if (smon->active) {
+ smon->active += smon->pending;
+ smon->pending = 0;
+ }
+
+ ASSERT((smon->active && !smon->pending)
+ || (smon->pending && !smon->active));
+
+ ERTS_EXTEND_SUSPEND_MONITOR_INFOS(smicp);
+
+ smicp->smi[smicp->smi_i] = smon;
+ smicp->sz += 2 /* cons */ + 4 /* 3-tuple */;
+
+ a = (Sint) smon->active; /* quiet compiler warnings */
+ p = (Sint) smon->pending; /* on 64-bit machines */
+
+ if (!IS_SSMALL(a))
+ smicp->sz += BIG_UINT_HEAP_SIZE;
+ if (!IS_SSMALL(p))
+ smicp->sz += BIG_UINT_HEAP_SIZE;
+ smicp->smi_i++;
+ }
+}
+
+
+static void one_link_size(ErtsLink *lnk, void *vpu)
+{
+ Uint *pu = vpu;
+ *pu += ERTS_LINK_SIZE*sizeof(Uint);
+ if(!IS_CONST(lnk->pid))
+ *pu += NC_HEAP_SIZE(lnk->pid)*sizeof(Uint);
+ if (lnk->type != LINK_NODE && ERTS_LINK_ROOT(lnk) != NULL) {
+ erts_doforall_links(ERTS_LINK_ROOT(lnk),&one_link_size,vpu);
+ }
+}
+static void one_mon_size(ErtsMonitor *mon, void *vpu)
+{
+ Uint *pu = vpu;
+ *pu += ERTS_MONITOR_SIZE*sizeof(Uint);
+ if(!IS_CONST(mon->pid))
+ *pu += NC_HEAP_SIZE(mon->pid)*sizeof(Uint);
+ if(!IS_CONST(mon->ref))
+ *pu += NC_HEAP_SIZE(mon->ref)*sizeof(Uint);
+}
+
+/*
+ * process_info/[1,2]
+ */
+
+#define ERTS_PI_FAIL_TYPE_BADARG 0
+#define ERTS_PI_FAIL_TYPE_YIELD 1
+#define ERTS_PI_FAIL_TYPE_AWAIT_EXIT 2
+
+static ERTS_INLINE ErtsProcLocks
+pi_locks(Eterm info)
+{
+ switch (info) {
+ case am_status:
+ case am_priority:
+ return ERTS_PROC_LOCK_STATUS;
+ case am_links:
+ case am_monitors:
+ case am_monitored_by:
+ case am_suspending:
+ return ERTS_PROC_LOCK_LINK;
+ case am_messages:
+ case am_message_queue_len:
+ case am_total_heap_size:
+ return ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_MSGQ;
+ case am_memory:
+ return ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_LINK|ERTS_PROC_LOCK_MSGQ;
+ default:
+ return ERTS_PROC_LOCK_MAIN;
+ }
+}
+
+/*
+ * All valid process_info arguments.
+ */
+static Eterm pi_args[] = {
+ am_registered_name,
+ am_current_function,
+ am_initial_call,
+ am_status,
+ am_messages,
+ am_message_queue_len,
+ am_links,
+ am_monitors,
+ am_monitored_by,
+ am_dictionary,
+ am_trap_exit,
+ am_error_handler,
+ am_heap_size,
+ am_stack_size,
+ am_memory,
+ am_garbage_collection,
+ am_group_leader,
+ am_reductions,
+ am_priority,
+ am_trace,
+ am_binary,
+ am_sequential_trace_token,
+ am_catchlevel,
+ am_backtrace,
+ am_last_calls,
+ am_total_heap_size,
+ am_suspending,
+#ifdef HYBRID
+ am_message_binary
+#endif
+};
+
+#define ERTS_PI_ARGS ((int) (sizeof(pi_args)/sizeof(Eterm)))
+
+static ERTS_INLINE Eterm
+pi_ix2arg(int ix)
+{
+ if (ix < 0 || ERTS_PI_ARGS <= ix)
+ return am_undefined;
+ return pi_args[ix];
+}
+
+static ERTS_INLINE int
+pi_arg2ix(Eterm arg)
+{
+ switch (arg) {
+ case am_registered_name: return 0;
+ case am_current_function: return 1;
+ case am_initial_call: return 2;
+ case am_status: return 3;
+ case am_messages: return 4;
+ case am_message_queue_len: return 5;
+ case am_links: return 6;
+ case am_monitors: return 7;
+ case am_monitored_by: return 8;
+ case am_dictionary: return 9;
+ case am_trap_exit: return 10;
+ case am_error_handler: return 11;
+ case am_heap_size: return 12;
+ case am_stack_size: return 13;
+ case am_memory: return 14;
+ case am_garbage_collection: return 15;
+ case am_group_leader: return 16;
+ case am_reductions: return 17;
+ case am_priority: return 18;
+ case am_trace: return 19;
+ case am_binary: return 20;
+ case am_sequential_trace_token: return 21;
+ case am_catchlevel: return 22;
+ case am_backtrace: return 23;
+ case am_last_calls: return 24;
+ case am_total_heap_size: return 25;
+ case am_suspending: return 26;
+#ifdef HYBRID
+ case am_message_binary: return 27;
+#endif
+ default: return -1;
+ }
+}
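+
+/* pi_args[], pi_ix2arg() and pi_arg2ix() must be kept in sync;
+ * process_info_init() verifies the mapping in DEBUG builds. */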
+
+static Eterm pi_1_keys[] = {
+ am_registered_name,
+ am_current_function,
+ am_initial_call,
+ am_status,
+ am_message_queue_len,
+ am_messages,
+ am_links,
+ am_dictionary,
+ am_trap_exit,
+ am_error_handler,
+ am_priority,
+ am_group_leader,
+ am_total_heap_size,
+ am_heap_size,
+ am_stack_size,
+ am_reductions,
+ am_garbage_collection,
+ am_suspending
+};
+
+#define ERTS_PI_1_NO_OF_KEYS (sizeof(pi_1_keys)/sizeof(Eterm))
+
+static Eterm pi_1_keys_list;
+static Uint pi_1_keys_list_heap[2*ERTS_PI_1_NO_OF_KEYS];
+
+static void
+process_info_init(void)
+{
+ Eterm *hp = &pi_1_keys_list_heap[0];
+ int i;
+
+ pi_1_keys_list = NIL;
+
+ for (i = ERTS_PI_1_NO_OF_KEYS-1; i >= 0; i--) {
+ pi_1_keys_list = CONS(hp, pi_1_keys[i], pi_1_keys_list);
+ hp += 2;
+ }
+
+#ifdef DEBUG
+ { /* Make sure the process_info argument mappings are consistent */
+ int ix;
+ for (ix = 0; ix < ERTS_PI_ARGS; ix++) {
+ ASSERT(pi_arg2ix(pi_ix2arg(ix)) == ix);
+ }
+ }
+#endif
+
+}
+
+static ERTS_INLINE Process *
+pi_pid2proc(Process *c_p, Eterm pid, ErtsProcLocks info_locks)
+{
+#ifdef ERTS_SMP
+ /*
+ * If the main lock is needed, we use erts_pid2proc_not_running()
+ * instead of erts_pid2proc() for two reasons:
+ * * Current function of pid and possibly other information will
+     * * The current function of pid, and possibly other information,
+     *   will have been updated so that process_info() is consistent
+     *   with an info-request/info-response signal model.
+     * * We avoid blocking the whole scheduler executing the
+     *   process that is calling process_info() for a long time,
+     *   which would happen if pid were currently running.
+ * The caller of process_info() may have to yield if pid
+ * is currently running.
+ */
+
+ if (info_locks & ERTS_PROC_LOCK_MAIN)
+ return erts_pid2proc_not_running(c_p, ERTS_PROC_LOCK_MAIN,
+ pid, info_locks);
+ else
+#endif
+ return erts_pid2proc(c_p, ERTS_PROC_LOCK_MAIN,
+ pid, info_locks);
+}
+
+
+
+Eterm
+process_info_aux(Process *BIF_P,
+ Process *rp,
+ Eterm rpid,
+ Eterm item,
+ int always_wrap);
+
+#define ERTS_PI_RES_ELEM_IX_BUF_INC 1024
+#define ERTS_PI_DEF_RES_ELEM_IX_BUF_SZ ERTS_PI_ARGS
+
+static Eterm
+process_info_list(Process *c_p, Eterm pid, Eterm list, int always_wrap,
+ int *fail_type)
+{
+ int want_messages = 0;
+ int def_res_elem_ix_buf[ERTS_PI_DEF_RES_ELEM_IX_BUF_SZ];
+ int *res_elem_ix = &def_res_elem_ix_buf[0];
+ int res_elem_ix_ix = -1;
+ int res_elem_ix_sz = ERTS_PI_DEF_RES_ELEM_IX_BUF_SZ;
+ Eterm part_res[ERTS_PI_ARGS];
+ Eterm res, arg;
+ Uint *hp, *hp_end;
+ ErtsProcLocks locks = (ErtsProcLocks) 0;
+ int res_len, ix;
+ Process *rp = NULL;
+
+ *fail_type = ERTS_PI_FAIL_TYPE_BADARG;
+
+ for (ix = 0; ix < ERTS_PI_ARGS; ix++)
+ part_res[ix] = THE_NON_VALUE;
+
+ ASSERT(is_list(list));
+
+ while (is_list(list)) {
+ Eterm* consp = list_val(list);
+
+ arg = CAR(consp);
+ ix = pi_arg2ix(arg);
+ if (ix < 0) {
+ res = THE_NON_VALUE;
+ goto done;
+ }
+ if (arg == am_messages)
+ want_messages = 1;
+ locks |= pi_locks(arg);
+ res_elem_ix_ix++;
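+	/* Grow the index buffer if needed; the first overflow switches
+	 * from the on-stack default buffer to a heap-allocated one. */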
+ if (res_elem_ix_ix >= res_elem_ix_sz) {
+ if (res_elem_ix != &def_res_elem_ix_buf[0])
+ res_elem_ix =
+ erts_realloc(ERTS_ALC_T_TMP,
+ res_elem_ix,
+ sizeof(int)*(res_elem_ix_sz
+ += ERTS_PI_RES_ELEM_IX_BUF_INC));
+ else {
+ int new_res_elem_ix_sz = ERTS_PI_RES_ELEM_IX_BUF_INC;
+ int *new_res_elem_ix = erts_alloc(ERTS_ALC_T_TMP,
+ sizeof(int)*new_res_elem_ix_sz);
+ sys_memcpy((void *) new_res_elem_ix,
+ (void *) res_elem_ix,
+ sizeof(int)*res_elem_ix_sz);
+ res_elem_ix = new_res_elem_ix;
+ res_elem_ix_sz = new_res_elem_ix_sz;
+ }
+ }
+ res_elem_ix[res_elem_ix_ix] = ix;
+ list = CDR(consp);
+ }
+ if (is_not_nil(list)) {
+ res = THE_NON_VALUE;
+ goto done;
+ }
+
+ res_len = res_elem_ix_ix+1;
+
+ ASSERT(res_len > 0);
+
+ rp = pi_pid2proc(c_p, pid, locks|ERTS_PROC_LOCK_STATUS);
+ if (!rp) {
+ res = am_undefined;
+ goto done;
+ }
+ else if (rp == ERTS_PROC_LOCK_BUSY) {
+ rp = NULL;
+ res = THE_NON_VALUE;
+ *fail_type = ERTS_PI_FAIL_TYPE_YIELD;
+ goto done;
+ }
+ else if (c_p != rp && ERTS_PROC_PENDING_EXIT(rp)) {
+ locks |= ERTS_PROC_LOCK_STATUS;
+ res = THE_NON_VALUE;
+ *fail_type = ERTS_PI_FAIL_TYPE_AWAIT_EXIT;
+ goto done;
+ }
+ else if (!(locks & ERTS_PROC_LOCK_STATUS)) {
+ erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_STATUS);
+ }
+
+
+ /*
+     * We always handle 'messages' first if it is part of the
+     * result. This is because when both 'messages' and
+     * 'message_queue_len' are wanted, 'messages' may change
+     * the result of 'message_queue_len' (in case the queue
+     * contains bad distribution messages).
+ */
+ if (want_messages) {
+ ix = pi_arg2ix(am_messages);
+ ASSERT(part_res[ix] == THE_NON_VALUE);
+ part_res[ix] = process_info_aux(c_p, rp, pid, am_messages, always_wrap);
+ ASSERT(part_res[ix] != THE_NON_VALUE);
+ }
+
+ for (; res_elem_ix_ix >= 0; res_elem_ix_ix--) {
+ ix = res_elem_ix[res_elem_ix_ix];
+ if (part_res[ix] == THE_NON_VALUE) {
+ arg = pi_ix2arg(ix);
+ part_res[ix] = process_info_aux(c_p, rp, pid, arg, always_wrap);
+ ASSERT(part_res[ix] != THE_NON_VALUE);
+ }
+ }
+
+ hp = HAlloc(c_p, res_len*2);
+ hp_end = hp + res_len*2;
+ res = NIL;
+
+ for (res_elem_ix_ix = res_len - 1; res_elem_ix_ix >= 0; res_elem_ix_ix--) {
+ ix = res_elem_ix[res_elem_ix_ix];
+ ASSERT(part_res[ix] != THE_NON_VALUE);
+ /*
+	 * If the registered_name value should be ignored,
+	 * part_res[ix] is nil. For details, see the comment at the
+	 * beginning of process_info_aux().
+ */
+ if (is_nil(part_res[ix])) {
+ ASSERT(!always_wrap);
+ ASSERT(pi_ix2arg(ix) == am_registered_name);
+ }
+ else {
+ res = CONS(hp, part_res[ix], res);
+ hp += 2;
+ }
+ }
+
+ if (!always_wrap) {
+ HRelease(c_p, hp_end, hp);
+ }
+
+ done:
+
+ if (c_p == rp)
+ locks &= ~ERTS_PROC_LOCK_MAIN;
+ if (locks && rp)
+ erts_smp_proc_unlock(rp, locks);
+
+ if (res_elem_ix != &def_res_elem_ix_buf[0])
+ erts_free(ERTS_ALC_T_TMP, res_elem_ix);
+
+ return res;
+}
+
+BIF_RETTYPE process_info_1(BIF_ALIST_1)
+{
+ Eterm res;
+ int fail_type;
+
+ if (is_external_pid(BIF_ARG_1)
+ && external_pid_dist_entry(BIF_ARG_1) == erts_this_dist_entry)
+ BIF_RET(am_undefined);
+
+ if (is_not_internal_pid(BIF_ARG_1)
+ || internal_pid_index(BIF_ARG_1) >= erts_max_processes) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+
+ res = process_info_list(BIF_P, BIF_ARG_1, pi_1_keys_list, 0, &fail_type);
+ if (is_non_value(res)) {
+ switch (fail_type) {
+ case ERTS_PI_FAIL_TYPE_BADARG:
+ BIF_ERROR(BIF_P, BADARG);
+ case ERTS_PI_FAIL_TYPE_YIELD:
+ ERTS_BIF_YIELD1(bif_export[BIF_process_info_1], BIF_P, BIF_ARG_1);
+ case ERTS_PI_FAIL_TYPE_AWAIT_EXIT:
+ ERTS_BIF_AWAIT_X_DATA_TRAP(BIF_P, BIF_ARG_1, am_undefined);
+ default:
+ erl_exit(ERTS_ABORT_EXIT, "%s:%d: Internal error", __FILE__, __LINE__);
+ }
+ }
+
+ ASSERT(!(BIF_P->flags & F_P2PNR_RESCHED));
+ BIF_RET(res);
+}
+
+
+BIF_RETTYPE process_info_2(BIF_ALIST_2)
+{
+ Eterm res;
+ Process *rp;
+ Eterm pid = BIF_ARG_1;
+ ErtsProcLocks info_locks;
+ int fail_type;
+
+ if (is_external_pid(pid)
+ && external_pid_dist_entry(pid) == erts_this_dist_entry)
+ BIF_RET(am_undefined);
+
+ if (is_not_internal_pid(pid)
+ || internal_pid_index(BIF_ARG_1) >= erts_max_processes) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+
+ if (is_nil(BIF_ARG_2))
+ BIF_RET(NIL);
+
+ if (is_list(BIF_ARG_2)) {
+ res = process_info_list(BIF_P, BIF_ARG_1, BIF_ARG_2, 1, &fail_type);
+ if (is_non_value(res)) {
+ switch (fail_type) {
+ case ERTS_PI_FAIL_TYPE_BADARG:
+ BIF_ERROR(BIF_P, BADARG);
+ case ERTS_PI_FAIL_TYPE_YIELD:
+ ERTS_BIF_YIELD2(bif_export[BIF_process_info_2], BIF_P,
+ BIF_ARG_1, BIF_ARG_2);
+ case ERTS_PI_FAIL_TYPE_AWAIT_EXIT:
+ ERTS_BIF_AWAIT_X_DATA_TRAP(BIF_P, BIF_ARG_1, am_undefined);
+ default:
+ erl_exit(ERTS_ABORT_EXIT, "%s:%d: Internal error",
+ __FILE__, __LINE__);
+ }
+ }
+ ASSERT(!(BIF_P->flags & F_P2PNR_RESCHED));
+ BIF_RET(res);
+ }
+
+ if (pi_arg2ix(BIF_ARG_2) < 0)
+ BIF_ERROR(BIF_P, BADARG);
+
+ info_locks = pi_locks(BIF_ARG_2);
+
+ rp = pi_pid2proc(BIF_P, pid, info_locks|ERTS_PROC_LOCK_STATUS);
+ if (!rp)
+ res = am_undefined;
+ else if (rp == ERTS_PROC_LOCK_BUSY)
+ ERTS_BIF_YIELD2(bif_export[BIF_process_info_2], BIF_P,
+ BIF_ARG_1, BIF_ARG_2);
+ else if (rp != BIF_P && ERTS_PROC_PENDING_EXIT(rp)) {
+ erts_smp_proc_unlock(rp, info_locks|ERTS_PROC_LOCK_STATUS);
+ ERTS_BIF_AWAIT_X_DATA_TRAP(BIF_P, BIF_ARG_1, am_undefined);
+ }
+ else {
+ if (!(info_locks & ERTS_PROC_LOCK_STATUS))
+ erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_STATUS);
+ res = process_info_aux(BIF_P, rp, pid, BIF_ARG_2, 0);
+ }
+ ASSERT(is_value(res));
+
+#ifdef ERTS_SMP
+ if (BIF_P == rp)
+ info_locks &= ~ERTS_PROC_LOCK_MAIN;
+ if (rp && info_locks)
+ erts_smp_proc_unlock(rp, info_locks);
+#endif
+
+ ASSERT(!(BIF_P->flags & F_P2PNR_RESCHED));
+ BIF_RET(res);
+}
+
+Eterm
+process_info_aux(Process *BIF_P,
+ Process *rp,
+ Eterm rpid,
+ Eterm item,
+ int always_wrap)
+{
+ Eterm *hp;
+ Eterm res = NIL;
+
+ ASSERT(rp);
+
+ /*
+ * Q: Why this always_wrap argument?
+ *
+     * A: registered_name is strange. If the process has no registered name,
+ * process_info(Pid, registered_name) returns [], and
+ * the result of process_info(Pid) has no {registered_name, Name}
+ * tuple in the resulting list. This is inconsistent with all other
+ * options, but we do not dare to change it.
+ *
+ * When process_info/2 is called with a list as second argument,
+ * registered_name behaves as it should, i.e. a
+ * {registered_name, []} will appear in the resulting list.
+ *
+     * If always_wrap != 0, process_info_aux() always wraps the result
+     * in a {Key, Value} 2-tuple.
+ */
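+
+    /*
+     * Illustration (Erlang shell, Pid not registered):
+     *   process_info(Pid, registered_name)   -> []
+     *   process_info(Pid, [registered_name]) -> [{registered_name,[]}]
+     */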
+
+ switch (item) {
+
+ case am_registered_name:
+ if (rp->reg != NULL) {
+ hp = HAlloc(BIF_P, 3);
+ res = rp->reg->name;
+ } else {
+ if (always_wrap) {
+ hp = HAlloc(BIF_P, 3);
+ res = NIL;
+ }
+ else {
+ return NIL;
+ }
+ }
+ break;
+
+ case am_current_function:
+ if (rp->current == NULL) {
+ rp->current = find_function_from_pc(rp->i);
+ }
+ if (rp->current == NULL) {
+ hp = HAlloc(BIF_P, 3);
+ res = am_undefined;
+ } else {
+ Eterm* current;
+
+ if (rp->current[0] == am_erlang &&
+ rp->current[1] == am_process_info &&
+ (rp->current[2] == 1 || rp->current[2] == 2) &&
+ (current = find_function_from_pc(rp->cp)) != NULL) {
+
+ /*
+ * The current function is erlang:process_info/2,
+		 * which is not the answer that the application wants.
+		 * We will use the function pointed to by rp->cp
+ * instead.
+ */
+
+ rp->current = current;
+ }
+
+ hp = HAlloc(BIF_P, 3+4);
+ res = TUPLE3(hp, rp->current[0],
+ rp->current[1], make_small(rp->current[2]));
+ hp += 4;
+ }
+ break;
+
+ case am_initial_call:
+ hp = HAlloc(BIF_P, 3+4);
+ res = TUPLE3(hp,
+ rp->initial[INITIAL_MOD],
+ rp->initial[INITIAL_FUN],
+ make_small(rp->initial[INITIAL_ARI]));
+ hp += 4;
+ break;
+
+ case am_status:
+ res = erts_process_status(BIF_P, ERTS_PROC_LOCK_MAIN, rp, rpid);
+ ASSERT(res != am_undefined);
+ hp = HAlloc(BIF_P, 3);
+ break;
+
+ case am_messages: {
+ ErlMessage* mp;
+ int n;
+
+ ERTS_SMP_MSGQ_MV_INQ2PRIVQ(rp);
+ n = rp->msg.len;
+
+ if (n == 0 || rp->trace_flags & F_SENSITIVE) {
+ hp = HAlloc(BIF_P, 3);
+ } else {
+ int remove_bad_messages = 0;
+ struct {
+ Uint copy_struct_size;
+ ErlMessage* msgp;
+ } *mq = erts_alloc(ERTS_ALC_T_TMP, n*sizeof(*mq));
+ Sint i = 0;
+ Uint heap_need = 3;
+ Eterm *hp_end;
+
+ for (mp = rp->msg.first; mp; mp = mp->next) {
+ heap_need += 2;
+ mq[i].msgp = mp;
+ if (rp != BIF_P) {
+ Eterm msg = ERL_MESSAGE_TERM(mq[i].msgp);
+ if (is_value(msg)) {
+ mq[i].copy_struct_size = (is_immed(msg)
+#ifdef HYBRID
+ || NO_COPY(msg)
+#endif
+ ? 0
+ : size_object(msg));
+ }
+ else if (mq[i].msgp->data.attached) {
+ mq[i].copy_struct_size
+ = erts_msg_attached_data_size(mq[i].msgp);
+ }
+ else {
+ /* Bad distribution message; ignore */
+ remove_bad_messages = 1;
+ mq[i].copy_struct_size = 0;
+ }
+ heap_need += mq[i].copy_struct_size;
+ }
+ else {
+ mq[i].copy_struct_size = 0;
+ if (mp->data.attached)
+ heap_need += erts_msg_attached_data_size(mp);
+ }
+ i++;
+ }
+
+ hp = HAlloc(BIF_P, heap_need);
+ hp_end = hp + heap_need;
+ ASSERT(i == n);
+ for (i--; i >= 0; i--) {
+ Eterm msg = ERL_MESSAGE_TERM(mq[i].msgp);
+ if (rp != BIF_P) {
+ if (is_value(msg)) {
+ if (mq[i].copy_struct_size)
+ msg = copy_struct(msg,
+ mq[i].copy_struct_size,
+ &hp,
+ &MSO(BIF_P));
+ }
+ else if (mq[i].msgp->data.attached) {
+ ErlHeapFragment *hfp;
+ /*
+ * Decode it into a message buffer and attach it
+ * to the message instead of the attached external
+ * term.
+ *
+ * Note that we may not pass a process pointer
+ * to erts_msg_distext2heap(), since it would then
+ * try to alter locks on that process.
+ */
+ msg = erts_msg_distext2heap(
+ NULL, NULL, &hfp, &ERL_MESSAGE_TOKEN(mq[i].msgp),
+ mq[i].msgp->data.dist_ext);
+
+ ERL_MESSAGE_TERM(mq[i].msgp) = msg;
+ mq[i].msgp->data.heap_frag = hfp;
+
+ if (is_non_value(msg)) {
+ ASSERT(!mq[i].msgp->data.heap_frag);
+ /* Bad distribution message; ignore */
+ remove_bad_messages = 1;
+ continue;
+ }
+ else {
+ /* Make our copy of the message */
+ ASSERT(size_object(msg) == hfp->size);
+ msg = copy_struct(msg,
+ hfp->size,
+ &hp,
+ &MSO(BIF_P));
+ }
+ }
+ else {
+ /* Bad distribution message; ignore */
+ remove_bad_messages = 1;
+ continue;
+ }
+ }
+ else {
+ if (mq[i].msgp->data.attached) {
+ /* Decode it on the heap */
+ erts_move_msg_attached_data_to_heap(&hp,
+ &MSO(BIF_P),
+ mq[i].msgp);
+ msg = ERL_MESSAGE_TERM(mq[i].msgp);
+ ASSERT(!mq[i].msgp->data.attached);
+ if (is_non_value(msg)) {
+ /* Bad distribution message; ignore */
+ remove_bad_messages = 1;
+ continue;
+ }
+ }
+ }
+
+ res = CONS(hp, msg, res);
+ hp += 2;
+ }
+ HRelease(BIF_P, hp_end, hp+3);
+ erts_free(ERTS_ALC_T_TMP, mq);
+ if (remove_bad_messages) {
+ ErlMessage **mpp;
+ /*
+ * We need to remove bad distribution messages from
+ * the queue, so that the value returned for
+ * 'message_queue_len' is consistent with the value
+ * returned for 'messages'.
+ */
+ mpp = &rp->msg.first;
+ mp = rp->msg.first;
+ while (mp) {
+ if (is_value(ERL_MESSAGE_TERM(mp))) {
+ mpp = &mp->next;
+ mp = mp->next;
+ }
+ else {
+ ErlMessage* bad_mp = mp;
+ ASSERT(!mp->data.attached);
+ if (rp->msg.save == &mp->next)
+ rp->msg.save = mpp;
+ if (rp->msg.last == &mp->next)
+ rp->msg.last = mpp;
+ *mpp = mp->next;
+ mp = mp->next;
+ rp->msg.len--;
+ free_message(bad_mp);
+ }
+ }
+ }
+ }
+ break;
+ }
+
+ case am_message_queue_len:
+ hp = HAlloc(BIF_P, 3);
+ ERTS_SMP_MSGQ_MV_INQ2PRIVQ(rp);
+ res = make_small(rp->msg.len);
+ break;
+
+ case am_links: {
+ MonitorInfoCollection mic;
+ int i;
+ Eterm item;
+
+ INIT_MONITOR_INFOS(mic);
+
+ erts_doforall_links(rp->nlinks,&collect_one_link,&mic);
+
+ hp = HAlloc(BIF_P, 3 + mic.sz);
+ res = NIL;
+ for (i = 0; i < mic.mi_i; i++) {
+ item = STORE_NC(&hp, &MSO(BIF_P).externals, mic.mi[i].entity);
+ res = CONS(hp, item, res);
+ hp += 2;
+ }
+ DESTROY_MONITOR_INFOS(mic);
+ break;
+ }
+
+ case am_monitors: {
+ MonitorInfoCollection mic;
+ int i;
+
+ INIT_MONITOR_INFOS(mic);
+ erts_doforall_monitors(rp->monitors,&collect_one_origin_monitor,&mic);
+ hp = HAlloc(BIF_P, 3 + mic.sz);
+ res = NIL;
+ for (i = 0; i < mic.mi_i; i++) {
+ if (is_atom(mic.mi[i].entity)) {
+ /* Monitor by name.
+ * Build {process, {Name, Node}} and cons it.
+ */
+ Eterm t1, t2;
+
+ t1 = TUPLE2(hp, mic.mi[i].entity, mic.mi[i].node);
+ hp += 3;
+ t2 = TUPLE2(hp, am_process, t1);
+ hp += 3;
+ res = CONS(hp, t2, res);
+ hp += 2;
+ }
+ else {
+ /* Monitor by pid. Build {process, Pid} and cons it. */
+ Eterm t;
+ Eterm pid = STORE_NC(&hp,
+ &MSO(BIF_P).externals,
+ mic.mi[i].entity);
+ t = TUPLE2(hp, am_process, pid);
+ hp += 3;
+ res = CONS(hp, t, res);
+ hp += 2;
+ }
+ }
+ DESTROY_MONITOR_INFOS(mic);
+ break;
+ }
+
+ case am_monitored_by: {
+ MonitorInfoCollection mic;
+ int i;
+ Eterm item;
+
+ INIT_MONITOR_INFOS(mic);
+ erts_doforall_monitors(rp->monitors,&collect_one_target_monitor,&mic);
+ hp = HAlloc(BIF_P, 3 + mic.sz);
+
+ res = NIL;
+ for (i = 0; i < mic.mi_i; ++i) {
+ item = STORE_NC(&hp, &MSO(BIF_P).externals, mic.mi[i].entity);
+ res = CONS(hp, item, res);
+ hp += 2;
+ }
+ DESTROY_MONITOR_INFOS(mic);
+ break;
+ }
+
+ case am_suspending: {
+ ErtsSuspendMonitorInfoCollection smic;
+ int i;
+ Eterm item;
+#ifdef DEBUG
+ Eterm *hp_end;
+#endif
+
+ ERTS_INIT_SUSPEND_MONITOR_INFOS(smic,
+ BIF_P,
+ (BIF_P == rp
+ ? ERTS_PROC_LOCK_MAIN
+ : 0) | ERTS_PROC_LOCK_LINK);
+
+ erts_doforall_suspend_monitors(rp->suspend_monitors,
+ &collect_one_suspend_monitor,
+ &smic);
+ hp = HAlloc(BIF_P, 3 + smic.sz);
+#ifdef DEBUG
+ hp_end = hp + smic.sz;
+#endif
+
+ res = NIL;
+ for (i = 0; i < smic.smi_i; i++) {
+ Sint a = (Sint) smic.smi[i]->active; /* quiet compiler warnings */
+ Sint p = (Sint) smic.smi[i]->pending; /* on 64-bit machines... */
+ Eterm active;
+ Eterm pending;
+ if (IS_SSMALL(a))
+ active = make_small(a);
+ else {
+ active = small_to_big(a, hp);
+ hp += BIG_UINT_HEAP_SIZE;
+ }
+ if (IS_SSMALL(p))
+ pending = make_small(p);
+ else {
+ pending = small_to_big(p, hp);
+ hp += BIG_UINT_HEAP_SIZE;
+ }
+ item = TUPLE3(hp, smic.smi[i]->pid, active, pending);
+ hp += 4;
+ res = CONS(hp, item, res);
+ hp += 2;
+ }
+
+ ERTS_DESTROY_SUSPEND_MONITOR_INFOS(smic);
+ ASSERT(hp == hp_end);
+
+ break;
+ }
+
+ case am_dictionary:
+ if (rp->trace_flags & F_SENSITIVE) {
+ res = NIL;
+ } else {
+ res = erts_dictionary_copy(BIF_P, rp->dictionary);
+ }
+    if (lnk->type != LINK_PID) {
+	return;
+    }
+    EXTEND_MONITOR_INFOS(micp);
+ hp = HAlloc(BIF_P, 3);
+ if (rp->flags & F_TRAPEXIT)
+ res = am_true;
+ else
+ res = am_false;
+ break;
+
+ case am_error_handler:
+ hp = HAlloc(BIF_P, 3);
+ res = erts_proc_get_error_handler(BIF_P);
+ break;
+
+ case am_heap_size: {
+ Uint hsz = 3;
+ (void) erts_bld_uint(NULL, &hsz, HEAP_SIZE(rp));
+ hp = HAlloc(BIF_P, hsz);
+ res = erts_bld_uint(&hp, NULL, HEAP_SIZE(rp));
+ break;
+ }
+
+ case am_total_heap_size: {
+ ErlMessage *mp;
+ Uint total_heap_size;
+ Uint hsz = 3;
+
+ total_heap_size = rp->heap_sz;
+ if (rp->old_hend && rp->old_heap)
+ total_heap_size += rp->old_hend - rp->old_heap;
+
+ total_heap_size += rp->mbuf_sz;
+
+ ERTS_SMP_MSGQ_MV_INQ2PRIVQ(rp);
+
+ for (mp = rp->msg.first; mp; mp = mp->next)
+ if (mp->data.attached)
+ total_heap_size += erts_msg_attached_data_size(mp);
+
+ (void) erts_bld_uint(NULL, &hsz, total_heap_size);
+ hp = HAlloc(BIF_P, hsz);
+ res = erts_bld_uint(&hp, NULL, total_heap_size);
+ break;
+ }
+
+ case am_stack_size: {
+ Uint stack_size = STACK_START(rp) - rp->stop;
+ Uint hsz = 3;
+ (void) erts_bld_uint(NULL, &hsz, stack_size);
+ hp = HAlloc(BIF_P, hsz);
+ res = erts_bld_uint(&hp, NULL, stack_size);
+ break;
+ }
+
+ case am_memory: { /* Memory consumed in bytes */
+ ErlMessage *mp;
+ Uint size = 0;
+ Uint hsz = 3;
+ struct saved_calls *scb;
+ size += sizeof(Process);
+
+ ERTS_SMP_MSGQ_MV_INQ2PRIVQ(rp);
+
+ erts_doforall_links(rp->nlinks, &one_link_size, &size);
+ erts_doforall_monitors(rp->monitors, &one_mon_size, &size);
+ size += (rp->heap_sz + rp->mbuf_sz) * sizeof(Eterm);
+ if (rp->old_hend && rp->old_heap)
+ size += (rp->old_hend - rp->old_heap) * sizeof(Eterm);
+
+ size += rp->msg.len * sizeof(ErlMessage);
+
+ for (mp = rp->msg.first; mp; mp = mp->next)
+ if (mp->data.attached)
+ size += erts_msg_attached_data_size(mp)*sizeof(Eterm);
+
+ if (rp->arg_reg != rp->def_arg_reg) {
+ size += rp->arity * sizeof(rp->arg_reg[0]);
+ }
+
+ if (rp->psd)
+ size += sizeof(ErtsPSD);
+
+ scb = ERTS_PROC_GET_SAVED_CALLS_BUF(rp);
+ if (scb) {
+ size += (sizeof(struct saved_calls)
+ + (scb->len-1) * sizeof(scb->ct[0]));
+ }
+
+ size += erts_dicts_mem_size(rp);
+
+ (void) erts_bld_uint(NULL, &hsz, size);
+ hp = HAlloc(BIF_P, hsz);
+ res = erts_bld_uint(&hp, NULL, size);
+ break;
+ }
+
+ case am_garbage_collection: {
+ DECL_AM(minor_gcs);
+ Eterm t;
+
+ hp = HAlloc(BIF_P, 3+2+3+2+3);
+ t = TUPLE2(hp, AM_minor_gcs, make_small(GEN_GCS(rp)));
+ hp += 3;
+ res = CONS(hp, t, NIL);
+ hp += 2;
+ t = TUPLE2(hp, am_fullsweep_after, make_small(MAX_GEN_GCS(rp)));
+ hp += 3;
+ res = CONS(hp, t, res);
+ hp += 2;
+ break;
+ }
+
+ case am_group_leader: {
+ int sz = NC_HEAP_SIZE(rp->group_leader);
+ hp = HAlloc(BIF_P, 3 + sz);
+ res = STORE_NC(&hp, &MSO(BIF_P).externals, rp->group_leader);
+ break;
+ }
+
+ case am_reductions: {
+ Uint reds = rp->reds + erts_current_reductions(BIF_P, rp);
+ Uint hsz = 3;
+ (void) erts_bld_uint(NULL, &hsz, reds);
+ hp = HAlloc(BIF_P, hsz);
+ res = erts_bld_uint(&hp, NULL, reds);
+ break;
+ }
+
+ case am_priority:
+ hp = HAlloc(BIF_P, 3);
+ res = erts_get_process_priority(rp);
+ break;
+
+ case am_trace:
+ hp = HAlloc(BIF_P, 3);
+ res = make_small(rp->trace_flags & TRACEE_FLAGS);
+ break;
+
+ case am_binary: {
+ Uint sz = 3;
+ (void) bld_bin_list(NULL, &sz, MSO(rp).mso);
+ hp = HAlloc(BIF_P, sz);
+ res = bld_bin_list(&hp, NULL, MSO(rp).mso);
+ break;
+ }
+
+#ifdef HYBRID
+ case am_message_binary: {
+ Uint sz = 3;
+ (void) bld_bin_list(NULL, &sz, erts_global_offheap.mso);
+ hp = HAlloc(BIF_P, sz);
+ res = bld_bin_list(&hp, NULL, erts_global_offheap.mso);
+ break;
+ }
+#endif
+
+ case am_sequential_trace_token:
+ res = copy_object(rp->seq_trace_token, BIF_P);
+ hp = HAlloc(BIF_P, 3);
+ break;
+
+ case am_catchlevel:
+ hp = HAlloc(BIF_P, 3);
+ res = make_small(catchlevel(BIF_P));
+ break;
+
+ case am_backtrace: {
+ erts_dsprintf_buf_t *dsbufp = erts_create_tmp_dsbuf(0);
+ erts_stack_dump(ERTS_PRINT_DSBUF, (void *) dsbufp, rp);
+ res = new_binary(BIF_P, (byte *) dsbufp->str, (int) dsbufp->str_len);
+ erts_destroy_tmp_dsbuf(dsbufp);
+ hp = HAlloc(BIF_P, 3);
+ break;
+ }
+
+ case am_last_calls: {
+ struct saved_calls *scb = ERTS_PROC_GET_SAVED_CALLS_BUF(BIF_P);
+ if (!scb) {
+ hp = HAlloc(BIF_P, 3);
+ res = am_false;
+ } else {
+ /*
+	     * Each saved call needs one cons cell (2 words) and possibly
+	     * one 3-tuple (4 words); the result is wrapped in a 2-tuple
+	     * (3 words). Sends, receives and timeouts need no tuple, so
+	     * the actual need may be less, and we must do a HRelease()
+	     * to avoid creating holes.
+ */
+ Uint needed = scb->n*(2+4) + 3;
+ Eterm* limit;
+ Eterm term, list;
+ int i, j;
+
+ hp = HAlloc(BIF_P, needed);
+ limit = hp + needed;
+ list = NIL;
+ for (i = 0; i < scb->n; i++) {
+ j = scb->cur - i - 1;
+ if (j < 0)
+ j += scb->len;
+ if (scb->ct[j] == &exp_send)
+ term = am_send;
+ else if (scb->ct[j] == &exp_receive)
+ term = am_receive;
+ else if (scb->ct[j] == &exp_timeout)
+ term = am_timeout;
+ else {
+ term = TUPLE3(hp,
+ scb->ct[j]->code[0],
+ scb->ct[j]->code[1],
+ make_small(scb->ct[j]->code[2]));
+ hp += 4;
+ }
+ list = CONS(hp, term, list);
+ hp += 2;
+ }
+ res = list;
+ res = TUPLE2(hp, item, res);
+ hp += 3;
+ HRelease(BIF_P,limit,hp);
+ return res;
+ }
+ break;
+ }
+
+ default:
+ return THE_NON_VALUE; /* will produce badarg */
+
+ }
+
+ return TUPLE2(hp, item, res);
+}
+#undef MI_INC
+
+#if defined(VALGRIND)
+static int check_if_xml(void)
+{
+ char buf[1];
+ size_t bufsz = sizeof(buf);
+ return erts_sys_getenv("VALGRIND_LOG_XML", buf, &bufsz) >= 0;
+}
+#else
+#define check_if_xml() 0
+#endif
+
+/*
+ * This function takes care of calls to erlang:system_info/1 when the argument
+ * is a tuple.
+ */
+static BIF_RETTYPE
+info_1_tuple(Process* BIF_P, /* Pointer to current process. */
+ Eterm* tp, /* Pointer to first element in tuple */
+ int arity) /* Arity of tuple (untagged). */
+{
+ Eterm ret;
+ Eterm sel;
+
+ sel = *tp++;
+
+ if (sel == am_allocator_sizes && arity == 2) {
+ return erts_allocator_info_term(BIF_P, *tp, 1);
+ } else if (sel == am_allocated) {
+ if (arity == 2) {
+ Eterm res = THE_NON_VALUE;
+ char *buf;
+ int len = is_string(*tp);
+ if (len <= 0)
+ return res;
+ buf = (char *) erts_alloc(ERTS_ALC_T_TMP, len+1);
+ if (intlist_to_buf(*tp, buf, len) != len)
+ erl_exit(1, "%s:%d: Internal error\n", __FILE__, __LINE__);
+ buf[len] = '\0';
+ res = erts_instr_dump_memory_map(buf) ? am_true : am_false;
+ erts_free(ERTS_ALC_T_TMP, (void *) buf);
+ if (is_non_value(res))
+ goto badarg;
+ return res;
+ }
+ else if (arity == 3 && tp[0] == am_status) {
+ if (is_atom(tp[1]))
+ return erts_instr_get_stat(BIF_P, tp[1], 1);
+ else {
+ Eterm res = THE_NON_VALUE;
+ char *buf;
+ int len = is_string(tp[1]);
+ if (len <= 0)
+ return res;
+ buf = (char *) erts_alloc(ERTS_ALC_T_TMP, len+1);
+ if (intlist_to_buf(tp[1], buf, len) != len)
+ erl_exit(1, "%s:%d: Internal error\n", __FILE__, __LINE__);
+ buf[len] = '\0';
+ res = erts_instr_dump_stat(buf, 1) ? am_true : am_false;
+ erts_free(ERTS_ALC_T_TMP, (void *) buf);
+ if (is_non_value(res))
+ goto badarg;
+ return res;
+ }
+ }
+ else
+ goto badarg;
+ } else if (sel == am_allocator && arity == 2) {
+ return erts_allocator_info_term(BIF_P, *tp, 0);
+ } else if (ERTS_IS_ATOM_STR("internal_cpu_topology", sel) && arity == 2) {
+ return erts_get_cpu_topology_term(BIF_P, *tp);
+ } else if (ERTS_IS_ATOM_STR("cpu_topology", sel) && arity == 2) {
+ Eterm res = erts_get_cpu_topology_term(BIF_P, *tp);
+ ERTS_BIF_PREP_TRAP1(ret, erts_format_cpu_topology_trap, BIF_P, res);
+ return ret;
+#if defined(PURIFY) || defined(VALGRIND)
+ } else if (ERTS_IS_ATOM_STR("error_checker", sel)
+#if defined(PURIFY)
+ || sel == am_purify
+#elif defined(VALGRIND)
+ || ERTS_IS_ATOM_STR("valgrind", sel)
+#endif
+ ) {
+ if (*tp == am_memory) {
+#if defined(PURIFY)
+ BIF_RET(erts_make_integer(purify_new_leaks(), BIF_P));
+#elif defined(VALGRIND)
+ VALGRIND_DO_LEAK_CHECK;
+ BIF_RET(make_small(0));
+#endif
+ } else if (*tp == am_fd) {
+#if defined(PURIFY)
+ BIF_RET(erts_make_integer(purify_new_fds_inuse(), BIF_P));
+#elif defined(VALGRIND)
+ /* Not present in valgrind... */
+ BIF_RET(make_small(0));
+#endif
+ } else if (*tp == am_running) {
+#if defined(PURIFY)
+ BIF_RET(purify_is_running() ? am_true : am_false);
+#elif defined(VALGRIND)
+ BIF_RET(RUNNING_ON_VALGRIND ? am_true : am_false);
+#endif
+ } else if (is_list(*tp)) {
+#if defined(PURIFY)
+#define ERTS_ERROR_CHECKER_PRINTF purify_printf
+#elif defined(VALGRIND)
+#define ERTS_ERROR_CHECKER_PRINTF VALGRIND_PRINTF
+#endif
+ int buf_size = 8*1024; /* Try with 8KB first */
+ char *buf = erts_alloc(ERTS_ALC_T_TMP, buf_size);
+ int r = io_list_to_buf(*tp, (char*) buf, buf_size - 1);
+ if (r < 0) {
+ erts_free(ERTS_ALC_T_TMP, (void *) buf);
+ buf_size = io_list_len(*tp);
+ if (buf_size < 0)
+ goto badarg;
+ buf_size++;
+ buf = erts_alloc(ERTS_ALC_T_TMP, buf_size);
+ r = io_list_to_buf(*tp, (char*) buf, buf_size - 1);
+ ASSERT(r == buf_size - 1);
+ }
+ buf[buf_size - 1 - r] = '\0';
+ if (check_if_xml()) {
+ ERTS_ERROR_CHECKER_PRINTF("<erlang_info_log>"
+ "%s</erlang_info_log>\n", buf);
+ } else {
+ ERTS_ERROR_CHECKER_PRINTF("%s\n", buf);
+ }
+ erts_free(ERTS_ALC_T_TMP, (void *) buf);
+ BIF_RET(am_true);
+#undef ERTS_ERROR_CHECKER_PRINTF
+ }
+#endif
+#ifdef QUANTIFY
+ } else if (sel == am_quantify) {
+ if (*tp == am_clear) {
+ quantify_clear_data();
+ BIF_RET(am_true);
+ } else if (*tp == am_start) {
+ quantify_start_recording_data();
+ BIF_RET(am_true);
+ } else if (*tp == am_stop) {
+ quantify_stop_recording_data();
+ BIF_RET(am_true);
+ } else if (*tp == am_running) {
+ BIF_RET(quantify_is_running() ? am_true : am_false);
+ }
+#endif
+#if defined(__GNUC__) && defined(HAVE_SOLARIS_SPARC_PERFMON)
+ } else if (ERTS_IS_ATOM_STR("ultrasparc_set_pcr", sel)) {
+ unsigned long long tmp;
+ int fd;
+ int rc;
+
+ if (arity != 2 || !is_small(*tp)) {
+ goto badarg;
+ }
+ tmp = signed_val(*tp);
+ if ((fd = open("/dev/perfmon", O_RDONLY)) == -1) {
+ BIF_RET(am_false);
+ }
+ rc = ioctl(fd, PERFMON_SETPCR, &tmp);
+ close(fd);
+ if (rc < 0) {
+ BIF_RET(am_false);
+ }
+ BIF_RET(am_true);
+#endif
+ }
+
+ badarg:
+ ERTS_BIF_PREP_ERROR(ret, BIF_P, BADARG);
+
+ return ret;
+}
+
+#define INFO_DSBUF_INC_SZ 256
+
+static erts_dsprintf_buf_t *
+grow_info_dsbuf(erts_dsprintf_buf_t *dsbufp, size_t need)
+{
+ size_t size;
+ size_t free_size = dsbufp->size - dsbufp->str_len;
+
+ ASSERT(dsbufp);
+
+ if (need <= free_size)
+ return dsbufp;
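+    /* Grow to at least 'need' more bytes, rounding the increment up to
+     * a whole number of INFO_DSBUF_INC_SZ-byte chunks. */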
+ size = need - free_size + INFO_DSBUF_INC_SZ;
+ size = ((size + INFO_DSBUF_INC_SZ - 1)/INFO_DSBUF_INC_SZ)*INFO_DSBUF_INC_SZ;
+ size += dsbufp->size;
+ ASSERT(dsbufp->str_len + need <= size);
+ dsbufp->str = (char *) erts_realloc(ERTS_ALC_T_INFO_DSBUF,
+ (void *) dsbufp->str,
+ size);
+ dsbufp->size = size;
+ return dsbufp;
+}
+
+static erts_dsprintf_buf_t *
+erts_create_info_dsbuf(Uint size)
+{
+ Uint init_size = size ? size : INFO_DSBUF_INC_SZ;
+ erts_dsprintf_buf_t init = ERTS_DSPRINTF_BUF_INITER(grow_info_dsbuf);
+ erts_dsprintf_buf_t *dsbufp = erts_alloc(ERTS_ALC_T_INFO_DSBUF,
+ sizeof(erts_dsprintf_buf_t));
+ sys_memcpy((void *) dsbufp, (void *) &init, sizeof(erts_dsprintf_buf_t));
+ dsbufp->str = (char *) erts_alloc(ERTS_ALC_T_INFO_DSBUF, init_size);
+ dsbufp->str[0] = '\0';
+ dsbufp->size = init_size;
+ return dsbufp;
+}
+
+static void
+erts_destroy_info_dsbuf(erts_dsprintf_buf_t *dsbufp)
+{
+ if (dsbufp->str)
+ erts_free(ERTS_ALC_T_INFO_DSBUF, (void *) dsbufp->str);
+ erts_free(ERTS_ALC_T_INFO_DSBUF, (void *) dsbufp);
+}
+
+static Eterm
+c_compiler_used(Eterm **hpp, Uint *szp)
+{
+
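+    /*
+     * Result is {gnuc, Vsn} where Vsn is the major version or a
+     * {Major, Minor[, Patchlevel]} tuple, {msc, _MSC_VER} for
+     * Microsoft C, or {undefined, undefined} for unknown compilers.
+     */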
+#if defined(__GNUC__)
+# if defined(__GNUC_MINOR__) && defined(__GNUC_PATCHLEVEL__)
+# define ERTS_GNUC_VSN_NUMS 3
+# elif defined(__GNUC_MINOR__)
+# define ERTS_GNUC_VSN_NUMS 2
+# else
+# define ERTS_GNUC_VSN_NUMS 1
+# endif
+ return erts_bld_tuple(hpp,
+ szp,
+ 2,
+ erts_bld_atom(hpp, szp, "gnuc"),
+#if ERTS_GNUC_VSN_NUMS > 1
+ erts_bld_tuple(hpp,
+ szp,
+ ERTS_GNUC_VSN_NUMS,
+#endif
+ erts_bld_uint(hpp, szp,
+ (Uint) __GNUC__)
+#ifdef __GNUC_MINOR__
+ ,
+ erts_bld_uint(hpp, szp,
+ (Uint) __GNUC_MINOR__)
+#ifdef __GNUC_PATCHLEVEL__
+ ,
+ erts_bld_uint(hpp, szp,
+ (Uint) __GNUC_PATCHLEVEL__)
+#endif
+#endif
+#if ERTS_GNUC_VSN_NUMS > 1
+ )
+#endif
+ );
+
+#elif defined(_MSC_VER)
+ return erts_bld_tuple(hpp,
+ szp,
+ 2,
+ erts_bld_atom(hpp, szp, "msc"),
+ erts_bld_uint(hpp, szp, (Uint) _MSC_VER));
+
+#else
+ return erts_bld_tuple(hpp,
+ szp,
+ 2,
+ am_undefined,
+ am_undefined);
+#endif
+
+}
+
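+/*
+ * erlang:system_info/1. Tuple arguments are handled by info_1_tuple();
+ * atom arguments are handled inline below.
+ * Example (Erlang shell): erlang:system_info(wordsize) -> 4 | 8.
+ */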
+BIF_RETTYPE system_info_1(BIF_ALIST_1)
+{
+ Eterm res;
+ Eterm* hp;
+ Eterm val;
+ int i;
+
+ if (is_tuple(BIF_ARG_1)) {
+ Eterm* tp = tuple_val(BIF_ARG_1);
+ Uint arity = *tp++;
+ return info_1_tuple(BIF_P, tp, arityval(arity));
+ } else if (BIF_ARG_1 == am_scheduler_id) {
+#ifdef ERTS_SMP
+ ASSERT(BIF_P->scheduler_data);
+ BIF_RET(make_small(BIF_P->scheduler_data->no));
+#else
+ BIF_RET(make_small(1));
+#endif
+ } else if (BIF_ARG_1 == am_compat_rel) {
+ ASSERT(erts_compat_rel > 0);
+ BIF_RET(make_small(erts_compat_rel));
+ } else if (BIF_ARG_1 == am_multi_scheduling) {
+#ifndef ERTS_SMP
+ BIF_RET(am_disabled);
+#else
+ if (erts_no_schedulers == 1)
+ BIF_RET(am_disabled);
+ else {
+ BIF_RET(erts_is_multi_scheduling_blocked()
+ ? am_blocked
+ : am_enabled);
+ }
+#endif
+ } else if (BIF_ARG_1 == am_allocated_areas) {
+ res = erts_allocated_areas(NULL, NULL, BIF_P);
+ BIF_RET(res);
+ } else if (BIF_ARG_1 == am_allocated) {
+ BIF_RET(erts_instr_get_memory_map(BIF_P));
+ } else if (BIF_ARG_1 == am_hipe_architecture) {
+#if defined(HIPE)
+ BIF_RET(hipe_arch_name);
+#else
+ BIF_RET(am_undefined);
+#endif
+ } else if (BIF_ARG_1 == am_trace_control_word) {
+ BIF_RET(db_get_trace_control_word_0(BIF_P));
+ } else if (ERTS_IS_ATOM_STR("ets_realloc_moves", BIF_ARG_1)) {
+ BIF_RET((erts_ets_realloc_always_moves) ? am_true : am_false);
+ } else if (BIF_ARG_1 == am_sequential_tracer) {
+ val = erts_get_system_seq_tracer();
+	ASSERT(is_internal_pid(val) || is_internal_port(val) || val==am_false);
+ hp = HAlloc(BIF_P, 3);
+ res = TUPLE2(hp, am_sequential_tracer, val);
+ BIF_RET(res);
+ } else if (BIF_ARG_1 == am_garbage_collection){
+ Uint val = (Uint) erts_smp_atomic_read(&erts_max_gen_gcs);
+ hp = HAlloc(BIF_P, 3+2);
+ res = TUPLE2(hp, am_fullsweep_after, make_small(val));
+ hp += 3;
+ res = CONS(hp, res, NIL);
+ BIF_RET(res);
+ } else if (BIF_ARG_1 == am_fullsweep_after){
+ Uint val = (Uint) erts_smp_atomic_read(&erts_max_gen_gcs);
+ hp = HAlloc(BIF_P, 3);
+ res = TUPLE2(hp, am_fullsweep_after, make_small(val));
+ BIF_RET(res);
+ } else if (BIF_ARG_1 == am_process_count) {
+ BIF_RET(make_small(erts_process_count()));
+ } else if (BIF_ARG_1 == am_process_limit) {
+ BIF_RET(make_small(erts_max_processes));
+ } else if (BIF_ARG_1 == am_info
+ || BIF_ARG_1 == am_procs
+ || BIF_ARG_1 == am_loaded
+ || BIF_ARG_1 == am_dist) {
+ erts_dsprintf_buf_t *dsbufp = erts_create_info_dsbuf(0);
+
+ /* Need to be the only thread running... */
+ erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ erts_smp_block_system(0);
+
+ if (BIF_ARG_1 == am_info)
+ info(ERTS_PRINT_DSBUF, (void *) dsbufp);
+ else if (BIF_ARG_1 == am_procs)
+ process_info(ERTS_PRINT_DSBUF, (void *) dsbufp);
+ else if (BIF_ARG_1 == am_loaded)
+ loaded(ERTS_PRINT_DSBUF, (void *) dsbufp);
+ else
+ distribution_info(ERTS_PRINT_DSBUF, (void *) dsbufp);
+
+ erts_smp_release_system();
+ erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
+
+ ASSERT(dsbufp && dsbufp->str);
+ res = new_binary(BIF_P, (byte *) dsbufp->str, (int) dsbufp->str_len);
+ erts_destroy_info_dsbuf(dsbufp);
+ BIF_RET(res);
+ } else if (ERTS_IS_ATOM_STR("dist_ctrl", BIF_ARG_1)) {
+ DistEntry *dep;
+ i = 0;
+ /* Need to be the only thread running... */
+ erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ erts_smp_block_system(0);
+ for (dep = erts_visible_dist_entries; dep; dep = dep->next)
+ ++i;
+ for (dep = erts_hidden_dist_entries; dep; dep = dep->next)
+ ++i;
+ hp = HAlloc(BIF_P,i*(3+2));
+ res = NIL;
+ for (dep = erts_hidden_dist_entries; dep; dep = dep->next) {
+ Eterm tpl;
+ ASSERT(is_immed(dep->cid));
+ tpl = TUPLE2(hp, dep->sysname, dep->cid);
+ hp +=3;
+ res = CONS(hp, tpl, res);
+ hp += 2;
+ }
+ for (dep = erts_visible_dist_entries; dep; dep = dep->next) {
+ Eterm tpl;
+ ASSERT(is_immed(dep->cid));
+ tpl = TUPLE2(hp, dep->sysname, dep->cid);
+ hp +=3;
+ res = CONS(hp, tpl, res);
+ hp += 2;
+ }
+ erts_smp_release_system();
+ erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ BIF_RET(res);
+ } else if (BIF_ARG_1 == am_system_version) {
+ erts_dsprintf_buf_t *dsbufp = erts_create_tmp_dsbuf(0);
+ erts_print_system_version(ERTS_PRINT_DSBUF, (void *) dsbufp, BIF_P);
+ hp = HAlloc(BIF_P, dsbufp->str_len*2);
+ res = buf_to_intlist(&hp, dsbufp->str, dsbufp->str_len, NIL);
+ erts_destroy_tmp_dsbuf(dsbufp);
+ BIF_RET(res);
+ } else if (BIF_ARG_1 == am_system_architecture) {
+ hp = HAlloc(BIF_P, 2*(sizeof(ERLANG_ARCHITECTURE)-1));
+ BIF_RET(buf_to_intlist(&hp,
+ ERLANG_ARCHITECTURE,
+ sizeof(ERLANG_ARCHITECTURE)-1,
+ NIL));
+ }
+ else if (BIF_ARG_1 == am_memory_types) {
+ return erts_instr_get_type_info(BIF_P);
+ }
+ else if (BIF_ARG_1 == am_os_type) {
+ Eterm type = am_atom_put(os_type, strlen(os_type));
+ Eterm flav, tup;
+ char *buf = erts_alloc(ERTS_ALC_T_TMP, 1024); /* More than enough */
+
+ os_flavor(buf, 1024);
+ flav = am_atom_put(buf, strlen(buf));
+ hp = HAlloc(BIF_P, 3);
+ tup = TUPLE2(hp, type, flav);
+ erts_free(ERTS_ALC_T_TMP, (void *) buf);
+ BIF_RET(tup);
+ }
+ else if (BIF_ARG_1 == am_allocator) {
+ BIF_RET(erts_allocator_options((void *) BIF_P));
+ }
+ else if (BIF_ARG_1 == am_thread_pool_size) {
+#ifdef USE_THREADS
+ extern int erts_async_max_threads;
+#endif
+ int n;
+
+#ifdef USE_THREADS
+ n = erts_async_max_threads;
+#else
+ n = 0;
+#endif
+ BIF_RET(make_small(n));
+ }
+ else if (BIF_ARG_1 == am_alloc_util_allocators) {
+ BIF_RET(erts_alloc_util_allocators((void *) BIF_P));
+ }
+ else if (BIF_ARG_1 == am_elib_malloc) {
+#ifdef ELIB_ALLOC_IS_CLIB
+ struct elib_stat stat;
+ DECL_AM(heap_size);
+ DECL_AM(max_alloced_size);
+ DECL_AM(alloced_size);
+ DECL_AM(free_size);
+ DECL_AM(no_alloced_blocks);
+ DECL_AM(no_free_blocks);
+ DECL_AM(smallest_alloced_block);
+ DECL_AM(largest_free_block);
+ Eterm atoms[8];
+ Eterm ints[8];
+ Uint **hpp;
+ Uint sz;
+ Uint *szp;
+ int length;
+#ifdef DEBUG
+ Uint *endp;
+#endif
+
+ elib_stat(&stat);
+
+ /* First find out the heap size needed ... */
+ hpp = NULL;
+ szp = &sz;
+ sz = 0;
+
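+	/* Two-pass term building: with hpp == NULL the erts_bld_*()
+	 * calls below only accumulate the required heap size in *szp;
+	 * after HAlloc() the code jumps back with szp == NULL and the
+	 * same calls write the term through *hpp. */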
+ build_elib_malloc_term:
+ length = 0;
+ atoms[length] = AM_heap_size;
+ ints[length++] = erts_bld_uint(hpp, szp,
+ (Uint) stat.mem_total*sizeof(Uint));
+ atoms[length] = AM_max_alloced_size;
+ ints[length++] = erts_bld_uint(hpp, szp,
+ (Uint) stat.mem_max_alloc*sizeof(Uint));
+ atoms[length] = AM_alloced_size;
+ ints[length++] = erts_bld_uint(hpp, szp,
+ (Uint) stat.mem_alloc*sizeof(Uint));
+ atoms[length] = AM_free_size;
+ ints[length++] = erts_bld_uint(hpp, szp,
+ (Uint) stat.mem_free*sizeof(Uint));
+ atoms[length] = AM_no_alloced_blocks;
+ ints[length++] = erts_bld_uint(hpp, szp, (Uint) stat.mem_blocks);
+ atoms[length] = AM_no_free_blocks;
+ ints[length++] = erts_bld_uint(hpp, szp, (Uint) stat.free_blocks);
+ atoms[length] = AM_smallest_alloced_block;
+ ints[length++] = erts_bld_uint(hpp, szp,
+ (Uint) stat.min_used*sizeof(Uint));
+ atoms[length] = AM_largest_free_block;
+ ints[length++] = erts_bld_uint(hpp, szp,
+ (Uint) stat.max_free*sizeof(Uint));
+
+ ASSERT(length <= sizeof(atoms)/sizeof(Eterm));
+ ASSERT(length <= sizeof(ints)/sizeof(Eterm));
+
+ res = erts_bld_2tup_list(hpp, szp, length, atoms, ints);
+
+ if (szp) {
+ /* ... and then build the term */
+ hp = HAlloc(BIF_P, sz);
+#ifdef DEBUG
+ endp = hp + sz;
+#endif
+
+ szp = NULL;
+ hpp = &hp;
+ goto build_elib_malloc_term;
+ }
+
+#ifdef DEBUG
+ ASSERT(endp == hp);
+#endif
+
+#else /* #ifdef ELIB_ALLOC_IS_CLIB */
+ res = am_false;
+#endif /* #ifdef ELIB_ALLOC_IS_CLIB */
+
+ BIF_RET(res);
+ }
+ else if (BIF_ARG_1 == am_os_version) {
+ int major, minor, build;
+ Eterm tup;
+
+ os_version(&major, &minor, &build);
+ hp = HAlloc(BIF_P, 4);
+ tup = TUPLE3(hp,
+ make_small(major),
+ make_small(minor),
+ make_small(build));
+ BIF_RET(tup);
+ }
+ else if (BIF_ARG_1 == am_version) {
+ int n = strlen(ERLANG_VERSION);
+ hp = HAlloc(BIF_P, ((sizeof ERLANG_VERSION)-1) * 2);
+ BIF_RET(buf_to_intlist(&hp, ERLANG_VERSION, n, NIL));
+ }
+ else if (BIF_ARG_1 == am_machine) {
+ int n = strlen(EMULATOR);
+ hp = HAlloc(BIF_P, n*2);
+ BIF_RET(buf_to_intlist(&hp, EMULATOR, n, NIL));
+ }
+ else if (BIF_ARG_1 == am_garbage_collection) {
+ BIF_RET(am_generational);
+#ifdef ERTS_OPCODE_COUNTER_SUPPORT
+ } else if (BIF_ARG_1 == am_instruction_counts) {
+#ifdef DEBUG
+ Eterm *endp;
+#endif
+ Eterm *hp, **hpp;
+ Uint hsz, *hszp;
+ int i;
+
+ hpp = NULL;
+ hsz = 0;
+ hszp = &hsz;
+
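+	/* Same two-pass size-then-build idiom as build_elib_malloc_term
+	 * above: first size the term with hpp == NULL, then build it
+	 * with hszp == NULL. */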
+ bld_instruction_counts:
+
+ res = NIL;
+ for (i = num_instructions-1; i >= 0; i--) {
+ res = erts_bld_cons(hpp, hszp,
+ erts_bld_tuple(hpp, hszp, 2,
+ am_atom_put(opc[i].name,
+ strlen(opc[i].name)),
+ erts_bld_uint(hpp, hszp,
+ opc[i].count)),
+ res);
+ }
+
+ if (!hpp) {
+ hp = HAlloc(BIF_P, hsz);
+ hpp = &hp;
+#ifdef DEBUG
+ endp = hp + hsz;
+#endif
+ hszp = NULL;
+ goto bld_instruction_counts;
+ }
+
+#ifdef DEBUG
+ ASSERT(endp == hp);
+#endif
+
+ BIF_RET(res);
+#endif /* ERTS_OPCODE_COUNTER_SUPPORT */
+ } else if (BIF_ARG_1 == am_wordsize) {
+ return make_small(sizeof(Eterm));
+ } else if (BIF_ARG_1 == am_endian) {
+#if defined(WORDS_BIGENDIAN)
+ return am_big;
+#else
+ return am_little;
+#endif
+ } else if (BIF_ARG_1 == am_heap_sizes) {
+ return erts_heap_sizes(BIF_P);
+ } else if (BIF_ARG_1 == am_global_heaps_size) {
+#ifdef HYBRID
+ Uint hsz = 0;
+ Uint sz = 0;
+
+ sz += global_heap_sz;
+#ifdef INCREMENTAL
+ /* The size of the old generation is a bit hard to define here...
+ * The amount of live data in the last collection perhaps..? */
+ sz = 0;
+#else
+ if (global_old_hend && global_old_heap)
+ sz += global_old_hend - global_old_heap;
+#endif
+
+ sz *= sizeof(Eterm);
+
+ (void) erts_bld_uint(NULL, &hsz, sz);
+ hp = hsz ? HAlloc(BIF_P, hsz) : NULL;
+ res = erts_bld_uint(&hp, NULL, sz);
+#else
+ res = make_small(0);
+#endif
+ return res;
+ } else if (BIF_ARG_1 == am_heap_type) {
+#if defined(HYBRID)
+ return am_hybrid;
+#else
+ return am_private;
+#endif
+ } else if (ERTS_IS_ATOM_STR("cpu_topology", BIF_ARG_1)) {
+ res = erts_get_cpu_topology_term(BIF_P, am_used);
+ BIF_TRAP1(erts_format_cpu_topology_trap, BIF_P, res);
+#if defined(__GNUC__) && defined(HAVE_SOLARIS_SPARC_PERFMON)
+ } else if (ERTS_IS_ATOM_STR("ultrasparc_read_tick1", BIF_ARG_1)) {
+ register unsigned high asm("%l0");
+ register unsigned low asm("%l1");
+
+ hp = HAlloc(BIF_P, 5);
+ asm volatile (".word 0xa3410000;" /* rd %tick, %l1 */
+ ".word 0xa1347020" /* srlx %l1, 0x20, %l0 */
+ : "=r" (high), "=r" (low));
+ res = TUPLE4(hp, make_small(high >> 16),
+ make_small(high & 0xFFFF),
+ make_small(low >> 16),
+ make_small(low & 0xFFFF));
+ BIF_RET(res);
+ } else if (ERTS_IS_ATOM_STR("ultrasparc_read_tick2", BIF_ARG_1)) {
+ register unsigned high asm("%l0");
+ register unsigned low asm("%l1");
+
+ asm volatile (".word 0xa3410000;" /* rd %tick, %l1 */
+ ".word 0xa1347020" /* srlx %l1, 0x20, %l0 */
+ : "=r" (high), "=r" (low));
+ hp = HAlloc(BIF_P, 5);
+ res = TUPLE4(hp, make_small(high >> 16),
+ make_small(high & 0xFFFF),
+ make_small(low >> 16),
+ make_small(low & 0xFFFF));
+ BIF_RET(res);
+ } else if (ERTS_IS_ATOM_STR("ultrasparc_read_pic1", BIF_ARG_1)) {
+ register unsigned high asm("%l0");
+ register unsigned low asm("%l1");
+
+ hp = HAlloc(BIF_P, 5);
+ asm volatile (".word 0xa3444000;" /* rd %asr17, %l1 */
+ ".word 0xa1347020" /* srlx %l1, 0x20, %l0 */
+ : "=r" (high), "=r" (low));
+ res = TUPLE4(hp, make_small(high >> 16),
+ make_small(high & 0xFFFF),
+ make_small(low >> 16),
+ make_small(low & 0xFFFF));
+ BIF_RET(res);
+ } else if (ERTS_IS_ATOM_STR("ultrasparc_read_pic2", BIF_ARG_1)) {
+ register unsigned high asm("%l0");
+ register unsigned low asm("%l1");
+
+ asm volatile (".word 0xa3444000;" /* rd %asr17, %l1 */
+ ".word 0xa1347020" /* srlx %l1, 0x20, %l0 */
+ : "=r" (high), "=r" (low));
+ hp = HAlloc(BIF_P, 5);
+ res = TUPLE4(hp, make_small(high >> 16),
+ make_small(high & 0xFFFF),
+ make_small(low >> 16),
+ make_small(low & 0xFFFF));
+ BIF_RET(res);
+#endif
+ } else if (BIF_ARG_1 == am_threads) {
+#ifdef USE_THREADS
+ return am_true;
+#else
+ return am_false;
+#endif
+ } else if (BIF_ARG_1 == am_creation) {
+ return make_small(erts_this_node->creation);
+ } else if (BIF_ARG_1 == am_break_ignored) {
+ extern int ignore_break;
+ if (ignore_break)
+ return am_true;
+ else
+ return am_false;
+ }
+    /* Less frequently used arguments follow ... */
+ else if (ERTS_IS_ATOM_STR("logical_processors", BIF_ARG_1)) {
+ int no = erts_get_cpu_configured(erts_cpuinfo);
+ if (no > 0)
+ BIF_RET(make_small((Uint) no));
+ else {
+ DECL_AM(unknown);
+ BIF_RET(AM_unknown);
+ }
+ }
+ else if (ERTS_IS_ATOM_STR("logical_processors_online", BIF_ARG_1)) {
+ int no = erts_get_cpu_online(erts_cpuinfo);
+ if (no > 0)
+ BIF_RET(make_small((Uint) no));
+ else {
+ DECL_AM(unknown);
+ BIF_RET(AM_unknown);
+ }
+ }
+ else if (ERTS_IS_ATOM_STR("logical_processors_available", BIF_ARG_1)) {
+ int no = erts_get_cpu_available(erts_cpuinfo);
+ if (no > 0)
+ BIF_RET(make_small((Uint) no));
+ else {
+ DECL_AM(unknown);
+ BIF_RET(AM_unknown);
+ }
+ } else if (ERTS_IS_ATOM_STR("otp_release", BIF_ARG_1)) {
+ int n = sizeof(ERLANG_OTP_RELEASE)-1;
+ hp = HAlloc(BIF_P, 2*n);
+ BIF_RET(buf_to_intlist(&hp, ERLANG_OTP_RELEASE, n, NIL));
+ } else if (ERTS_IS_ATOM_STR("driver_version", BIF_ARG_1)) {
+ char buf[42];
+ int n = erts_snprintf(buf, 42, "%d.%d",
+ ERL_DRV_EXTENDED_MAJOR_VERSION,
+ ERL_DRV_EXTENDED_MINOR_VERSION);
+ hp = HAlloc(BIF_P, 2*n);
+ BIF_RET(buf_to_intlist(&hp, buf, n, NIL));
+ } else if (ERTS_IS_ATOM_STR("smp_support", BIF_ARG_1)) {
+#ifdef ERTS_SMP
+ BIF_RET(am_true);
+#else
+ BIF_RET(am_false);
+#endif
+ } else if (ERTS_IS_ATOM_STR("scheduler_bind_type", BIF_ARG_1)) {
+ BIF_RET(erts_bound_schedulers_term(BIF_P));
+ } else if (ERTS_IS_ATOM_STR("scheduler_bindings", BIF_ARG_1)) {
+ BIF_RET(erts_get_schedulers_binds(BIF_P));
+ } else if (ERTS_IS_ATOM_STR("constant_pool_support", BIF_ARG_1)) {
+ BIF_RET(am_true);
+ } else if (ERTS_IS_ATOM_STR("schedulers", BIF_ARG_1)
+ || ERTS_IS_ATOM_STR("schedulers_total", BIF_ARG_1)) {
+ res = make_small(erts_no_schedulers);
+ BIF_RET(res);
+ } else if (ERTS_IS_ATOM_STR("schedulers_state", BIF_ARG_1)) {
+#ifndef ERTS_SMP
+ Eterm *hp = HAlloc(BIF_P, 4);
+ res = TUPLE3(hp, make_small(1), make_small(1), make_small(1));
+ BIF_RET(res);
+#else
+ Uint total, online, active;
+ switch (erts_schedulers_state(&total,
+ &online,
+ &active,
+ 1)) {
+ case ERTS_SCHDLR_SSPND_DONE: {
+ Eterm *hp = HAlloc(BIF_P, 4);
+ res = TUPLE3(hp,
+ make_small(total),
+ make_small(online),
+ make_small(active));
+ BIF_RET(res);
+ }
+ case ERTS_SCHDLR_SSPND_YIELD_RESTART:
+ ERTS_VBUMP_ALL_REDS(BIF_P);
+ BIF_TRAP1(bif_export[BIF_system_info_1],
+ BIF_P, BIF_ARG_1);
+ default:
+ ASSERT(0);
+ BIF_ERROR(BIF_P, EXC_INTERNAL_ERROR);
+ }
+#endif
+ } else if (ERTS_IS_ATOM_STR("schedulers_online", BIF_ARG_1)) {
+#ifndef ERTS_SMP
+ BIF_RET(make_small(1));
+#else
+ Uint total, online, active;
+ switch (erts_schedulers_state(&total, &online, &active, 1)) {
+ case ERTS_SCHDLR_SSPND_DONE:
+ BIF_RET(make_small(online));
+ case ERTS_SCHDLR_SSPND_YIELD_RESTART:
+ ERTS_VBUMP_ALL_REDS(BIF_P);
+ BIF_TRAP1(bif_export[BIF_system_info_1],
+ BIF_P, BIF_ARG_1);
+ default:
+ ASSERT(0);
+ BIF_ERROR(BIF_P, EXC_INTERNAL_ERROR);
+ }
+#endif
+ } else if (ERTS_IS_ATOM_STR("schedulers_active", BIF_ARG_1)) {
+#ifndef ERTS_SMP
+ BIF_RET(make_small(1));
+#else
+ Uint total, online, active;
+ switch (erts_schedulers_state(&total, &online, &active, 1)) {
+ case ERTS_SCHDLR_SSPND_DONE:
+ BIF_RET(make_small(active));
+ case ERTS_SCHDLR_SSPND_YIELD_RESTART:
+ ERTS_VBUMP_ALL_REDS(BIF_P);
+ BIF_TRAP1(bif_export[BIF_system_info_1],
+ BIF_P, BIF_ARG_1);
+ default:
+ ASSERT(0);
+ BIF_ERROR(BIF_P, EXC_INTERNAL_ERROR);
+ }
+#endif
+ } else if (ERTS_IS_ATOM_STR("run_queues", BIF_ARG_1)) {
+ res = make_small(erts_no_run_queues);
+ BIF_RET(res);
+ } else if (ERTS_IS_ATOM_STR("c_compiler_used", BIF_ARG_1)) {
+ Eterm *hp = NULL;
+ Uint sz = 0;
+ (void) c_compiler_used(NULL, &sz);
+ if (sz)
+ hp = HAlloc(BIF_P, sz);
+ BIF_RET(c_compiler_used(&hp, NULL));
+ } else if (ERTS_IS_ATOM_STR("stop_memory_trace", BIF_ARG_1)) {
+ erts_mtrace_stop();
+ BIF_RET(am_true);
+ } else if (ERTS_IS_ATOM_STR("context_reductions", BIF_ARG_1)) {
+ BIF_RET(make_small(CONTEXT_REDS));
+ } else if (ERTS_IS_ATOM_STR("kernel_poll", BIF_ARG_1)) {
+#ifdef ERTS_ENABLE_KERNEL_POLL
+ BIF_RET(erts_use_kernel_poll ? am_true : am_false);
+#else
+ BIF_RET(am_false);
+#endif
+ } else if (ERTS_IS_ATOM_STR("lock_checking", BIF_ARG_1)) {
+#ifdef ERTS_ENABLE_LOCK_CHECK
+ BIF_RET(am_true);
+#else
+ BIF_RET(am_false);
+#endif
+ } else if (ERTS_IS_ATOM_STR("lock_counting", BIF_ARG_1)) {
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ BIF_RET(am_true);
+#else
+ BIF_RET(am_false);
+#endif
+ } else if (ERTS_IS_ATOM_STR("debug_compiled", BIF_ARG_1)) {
+#ifdef DEBUG
+ BIF_RET(am_true);
+#else
+ BIF_RET(am_false);
+#endif
+ } else if (ERTS_IS_ATOM_STR("check_io", BIF_ARG_1)) {
+ BIF_RET(erts_check_io_info(BIF_P));
+ } else if (ERTS_IS_ATOM_STR("multi_scheduling_blockers", BIF_ARG_1)) {
+#ifndef ERTS_SMP
+ BIF_RET(NIL);
+#else
+ if (erts_no_schedulers == 1)
+ BIF_RET(NIL);
+ else
+ BIF_RET(erts_multi_scheduling_blockers(BIF_P));
+#endif
+ } else if (ERTS_IS_ATOM_STR("modified_timing_level", BIF_ARG_1)) {
+ BIF_RET(ERTS_USE_MODIFIED_TIMING()
+ ? make_small(erts_modified_timing_level)
+ : am_undefined);
+ } else if (ERTS_IS_ATOM_STR("port_tasks", BIF_ARG_1)) {
+ BIF_RET(am_true);
+ } else if (ERTS_IS_ATOM_STR("io_thread", BIF_ARG_1)) {
+ BIF_RET(am_false);
+ } else if (ERTS_IS_ATOM_STR("scheduling_statistics", BIF_ARG_1)) {
+ BIF_RET(erts_sched_stat_term(BIF_P, 0));
+ } else if (ERTS_IS_ATOM_STR("total_scheduling_statistics", BIF_ARG_1)) {
+ BIF_RET(erts_sched_stat_term(BIF_P, 1));
+ } else if (ERTS_IS_ATOM_STR("taints", BIF_ARG_1)) {
+ BIF_RET(erts_nif_taints(BIF_P));
+ }
+
+ BIF_ERROR(BIF_P, BADARG);
+}
+
+Eterm
+port_info_1(Process* p, Eterm pid)
+{
+ static Eterm keys[] = {
+ am_name,
+ am_links,
+ am_id,
+ am_connected,
+ am_input,
+ am_output
+ };
+ Eterm items[ASIZE(keys)];
+ Eterm result = NIL;
+ Eterm reg_name;
+ Eterm* hp;
+ Uint need;
+ int i;
+
+ /*
+ * Collect all information about the port.
+ */
+
+ for (i = 0; i < ASIZE(keys); i++) {
+ Eterm item;
+
+ item = port_info_2(p, pid, keys[i]);
+ if (is_non_value(item)) {
+ return THE_NON_VALUE;
+ }
+ if (item == am_undefined) {
+ return am_undefined;
+ }
+ items[i] = item;
+ }
+ reg_name = port_info_2(p, pid, am_registered_name);
+
+ /*
+ * Build the resulting list.
+ */
+
+ need = 2*ASIZE(keys);
+ if (is_tuple(reg_name)) {
+ need += 2;
+ }
+ hp = HAlloc(p, need);
+ for (i = ASIZE(keys) - 1; i >= 0; i--) {
+ result = CONS(hp, items[i], result);
+ hp += 2;
+ }
+ if (is_tuple(reg_name)) {
+ result = CONS(hp, reg_name, result);
+ }
+
+ return result;
+}
+
+
+/**********************************************************************/
+/* Return information on ports */
+/* Info:
+** id Port index
+** connected (Pid)
+** links List of pids
+** name String
+** input Number of bytes input from port program
+** output Number of bytes output to the port program
+*/
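+/* Example (Erlang shell; numbers are illustrative):
+**   erlang:port_info(Port, input) -> {input, 42}
+*/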
+
+BIF_RETTYPE port_info_2(BIF_ALIST_2)
+{
+ BIF_RETTYPE ret;
+ Eterm portid = BIF_ARG_1;
+ Port *prt;
+ Eterm item = BIF_ARG_2;
+ Eterm res;
+ Eterm* hp;
+ int count;
+
+ if (is_internal_port(portid))
+ prt = erts_id2port(portid, BIF_P, ERTS_PROC_LOCK_MAIN);
+ else if (is_atom(portid))
+ erts_whereis_name(BIF_P, ERTS_PROC_LOCK_MAIN,
+ portid, NULL, 0, 0, &prt);
+ else if (is_external_port(portid)
+ && external_port_dist_entry(portid) == erts_this_dist_entry)
+ BIF_RET(am_undefined);
+ else {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+
+ if (!prt) {
+ BIF_RET(am_undefined);
+ }
+
+ if (item == am_id) {
+ hp = HAlloc(BIF_P, 3);
+ res = make_small(internal_port_number(portid));
+ }
+ else if (item == am_links) {
+ MonitorInfoCollection mic;
+ int i;
+ Eterm item;
+
+ INIT_MONITOR_INFOS(mic);
+
+ erts_doforall_links(prt->nlinks, &collect_one_link, &mic);
+
+ hp = HAlloc(BIF_P, 3 + mic.sz);
+ res = NIL;
+ for (i = 0; i < mic.mi_i; i++) {
+ item = STORE_NC(&hp, &MSO(BIF_P).externals, mic.mi[i].entity);
+ res = CONS(hp, item, res);
+ hp += 2;
+ }
+ DESTROY_MONITOR_INFOS(mic);
+
+ }
+ else if (item == am_monitors) {
+ MonitorInfoCollection mic;
+ int i;
+ Eterm item;
+
+ INIT_MONITOR_INFOS(mic);
+
+ erts_doforall_monitors(prt->monitors, &collect_one_origin_monitor, &mic);
+
+ hp = HAlloc(BIF_P, 3 + mic.sz);
+ res = NIL;
+ for (i = 0; i < mic.mi_i; i++) {
+ Eterm t;
+ item = STORE_NC(&hp, &MSO(BIF_P).externals, mic.mi[i].entity);
+ t = TUPLE2(hp, am_process, item);
+ hp += 3;
+ res = CONS(hp, t, res);
+ hp += 2;
+ }
+ DESTROY_MONITOR_INFOS(mic);
+
+ }
+ else if (item == am_name) {
+ count = sys_strlen(prt->name);
+
+ hp = HAlloc(BIF_P, 3 + 2*count);
+ res = buf_to_intlist(&hp, prt->name, count, NIL);
+ }
+ else if (item == am_connected) {
+ hp = HAlloc(BIF_P, 3);
+ res = prt->connected; /* internal pid */
+ }
+ else if (item == am_input) {
+ Uint hsz = 3;
+ Uint n = prt->bytes_in;
+ (void) erts_bld_uint(NULL, &hsz, n);
+ hp = HAlloc(BIF_P, hsz);
+ res = erts_bld_uint(&hp, NULL, n);
+ }
+ else if (item == am_output) {
+ Uint hsz = 3;
+ Uint n = prt->bytes_out;
+ (void) erts_bld_uint(NULL, &hsz, n);
+ hp = HAlloc(BIF_P, hsz);
+ res = erts_bld_uint(&hp, NULL, n);
+ }
+ else if (item == am_registered_name) {
+ RegProc *reg;
+ reg = prt->reg;
+ if (reg == NULL) {
+ ERTS_BIF_PREP_RET(ret, NIL);
+ goto done;
+ } else {
+ hp = HAlloc(BIF_P, 3);
+ res = reg->name;
+ }
+ }
+ else if (item == am_memory) {
+ /* All memory consumed in bytes (the Port struct should not be
+ included though).
+ */
+ Uint hsz = 3;
+ Uint size = 0;
+ ErlHeapFragment* bp;
+
+ erts_doforall_links(prt->nlinks, &one_link_size, &size);
+
+ for (bp = prt->bp; bp; bp = bp->next)
+ size += sizeof(ErlHeapFragment) + (bp->size - 1)*sizeof(Eterm);
+
+ if (prt->linebuf)
+ size += sizeof(LineBuf) + prt->linebuf->ovsiz;
+
+	/* ... */
+
+ /* All memory allocated by the driver should be included, but it is
+ hard to retrieve... */
+
+ (void) erts_bld_uint(NULL, &hsz, size);
+ hp = HAlloc(BIF_P, hsz);
+ res = erts_bld_uint(&hp, NULL, size);
+ }
+ else if (item == am_queue_size) {
+ Uint ioq_size = erts_port_ioq_size(prt);
+ Uint hsz = 3;
+ (void) erts_bld_uint(NULL, &hsz, ioq_size);
+ hp = HAlloc(BIF_P, hsz);
+ res = erts_bld_uint(&hp, NULL, ioq_size);
+ }
+ else if (ERTS_IS_ATOM_STR("locking", item)) {
+ hp = HAlloc(BIF_P, 3);
+#ifndef ERTS_SMP
+ res = am_false;
+#else
+ if (prt->status & ERTS_PORT_SFLG_PORT_SPECIFIC_LOCK) {
+ DECL_AM(port_level);
+ ASSERT(prt->drv_ptr->flags
+ & ERL_DRV_FLAG_USE_PORT_LOCKING);
+ res = AM_port_level;
+ }
+ else {
+ DECL_AM(driver_level);
+ ASSERT(!(prt->drv_ptr->flags
+ & ERL_DRV_FLAG_USE_PORT_LOCKING));
+ res = AM_driver_level;
+ }
+#endif
+ }
+ else {
+ ERTS_BIF_PREP_ERROR(ret, BIF_P, BADARG);
+ goto done;
+ }
+
+ ERTS_BIF_PREP_RET(ret, TUPLE2(hp, item, res));
+
+ done:
+
+ erts_smp_port_unlock(prt);
+
+ return ret;
+}
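+
+/* A minimal sketch (not compiled) of what one simple branch plus the
+ * shared TUPLE2 epilogue above amounts to; the helper name and the
+ * value 4711 are assumptions for illustration only:
+ */
+#if 0
+static Eterm example_item_reply(Process *c_p, Eterm item)
+{
+ Eterm* hp = HAlloc(c_p, 3); /* three words: the {Item, Value} cell */
+ return TUPLE2(hp, item, make_small(4711)); /* immediates need no extra heap */
+}
+#endif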
+
+
+Eterm
+fun_info_2(Process* p, Eterm fun, Eterm what)
+{
+ Eterm* hp;
+ Eterm val;
+
+ if (is_fun(fun)) {
+ ErlFunThing* funp = (ErlFunThing *) fun_val(fun);
+
+ switch (what) {
+ case am_type:
+ hp = HAlloc(p, 3);
+ val = am_local;
+ break;
+ case am_pid:
+ hp = HAlloc(p, 3);
+ val = funp->creator;
+ break;
+ case am_module:
+ hp = HAlloc(p, 3);
+ val = funp->fe->module;
+ break;
+ case am_new_index:
+ hp = HAlloc(p, 3);
+ val = make_small(funp->fe->index);
+ break;
+ case am_new_uniq:
+ val = new_binary(p, funp->fe->uniq, 16);
+ hp = HAlloc(p, 3);
+ break;
+ case am_index:
+ hp = HAlloc(p, 3);
+ val = make_small(funp->fe->old_index);
+ break;
+ case am_uniq:
+ hp = HAlloc(p, 3);
+ val = make_small(funp->fe->old_uniq);
+ break;
+ case am_env:
+ {
+ Uint num_free = funp->num_free;
+ int i;
+
+ hp = HAlloc(p, 3 + 2*num_free);
+ val = NIL;
+ for (i = num_free-1; i >= 0; i--) {
+ val = CONS(hp, funp->env[i], val);
+ hp += 2;
+ }
+ }
+ break;
+ case am_refc:
+ val = erts_make_integer(erts_smp_atomic_read(&funp->fe->refc), p);
+ hp = HAlloc(p, 3);
+ break;
+ case am_arity:
+ hp = HAlloc(p, 3);
+ val = make_small(funp->arity);
+ break;
+ case am_name:
+ hp = HAlloc(p, 3);
+ val = funp->fe->address[-2];
+ break;
+ default:
+ goto error;
+ }
+ } else if (is_export(fun)) {
+ Export* exp = (Export *) (export_val(fun))[1];
+ switch (what) {
+ case am_type:
+ hp = HAlloc(p, 3);
+ val = am_external;
+ break;
+ case am_pid:
+ hp = HAlloc(p, 3);
+ val = am_undefined;
+ break;
+ case am_module:
+ hp = HAlloc(p, 3);
+ val = exp->code[0];
+ break;
+ case am_new_index:
+ hp = HAlloc(p, 3);
+ val = am_undefined;
+ break;
+ case am_new_uniq:
+ hp = HAlloc(p, 3);
+ val = am_undefined;
+ break;
+ case am_index:
+ hp = HAlloc(p, 3);
+ val = am_undefined;
+ break;
+ case am_uniq:
+ hp = HAlloc(p, 3);
+ val = am_undefined;
+ break;
+ case am_env:
+ hp = HAlloc(p, 3);
+ val = NIL;
+ break;
+ case am_refc:
+ hp = HAlloc(p, 3);
+ val = am_undefined;
+ break;
+ case am_arity:
+ hp = HAlloc(p, 3);
+ val = make_small(exp->code[2]);
+ break;
+ case am_name:
+ hp = HAlloc(p, 3);
+ val = exp->code[1];
+ break;
+ default:
+ goto error;
+ }
+ } else {
+ error:
+ BIF_ERROR(p, BADARG);
+ }
+ return TUPLE2(hp, what, val);
+}
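+
+/* From Erlang, the result is always a {What, Value} pair; illustrative
+ * shell session (values depend on the fun in question):
+ *
+ * > erlang:fun_info(fun lists:map/2, arity).
+ * {arity,2}
+ * > erlang:fun_info(fun() -> ok end, type).
+ * {type,local}
+ */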
+
+BIF_RETTYPE is_process_alive_1(BIF_ALIST_1)
+{
+ if(is_internal_pid(BIF_ARG_1)) {
+ Process *rp;
+
+ if (BIF_ARG_1 == BIF_P->id)
+ BIF_RET(am_true);
+
+ if(internal_pid_index(BIF_ARG_1) >= erts_max_processes)
+ BIF_ERROR(BIF_P, BADARG);
+
+ rp = erts_pid2proc(BIF_P, ERTS_PROC_LOCK_MAIN,
+ BIF_ARG_1, ERTS_PROC_LOCK_STATUS);
+ if (!rp) {
+ BIF_RET(am_false);
+ }
+ else {
+ int have_pending_exit = ERTS_PROC_PENDING_EXIT(rp);
+ erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_STATUS);
+ if (have_pending_exit)
+ ERTS_BIF_AWAIT_X_DATA_TRAP(BIF_P, BIF_ARG_1, am_false);
+ else
+ BIF_RET(am_true);
+ }
+ }
+ else if(is_external_pid(BIF_ARG_1)) {
+ if(external_pid_dist_entry(BIF_ARG_1) == erts_this_dist_entry)
+ BIF_RET(am_false); /* A pid from an old incarnation of this node */
+ else
+ BIF_ERROR(BIF_P, BADARG);
+ }
+ else {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+}
+
+BIF_RETTYPE process_display_2(BIF_ALIST_2)
+{
+ Process *rp;
+
+ if (BIF_ARG_2 != am_backtrace)
+ BIF_ERROR(BIF_P, BADARG);
+
+ rp = erts_pid2proc_nropt(BIF_P, ERTS_PROC_LOCK_MAIN,
+ BIF_ARG_1, ERTS_PROC_LOCKS_ALL);
+ if(!rp) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+ if (rp == ERTS_PROC_LOCK_BUSY)
+ ERTS_BIF_YIELD2(bif_export[BIF_process_display_2], BIF_P,
+ BIF_ARG_1, BIF_ARG_2);
+ if (rp != BIF_P && ERTS_PROC_PENDING_EXIT(rp)) {
+ Eterm args[2] = {BIF_ARG_1, BIF_ARG_2};
+ erts_smp_proc_unlock(rp, ERTS_PROC_LOCKS_ALL);
+ ERTS_BIF_AWAIT_X_APPLY_TRAP(BIF_P,
+ BIF_ARG_1,
+ am_erlang,
+ am_process_display,
+ args,
+ 2);
+ }
+ erts_stack_dump(ERTS_PRINT_STDERR, NULL, rp);
+#ifdef ERTS_SMP
+ erts_smp_proc_unlock(rp, (BIF_P == rp
+ ? ERTS_PROC_LOCKS_ALL_MINOR
+ : ERTS_PROC_LOCKS_ALL));
+#endif
+ BIF_RET(am_true);
+}
+
+
+/* This is a general call which returns some possibly useful information. */
+
+BIF_RETTYPE statistics_1(BIF_ALIST_1)
+{
+ Eterm res;
+ Eterm* hp;
+
+ if (BIF_ARG_1 == am_context_switches) {
+ Eterm cs = erts_make_integer(erts_get_total_context_switches(), BIF_P);
+ hp = HAlloc(BIF_P, 3);
+ res = TUPLE2(hp, cs, SMALL_ZERO);
+ BIF_RET(res);
+ } else if (BIF_ARG_1 == am_garbage_collection) {
+ Uint hsz = 4;
+ ErtsGCInfo gc_info;
+ Eterm gcs;
+ Eterm recl;
+ erts_gc_info(&gc_info);
+ (void) erts_bld_uint(NULL, &hsz, gc_info.garbage_collections);
+ (void) erts_bld_uint(NULL, &hsz, gc_info.reclaimed);
+ hp = HAlloc(BIF_P, hsz);
+ gcs = erts_bld_uint(&hp, NULL, gc_info.garbage_collections);
+ recl = erts_bld_uint(&hp, NULL, gc_info.reclaimed);
+ res = TUPLE3(hp, gcs, recl, SMALL_ZERO);
+ BIF_RET(res);
+ } else if (BIF_ARG_1 == am_reductions) {
+ Uint reds;
+ Uint diff;
+ Uint hsz = 3;
+ Eterm b1, b2;
+
+ erts_get_total_reductions(&reds, &diff);
+ (void) erts_bld_uint(NULL, &hsz, reds);
+ (void) erts_bld_uint(NULL, &hsz, diff);
+ hp = HAlloc(BIF_P, hsz);
+ b1 = erts_bld_uint(&hp, NULL, reds);
+ b2 = erts_bld_uint(&hp, NULL, diff);
+ res = TUPLE2(hp, b1, b2);
+ BIF_RET(res);
+ } else if (BIF_ARG_1 == am_exact_reductions) {
+ Uint reds;
+ Uint diff;
+ Uint hsz = 3;
+ Eterm b1, b2;
+
+ erts_get_exact_total_reductions(BIF_P, &reds, &diff);
+ (void) erts_bld_uint(NULL, &hsz, reds);
+ (void) erts_bld_uint(NULL, &hsz, diff);
+ hp = HAlloc(BIF_P, hsz);
+ b1 = erts_bld_uint(&hp, NULL, reds);
+ b2 = erts_bld_uint(&hp, NULL, diff);
+ res = TUPLE2(hp, b1, b2);
+ BIF_RET(res);
+ } else if (BIF_ARG_1 == am_runtime) {
+ unsigned long u1, u2, dummy;
+ Eterm b1, b2;
+ elapsed_time_both(&u1,&dummy,&u2,&dummy);
+ b1 = erts_make_integer(u1,BIF_P);
+ b2 = erts_make_integer(u2,BIF_P);
+ hp = HAlloc(BIF_P,3);
+ res = TUPLE2(hp, b1, b2);
+ BIF_RET(res);
+ } else if (BIF_ARG_1 == am_run_queue) {
+ res = erts_run_queues_len(NULL);
+ BIF_RET(make_small(res));
+ } else if (BIF_ARG_1 == am_wall_clock) {
+ Uint w1, w2;
+ Eterm b1, b2;
+ wall_clock_elapsed_time_both(&w1, &w2);
+ b1 = erts_make_integer(w1,BIF_P);
+ b2 = erts_make_integer(w2,BIF_P);
+ hp = HAlloc(BIF_P,3);
+ res = TUPLE2(hp, b1, b2);
+ BIF_RET(res);
+ } else if (BIF_ARG_1 == am_io) {
+ Eterm r1, r2;
+ Eterm in, out;
+ Uint hsz = 9;
+ Uint bytes_in = (Uint) erts_smp_atomic_read(&erts_bytes_in);
+ Uint bytes_out = (Uint) erts_smp_atomic_read(&erts_bytes_out);
+
+ (void) erts_bld_uint(NULL, &hsz, bytes_in);
+ (void) erts_bld_uint(NULL, &hsz, bytes_out);
+ hp = HAlloc(BIF_P, hsz);
+ in = erts_bld_uint(&hp, NULL, bytes_in);
+ out = erts_bld_uint(&hp, NULL, bytes_out);
+
+ r1 = TUPLE2(hp, am_input, in);
+ hp += 3;
+ r2 = TUPLE2(hp, am_output, out);
+ hp += 3;
+ BIF_RET(TUPLE2(hp, r1, r2));
+ }
+ else if (ERTS_IS_ATOM_STR("run_queues", BIF_ARG_1)) {
+ Eterm res, *hp, **hpp;
+ Uint sz, *szp;
+ int no_qs = erts_no_run_queues;
+ Uint *qszs = erts_alloc(ERTS_ALC_T_TMP,sizeof(Uint)*no_qs*2);
+ (void) erts_run_queues_len(qszs);
+ sz = 0;
+ szp = &sz;
+ hpp = NULL;
+ while (1) {
+ int i;
+ for (i = 0; i < no_qs; i++)
+ qszs[no_qs+i] = erts_bld_uint(hpp, szp, qszs[i]);
+ res = erts_bld_tuplev(hpp, szp, no_qs, &qszs[no_qs]);
+ if (hpp) {
+ erts_free(ERTS_ALC_T_TMP, qszs);
+ BIF_RET(res);
+ }
+ hp = HAlloc(BIF_P, sz);
+ szp = NULL;
+ hpp = &hp;
+ }
+ }
+ BIF_ERROR(BIF_P, BADARG);
+}
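+
+/* The branches above all use the same two-pass erts_bld_* idiom: a
+ * first pass with a NULL heap pointer only accumulates the required
+ * size, and the second pass builds into a buffer of exactly that size.
+ * A minimal sketch of the idiom (not compiled; the pair built here is
+ * an arbitrary example):
+ */
+#if 0
+static Eterm build_uint_pair_example(Process *c_p, Uint a, Uint b)
+{
+ Uint hsz = 3; /* the TUPLE2 cell itself */
+ Eterm *hp, t1, t2;
+ (void) erts_bld_uint(NULL, &hsz, a); /* pass 1: size only */
+ (void) erts_bld_uint(NULL, &hsz, b);
+ hp = HAlloc(c_p, hsz); /* one exact allocation */
+ t1 = erts_bld_uint(&hp, NULL, a); /* pass 2: build */
+ t2 = erts_bld_uint(&hp, NULL, b);
+ return TUPLE2(hp, t1, t2);
+}
+#endif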
+
+BIF_RETTYPE memory_0(BIF_ALIST_0)
+{
+ BIF_RETTYPE res = erts_memory(NULL, NULL, BIF_P, THE_NON_VALUE);
+ switch (res) {
+ case am_badarg: BIF_ERROR(BIF_P, EXC_INTERNAL_ERROR); /* never... */
+ case am_notsup: BIF_ERROR(BIF_P, EXC_NOTSUP);
+ default: BIF_RET(res);
+ }
+}
+
+BIF_RETTYPE memory_1(BIF_ALIST_1)
+{
+ BIF_RETTYPE res = erts_memory(NULL, NULL, BIF_P, BIF_ARG_1);
+ switch (res) {
+ case am_badarg: BIF_ERROR(BIF_P, BADARG);
+ case am_notsup: BIF_ERROR(BIF_P, EXC_NOTSUP);
+ default: BIF_RET(res);
+ }
+}
+
+BIF_RETTYPE error_logger_warning_map_0(BIF_ALIST_0)
+{
+ BIF_RET(erts_error_logger_warnings);
+}
+
+static erts_smp_atomic_t available_internal_state;
+
+BIF_RETTYPE erts_debug_get_internal_state_1(BIF_ALIST_1)
+{
+ /*
+ * NOTE: Only supposed to be used for testing and debugging.
+ */
+
+ if (!erts_smp_atomic_read(&available_internal_state)) {
+ BIF_ERROR(BIF_P, EXC_UNDEF);
+ }
+
+ if (is_atom(BIF_ARG_1)) {
+ if (ERTS_IS_ATOM_STR("reds_left", BIF_ARG_1)) {
+ /* Used by (emulator) */
+ BIF_RET(make_small((Uint) ERTS_BIF_REDS_LEFT(BIF_P)));
+ }
+ else if (ERTS_IS_ATOM_STR("node_and_dist_references", BIF_ARG_1)) {
+ /* Used by node_container_SUITE (emulator) */
+ Eterm res = erts_get_node_and_dist_references(BIF_P);
+ BIF_RET(res);
+ }
+ else if (ERTS_IS_ATOM_STR("monitoring_nodes", BIF_ARG_1)) {
+ BIF_RET(erts_processes_monitoring_nodes(BIF_P));
+ }
+ else if (ERTS_IS_ATOM_STR("next_pid", BIF_ARG_1)
+ || ERTS_IS_ATOM_STR("next_port", BIF_ARG_1)) {
+ /* Used by node_container_SUITE (emulator) */
+ Eterm res;
+ if (ERTS_IS_ATOM_STR("next_pid", BIF_ARG_1))
+ res = erts_test_next_pid(0, 0);
+ else {
+ res = erts_test_next_port(0, 0);
+ }
+ if (res < 0)
+ BIF_RET(am_false);
+ BIF_RET(erts_make_integer(res, BIF_P));
+ }
+ else if (ERTS_IS_ATOM_STR("DbTable_words", BIF_ARG_1)) {
+ /* Used by ets_SUITE (stdlib) */
+ size_t words = (sizeof(DbTable) + sizeof(Uint) - 1)/sizeof(Uint);
+ BIF_RET(make_small((Uint) words));
+ }
+ else if (ERTS_IS_ATOM_STR("check_io_debug", BIF_ARG_1)) {
+ /* Used by (emulator) */
+ int res;
+#ifdef HAVE_ERTS_CHECK_IO_DEBUG
+ erts_smp_proc_unlock(BIF_P,ERTS_PROC_LOCK_MAIN);
+ res = erts_check_io_debug();
+ erts_smp_proc_lock(BIF_P,ERTS_PROC_LOCK_MAIN);
+#else
+ res = 0;
+#endif
+ ASSERT(res >= 0);
+ BIF_RET(erts_make_integer((Uint) res, BIF_P));
+ }
+ else if (ERTS_IS_ATOM_STR("process_info_args", BIF_ARG_1)) {
+ /* Used by process_SUITE (emulator) */
+ int i;
+ Eterm res = NIL;
+ Uint *hp = HAlloc(BIF_P, 2*ERTS_PI_ARGS);
+ for (i = ERTS_PI_ARGS-1; i >= 0; i--) {
+ res = CONS(hp, pi_args[i], res);
+ hp += 2;
+ }
+ BIF_RET(res);
+ }
+ else if (ERTS_IS_ATOM_STR("processes", BIF_ARG_1)) {
+ /* Used by process_SUITE (emulator) */
+ BIF_RET(erts_debug_processes(BIF_P));
+ }
+ else if (ERTS_IS_ATOM_STR("processes_bif_info", BIF_ARG_1)) {
+ /* Used by process_SUITE (emulator) */
+ BIF_RET(erts_debug_processes_bif_info(BIF_P));
+ }
+ else if (ERTS_IS_ATOM_STR("max_atom_out_cache_index", BIF_ARG_1)) {
+ /* Used by distribution_SUITE (emulator) */
+ BIF_RET(make_small((Uint) erts_debug_max_atom_out_cache_index()));
+ }
+ else if (ERTS_IS_ATOM_STR("nbalance", BIF_ARG_1)) {
+ Uint n;
+ erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ n = erts_debug_nbalance();
+ erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ BIF_RET(erts_make_integer(n, BIF_P));
+ }
+ else if (ERTS_IS_ATOM_STR("available_internal_state", BIF_ARG_1)) {
+ BIF_RET(am_true);
+ }
+ }
+ else if (is_tuple(BIF_ARG_1)) {
+ Eterm* tp = tuple_val(BIF_ARG_1);
+ switch (arityval(tp[0])) {
+ case 2: {
+ if (ERTS_IS_ATOM_STR("process_status", tp[1])) {
+ /* Used by timer process_SUITE, timer_bif_SUITE, and
+ node_container_SUITE (emulator) */
+ if (is_internal_pid(tp[2])) {
+ BIF_RET(erts_process_status(BIF_P,
+ ERTS_PROC_LOCK_MAIN,
+ NULL,
+ tp[2]));
+ }
+ }
+ else if (ERTS_IS_ATOM_STR("link_list", tp[1])) {
+ /* Used by erl_link_SUITE (emulator) */
+ if(is_internal_pid(tp[2])) {
+ Eterm res;
+ Process *p;
+
+ p = erts_pid2proc(BIF_P,
+ ERTS_PROC_LOCK_MAIN,
+ tp[2],
+ ERTS_PROC_LOCK_LINK);
+ if (!p) {
+ ERTS_SMP_ASSERT_IS_NOT_EXITING(BIF_P);
+ BIF_RET(am_undefined);
+ }
+ res = make_link_list(BIF_P, p->nlinks, NIL);
+ erts_smp_proc_unlock(p, ERTS_PROC_LOCK_LINK);
+ BIF_RET(res);
+ }
+ else if(is_internal_port(tp[2])) {
+ Eterm res;
+ Port *p = erts_id2port(tp[2], BIF_P, ERTS_PROC_LOCK_MAIN);
+ if(!p)
+ BIF_RET(am_undefined);
+ res = make_link_list(BIF_P, p->nlinks, NIL);
+ erts_smp_port_unlock(p);
+ BIF_RET(res);
+ }
+ else if(is_node_name_atom(tp[2])) {
+ DistEntry *dep = erts_find_dist_entry(tp[2]);
+ if(dep) {
+ Eterm subres;
+ erts_smp_de_links_lock(dep);
+ subres = make_link_list(BIF_P, dep->nlinks, NIL);
+ subres = make_link_list(BIF_P, dep->node_links, subres);
+ erts_smp_de_links_unlock(dep);
+ erts_deref_dist_entry(dep);
+ BIF_RET(subres);
+ } else {
+ BIF_RET(am_undefined);
+ }
+ }
+ }
+ else if (ERTS_IS_ATOM_STR("monitor_list", tp[1])) {
+ /* Used by erl_link_SUITE (emulator) */
+ if(is_internal_pid(tp[2])) {
+ Process *p;
+ Eterm res;
+
+ p = erts_pid2proc(BIF_P,
+ ERTS_PROC_LOCK_MAIN,
+ tp[2],
+ ERTS_PROC_LOCK_LINK);
+ if (!p) {
+ ERTS_SMP_ASSERT_IS_NOT_EXITING(BIF_P);
+ BIF_RET(am_undefined);
+ }
+ res = make_monitor_list(BIF_P, p->monitors);
+ erts_smp_proc_unlock(p, ERTS_PROC_LOCK_LINK);
+ BIF_RET(res);
+ } else if(is_node_name_atom(tp[2])) {
+ DistEntry *dep = erts_find_dist_entry(tp[2]);
+ if(dep) {
+ Eterm ml;
+ erts_smp_de_links_lock(dep);
+ ml = make_monitor_list(BIF_P, dep->monitors);
+ erts_smp_de_links_unlock(dep);
+ erts_deref_dist_entry(dep);
+ BIF_RET(ml);
+ } else {
+ BIF_RET(am_undefined);
+ }
+ }
+ }
+ else if (ERTS_IS_ATOM_STR("channel_number", tp[1])) {
+ Eterm res;
+ DistEntry *dep = erts_find_dist_entry(tp[2]);
+ if (!dep)
+ res = am_undefined;
+ else {
+ Uint cno = dist_entry_channel_no(dep);
+ res = make_small(cno);
+ erts_deref_dist_entry(dep);
+ }
+ BIF_RET(res);
+ }
+ else if (ERTS_IS_ATOM_STR("have_pending_exit", tp[1])) {
+ Process *rp = erts_pid2proc(BIF_P, ERTS_PROC_LOCK_MAIN,
+ tp[2], ERTS_PROC_LOCK_STATUS);
+ if (!rp) {
+ BIF_RET(am_undefined);
+ }
+ else {
+ Eterm res = ERTS_PROC_PENDING_EXIT(rp) ? am_true : am_false;
+ erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_STATUS);
+ BIF_RET(res);
+ }
+ }
+ else if (ERTS_IS_ATOM_STR("binary_info", tp[1])) {
+ Eterm bin = tp[2];
+ if (is_binary(bin)) {
+ Eterm real_bin = bin;
+ Eterm res = am_true;
+ ErlSubBin* sb = (ErlSubBin *) binary_val(real_bin);
+
+ if (sb->thing_word == HEADER_SUB_BIN) {
+ real_bin = sb->orig;
+ }
+ if (*binary_val(real_bin) == HEADER_PROC_BIN) {
+ ProcBin* pb;
+ Binary* val;
+ Eterm SzTerm;
+ Uint hsz = 3 + 5;
+ Eterm* hp;
+ DECL_AM(refc_binary);
+
+ pb = (ProcBin *) binary_val(real_bin);
+ val = pb->val;
+ (void) erts_bld_uint(NULL, &hsz, pb->size);
+ (void) erts_bld_uint(NULL, &hsz, val->orig_size);
+ hp = HAlloc(BIF_P, hsz);
+
+ /* Info about the Binary* object */
+ SzTerm = erts_bld_uint(&hp, NULL, val->orig_size);
+ res = TUPLE2(hp, am_binary, SzTerm);
+ hp += 3;
+
+ /* Info about the ProcBin* object */
+ SzTerm = erts_bld_uint(&hp, NULL, pb->size);
+ res = TUPLE4(hp, AM_refc_binary, SzTerm,
+ res, make_small(pb->flags));
+ } else { /* heap binary */
+ DECL_AM(heap_binary);
+ res = AM_heap_binary;
+ }
+ BIF_RET(res);
+ }
+ }
+ else if (ERTS_IS_ATOM_STR("term_to_binary_no_funs", tp[1])) {
+ Uint dflags = (DFLAG_EXTENDED_REFERENCES |
+ DFLAG_EXTENDED_PIDS_PORTS |
+ DFLAG_BIT_BINARIES);
+ BIF_RET(erts_term_to_binary(BIF_P, tp[2], 0, dflags));
+ }
+ else if (ERTS_IS_ATOM_STR("dist_port", tp[1])) {
+ Eterm res = am_undefined;
+ DistEntry *dep = erts_sysname_to_connected_dist_entry(tp[2]);
+ if (dep) {
+ erts_smp_de_rlock(dep);
+ if (is_internal_port(dep->cid))
+ res = dep->cid;
+ erts_smp_de_runlock(dep);
+ erts_deref_dist_entry(dep);
+ }
+ BIF_RET(res);
+ }
+ else if (ERTS_IS_ATOM_STR("atom_out_cache_index", tp[1])) {
+ /* Used by distribution_SUITE (emulator) */
+ if (is_atom(tp[2])) {
+ BIF_RET(make_small(
+ (Uint)
+ erts_debug_atom_to_out_cache_index(tp[2])));
+ }
+ }
+ else if (ERTS_IS_ATOM_STR("fake_scheduler_bindings", tp[1])) {
+ return erts_fake_scheduler_bindings(BIF_P, tp[2]);
+ }
+ break;
+ }
+ default:
+ break;
+ }
+ }
+ BIF_ERROR(BIF_P, BADARG);
+}
+
+static erts_smp_atomic_t hipe_test_reschedule_flag;
+
+BIF_RETTYPE erts_debug_set_internal_state_2(BIF_ALIST_2)
+{
+ /*
+ * NOTE: Only supposed to be used for testing and debugging.
+ */
+ if (ERTS_IS_ATOM_STR("available_internal_state", BIF_ARG_1)
+ && (BIF_ARG_2 == am_true || BIF_ARG_2 == am_false)) {
+ long on = (long) (BIF_ARG_2 == am_true);
+ long prev_on = erts_smp_atomic_xchg(&available_internal_state, on);
+ if (on) {
+ erts_dsprintf_buf_t *dsbufp = erts_create_logger_dsbuf();
+ erts_dsprintf(dsbufp, "Process %T ", BIF_P->id);
+ if (erts_is_alive)
+ erts_dsprintf(dsbufp, "on node %T ", erts_this_node->sysname);
+ erts_dsprintf(dsbufp,
+ "enabled access to the emulator internal state.\n");
+ erts_dsprintf(dsbufp,
+ "NOTE: This is an erts internal test feature and "
+ "should *only* be used by OTP test-suites.\n");
+ erts_send_warning_to_logger(BIF_P->group_leader, dsbufp);
+ }
+ BIF_RET(prev_on ? am_true : am_false);
+ }
+
+ if (!erts_smp_atomic_read(&available_internal_state)) {
+ BIF_ERROR(BIF_P, EXC_UNDEF);
+ }
+
+ if (is_atom(BIF_ARG_1)) {
+
+ if (ERTS_IS_ATOM_STR("reds_left", BIF_ARG_1)) {
+ Sint reds;
+ if (term_to_Sint(BIF_ARG_2, &reds) != 0) {
+ if (0 <= reds && reds <= CONTEXT_REDS) {
+ if (!ERTS_PROC_GET_SAVED_CALLS_BUF(BIF_P))
+ BIF_P->fcalls = reds;
+ else
+ BIF_P->fcalls = reds - CONTEXT_REDS;
+ }
+ BIF_RET(am_true);
+ }
+ }
+ else if (ERTS_IS_ATOM_STR("block", BIF_ARG_1)
+ || ERTS_IS_ATOM_STR("sleep", BIF_ARG_1)) {
+ int block = ERTS_IS_ATOM_STR("block", BIF_ARG_1);
+ Sint ms;
+ if (term_to_Sint(BIF_ARG_2, &ms) != 0) {
+ if (ms > 0) {
+ erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ if (block)
+ erts_smp_block_system(0);
+ while (erts_milli_sleep((long) ms) != 0);
+ if (block)
+ erts_smp_release_system();
+ erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ }
+ BIF_RET(am_true);
+ }
+ }
+ else if (ERTS_IS_ATOM_STR("block_scheduler", BIF_ARG_1)) {
+ Sint ms;
+ if (term_to_Sint(BIF_ARG_2, &ms) != 0) {
+ if (ms > 0) {
+ erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ while (erts_milli_sleep((long) ms) != 0);
+ erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ }
+ BIF_RET(am_true);
+ }
+ }
+ else if (ERTS_IS_ATOM_STR("next_pid", BIF_ARG_1)
+ || ERTS_IS_ATOM_STR("next_port", BIF_ARG_1)) {
+ /* Used by node_container_SUITE (emulator) */
+ Uint next;
+
+ if (term_to_Uint(BIF_ARG_2, &next) != 0) {
+ Eterm res;
+
+ if (ERTS_IS_ATOM_STR("next_pid", BIF_ARG_1))
+ res = erts_test_next_pid(1, next);
+ else {
+ res = erts_test_next_port(1, next);
+ }
+ if (res < 0)
+ BIF_RET(am_false);
+ BIF_RET(erts_make_integer(res, BIF_P));
+ }
+ }
+ else if (ERTS_IS_ATOM_STR("force_gc", BIF_ARG_1)) {
+ /* Used by signal_SUITE (emulator) */
+ Process *rp = erts_pid2proc(BIF_P, ERTS_PROC_LOCK_MAIN,
+ BIF_ARG_2, ERTS_PROC_LOCK_MAIN);
+ if (!rp) {
+ BIF_RET(am_false);
+ }
+ else {
+ FLAGS(rp) |= F_FORCE_GC;
+ if (BIF_P != rp)
+ erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_MAIN);
+ BIF_RET(am_true);
+ }
+ }
+ else if (ERTS_IS_ATOM_STR("send_fake_exit_signal", BIF_ARG_1)) {
+ /* Used by signal_SUITE (emulator) */
+
+ /* Testcases depend on the exit being received via
+ a pending exit when the receiver is the same as
+ the caller. */
+ if (is_tuple(BIF_ARG_2)) {
+ Eterm* tp = tuple_val(BIF_ARG_2);
+ if (arityval(tp[0]) == 3
+ && (is_pid(tp[1]) || is_port(tp[1]))
+ && is_internal_pid(tp[2])) {
+ int xres;
+ ErtsProcLocks rp_locks = ERTS_PROC_LOCKS_XSIG_SEND;
+ Process *rp = erts_pid2proc_opt(BIF_P, ERTS_PROC_LOCK_MAIN,
+ tp[2], rp_locks,
+ ERTS_P2P_FLG_SMP_INC_REFC);
+ if (!rp) {
+ DECL_AM(dead);
+ BIF_RET(AM_dead);
+ }
+
+#ifdef ERTS_SMP
+ if (BIF_P == rp)
+ rp_locks |= ERTS_PROC_LOCK_MAIN;
+#endif
+ xres = erts_send_exit_signal(NULL, /* NULL in order to
+ force a pending exit
+ when we send to
+ ourselves. */
+ tp[1],
+ rp,
+ &rp_locks,
+ tp[3],
+ NIL,
+ NULL,
+ 0);
+#ifdef ERTS_SMP
+ if (BIF_P == rp)
+ rp_locks &= ~ERTS_PROC_LOCK_MAIN;
+#endif
+ erts_smp_proc_unlock(rp, rp_locks);
+ erts_smp_proc_dec_refc(rp);
+ if (xres > 1) {
+ DECL_AM(message);
+ BIF_RET(AM_message);
+ }
+ else if (xres == 0) {
+ DECL_AM(unaffected);
+ BIF_RET(AM_unaffected);
+ }
+ else {
+ DECL_AM(exit);
+ BIF_RET(AM_exit);
+ }
+ }
+ }
+ }
+ else if (ERTS_IS_ATOM_STR("colliding_names", BIF_ARG_1)) {
+ /* Used by ets_SUITE (stdlib) */
+ if (is_tuple(BIF_ARG_2)) {
+ Eterm* tpl = tuple_val(BIF_ARG_2);
+ Uint cnt;
+ if (arityval(tpl[0]) == 2 && is_atom(tpl[1]) &&
+ term_to_Uint(tpl[2], &cnt)) {
+ BIF_RET(erts_ets_colliding_names(BIF_P,tpl[1],cnt));
+ }
+ }
+ }
+ else if (ERTS_IS_ATOM_STR("re_loop_limit", BIF_ARG_1)) {
+ /* Used by re_SUITE (stdlib) */
+ Uint max_loops;
+ if (is_atom(BIF_ARG_2) && ERTS_IS_ATOM_STR("default", BIF_ARG_2)) {
+ max_loops = erts_re_set_loop_limit(-1);
+ BIF_RET(make_small(max_loops));
+ } else if (term_to_Uint(BIF_ARG_2, &max_loops) != 0) {
+ max_loops = erts_re_set_loop_limit(max_loops);
+ BIF_RET(make_small(max_loops));
+ }
+ }
+ else if (ERTS_IS_ATOM_STR("unicode_loop_limit", BIF_ARG_1)) {
+ /* Used by unicode_SUITE (stdlib) */
+ Uint max_loops;
+ if (is_atom(BIF_ARG_2) && ERTS_IS_ATOM_STR("default", BIF_ARG_2)) {
+ max_loops = erts_unicode_set_loop_limit(-1);
+ BIF_RET(make_small(max_loops));
+ } else if (term_to_Uint(BIF_ARG_2, &max_loops) != 0) {
+ max_loops = erts_unicode_set_loop_limit(max_loops);
+ BIF_RET(make_small(max_loops));
+ }
+ }
+ else if (ERTS_IS_ATOM_STR("hipe_test_reschedule_suspend", BIF_ARG_1)) {
+ /* Used by hipe test suites */
+ long flag = erts_smp_atomic_read(&hipe_test_reschedule_flag);
+ if (!flag && BIF_ARG_2 != am_false) {
+ erts_smp_atomic_set(&hipe_test_reschedule_flag, 1);
+ erts_suspend(BIF_P, ERTS_PROC_LOCK_MAIN, NULL);
+ ERTS_BIF_YIELD2(bif_export[BIF_erts_debug_set_internal_state_2],
+ BIF_P, BIF_ARG_1, BIF_ARG_2);
+ }
+ erts_smp_atomic_set(&hipe_test_reschedule_flag, !flag);
+ BIF_RET(NIL);
+ }
+ else if (ERTS_IS_ATOM_STR("hipe_test_reschedule_resume", BIF_ARG_1)) {
+ /* Used by hipe test suites */
+ Eterm res = am_false;
+ Process *rp = erts_pid2proc(BIF_P, ERTS_PROC_LOCK_MAIN,
+ BIF_ARG_2, ERTS_PROC_LOCK_STATUS);
+ if (rp) {
+ erts_resume(rp, ERTS_PROC_LOCK_STATUS);
+ res = am_true;
+ erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_STATUS);
+ }
+ BIF_RET(res);
+ }
+ else if (ERTS_IS_ATOM_STR("test_long_gc_sleep", BIF_ARG_1)) {
+ if (term_to_Uint(BIF_ARG_2, &erts_test_long_gc_sleep) > 0)
+ BIF_RET(am_true);
+ }
+ else if (ERTS_IS_ATOM_STR("abort", BIF_ARG_1)) {
+ erl_exit(ERTS_ABORT_EXIT, "%T\n", BIF_ARG_2);
+ }
+ else if (ERTS_IS_ATOM_STR("kill_dist_connection", BIF_ARG_1)) {
+ DistEntry *dep = erts_sysname_to_connected_dist_entry(BIF_ARG_2);
+ if (!dep)
+ BIF_RET(am_false);
+ else {
+ Uint32 con_id;
+ erts_smp_de_rlock(dep);
+ con_id = dep->connection_id;
+ erts_smp_de_runlock(dep);
+ erts_kill_dist_connection(dep, con_id);
+ erts_deref_dist_entry(dep);
+ BIF_RET(am_true);
+ }
+ }
+ else if (ERTS_IS_ATOM_STR("not_running_optimization", BIF_ARG_1)) {
+#ifdef ERTS_SMP
+ int old_use_opt, use_opt;
+ switch (BIF_ARG_2) {
+ case am_true:
+ use_opt = 1;
+ break;
+ case am_false:
+ use_opt = 0;
+ break;
+ default:
+ BIF_ERROR(BIF_P, BADARG);
+ }
+
+ erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ erts_smp_block_system(0);
+ old_use_opt = !erts_disable_proc_not_running_opt;
+ erts_disable_proc_not_running_opt = !use_opt;
+ erts_smp_release_system();
+ erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ BIF_RET(old_use_opt ? am_true : am_false);
+#else
+ BIF_ERROR(BIF_P, EXC_NOTSUP);
+#endif
+ }
+ }
+
+ BIF_ERROR(BIF_P, BADARG);
+}
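+
+/* Both debug BIFs are gated on available_internal_state; test suites
+ * enable it first. Illustrative call sequence from Erlang:
+ *
+ * erts_debug:set_internal_state(available_internal_state, true),
+ * erts_debug:get_internal_state(reds_left).
+ */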
+
+#ifdef ERTS_ENABLE_LOCK_COUNT
+static Eterm lcnt_build_lock_stats_term(Eterm **hpp, Uint *szp, erts_lcnt_lock_stats_t *stats, Eterm res) {
+ unsigned long tries = 0, colls = 0;
+ unsigned long timer_s = 0, timer_ns = 0, timer_n = 0;
+ unsigned int line = 0;
+
+ Eterm af, uil;
+ Eterm uit, uic;
+ Eterm uits, uitns, uitn;
+ Eterm tt, tstat, tloc, t;
+
+ /* term:
+ * [{{file, line}, {tries, colls, {seconds, nanoseconds, n_blocks}}}]
+ */
+
+ ethr_atomic_read(&stats->tries, (long *)&tries);
+ ethr_atomic_read(&stats->colls, (long *)&colls);
+
+ line = stats->line;
+ timer_s = stats->timer.s;
+ timer_ns = stats->timer.ns;
+ timer_n = stats->timer_n;
+
+ af = am_atom_put(stats->file, strlen(stats->file));
+ uil = erts_bld_uint( hpp, szp, line);
+ tloc = erts_bld_tuple(hpp, szp, 2, af, uil);
+
+ uit = erts_bld_uint( hpp, szp, tries);
+ uic = erts_bld_uint( hpp, szp, colls);
+
+ uits = erts_bld_uint( hpp, szp, timer_s);
+ uitns = erts_bld_uint( hpp, szp, timer_ns);
+ uitn = erts_bld_uint( hpp, szp, timer_n);
+ tt = erts_bld_tuple(hpp, szp, 3, uits, uitns, uitn);
+
+ tstat = erts_bld_tuple(hpp, szp, 3, uit, uic, tt);
+
+ t = erts_bld_tuple(hpp, szp, 2, tloc, tstat);
+
+ res = erts_bld_cons( hpp, szp, t, res);
+
+ return res;
+}
+
+static Eterm lcnt_build_lock_term(Eterm **hpp, Uint *szp, erts_lcnt_lock_t *lock, Eterm res) {
+ Eterm name, type, id, stats = NIL, t;
+ Process *proc = NULL;
+ char *ltype;
+ int i;
+
+ /* term:
+ * [{name, id, type, stats()}]
+ */
+
+ ASSERT(lock->name);
+
+ ltype = erts_lcnt_lock_type(lock->flag);
+
+ ASSERT(ltype);
+
+ type = am_atom_put(ltype, strlen(ltype));
+
+ name = am_atom_put(lock->name, strlen(lock->name));
+
+ if (lock->flag & ERTS_LCNT_LT_ALLOC) {
+ /* use allocator type names as ids for allocator locks */
+ ltype = ERTS_ALC_A2AD(signed_val(lock->id));
+ id = am_atom_put(ltype, strlen(ltype));
+ } else if (lock->flag & ERTS_LCNT_LT_PROCLOCK) {
+ /* use registered names as ids for process locks if available */
+ proc = erts_pid2proc_unlocked(lock->id);
+ if (proc && proc->reg) {
+ id = proc->reg->name;
+ } else {
+ /* otherwise use process id */
+ id = lock->id;
+ }
+ } else {
+ id = lock->id;
+ }
+
+ for (i = 0; i < lock->n_stats; i++) {
+ stats = lcnt_build_lock_stats_term(hpp, szp, &(lock->stats[i]), stats);
+ }
+
+ t = erts_bld_tuple(hpp, szp, 4, name, id, type, stats);
+
+ res = erts_bld_cons( hpp, szp, t, res);
+
+ return res;
+}
+
+static Eterm lcnt_build_result_term(Eterm **hpp, Uint *szp, erts_lcnt_data_t *data, Eterm res) {
+ Eterm dts, dtns, tdt, adur, tdur, aloc, lloc = NIL, tloc;
+ erts_lcnt_lock_t *lock = NULL;
+ char *str_duration = "duration";
+ char *str_locks = "locks";
+
+ /* term:
+ * [{'duration', {seconds, nanoseconds}}, {'locks', locks()}]
+ */
+
+ /* duration tuple */
+ dts = erts_bld_uint( hpp, szp, data->duration.s);
+ dtns = erts_bld_uint( hpp, szp, data->duration.ns);
+ tdt = erts_bld_tuple(hpp, szp, 2, dts, dtns);
+
+ adur = am_atom_put(str_duration, strlen(str_duration));
+ tdur = erts_bld_tuple(hpp, szp, 2, adur, tdt);
+
+ /* lock tuple */
+
+ aloc = am_atom_put(str_locks, strlen(str_locks));
+
+ for (lock = data->current_locks->head; lock != NULL ; lock = lock->next ) {
+ lloc = lcnt_build_lock_term(hpp, szp, lock, lloc);
+ }
+
+ for (lock = data->deleted_locks->head; lock != NULL ; lock = lock->next ) {
+ lloc = lcnt_build_lock_term(hpp, szp, lock, lloc);
+ }
+
+ tloc = erts_bld_tuple(hpp, szp, 2, aloc, lloc);
+
+ res = erts_bld_cons( hpp, szp, tloc, res);
+ res = erts_bld_cons( hpp, szp, tdur, res);
+
+ return res;
+}
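+
+/* Taken together, the term delivered to Erlang has this shape
+ * (reconstructed from the builders above):
+ *
+ * [{duration, {Secs, Nanos}},
+ * {locks, [{Name, Id, Type,
+ * [{{File, Line}, {Tries, Colls, {S, Ns, N}}}, ...]},
+ * ...]}]
+ */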
+#endif
+
+BIF_RETTYPE erts_debug_lock_counters_1(BIF_ALIST_1)
+{
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ Eterm res = NIL;
+ erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ erts_smp_block_system(0);
+
+ if (BIF_ARG_1 == am_info) {
+ erts_lcnt_data_t *data;
+ Uint hsize = 0;
+ Uint *szp;
+ Eterm* hp;
+
+ erts_lcnt_set_rt_opt(ERTS_LCNT_OPT_SUSPEND);
+
+ data = erts_lcnt_get_data();
+
+ /* calculate size */
+
+ szp = &hsize;
+ lcnt_build_result_term(NULL, szp, data, NIL);
+
+ /* alloc and build */
+
+ hp = HAlloc(BIF_P, hsize);
+
+ res = lcnt_build_result_term(&hp, NULL, data, res);
+
+ erts_lcnt_clear_rt_opt(ERTS_LCNT_OPT_SUSPEND);
+
+ goto done;
+ } else if (BIF_ARG_1 == am_clear) {
+ erts_lcnt_clear_counters();
+ res = am_ok;
+ goto done;
+ } else if (is_tuple(BIF_ARG_1)) {
+ Uint prev = 0;
+ Eterm* tp = tuple_val(BIF_ARG_1);
+ switch (arityval(tp[0])) {
+ case 2:
+ if (ERTS_IS_ATOM_STR("process_locks", tp[1])) {
+ if (tp[2] == am_true) {
+ prev = erts_lcnt_set_rt_opt(ERTS_LCNT_OPT_PROCLOCK);
+ if (prev) res = am_true;
+ else res = am_false;
+ goto done;
+ } else if (tp[2] == am_false) {
+ prev = erts_lcnt_clear_rt_opt(ERTS_LCNT_OPT_PROCLOCK);
+ if (prev) res = am_true;
+ else res = am_false;
+ goto done;
+ }
+ }
+ break;
+
+ default:
+ break;
+ }
+ }
+
+ erts_smp_release_system();
+ erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
+#endif
+ BIF_ERROR(BIF_P, BADARG);
+#ifdef ERTS_ENABLE_LOCK_COUNT
+done:
+ erts_smp_release_system();
+ erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ BIF_RET(res);
+#endif
+}
+
+void
+erts_bif_info_init(void)
+{
+ erts_smp_atomic_init(&available_internal_state, 0);
+ erts_smp_atomic_init(&hipe_test_reschedule_flag, 0);
+
+ process_info_init();
+}
diff --git a/erts/emulator/beam/erl_bif_lists.c b/erts/emulator/beam/erl_bif_lists.c
new file mode 100644
index 0000000000..a9e8dd86f7
--- /dev/null
+++ b/erts/emulator/beam/erl_bif_lists.c
@@ -0,0 +1,392 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 1999-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * BIFs logically belonging to the lists module.
+ */
+
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+
+#include "sys.h"
+#include "erl_vm.h"
+#include "global.h"
+#include "erl_process.h"
+#include "error.h"
+#include "bif.h"
+
+static Eterm keyfind(int Bif, Process* p, Eterm Key, Eterm Pos, Eterm List);
+
+/*
+ * erlang:'++'/2
+ */
+
+Eterm
+ebif_plusplus_2(Process* p, Eterm A, Eterm B)
+{
+ return append_2(p, A, B);
+}
+
+/*
+ * erlang:'--'/2
+ */
+
+Eterm
+ebif_minusminus_2(Process* p, Eterm A, Eterm B)
+{
+ return subtract_2(p, A, B);
+}
+
+BIF_RETTYPE append_2(BIF_ALIST_2)
+{
+ Eterm list;
+ Eterm copy;
+ Eterm last;
+ size_t need;
+ Eterm* hp;
+ int i;
+
+ if ((i = list_length(BIF_ARG_1)) < 0) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+ if (i == 0) {
+ BIF_RET(BIF_ARG_2);
+ } else if (is_nil(BIF_ARG_2)) {
+ BIF_RET(BIF_ARG_1);
+ }
+
+ need = 2*i;
+ hp = HAlloc(BIF_P, need);
+ list = BIF_ARG_1;
+ copy = last = CONS(hp, CAR(list_val(list)), make_list(hp+2));
+ list = CDR(list_val(list));
+ hp += 2;
+ i--;
+ while(i--) {
+ Eterm* listp = list_val(list);
+ last = CONS(hp, CAR(listp), make_list(hp+2));
+ list = CDR(listp);
+ hp += 2;
+ }
+ CDR(list_val(last)) = BIF_ARG_2;
+ BIF_RET(copy);
+}
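+
+/* append_2() copies only its left operand and then welds the right
+ * operand onto the last copied cell (the final CDR assignment above),
+ * so the result shares structure with B. Illustrative:
+ *
+ * L = [3, 4],
+ * [1, 2, 3, 4] = [1, 2] ++ L. %% tail cells are L itself, not a copy
+ */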
+
+BIF_RETTYPE subtract_2(BIF_ALIST_2)
+{
+ Eterm list;
+ Eterm* hp;
+ Uint need;
+ Eterm res;
+ Eterm small_vec[10]; /* Preallocated memory for small lists */
+ Eterm* vec_p;
+ Eterm* vp;
+ int i;
+ int n;
+ int m;
+
+ if ((n = list_length(BIF_ARG_1)) < 0) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+ if ((m = list_length(BIF_ARG_2)) < 0) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+
+ if (n == 0)
+ BIF_RET(NIL);
+ if (m == 0)
+ BIF_RET(BIF_ARG_1);
+
+ /* allocate element vector */
+ if (n <= sizeof(small_vec)/sizeof(small_vec[0]))
+ vec_p = small_vec;
+ else
+ vec_p = (Eterm*) erts_alloc(ERTS_ALC_T_TMP, n * sizeof(Eterm));
+
+ /* PUT ALL ELEMENTS IN VP */
+ vp = vec_p;
+ list = BIF_ARG_1;
+ i = n;
+ while(i--) {
+ Eterm* listp = list_val(list);
+ *vp++ = CAR(listp);
+ list = CDR(listp);
+ }
+
+ /* MARK ALL DELETED CELLS */
+ list = BIF_ARG_2;
+ m = 0; /* number of deleted elements */
+ while(is_list(list)) {
+ Eterm* listp = list_val(list);
+ Eterm elem = CAR(listp);
+ i = n;
+ vp = vec_p;
+ while(i--) {
+ if (is_value(*vp) && eq(*vp, elem)) {
+ *vp = THE_NON_VALUE;
+ m++;
+ break;
+ }
+ vp++;
+ }
+ list = CDR(listp);
+ }
+
+ if (m == n) /* All deleted ? */
+ res = NIL;
+ else if (m == 0) /* None deleted ? */
+ res = BIF_ARG_1;
+ else { /* REBUILD LIST */
+ res = NIL;
+ need = 2*(n - m);
+ hp = HAlloc(BIF_P, need);
+ vp = vec_p + n - 1;
+ while(vp >= vec_p) {
+ if (is_value(*vp)) {
+ res = CONS(hp, *vp, res);
+ hp += 2;
+ }
+ vp--;
+ }
+ }
+ if (vec_p != small_vec)
+ erts_free(ERTS_ALC_T_TMP, (void *) vec_p);
+ BIF_RET(res);
+}
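+
+/* subtract_2() is O(length(A) * length(B)): each element of B cancels
+ * at most one surviving cell in the scratch vector, so duplicates are
+ * removed one match at a time. Illustrative:
+ *
+ * [2, 1] = [1, 2, 1] -- [1]. %% only the first 1 is removed
+ */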
+
+BIF_RETTYPE lists_member_2(BIF_ALIST_2)
+{
+ Eterm term;
+ Eterm list;
+ Eterm item;
+ int non_immed_key;
+ int max_iter = 10 * CONTEXT_REDS;
+
+ if (is_nil(BIF_ARG_2)) {
+ BIF_RET(am_false);
+ } else if (is_not_list(BIF_ARG_2)) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+
+ term = BIF_ARG_1;
+ non_immed_key = is_not_immed(term);
+ list = BIF_ARG_2;
+ while (is_list(list)) {
+ if (--max_iter < 0) {
+ BUMP_ALL_REDS(BIF_P);
+ BIF_TRAP2(bif_export[BIF_lists_member_2], BIF_P, term, list);
+ }
+ item = CAR(list_val(list));
+ if ((item == term) || (non_immed_key && eq(item, term))) {
+ BIF_RET2(am_true, CONTEXT_REDS - max_iter/10);
+ }
+ list = CDR(list_val(list));
+ }
+ if (is_not_nil(list)) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+ BIF_RET2(am_false, CONTEXT_REDS - max_iter/10);
+}
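+
+/* Note the reduction accounting: the iteration budget is a full time
+ * slice (10 * CONTEXT_REDS list cells), and on return BIF_RET2()
+ * charges CONTEXT_REDS - max_iter/10 reductions, i.e. one reduction
+ * per ten cells actually visited before returning or trapping.
+ */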
+
+BIF_RETTYPE lists_reverse_2(BIF_ALIST_2)
+{
+ Eterm list;
+ Eterm tmp_list;
+ Eterm result;
+ Eterm* hp;
+ Uint n;
+ int max_iter;
+
+ /*
+ * Handle legal and illegal non-lists quickly.
+ */
+ if (is_nil(BIF_ARG_1)) {
+ BIF_RET(BIF_ARG_2);
+ } else if (is_not_list(BIF_ARG_1)) {
+ error:
+ BIF_ERROR(BIF_P, BADARG);
+ }
+
+ /*
+ * First use the rest of the remaining heap space.
+ */
+ list = BIF_ARG_1;
+ result = BIF_ARG_2;
+ hp = HEAP_TOP(BIF_P);
+ n = HeapWordsLeft(BIF_P) / 2;
+ while (n != 0 && is_list(list)) {
+ Eterm* pair = list_val(list);
+ result = CONS(hp, CAR(pair), result);
+ list = CDR(pair);
+ hp += 2;
+ n--;
+ }
+ HEAP_TOP(BIF_P) = hp;
+ if (is_nil(list)) {
+ BIF_RET(result);
+ }
+
+ /*
+ * Calculate length of remaining list (up to a suitable limit).
+ */
+ max_iter = CONTEXT_REDS * 40;
+ n = 0;
+ tmp_list = list;
+ while (max_iter-- > 0 && is_list(tmp_list)) {
+ tmp_list = CDR(list_val(tmp_list));
+ n++;
+ }
+ if (is_not_nil(tmp_list) && is_not_list(tmp_list)) {
+ goto error;
+ }
+
+ /*
+ * Now do one HAlloc() and continue reversing.
+ */
+ hp = HAlloc(BIF_P, 2*n);
+ while (n != 0 && is_list(list)) {
+ Eterm* pair = list_val(list);
+ result = CONS(hp, CAR(pair), result);
+ list = CDR(pair);
+ hp += 2;
+ n--;
+ }
+ if (is_nil(list)) {
+ BIF_RET(result);
+ } else {
+ BUMP_ALL_REDS(BIF_P);
+ BIF_TRAP2(bif_export[BIF_lists_reverse_2], BIF_P, list, result);
+ }
+}
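+
+/* lists_reverse_2() works in three phases: consume whatever heap words
+ * are already available (no allocation), then measure the remainder
+ * and do a single HAlloc(), and finally trap with the partial result
+ * when the input exceeds roughly CONTEXT_REDS * 40 cells. Because the
+ * accumulator is an argument, the trap restarts transparently with
+ * (Rest, PartialResult).
+ */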
+
+BIF_RETTYPE
+lists_keymember_3(Process* p, Eterm Key, Eterm Pos, Eterm List)
+{
+ Eterm res;
+
+ res = keyfind(BIF_lists_keymember_3, p, Key, Pos, List);
+ if (is_value(res) && is_tuple(res)) {
+ return am_true;
+ } else {
+ return res;
+ }
+}
+
+BIF_RETTYPE
+lists_keysearch_3(Process* p, Eterm Key, Eterm Pos, Eterm List)
+{
+ Eterm res;
+
+ res = keyfind(BIF_lists_keysearch_3, p, Key, Pos, List);
+ if (is_non_value(res) || is_not_tuple(res)) {
+ return res;
+ } else { /* Tuple */
+ Eterm* hp = HAlloc(p, 3);
+ return TUPLE2(hp, am_value, res);
+ }
+}
+
+BIF_RETTYPE
+lists_keyfind_3(Process* p, Eterm Key, Eterm Pos, Eterm List)
+{
+ return keyfind(BIF_lists_keyfind_3, p, Key, Pos, List);
+}
+
+static Eterm
+keyfind(int Bif, Process* p, Eterm Key, Eterm Pos, Eterm List)
+{
+ int max_iter = 10 * CONTEXT_REDS;
+ Sint pos;
+ Eterm term;
+
+ if (!is_small(Pos) || (pos = signed_val(Pos)) < 1) {
+ BIF_ERROR(p, BADARG);
+ }
+
+ if (is_small(Key)) {
+ double float_key = (double) signed_val(Key);
+
+ while (is_list(List)) {
+ if (--max_iter < 0) {
+ BUMP_ALL_REDS(p);
+ BIF_TRAP3(bif_export[Bif], p, Key, Pos, List);
+ }
+ term = CAR(list_val(List));
+ List = CDR(list_val(List));
+ if (is_tuple(term)) {
+ Eterm *tuple_ptr = tuple_val(term);
+ if (pos <= arityval(*tuple_ptr)) {
+ Eterm element = tuple_ptr[pos];
+ if (Key == element) {
+ return term;
+ } else if (is_float(element)) {
+ FloatDef f;
+
+ GET_DOUBLE(element, f);
+ if (f.fd == float_key) {
+ return term;
+ }
+ }
+ }
+ }
+ }
+ } else if (is_immed(Key)) {
+ while (is_list(List)) {
+ if (--max_iter < 0) {
+ BUMP_ALL_REDS(p);
+ BIF_TRAP3(bif_export[Bif], p, Key, Pos, List);
+ }
+ term = CAR(list_val(List));
+ List = CDR(list_val(List));
+ if (is_tuple(term)) {
+ Eterm *tuple_ptr = tuple_val(term);
+ if (pos <= arityval(*tuple_ptr)) {
+ Eterm element = tuple_ptr[pos];
+ if (Key == element) {
+ return term;
+ }
+ }
+ }
+ }
+ } else {
+ while (is_list(List)) {
+ if (--max_iter < 0) {
+ BUMP_ALL_REDS(p);
+ BIF_TRAP3(bif_export[Bif], p, Key, Pos, List);
+ }
+ term = CAR(list_val(List));
+ List = CDR(list_val(List));
+ if (is_tuple(term)) {
+ Eterm *tuple_ptr = tuple_val(term);
+ if (pos <= arityval(*tuple_ptr)) {
+ Eterm element = tuple_ptr[pos];
+ if (cmp(Key, element) == 0) {
+ return term;
+ }
+ }
+ }
+ }
+ }
+
+ if (is_not_nil(List)) {
+ BIF_ERROR(p, BADARG);
+ }
+ return am_false;
+}
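+
+/* keyfind() is specialized three ways on the key type: small integers
+ * also match numerically equal floats (the float_key comparison),
+ * other immediates compare with ==, and everything else falls back to
+ * cmp(). That gives the key functions arithmetic (==) semantics;
+ * illustrative:
+ *
+ * {1.0, a} = lists:keyfind(1, 1, [{1.0, a}]).
+ */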
diff --git a/erts/emulator/beam/erl_bif_op.c b/erts/emulator/beam/erl_bif_op.c
new file mode 100644
index 0000000000..6da72dcef9
--- /dev/null
+++ b/erts/emulator/beam/erl_bif_op.c
@@ -0,0 +1,327 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 1999-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Operator BIFs.
+ */
+
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+
+#include "sys.h"
+#include "erl_vm.h"
+#include "global.h"
+#include "erl_process.h"
+#include "error.h"
+#include "erl_driver.h"
+#include "bif.h"
+#include "big.h"
+#include "dist.h"
+#include "erl_version.h"
+#include "erl_binary.h"
+
+BIF_RETTYPE and_2(BIF_ALIST_2)
+{
+ if (BIF_ARG_1 == am_true && BIF_ARG_2 == am_true)
+ BIF_RET(am_true);
+ else if (BIF_ARG_1 == am_true && BIF_ARG_2 == am_false)
+ BIF_RET(am_false);
+ else if (BIF_ARG_1 == am_false && BIF_ARG_2 == am_true)
+ BIF_RET(am_false);
+ else if (BIF_ARG_1 == am_false && BIF_ARG_2 == am_false)
+ BIF_RET(am_false);
+ BIF_ERROR(BIF_P, BADARG);
+}
+
+BIF_RETTYPE or_2(BIF_ALIST_2)
+{
+ if (BIF_ARG_1 == am_true && BIF_ARG_2 == am_true)
+ BIF_RET(am_true);
+ else if (BIF_ARG_1 == am_true && BIF_ARG_2 == am_false)
+ BIF_RET(am_true);
+ else if (BIF_ARG_1 == am_false && BIF_ARG_2 == am_true)
+ BIF_RET(am_true);
+ else if (BIF_ARG_1 == am_false && BIF_ARG_2 == am_false)
+ BIF_RET(am_false);
+ BIF_ERROR(BIF_P, BADARG);
+}
+
+BIF_RETTYPE xor_2(BIF_ALIST_2)
+{
+ if (BIF_ARG_1 == am_true && BIF_ARG_2 == am_true)
+ BIF_RET(am_false);
+ else if (BIF_ARG_1 == am_true && BIF_ARG_2 == am_false)
+ BIF_RET(am_true);
+ else if (BIF_ARG_1 == am_false && BIF_ARG_2 == am_true)
+ BIF_RET(am_true);
+ else if (BIF_ARG_1 == am_false && BIF_ARG_2 == am_false)
+ BIF_RET(am_false);
+ BIF_ERROR(BIF_P, BADARG);
+}
+
+BIF_RETTYPE not_1(BIF_ALIST_1)
+{
+ if (BIF_ARG_1 == am_true)
+ BIF_RET(am_false);
+ else if (BIF_ARG_1 == am_false)
+ BIF_RET(am_true);
+ BIF_ERROR(BIF_P, BADARG);
+}
+
+BIF_RETTYPE sgt_2(BIF_ALIST_2)
+{
+ BIF_RET(cmp_gt(BIF_ARG_1, BIF_ARG_2) ? am_true : am_false);
+}
+
+BIF_RETTYPE sge_2(BIF_ALIST_2)
+{
+ BIF_RET(cmp_ge(BIF_ARG_1, BIF_ARG_2) ? am_true : am_false);
+}
+
+BIF_RETTYPE slt_2(BIF_ALIST_2)
+{
+ BIF_RET(cmp_lt(BIF_ARG_1, BIF_ARG_2) ? am_true : am_false);
+}
+
+BIF_RETTYPE sle_2(BIF_ALIST_2)
+{
+ BIF_RET(cmp_le(BIF_ARG_1, BIF_ARG_2) ? am_true : am_false);
+}
+
+BIF_RETTYPE seq_2(BIF_ALIST_2)
+{
+ BIF_RET(eq(BIF_ARG_1, BIF_ARG_2) ? am_true : am_false);
+}
+
+BIF_RETTYPE seqeq_2(BIF_ALIST_2)
+{
+ BIF_RET(cmp_eq(BIF_ARG_1, BIF_ARG_2) ? am_true : am_false);
+}
+
+BIF_RETTYPE sneq_2(BIF_ALIST_2)
+{
+ BIF_RET(eq(BIF_ARG_1, BIF_ARG_2) ? am_false : am_true);
+}
+
+BIF_RETTYPE sneqeq_2(BIF_ALIST_2)
+{
+ BIF_RET(cmp_ne(BIF_ARG_1, BIF_ARG_2) ? am_true : am_false);
+}
+
+BIF_RETTYPE is_atom_1(BIF_ALIST_1)
+{
+ if (is_atom(BIF_ARG_1)) {
+ BIF_RET(am_true);
+ }
+ BIF_RET(am_false);
+}
+
+BIF_RETTYPE is_float_1(BIF_ALIST_1)
+{
+ if (is_float(BIF_ARG_1)) {
+ BIF_RET(am_true);
+ }
+ BIF_RET(am_false);
+}
+
+BIF_RETTYPE is_integer_1(BIF_ALIST_1)
+{
+ if (is_integer(BIF_ARG_1)) {
+ BIF_RET(am_true);
+ }
+ BIF_RET(am_false);
+}
+
+BIF_RETTYPE is_list_1(BIF_ALIST_1)
+{
+ if (is_list(BIF_ARG_1) || is_nil(BIF_ARG_1)) {
+ BIF_RET(am_true);
+ }
+ BIF_RET(am_false);
+}
+
+BIF_RETTYPE is_number_1(BIF_ALIST_1)
+{
+ if (is_number(BIF_ARG_1)) {
+ BIF_RET(am_true);
+ }
+ BIF_RET(am_false);
+}
+
+
+BIF_RETTYPE is_pid_1(BIF_ALIST_1)
+{
+ if (is_pid(BIF_ARG_1)) {
+ BIF_RET(am_true);
+ }
+ BIF_RET(am_false);
+}
+
+BIF_RETTYPE is_port_1(BIF_ALIST_1)
+{
+ if (is_port(BIF_ARG_1)) {
+ BIF_RET(am_true);
+ }
+ BIF_RET(am_false);
+}
+
+BIF_RETTYPE is_reference_1(BIF_ALIST_1)
+{
+ if (is_ref(BIF_ARG_1)) {
+ BIF_RET(am_true);
+ }
+ BIF_RET(am_false);
+}
+
+BIF_RETTYPE is_tuple_1(BIF_ALIST_1)
+{
+ if (is_tuple(BIF_ARG_1)) {
+ BIF_RET(am_true);
+ }
+ BIF_RET(am_false);
+}
+
+BIF_RETTYPE is_binary_1(BIF_ALIST_1)
+{
+ if (is_binary(BIF_ARG_1) && binary_bitsize(BIF_ARG_1) == 0) {
+ BIF_RET(am_true);
+ }
+ BIF_RET(am_false);
+}
+
+BIF_RETTYPE is_bitstring_1(BIF_ALIST_1)
+{
+ if (is_binary(BIF_ARG_1)) {
+ BIF_RET(am_true);
+ }
+ BIF_RET(am_false);
+}
+
+BIF_RETTYPE is_function_1(BIF_ALIST_1)
+{
+ if (is_any_fun(BIF_ARG_1)) {
+ BIF_RET(am_true);
+ } else {
+ BIF_RET(am_false);
+ }
+}
+
+BIF_RETTYPE is_function_2(BIF_ALIST_2)
+{
+ Sint arity;
+
+ /*
+ * Verify argument 2 (arity); arity must be >= 0.
+ */
+ if (is_small(BIF_ARG_2)) {
+ arity = signed_val(BIF_ARG_2);
+ if (arity < 0) {
+ error:
+ BIF_ERROR(BIF_P, BADARG);
+ }
+ } else if (is_big(BIF_ARG_2) && !bignum_header_is_neg(*big_val(BIF_ARG_2))) {
+ /* A positive bignum is OK, but can't possibly match. */
+ arity = -1;
+ } else {
+ /* Everything else (including negative bignum) is an error. */
+ goto error;
+ }
+
+ if (is_fun(BIF_ARG_1)) {
+ ErlFunThing* funp = (ErlFunThing *) fun_val(BIF_ARG_1);
+
+ if (funp->arity == (Uint) arity) {
+ BIF_RET(am_true);
+ }
+ } else if (is_export(BIF_ARG_1)) {
+ Export* exp = (Export *) (export_val(BIF_ARG_1))[1];
+
+ if (exp->code[2] == (Uint) arity) {
+ BIF_RET(am_true);
+ }
+ } else if (is_tuple(BIF_ARG_1)) {
+ Eterm* tp = tuple_val(BIF_ARG_1);
+ if (tp[0] == make_arityval(2) && is_atom(tp[1]) && is_atom(tp[2])) {
+ BIF_RET(am_true);
+ }
+ }
+ BIF_RET(am_false);
+}
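+
+/* The arity = -1 trick above keeps positive bignum arities legal but
+ * unmatchable: no fun has such an arity, so every comparison fails and
+ * the BIF answers false instead of badarg. Illustrative:
+ *
+ * false = is_function(fun() -> ok end, 1 bsl 64).
+ */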
+
+BIF_RETTYPE is_boolean_1(BIF_ALIST_1)
+{
+ if (BIF_ARG_1 == am_true || BIF_ARG_1 == am_false) {
+ BIF_RET(am_true);
+ } else {
+ BIF_RET(am_false);
+ }
+}
+
+
+
+/*
+ * The compiler usually translates calls to is_record/2 to more primitive
+ * operations. In some cases this is not possible. We'll need to implement
+ * a weak version of is_record/2 as a BIF (the size of the record cannot
+ * be verified).
+ */
+BIF_RETTYPE is_record_2(BIF_ALIST_2)
+{
+ Eterm *t;
+
+ if (is_not_atom(BIF_ARG_2)) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+
+ if (is_tuple(BIF_ARG_1) &&
+ arityval(*(t = tuple_val(BIF_ARG_1))) >= 1 &&
+ t[1] == BIF_ARG_2) {
+ BIF_RET(am_true);
+ }
+ BIF_RET(am_false);
+}
+
+
+/*
+ * The record test cannot actually be a BIF. The epp preprocessor is
+ * involved in the real guard test: we have to add one more parameter,
+ * the return value of record_info(size, Rec), which is the arity of
+ * the tuple. This may seem awkward when applied from the shell, where
+ * the plain tuple test is more understandable, I think...
+ */
+BIF_RETTYPE is_record_3(BIF_ALIST_3)
+{
+ Eterm *t;
+ if (is_not_atom(BIF_ARG_2) || is_not_small(BIF_ARG_3)) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+
+ if (is_tuple(BIF_ARG_1) &&
+ arityval(*(t = tuple_val(BIF_ARG_1))) == signed_val(BIF_ARG_3)
+ && t[1] == BIF_ARG_2) {
+ BIF_RET(am_true);
+ }
+ BIF_RET(am_false);
+}
+
+
+
+
+
diff --git a/erts/emulator/beam/erl_bif_os.c b/erts/emulator/beam/erl_bif_os.c
new file mode 100644
index 0000000000..954b1f9729
--- /dev/null
+++ b/erts/emulator/beam/erl_bif_os.c
@@ -0,0 +1,190 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 1999-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * BIFs belonging to the 'os' module.
+ */
+
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+
+#include "sys.h"
+#include "erl_vm.h"
+#include "global.h"
+#include "erl_process.h"
+#include "error.h"
+#include "erl_driver.h"
+#include "bif.h"
+#include "big.h"
+#include "dist.h"
+#include "erl_version.h"
+
+/*
+ * Return a timestamp.
+ */
+BIF_RETTYPE os_timestamp_0(BIF_ALIST_0)
+{
+ Uint megasec, sec, microsec;
+ Eterm* hp;
+
+ get_sys_now(&megasec, &sec, &microsec);
+ hp = HAlloc(BIF_P, 4);
+ BIF_RET(TUPLE3(hp, make_small(megasec), make_small(sec),
+ make_small(microsec)));
+}
+
+
+/*
+ * Return the pid for the Erlang process in the host OS.
+ */
+Eterm
+os_getpid_0(Process* p)
+{
+ char pid_string[21]; /* enough for a 64 bit number */
+ int n;
+ Eterm* hp;
+ sys_get_pid(pid_string); /* In sys.c */
+ n = sys_strlen(pid_string);
+ hp = HAlloc(p, n*2);
+ BIF_RET(buf_to_intlist(&hp, pid_string, n, NIL));
+}
+
+Eterm
+os_getenv_0(Process* p)
+{
+ GETENV_STATE state;
+ char *cp;
+ Eterm* hp;
+ Eterm ret;
+ Eterm str;
+ int len;
+
+ init_getenv_state(&state);
+
+ ret = NIL;
+ while ((cp = getenv_string(&state)) != NULL) {
+ len = strlen(cp);
+ hp = HAlloc(p, len*2+2);
+ str = buf_to_intlist(&hp, cp, len, NIL);
+ ret = CONS(hp, str, ret);
+ }
+
+ fini_getenv_state(&state);
+
+ return ret;
+}
+
+Eterm
+os_getenv_1(Process* p, Eterm key)
+{
+ Eterm str;
+ int len, res;
+ char *key_str, *val;
+ char buf[1024];
+ size_t val_size = sizeof(buf);
+
+ len = is_string(key);
+ if (!len) {
+ BIF_ERROR(p, BADARG);
+ }
+ /* Leave at least one byte in buf for value */
+ key_str = len < sizeof(buf)-2 ? &buf[0] : erts_alloc(ERTS_ALC_T_TMP, len+1);
+ if (intlist_to_buf(key, key_str, len) != len)
+ erl_exit(1, "%s:%d: Internal error\n", __FILE__, __LINE__);
+ key_str[len] = '\0';
+
+ if (key_str != &buf[0])
+ val = &buf[0];
+ else {
+ val_size -= len + 1;
+ val = &buf[len + 1];
+ }
+ res = erts_sys_getenv(key_str, val, &val_size);
+
+ if (res < 0) {
+ no_var:
+ str = am_false;
+ } else {
+ Eterm* hp;
+ if (res > 0) {
+ val = erts_alloc(ERTS_ALC_T_TMP, val_size);
+ while (1) {
+ res = erts_sys_getenv(key_str, val, &val_size);
+ if (res == 0)
+ break;
+ else if (res < 0)
+ goto no_var;
+ else
+ val = erts_realloc(ERTS_ALC_T_TMP, val, val_size);
+ }
+ }
+ if (val_size) {
+ hp = HAlloc(p, val_size*2);
+ str = buf_to_intlist(&hp, val, val_size, NIL);
+ } else {
+ str = NIL; /* empty value; hp must not be used uninitialized */
+ }
+ }
+ if (key_str != &buf[0])
+ erts_free(ERTS_ALC_T_TMP, key_str);
+ if (val < &buf[0] || &buf[sizeof(buf)-1] < val)
+ erts_free(ERTS_ALC_T_TMP, val);
+ BIF_RET(str);
+}
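+
+/* os_getenv_1() probes erts_sys_getenv() with an on-stack buffer and
+ * only heap-allocates when the reported size does not fit; the retry
+ * loop also covers the variable growing between probe and copy. A
+ * simplified sketch of the probe-and-grow idiom (not compiled):
+ */
+#if 0
+static int getenv_grow_example(char *key, char **valp, size_t *szp)
+{
+ int res = erts_sys_getenv(key, *valp, szp); /* may update *szp */
+ while (res > 0) { /* buffer too small; *szp now holds needed size */
+ *valp = erts_realloc(ERTS_ALC_T_TMP, *valp, *szp);
+ res = erts_sys_getenv(key, *valp, szp);
+ }
+ return res; /* 0 = found, < 0 = no such variable */
+}
+#endif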
+
+Eterm
+os_putenv_2(Process* p, Eterm key, Eterm value)
+{
+ char def_buf[1024];
+ char *buf = NULL;
+ int sep_ix, i, key_len, value_len, tot_len;
+ key_len = is_string(key);
+ if (!key_len) {
+ error:
+ if (buf)
+ erts_free(ERTS_ALC_T_TMP, (void *) buf);
+ BIF_ERROR(p, BADARG);
+ }
+ if (is_nil(value))
+ value_len = 0;
+ else {
+ value_len = is_string(value);
+ if (!value_len)
+ goto error;
+ }
+ tot_len = key_len + 1 + value_len + 1;
+ if (tot_len <= sizeof(def_buf))
+ buf = &def_buf[0];
+ else
+ buf = erts_alloc(ERTS_ALC_T_TMP, tot_len);
+ i = intlist_to_buf(key, buf, key_len);
+ if (i != key_len)
+ erl_exit(1, "%s:%d: Internal error\n", __FILE__, __LINE__);
+ sep_ix = i;
+ buf[i++] = '=';
+ if (is_not_nil(value))
+ i += intlist_to_buf(value, &buf[i], value_len);
+ if (i != key_len + 1 + value_len)
+ erl_exit(1, "%s:%d: Internal error\n", __FILE__, __LINE__);
+ buf[i] = '\0';
+ if (erts_sys_putenv(buf, sep_ix)) {
+ goto error;
+ }
+ if (buf != &def_buf[0])
+ erts_free(ERTS_ALC_T_TMP, (void *) buf);
+ BIF_RET(am_true);
+}
+
diff --git a/erts/emulator/beam/erl_bif_port.c b/erts/emulator/beam/erl_bif_port.c
new file mode 100644
index 0000000000..f454f2e12d
--- /dev/null
+++ b/erts/emulator/beam/erl_bif_port.c
@@ -0,0 +1,1476 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2001-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+
+#ifdef _OSE_
+# include "ose.h"
+#endif
+
+#include <ctype.h>
+
+#define ERTS_WANT_EXTERNAL_TAGS
+#include "sys.h"
+#include "erl_vm.h"
+#include "erl_sys_driver.h"
+#include "global.h"
+#include "erl_process.h"
+#include "error.h"
+#include "bif.h"
+#include "big.h"
+#include "dist.h"
+#include "erl_version.h"
+#include "erl_binary.h"
+#include "erl_db_util.h"
+#include "register.h"
+#include "external.h"
+#include "packet_parser.h"
+#include "erl_bits.h"
+
+static int open_port(Process* p, Eterm name, Eterm settings, int *err_nump);
+static byte* convert_environment(Process* p, Eterm env);
+static char **convert_args(Eterm);
+static void free_args(char **);
+
+char *erts_default_arg0 = "default";
+
+BIF_RETTYPE open_port_2(BIF_ALIST_2)
+{
+ int port_num;
+ Eterm port_val;
+ char *str;
+ int err_num;
+
+ if ((port_num = open_port(BIF_P, BIF_ARG_1, BIF_ARG_2, &err_num)) < 0) {
+ if (port_num == -3) {
+ ASSERT(err_num == BADARG || err_num == SYSTEM_LIMIT);
+ BIF_ERROR(BIF_P, err_num);
+ } else if (port_num == -2) {
+ str = erl_errno_id(err_num);
+ } else {
+ str = "einval";
+ }
+ BIF_P->fvalue = am_atom_put(str, strlen(str));
+ BIF_ERROR(BIF_P, EXC_ERROR);
+ }
+
+ erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_LINK);
+
+ port_val = erts_port[port_num].id;
+ erts_add_link(&(erts_port[port_num].nlinks), LINK_PID, BIF_P->id);
+ erts_add_link(&(BIF_P->nlinks), LINK_PID, port_val);
+
+ erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_LINK);
+
+ erts_port_release(&erts_port[port_num]);
+
+ BIF_RET(port_val);
+}
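+
+/* open_port() reports failure through three channels: -3 carries a
+ * ready-made badarg/system_limit error, -2 carries an errno mapped by
+ * erl_errno_id(), and anything else degenerates to einval; the latter
+ * two surface in Erlang as error:Reason with an atom reason.
+ */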
+
+/****************************************************************************
+
+ PORT BIFS:
+
+ port_command/2 -- replace Port ! {..., {command, Data}}
+ port_command(Port, Data) -> true
+ when port(Port), io-list(Data)
+
+ port_control/3 -- new
+ port_control(Port, Ctl, Data) -> Reply
+ when integer(Ctl), io-list(Data), io-list(Reply)
+
+ port_close/1 -- replace Port ! {..., close}
+ port_close(Port) -> true
+ when port(Port)
+
+ port_connect/2 -- replace Port ! {..., {connect, Pid}}
+ port_connect(Port, Pid)
+ when port(Port), pid(Pid)
+
+ ***************************************************************************/
+
+static Port*
+id_or_name2port(Process *c_p, Eterm id)
+{
+ Port *port;
+ if (is_not_atom(id))
+ port = erts_id2port(id, c_p, ERTS_PROC_LOCK_MAIN);
+ else
+ erts_whereis_name(c_p, ERTS_PROC_LOCK_MAIN, id, NULL, 0, 0, &port);
+ return port;
+}
+
+#define ERTS_PORT_COMMAND_FLAG_FORCE (((Uint32) 1) << 0)
+#define ERTS_PORT_COMMAND_FLAG_NOSUSPEND (((Uint32) 1) << 1)
+
+static BIF_RETTYPE do_port_command(Process *BIF_P,
+ Eterm BIF_ARG_1,
+ Eterm BIF_ARG_2,
+ Eterm BIF_ARG_3,
+ Uint32 flags)
+{
+ BIF_RETTYPE res;
+ Port *p;
+
+ /* Trace sched out before lock check wait */
+ if (IS_TRACED_FL(BIF_P, F_TRACE_SCHED_PROCS)) {
+ trace_virtual_sched(BIF_P, am_out);
+ }
+
+ if (erts_system_profile_flags.runnable_procs && erts_system_profile_flags.exclusive) {
+ profile_runnable_proc(BIF_P, am_inactive);
+ }
+
+ p = id_or_name2port(BIF_P, BIF_ARG_1);
+ if (!p) {
+ if (IS_TRACED_FL(BIF_P, F_TRACE_SCHED_PROCS)) {
+ trace_virtual_sched(BIF_P, am_in);
+ }
+ if (erts_system_profile_flags.runnable_procs && erts_system_profile_flags.exclusive) {
+ profile_runnable_proc(BIF_P, am_active);
+ }
+ BIF_ERROR(BIF_P, BADARG);
+ }
+
+ /* Trace port in, id_or_name2port causes wait */
+
+ if (IS_TRACED_FL(p, F_TRACE_SCHED_PORTS)) {
+ trace_sched_ports_where(p, am_in, am_command);
+ }
+ if (erts_system_profile_flags.runnable_ports && !erts_port_is_scheduled(p)) {
+ profile_runnable_port(p, am_active);
+ }
+
+ ERTS_BIF_PREP_RET(res, am_true);
+
+ if ((flags & ERTS_PORT_COMMAND_FLAG_FORCE)
+ && !(p->drv_ptr->flags & ERL_DRV_FLAG_SOFT_BUSY)) {
+ ERTS_BIF_PREP_ERROR(res, BIF_P, EXC_NOTSUP);
+ }
+ else if (!(flags & ERTS_PORT_COMMAND_FLAG_FORCE)
+ && p->status & ERTS_PORT_SFLG_PORT_BUSY) {
+ if (flags & ERTS_PORT_COMMAND_FLAG_NOSUSPEND) {
+ ERTS_BIF_PREP_RET(res, am_false);
+ }
+ else {
+ erts_suspend(BIF_P, ERTS_PROC_LOCK_MAIN, p);
+ if (erts_system_monitor_flags.busy_port) {
+ monitor_generic(BIF_P, am_busy_port, p->id);
+ }
+ ERTS_BIF_PREP_YIELD3(res, bif_export[BIF_port_command_3], BIF_P,
+ BIF_ARG_1, BIF_ARG_2, BIF_ARG_3);
+ }
+ } else {
+ int wres;
+ erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ ERTS_SMP_CHK_NO_PROC_LOCKS;
+ wres = erts_write_to_port(BIF_P->id, p, BIF_ARG_2);
+ erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ if (wres != 0) {
+ ERTS_BIF_PREP_ERROR(res, BIF_P, BADARG);
+ }
+ }
+
+ if (IS_TRACED_FL(p, F_TRACE_SCHED_PORTS)) {
+ trace_sched_ports_where(p, am_out, am_command);
+ }
+ if (erts_system_profile_flags.runnable_ports && !erts_port_is_scheduled(p)) {
+ profile_runnable_port(p, am_inactive);
+ }
+
+ erts_port_release(p);
+ /* Trace sched in after port release */
+ if (IS_TRACED_FL(BIF_P, F_TRACE_SCHED_PROCS)) {
+ trace_virtual_sched(BIF_P, am_in);
+ }
+ if (erts_system_profile_flags.runnable_procs && erts_system_profile_flags.exclusive) {
+ profile_runnable_proc(BIF_P, am_active);
+ }
+
+ if (ERTS_PROC_IS_EXITING(BIF_P)) {
+ KILL_CATCHES(BIF_P); /* Must exit */
+ ERTS_BIF_PREP_ERROR(res, BIF_P, EXC_ERROR);
+ }
+ return res;
+}
+
+BIF_RETTYPE port_command_2(BIF_ALIST_2)
+{
+ return do_port_command(BIF_P, BIF_ARG_1, BIF_ARG_2, NIL, 0);
+}
+
+BIF_RETTYPE port_command_3(BIF_ALIST_3)
+{
+ Eterm l = BIF_ARG_3;
+ Uint32 flags = 0;
+ while (is_list(l)) {
+ Eterm* cons = list_val(l);
+ Eterm car = CAR(cons);
+ if (car == am_force) {
+ flags |= ERTS_PORT_COMMAND_FLAG_FORCE;
+ } else if (car == am_nosuspend) {
+ flags |= ERTS_PORT_COMMAND_FLAG_NOSUSPEND;
+ } else {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+ l = CDR(cons);
+ }
+ if(!is_nil(l)) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+ return do_port_command(BIF_P, BIF_ARG_1, BIF_ARG_2, BIF_ARG_3, flags);
+}
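+
+/* The two flag bits map directly onto the option list accepted by
+ * port_command/3; illustrative:
+ *
+ * port_command(Port, Data) %% may suspend the caller when busy
+ * port_command(Port, Data, [nosuspend]) %% returns false when busy
+ * port_command(Port, Data, [force]) %% notsup unless the driver has
+ * %% ERL_DRV_FLAG_SOFT_BUSY
+ */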
+
+BIF_RETTYPE port_call_2(BIF_ALIST_2)
+{
+ return port_call_3(BIF_P,BIF_ARG_1,make_small(0),BIF_ARG_2);
+}
+
+BIF_RETTYPE port_call_3(BIF_ALIST_3)
+{
+ Uint op;
+ Port *p;
+ Uint size;
+ byte *bytes;
+ byte *endp;
+ size_t real_size;
+ erts_driver_t *drv;
+ byte port_input[256]; /* Default input buffer to encode in */
+ byte port_result[256]; /* Buffer for result from port. */
+ byte* port_resp; /* Pointer to result buffer. */
+ char *prc;
+ int ret;
+ Eterm res;
+ Sint result_size;
+ Eterm *hp;
+ Eterm *hp_end; /* To satisfy hybrid heap architecture */
+ unsigned ret_flags = 0U;
+ int fpe_was_unmasked;
+
+ bytes = &port_input[0];
+ port_resp = port_result;
+ /* Trace port scheduling, with virtual process descheduling
+ * during lock wait.
+ */
+ if (IS_TRACED_FL(BIF_P, F_TRACE_SCHED_PROCS)) {
+ trace_virtual_sched(BIF_P, am_out);
+ }
+
+ if (erts_system_profile_flags.runnable_procs && erts_system_profile_flags.exclusive) {
+ profile_runnable_proc(BIF_P, am_inactive);
+ }
+
+ p = id_or_name2port(BIF_P, BIF_ARG_1);
+ if (!p) {
+ error:
+ if (port_resp != port_result &&
+ !(ret_flags & DRIVER_CALL_KEEP_BUFFER)) {
+ driver_free(port_resp);
+ }
+ if (bytes != &port_input[0])
+ erts_free(ERTS_ALC_T_PORT_CALL_BUF, bytes);
+ /* Need to virtually schedule in the process if there
+ * was an error.
+ */
+ if (IS_TRACED_FL(BIF_P, F_TRACE_SCHED_PROCS)) {
+ trace_virtual_sched(BIF_P, am_in);
+ }
+
+ if (erts_system_profile_flags.runnable_procs && erts_system_profile_flags.exclusive) {
+ profile_runnable_proc(BIF_P, am_active);
+ }
+
+ if (p)
+ erts_port_release(p);
+#ifdef ERTS_SMP
+ ERTS_SMP_BIF_CHK_PENDING_EXIT(BIF_P, ERTS_PROC_LOCK_MAIN);
+#else
+ ERTS_BIF_CHK_EXITED(BIF_P);
+#endif
+ BIF_ERROR(BIF_P, BADARG);
+ }
+
+ if ((drv = p->drv_ptr) == NULL) {
+ goto error;
+ }
+ if (drv->call == NULL) {
+ goto error;
+ }
+ if (!term_to_Uint(BIF_ARG_2, &op)) {
+ goto error;
+ }
+ p->caller = BIF_P->id;
+
+ /* Lock taken, virtual schedule of port */
+ if (IS_TRACED_FL(p, F_TRACE_SCHED_PORTS)) {
+ trace_sched_ports_where(p, am_in, am_call);
+ }
+
+ if (erts_system_profile_flags.runnable_ports && !erts_port_is_scheduled(p)) {
+ profile_runnable_port(p, am_active);
+ }
+ size = erts_encode_ext_size(BIF_ARG_3);
+ if (size > sizeof(port_input))
+ bytes = erts_alloc(ERTS_ALC_T_PORT_CALL_BUF, size);
+
+ endp = bytes;
+ erts_encode_ext(BIF_ARG_3, &endp);
+
+ real_size = endp - bytes;
+ if (real_size > size) {
+ erl_exit(1, "%s, line %d: buffer overflow: %d word(s)\n",
+ __FILE__, __LINE__, endp - (bytes + size));
+ }
+ erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ prc = (char *) port_resp;
+ fpe_was_unmasked = erts_block_fpe();
+ ret = drv->call((ErlDrvData)p->drv_data,
+ (unsigned) op,
+ (char *) bytes,
+ (int) real_size,
+ &prc,
+ (int) sizeof(port_result),
+ &ret_flags);
+ erts_unblock_fpe(fpe_was_unmasked);
+ if (IS_TRACED_FL(p, F_TRACE_SCHED_PORTS)) {
+ trace_sched_ports_where(p, am_out, am_call);
+ }
+
+ if (erts_system_profile_flags.runnable_ports && !erts_port_is_scheduled(p)) {
+ profile_runnable_port(p, am_inactive);
+ }
+
+ port_resp = (byte *) prc;
+ p->caller = NIL;
+ erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
+#ifdef HARDDEBUG
+ {
+ int z;
+ printf("real_size = %ld,%d, ret = %d\r\n",real_size,
+ (int) real_size, ret);
+ printf("[");
+ for(z = 0; z < real_size; ++z) {
+ printf("%d, ",(int) bytes[z]);
+ }
+ printf("]\r\n");
+ printf("[");
+ for(z = 0; z < ret; ++z) {
+ printf("%d, ",(int) port_resp[z]);
+ }
+ printf("]\r\n");
+ }
+#endif
+ if (ret <= 0 || port_resp[0] != VERSION_MAGIC) {
+ /* Error, or a reply without magic or with wrong magic */
+ goto error;
+ }
+ result_size = erts_decode_ext_size(port_resp, ret, 0);
+ if (result_size < 0) {
+ goto error;
+ }
+ hp = HAlloc(BIF_P, result_size);
+ hp_end = hp + result_size;
+ endp = port_resp;
+ res = erts_decode_ext(&hp, &MSO(BIF_P), &endp);
+ if (res == THE_NON_VALUE) {
+ goto error;
+ }
+ HRelease(BIF_P, hp_end, hp);
+ if (port_resp != port_result && !(ret_flags & DRIVER_CALL_KEEP_BUFFER)) {
+ driver_free(port_resp);
+ }
+ if (bytes != &port_input[0])
+ erts_free(ERTS_ALC_T_PORT_CALL_BUF, bytes);
+ if (p)
+ erts_port_release(p);
+#ifdef ERTS_SMP
+ ERTS_SMP_BIF_CHK_PENDING_EXIT(BIF_P, ERTS_PROC_LOCK_MAIN);
+#else
+ ERTS_BIF_CHK_EXITED(BIF_P);
+#endif
+ if (IS_TRACED_FL(BIF_P, F_TRACE_SCHED_PROCS)) {
+ trace_virtual_sched(BIF_P, am_in);
+ }
+
+ if (erts_system_profile_flags.runnable_procs && erts_system_profile_flags.exclusive) {
+ profile_runnable_proc(BIF_P, am_active);
+ }
+
+ return res;
+}
+
+BIF_RETTYPE port_control_3(BIF_ALIST_3)
+{
+ Port* p;
+ Uint op;
+ Eterm res = THE_NON_VALUE;
+
+ /* Virtual schedule out calling process before lock wait */
+ if (IS_TRACED_FL(BIF_P, F_TRACE_SCHED_PROCS)) {
+ trace_virtual_sched(BIF_P, am_out);
+ }
+
+ if (erts_system_profile_flags.runnable_procs && erts_system_profile_flags.exclusive) {
+ profile_runnable_proc(BIF_P, am_inactive);
+ }
+
+ p = id_or_name2port(BIF_P, BIF_ARG_1);
+ if (!p) {
+ /* Schedule the process before exiting */
+ if (IS_TRACED_FL(BIF_P, F_TRACE_SCHED_PROCS)) {
+ trace_virtual_sched(BIF_P, am_in);
+ }
+
+ if (erts_system_profile_flags.runnable_procs && erts_system_profile_flags.exclusive) {
+ profile_runnable_proc(BIF_P, am_active);
+ }
+
+ BIF_ERROR(BIF_P, BADARG);
+ }
+
+ /* Trace the port for scheduling in */
+ if (IS_TRACED_FL(p, F_TRACE_SCHED_PORTS)) {
+ trace_sched_ports_where(p, am_in, am_control);
+ }
+
+ if (erts_system_profile_flags.runnable_ports && !erts_port_is_scheduled(p)) {
+ profile_runnable_port(p, am_active);
+ }
+
+ if (term_to_Uint(BIF_ARG_2, &op))
+ res = erts_port_control(BIF_P, p, op, BIF_ARG_3);
+
+ /* Trace the port for scheduling out */
+ if (IS_TRACED_FL(p, F_TRACE_SCHED_PORTS)) {
+ trace_sched_ports_where(p, am_out, am_control);
+ }
+
+ if (erts_system_profile_flags.runnable_ports && !erts_port_is_scheduled(p)) {
+ profile_runnable_port(p, am_inactive);
+ }
+
+ erts_port_release(p);
+#ifdef ERTS_SMP
+ ERTS_SMP_BIF_CHK_PENDING_EXIT(BIF_P, ERTS_PROC_LOCK_MAIN);
+#else
+ ERTS_BIF_CHK_EXITED(BIF_P);
+#endif
+
+ if (IS_TRACED_FL(BIF_P, F_TRACE_SCHED_PROCS)) {
+ trace_virtual_sched(BIF_P, am_in);
+ }
+
+ if (erts_system_profile_flags.runnable_procs && erts_system_profile_flags.exclusive) {
+ profile_runnable_proc(BIF_P, am_active);
+ }
+
+ if (is_non_value(res)) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+ BIF_RET(res);
+}
+
+BIF_RETTYPE port_close_1(BIF_ALIST_1)
+{
+ Port* p;
+ erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ p = id_or_name2port(NULL, BIF_ARG_1);
+ if (!p) {
+ erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ BIF_ERROR(BIF_P, BADARG);
+ }
+ erts_do_exit_port(p, p->connected, am_normal);
+ /* if !ERTS_SMP: since we terminate the port with reason 'normal',
+    we SHOULD never get an exit signal ourselves
+ */
+ erts_port_release(p);
+ erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ BIF_RET(am_true);
+}
+
+BIF_RETTYPE port_connect_2(BIF_ALIST_2)
+{
+ Port* prt;
+ Process* rp;
+ Eterm pid = BIF_ARG_2;
+
+ if (is_not_internal_pid(pid)) {
+ error:
+ BIF_ERROR(BIF_P, BADARG);
+ }
+ prt = id_or_name2port(BIF_P, BIF_ARG_1);
+ if (!prt) {
+ goto error;
+ }
+
+ rp = erts_pid2proc(BIF_P, ERTS_PROC_LOCK_MAIN,
+ pid, ERTS_PROC_LOCK_LINK);
+ if (!rp) {
+ erts_smp_port_unlock(prt);
+ ERTS_SMP_ASSERT_IS_NOT_EXITING(BIF_P);
+ goto error;
+ }
+
+ erts_add_link(&(rp->nlinks), LINK_PID, prt->id);
+ erts_add_link(&(prt->nlinks), LINK_PID, pid);
+
+ erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK);
+
+ prt->connected = pid; /* internal pid */
+ erts_smp_port_unlock(prt);
+ BIF_RET(am_true);
+}
+
+BIF_RETTYPE port_set_data_2(BIF_ALIST_2)
+{
+ Port* prt;
+ Eterm portid = BIF_ARG_1;
+ Eterm data = BIF_ARG_2;
+
+ prt = id_or_name2port(BIF_P, portid);
+ if (!prt) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+ if (prt->bp != NULL) {
+ free_message_buffer(prt->bp);
+ prt->bp = NULL;
+ }
+ if (IS_CONST(data)) {
+ prt->data = data;
+ } else {
+ Uint size;
+ ErlHeapFragment* bp;
+ Eterm* hp;
+
+ size = size_object(data);
+ prt->bp = bp = new_message_buffer(size);
+ hp = bp->mem;
+ prt->data = copy_struct(data, size, &hp, &bp->off_heap);
+ }
+ erts_smp_port_unlock(prt);
+ BIF_RET(am_true);
+}
+
+
+BIF_RETTYPE port_get_data_1(BIF_ALIST_1)
+{
+ BIF_RETTYPE res;
+ Port* prt;
+ Eterm portid = BIF_ARG_1;
+
+ prt = id_or_name2port(BIF_P, portid);
+ if (!prt) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+ if (prt->bp == NULL) { /* MUST be CONST! */
+ res = prt->data;
+ } else {
+ Eterm* hp = HAlloc(BIF_P, prt->bp->size);
+ res = copy_struct(prt->data, prt->bp->size, &hp, &MSO(BIF_P));
+ }
+ erts_smp_port_unlock(prt);
+ BIF_RET(res);
+}
+
+/*
+ * Open a port. Most of the work is done in the file io.c rather
+ * than here.
+ * Error returns: -1 or -2 returned from open_driver (-2 implies
+ * that *err_nump contains the error code; -1 means we do not really
+ * know what happened), and -3 if argument parsing failed or we are
+ * out of ports (*err_nump should then contain either BADARG or
+ * SYSTEM_LIMIT).
+ */
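+/*
+ * A minimal caller sketch (hypothetical; the real BIF wrappers that
+ * call open_port() appear elsewhere in this file):
+ *
+ *   int err_num, port_num;
+ *   port_num = open_port(p, name, settings, &err_num);
+ *   if (port_num == -3)
+ *       BIF_ERROR(p, err_num);          (BADARG or SYSTEM_LIMIT)
+ *   else if (port_num == -2)
+ *       ... err_num holds the driver's error code ...
+ *   else if (port_num == -1)
+ *       ... unknown failure ...
+ */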
+
+static int
+open_port(Process* p, Eterm name, Eterm settings, int *err_nump)
+{
+#define OPEN_PORT_ERROR(VAL) do { port_num = (VAL); goto do_return; } while (0)
+ int i, port_num;
+ Eterm option;
+ Uint arity;
+ Eterm* tp;
+ Uint* nargs;
+ erts_driver_t* driver;
+ char* name_buf = NULL;
+ SysDriverOpts opts;
+ int binary_io;
+ int soft_eof;
+ Sint linebuf;
+ byte dir[MAXPATHLEN];
+
+ /* These are the defaults */
+ opts.packet_bytes = 0;
+ opts.use_stdio = 1;
+ opts.redir_stderr = 0;
+ opts.read_write = 0;
+ opts.hide_window = 0;
+ opts.wd = NULL;
+ opts.envir = NULL;
+ opts.exit_status = 0;
+ opts.overlapped_io = 0;
+ opts.spawn_type = ERTS_SPAWN_ANY;
+ opts.argv = NULL;
+ binary_io = 0;
+ soft_eof = 0;
+ linebuf = 0;
+
+ *err_nump = 0;
+
+ if (is_not_list(settings) && is_not_nil(settings)) {
+ goto badarg;
+ }
+ /*
+ * Parse the settings.
+ */
+
+ if (is_not_nil(settings)) {
+ nargs = list_val(settings);
+ while (1) {
+ if (is_tuple_arity(*nargs, 2)) {
+ tp = tuple_val(*nargs);
+ arity = *tp++;
+ option = *tp++;
+ if (option == am_packet) {
+ if (is_not_small(*tp)) {
+ goto badarg;
+ }
+ opts.packet_bytes = signed_val(*tp);
+ switch (opts.packet_bytes) {
+ case 1:
+ case 2:
+ case 4:
+ break;
+ default:
+ goto badarg;
+ }
+ } else if (option == am_line) {
+ if (is_not_small(*tp)) {
+ goto badarg;
+ }
+ linebuf = signed_val(*tp);
+ if (linebuf <= 0) {
+ goto badarg;
+ }
+ } else if (option == am_env) {
+ byte* bytes;
+ if ((bytes = convert_environment(p, *tp)) == NULL) {
+ goto badarg;
+ }
+ opts.envir = (char *) bytes;
+ } else if (option == am_args) {
+ char **av;
+ char **oav = opts.argv;
+ if ((av = convert_args(*tp)) == NULL) {
+ goto badarg;
+ }
+ opts.argv = av;
+ if (oav) {
+ opts.argv[0] = oav[0];
+ oav[0] = erts_default_arg0;
+ free_args(oav);
+ }
+
+ } else if (option == am_arg0) {
+ char *a0;
+ int n;
+ if (is_nil(*tp)) {
+ n = 0;
+ } else if( (n = is_string(*tp)) == 0) {
+ goto badarg;
+ }
+ a0 = (char *) erts_alloc(ERTS_ALC_T_TMP,
+ (n + 1) * sizeof(byte));
+ if (intlist_to_buf(*tp, a0, n) != n) {
+ erl_exit(1, "%s:%d: Internal error\n",
+ __FILE__, __LINE__);
+ }
+ a0[n] = '\0';
+ if (opts.argv == NULL) {
+ opts.argv = erts_alloc(ERTS_ALC_T_TMP,
+ 2 * sizeof(char **));
+ opts.argv[0] = a0;
+ opts.argv[1] = NULL;
+ } else {
+ if (opts.argv[0] != erts_default_arg0) {
+ erts_free(ERTS_ALC_T_TMP, opts.argv[0]);
+ }
+ opts.argv[0] = a0;
+ }
+ } else if (option == am_cd) {
+ Eterm iolist;
+ Eterm heap[4];
+ int r;
+
+ heap[0] = *tp;
+ heap[1] = make_list(heap+2);
+ heap[2] = make_small(0);
+ heap[3] = NIL;
+ iolist = make_list(heap);
+ r = io_list_to_buf(iolist, (char*) dir, MAXPATHLEN);
+ if (r < 0) {
+ goto badarg;
+ }
+ opts.wd = (char *) dir;
+ } else {
+ goto badarg;
+ }
+ } else if (*nargs == am_stream) {
+ opts.packet_bytes = 0;
+ } else if (*nargs == am_use_stdio) {
+ opts.use_stdio = 1;
+ } else if (*nargs == am_stderr_to_stdout) {
+ opts.redir_stderr = 1;
+ } else if (*nargs == am_line) {
+ linebuf = 512;
+ } else if (*nargs == am_nouse_stdio) {
+ opts.use_stdio = 0;
+ } else if (*nargs == am_binary) {
+ binary_io = 1;
+ } else if (*nargs == am_in) {
+ opts.read_write |= DO_READ;
+ } else if (*nargs == am_out) {
+ opts.read_write |= DO_WRITE;
+ } else if (*nargs == am_eof) {
+ soft_eof = 1;
+ } else if (*nargs == am_hide) {
+ opts.hide_window = 1;
+ } else if (*nargs == am_exit_status) {
+ opts.exit_status = 1;
+ } else if (*nargs == am_overlapped_io) {
+ opts.overlapped_io = 1;
+ } else {
+ goto badarg;
+ }
+ if (is_nil(*++nargs))
+ break;
+ if (is_not_list(*nargs)) {
+ goto badarg;
+ }
+ nargs = list_val(*nargs);
+ }
+ }
+ if (opts.read_write == 0) /* implement default */
+ opts.read_write = DO_READ|DO_WRITE;
+
+ /* Mutually exclusive arguments. */
+ if((linebuf && opts.packet_bytes) ||
+ (opts.redir_stderr && !opts.use_stdio)) {
+ goto badarg;
+ }
+
+ /*
+ * Parse the first argument and start the appropriate driver.
+ */
+
+ if (is_atom(name) || (i = is_string(name))) {
+ /* a vanilla port */
+ if (is_atom(name)) {
+ name_buf = (char *) erts_alloc(ERTS_ALC_T_TMP,
+ atom_tab(atom_val(name))->len+1);
+ sys_memcpy((void *) name_buf,
+ (void *) atom_tab(atom_val(name))->name,
+ atom_tab(atom_val(name))->len);
+ name_buf[atom_tab(atom_val(name))->len] = '\0';
+ } else {
+ name_buf = (char *) erts_alloc(ERTS_ALC_T_TMP, i + 1);
+ if (intlist_to_buf(name, name_buf, i) != i)
+ erl_exit(1, "%s:%d: Internal error\n", __FILE__, __LINE__);
+ name_buf[i] = '\0';
+ }
+ driver = &vanilla_driver;
+ } else {
+ if (is_not_tuple(name)) {
+ goto badarg; /* Not a process or fd port */
+ }
+ tp = tuple_val(name);
+ arity = *tp++;
+
+ if (arity == make_arityval(0)) {
+ goto badarg;
+ }
+
+ if (*tp == am_spawn || *tp == am_spawn_driver) { /* A process port */
+ if (arity != make_arityval(2)) {
+ goto badarg;
+ }
+ name = tp[1];
+ if (is_atom(name)) {
+ name_buf = (char *) erts_alloc(ERTS_ALC_T_TMP,
+ atom_tab(atom_val(name))->len+1);
+ sys_memcpy((void *) name_buf,
+ (void *) atom_tab(atom_val(name))->name,
+ atom_tab(atom_val(name))->len);
+ name_buf[atom_tab(atom_val(name))->len] = '\0';
+ } else if ((i = is_string(name))) {
+ name_buf = (char *) erts_alloc(ERTS_ALC_T_TMP, i + 1);
+ if (intlist_to_buf(name, name_buf, i) != i)
+ erl_exit(1, "%s:%d: Internal error\n", __FILE__, __LINE__);
+ name_buf[i] = '\0';
+ } else {
+ goto badarg;
+ }
+ if (*tp == am_spawn_driver) {
+ opts.spawn_type = ERTS_SPAWN_DRIVER;
+ }
+ driver = &spawn_driver;
+ } else if (*tp == am_spawn_executable) { /* A program */
+ /*
+ * {spawn_executable,Progname}
+ */
+
+ if (arity != make_arityval(2)) {
+ goto badarg;
+ }
+ name = tp[1];
+ if (is_atom(name)) {
+ name_buf = (char *) erts_alloc(ERTS_ALC_T_TMP,
+ atom_tab(atom_val(name))->len+1);
+ sys_memcpy((void *) name_buf,
+ (void *) atom_tab(atom_val(name))->name,
+ atom_tab(atom_val(name))->len);
+ name_buf[atom_tab(atom_val(name))->len] = '\0';
+ } else if ((i = is_string(name))) {
+ name_buf = (char *) erts_alloc(ERTS_ALC_T_TMP, i + 1);
+ if (intlist_to_buf(name, name_buf, i) != i)
+ erl_exit(1, "%s:%d: Internal error\n", __FILE__, __LINE__);
+ name_buf[i] = '\0';
+ } else {
+ goto badarg;
+ }
+ opts.spawn_type = ERTS_SPAWN_EXECUTABLE;
+ driver = &spawn_driver;
+ } else if (*tp == am_fd) { /* An fd port */
+ int n;
+ struct Sint_buf sbuf;
+ char* p;
+
+ if (arity != make_arityval(3)) {
+ goto badarg;
+ }
+ if (is_not_small(tp[1]) || is_not_small(tp[2])) {
+ goto badarg;
+ }
+ opts.ifd = unsigned_val(tp[1]);
+ opts.ofd = unsigned_val(tp[2]);
+
+ /* Synthesize the name from the input and output descriptors. */
+ name_buf = erts_alloc(ERTS_ALC_T_TMP,
+ 2*sizeof(struct Sint_buf) + 2);
+ p = Sint_to_buf(opts.ifd, &sbuf);
+ n = sys_strlen(p);
+ sys_strncpy(name_buf, p, n);
+ name_buf[n] = '/';
+ p = Sint_to_buf(opts.ofd, &sbuf);
+ sys_strcpy(name_buf+n+1, p);
+
+ driver = &fd_driver;
+ } else {
+ goto badarg;
+ }
+ }
+
+ if ((driver != &spawn_driver && opts.argv != NULL) ||
+ (driver == &spawn_driver &&
+ opts.spawn_type != ERTS_SPAWN_EXECUTABLE &&
+ opts.argv != NULL)) {
+ /* Argument vector only if explicit spawn_executable */
+ goto badarg;
+ }
+
+
+ if (driver != &spawn_driver && opts.exit_status) {
+ goto badarg;
+ }
+
+ if (IS_TRACED_FL(p, F_TRACE_SCHED_PROCS)) {
+ trace_virtual_sched(p, am_out);
+ }
+
+
+ erts_smp_proc_unlock(p, ERTS_PROC_LOCK_MAIN);
+
+ port_num = erts_open_driver(driver, p->id, name_buf, &opts, err_nump);
+
+ erts_smp_proc_lock(p, ERTS_PROC_LOCK_MAIN);
+
+ if (port_num < 0) {
+ DEBUGF(("open_driver returned %d(%d)\n", port_num, *err_nump));
+ if (IS_TRACED_FL(p, F_TRACE_SCHED_PROCS)) {
+ trace_virtual_sched(p, am_in);
+ }
+ OPEN_PORT_ERROR(port_num);
+ }
+
+ if (IS_TRACED_FL(p, F_TRACE_SCHED_PROCS)) {
+ trace_virtual_sched(p, am_in);
+ }
+
+ if (binary_io) {
+ erts_port_status_bor_set(&erts_port[port_num],
+ ERTS_PORT_SFLG_BINARY_IO);
+ }
+ if (soft_eof) {
+ erts_port_status_bor_set(&erts_port[port_num],
+ ERTS_PORT_SFLG_SOFT_EOF);
+ }
+ if (linebuf && erts_port[port_num].linebuf == NULL){
+ erts_port[port_num].linebuf = allocate_linebuf(linebuf);
+ erts_port_status_bor_set(&erts_port[port_num],
+ ERTS_PORT_SFLG_LINEBUF_IO);
+ }
+
+ do_return:
+ if (name_buf)
+ erts_free(ERTS_ALC_T_TMP, (void *) name_buf);
+ if (opts.argv) {
+ free_args(opts.argv);
+ }
+ return port_num;
+
+ badarg:
+ *err_nump = BADARG;
+ OPEN_PORT_ERROR(-3);
+ goto do_return;
+#undef OPEN_PORT_ERROR
+}
+
+static char **convert_args(Eterm l)
+{
+ char **pp;
+ char *b;
+ int n;
+ int i = 0;
+ Eterm str;
+ /* We require at least one element in the list (argv[0]). */
+ if (is_not_list(l) && is_not_nil(l)) {
+ return NULL;
+ }
+ n = list_length(l);
+ pp = erts_alloc(ERTS_ALC_T_TMP, (n + 2) * sizeof(char **));
+ pp[i++] = erts_default_arg0;
+ while (is_list(l)) {
+ str = CAR(list_val(l));
+
+ if (is_nil(str)) {
+ n = 0;
+ } else if( (n = is_string(str)) == 0) {
+ /* Not a string... */
+ int j;
+ for (j = 1; j < i; ++j)
+ erts_free(ERTS_ALC_T_TMP, pp[j]);
+ erts_free(ERTS_ALC_T_TMP, pp);
+ return NULL;
+ }
+ b = (char *) erts_alloc(ERTS_ALC_T_TMP, (n + 1) * sizeof(byte));
+ pp[i++] = (char *) b;
+ if (intlist_to_buf(str, b, n) != n)
+ erl_exit(1, "%s:%d: Internal error\n", __FILE__, __LINE__);
+ b[n] = '\0';
+ l = CDR(list_val(l));
+ }
+ pp[i] = NULL;
+ return pp;
+}
+
+static void free_args(char **av)
+{
+ int i;
+ if (av == NULL)
+ return;
+ for (i = 0; av[i] != NULL; ++i) {
+ if (av[i] != erts_default_arg0) {
+ erts_free(ERTS_ALC_T_TMP, av[i]);
+ }
+ }
+ erts_free(ERTS_ALC_T_TMP, av);
+}
+
+
+static byte* convert_environment(Process* p, Eterm env)
+{
+ Eterm all;
+ Eterm* temp_heap;
+ Eterm* hp;
+ Uint heap_size;
+ int n;
+ byte* bytes;
+
+ if ((n = list_length(env)) < 0) {
+ return NULL;
+ }
+ heap_size = 2*(5*n+1);
+ temp_heap = hp = (Eterm *) erts_alloc(ERTS_ALC_T_TMP, heap_size*sizeof(Eterm));
+ bytes = NULL; /* Indicating error */
+
+ /*
+ * All errors below are handled by jumping to 'done', to ensure that the memory
+ * gets deallocated. Do NOT return directly from this function.
+ */
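+
+ /*
+  * Layout sketch of the buffer built below (informal, deduced from the
+  * cons operations that follow): each {Key,Value} tuple becomes the
+  * string "Key=Value" followed by a NUL, entries end up in reverse list
+  * order, and the whole block is terminated by one extra NUL:
+  *
+  *   "KeyN=ValN\0 ... Key1=Val1\0\0"
+  *
+  * A {Key,false} tuple yields just "Key=\0".
+  */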
+
+ all = CONS(hp, make_small(0), NIL);
+ hp += 2;
+
+ while(is_list(env)) {
+ Eterm tmp;
+ Eterm* tp;
+
+ tmp = CAR(list_val(env));
+ if (is_not_tuple_arity(tmp, 2)) {
+ goto done;
+ }
+ tp = tuple_val(tmp);
+ tmp = CONS(hp, make_small(0), NIL);
+ hp += 2;
+ if (tp[2] != am_false) {
+ tmp = CONS(hp, tp[2], tmp);
+ hp += 2;
+ }
+ tmp = CONS(hp, make_small('='), tmp);
+ hp += 2;
+ tmp = CONS(hp, tp[1], tmp);
+ hp += 2;
+ all = CONS(hp, tmp, all);
+ hp += 2;
+ env = CDR(list_val(env));
+ }
+ if (is_not_nil(env)) {
+ goto done;
+ }
+ if ((n = io_list_len(all)) < 0) {
+ goto done;
+ }
+
+ /*
+     * Put the result in a binary (no risk of a memory leak that way).
+ */
+ (void) erts_new_heap_binary(p, NULL, n, &bytes);
+ io_list_to_buf(all, (char*)bytes, n);
+
+ done:
+ erts_free(ERTS_ALC_T_TMP, temp_heap);
+ return bytes;
+}
+
+/* ------------ decode_packet() and friends: */
+
+struct packet_callback_args
+{
+ Process* p; /* In */
+ Eterm res; /* Out */
+    int string_as_bin;    /* return strings as binaries (http_bin) */
+ byte* aligned_ptr;
+ Eterm orig;
+ Uint bin_offs;
+ byte bin_bitoffs;
+};
+
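+/*
+ * Build a string either as a list (http) or as a binary (http_bin).
+ * In the binary case, a sub-binary into the original message is used
+ * whenever that is smaller than copying into a heap binary, i.e. when
+ * heap_bin_size(len) > ERL_SUB_BIN_SIZE.
+ */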
+static Eterm
+http_bld_string(struct packet_callback_args* pca, Uint **hpp, Uint *szp,
+ const char *str, Sint len)
+{
+ Eterm res = THE_NON_VALUE;
+ Uint size;
+
+ if (pca->string_as_bin) {
+ size = heap_bin_size(len);
+
+ if (szp) {
+ *szp += (size > ERL_SUB_BIN_SIZE) ? ERL_SUB_BIN_SIZE : size;
+ }
+ if (hpp) {
+ res = make_binary(*hpp);
+ if (size > ERL_SUB_BIN_SIZE) {
+ ErlSubBin* bin = (ErlSubBin*) *hpp;
+ bin->thing_word = HEADER_SUB_BIN;
+ bin->size = len;
+ bin->offs = pca->bin_offs + ((byte*)str - pca->aligned_ptr);
+ bin->orig = pca->orig;
+ bin->bitoffs = pca->bin_bitoffs;
+ bin->bitsize = 0;
+ bin->is_writable = 0;
+ *hpp += ERL_SUB_BIN_SIZE;
+ }
+ else {
+ ErlHeapBin* bin = (ErlHeapBin*) *hpp;
+ bin->thing_word = header_heap_bin(len);
+ bin->size = len;
+ memcpy(bin->data, str, len);
+ *hpp += size;
+ }
+ }
+ }
+ else {
+ res = erts_bld_string_n(hpp, szp, str, len);
+ }
+ return res;
+}
+
+static int http_response_erl(void *arg, int major, int minor,
+ int status, const char* phrase, int phrase_len)
+{
+ /* {http_response,{Major,Minor},Status,"Phrase"} */
+ struct packet_callback_args* pca = (struct packet_callback_args*) arg;
+ Eterm phrase_term, ver;
+ Uint hsize = 3 + 5;
+ Eterm* hp;
+#ifdef DEBUG
+ Eterm* hend;
+#endif
+
+ http_bld_string(pca, NULL, &hsize, phrase, phrase_len);
+ hp = HAlloc(pca->p, hsize);
+#ifdef DEBUG
+ hend = hp + hsize;
+#endif
+ phrase_term = http_bld_string(pca, &hp, NULL, phrase, phrase_len);
+ ver = TUPLE2(hp, make_small(major), make_small(minor));
+ hp += 3;
+ pca->res = TUPLE4(hp, am_http_response, ver, make_small(status), phrase_term);
+ ASSERT(hp+5==hend);
+ return 1;
+}
+
+static Eterm http_bld_uri(struct packet_callback_args* pca,
+ Eterm** hpp, Uint* szp, const PacketHttpURI* uri)
+{
+ Eterm s1, s2;
+ if (uri->type == URI_STAR) {
+ return am_Times; /* '*' */
+ }
+
+ s1 = http_bld_string(pca, hpp, szp, uri->s1_ptr, uri->s1_len);
+
+ switch (uri->type) {
+ case URI_ABS_PATH:
+ return erts_bld_tuple(hpp, szp, 2, am_abs_path, s1);
+ case URI_HTTP:
+ case URI_HTTPS:
+ s2 = http_bld_string(pca, hpp, szp, uri->s2_ptr, uri->s2_len);
+ return erts_bld_tuple
+ (hpp, szp, 5, am_absoluteURI,
+ ((uri->type==URI_HTTP) ? am_http : am_https),
+ s1,
+ ((uri->port==0) ? am_undefined : make_small(uri->port)),
+ s2);
+
+ case URI_STRING:
+ return s1;
+ case URI_SCHEME:
+ s2 = http_bld_string(pca, hpp, szp, uri->s2_ptr, uri->s2_len);
+ return erts_bld_tuple(hpp, szp, 3, am_scheme, s1, s2);
+
+ default:
+ erl_exit(1, "%s, line %d: type=%u\n", __FILE__, __LINE__, uri->type);
+ }
+}
+
+static int http_request_erl(void* arg, const http_atom_t* meth,
+ const char* meth_ptr, int meth_len,
+ const PacketHttpURI* uri, int major, int minor)
+{
+ struct packet_callback_args* pca = (struct packet_callback_args*) arg;
+ Eterm meth_term, uri_term, ver_term;
+ Uint sz = 0;
+ Uint* szp = &sz;
+ Eterm* hp;
+ Eterm** hpp = NULL;
+
+ /* {http_request,Meth,Uri,Version} */
+
+ for (;;) {
+ meth_term = (meth!=NULL) ? meth->atom :
+ http_bld_string(pca, hpp, szp, meth_ptr, meth_len);
+ uri_term = http_bld_uri(pca, hpp, szp, uri);
+ ver_term = erts_bld_tuple(hpp, szp, 2,
+ make_small(major), make_small(minor));
+ pca->res = erts_bld_tuple(hpp, szp, 4, am_http_request, meth_term,
+ uri_term, ver_term);
+ if (hpp != NULL) break;
+ hpp = &hp;
+ hp = HAlloc(pca->p, sz);
+ szp = NULL;
+ }
+ return 1;
+}
+
+static int
+http_header_erl(void* arg, const http_atom_t* name, const char* name_ptr,
+ int name_len, const char* value_ptr, int value_len)
+{
+ struct packet_callback_args* pca = (struct packet_callback_args*) arg;
+ Eterm bit_term, name_term, val_term;
+ Uint sz = 6;
+ Eterm* hp;
+#ifdef DEBUG
+ Eterm* hend;
+#endif
+
+ /* {http_header,Bit,Name,IValue,Value} */
+
+ if (name == NULL) {
+ http_bld_string(pca, NULL, &sz, name_ptr, name_len);
+ }
+ http_bld_string(pca, NULL, &sz, value_ptr, value_len);
+
+ hp = HAlloc(pca->p, sz);
+#ifdef DEBUG
+ hend = hp + sz;
+#endif
+
+ if (name != NULL) {
+ bit_term = make_small(name->index+1);
+ name_term = name->atom;
+ }
+ else {
+ bit_term = make_small(0);
+ name_term = http_bld_string(pca, &hp,NULL,name_ptr,name_len);
+ }
+
+ val_term = http_bld_string(pca, &hp, NULL, value_ptr, value_len);
+ pca->res = TUPLE5(hp, am_http_header, bit_term, name_term, am_undefined, val_term);
+ ASSERT(hp+6==hend);
+ return 1;
+}
+
+static int http_eoh_erl(void* arg)
+{
+ /* http_eoh */
+ struct packet_callback_args* pca = (struct packet_callback_args*) arg;
+ pca->res = am_http_eoh;
+ return 1;
+}
+
+static int http_error_erl(void* arg, const char* buf, int len)
+{
+ /* {http_error,Line} */
+ struct packet_callback_args* pca = (struct packet_callback_args*) arg;
+ Uint sz = 3;
+ Eterm* hp;
+#ifdef DEBUG
+ Eterm* hend;
+#endif
+
+ http_bld_string(pca, NULL, &sz, buf, len);
+
+ hp = HAlloc(pca->p, sz);
+#ifdef DEBUG
+ hend = hp + sz;
+#endif
+ pca->res = erts_bld_tuple(&hp, NULL, 2, am_http_error,
+ http_bld_string(pca, &hp, NULL, buf, len));
+ ASSERT(hp==hend);
+ return 1;
+}
+
+static
+int ssl_tls_erl(void* arg, unsigned type, unsigned major, unsigned minor,
+ const char* buf, int len, const char* prefix, int plen)
+{
+ struct packet_callback_args* pca = (struct packet_callback_args*) arg;
+ Eterm* hp;
+ Eterm ver;
+ Eterm bin = new_binary(pca->p, NULL, plen+len);
+ byte* bin_ptr = binary_bytes(bin);
+
+ memcpy(bin_ptr+plen, buf, len);
+ if (plen) {
+ memcpy(bin_ptr, prefix, plen);
+ }
+
+ /* {ssl_tls,NIL,ContentType,{Major,Minor},Bin} */
+ hp = HAlloc(pca->p, 3+6);
+ ver = TUPLE2(hp, make_small(major), make_small(minor));
+ hp += 3;
+ pca->res = TUPLE5(hp, am_ssl_tls, NIL, make_small(type), ver, bin);
+ return 1;
+}
+
+
+PacketCallbacks packet_callbacks_erl = {
+ http_response_erl,
+ http_request_erl,
+ http_eoh_erl,
+ http_header_erl,
+ http_error_erl,
+ ssl_tls_erl
+};
+
+/*
+ decode_packet(Type,Bin,Options)
+ Returns:
+     {ok, Packet, RestBin}    where Packet is the body binary, or a
+                              parsed term for the http and ssl_tls types
+     {more, PacketSz | undefined}
+     {error, invalid}
+*/
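+/*
+ * Illustrative Erlang-level calls (a sketch, not part of this file):
+ *
+ *   erlang:decode_packet(2, <<0,3,"abcXYZ">>, [])
+ *       -> {ok,<<"abc">>,<<"XYZ">>}
+ *   erlang:decode_packet(2, <<0,3,"ab">>, [])
+ *       -> {more,5}
+ *   erlang:decode_packet(http, <<"GET / HTTP/1.1\r\n\r\n">>, [])
+ *       -> {ok,{http_request,'GET',{abs_path,"/"},{1,1}},<<"\r\n">>}
+ */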
+BIF_RETTYPE decode_packet_3(BIF_ALIST_3)
+{
+ unsigned max_plen = 0; /* Packet max length, 0=no limit */
+ unsigned trunc_len = 0; /* Truncate lines if longer, 0=no limit */
+ int http_state = 0; /* 0=request/response 1=header */
+ int packet_sz; /*-------Binaries involved: ------------------*/
+ byte* bin_ptr; /*| orig: original binary */
+ byte bin_bitsz; /*| bin: BIF_ARG_2, may be sub-binary of orig */
+ Uint bin_sz; /*| packet: prefix of bin */
+ char* body_ptr; /*| body: part of packet to return */
+ int body_sz; /*| rest: bin without packet */
+ struct packet_callback_args pca;
+ enum PacketParseType type;
+ Eterm* hp;
+ Eterm* hend;
+ ErlSubBin* rest;
+ Eterm res;
+ Eterm options;
+ int code;
+
+ if (!is_binary(BIF_ARG_2) ||
+ (!is_list(BIF_ARG_3) && !is_nil(BIF_ARG_3))) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+ switch (BIF_ARG_1) {
+ case make_small(0): case am_raw: type = TCP_PB_RAW; break;
+ case make_small(1): type = TCP_PB_1; break;
+ case make_small(2): type = TCP_PB_2; break;
+ case make_small(4): type = TCP_PB_4; break;
+ case am_asn1: type = TCP_PB_ASN1; break;
+ case am_sunrm: type = TCP_PB_RM; break;
+ case am_cdr: type = TCP_PB_CDR; break;
+ case am_fcgi: type = TCP_PB_FCGI; break;
+ case am_line: type = TCP_PB_LINE_LF; break;
+ case am_tpkt: type = TCP_PB_TPKT; break;
+ case am_http: type = TCP_PB_HTTP; break;
+ case am_httph: type = TCP_PB_HTTPH; break;
+ case am_http_bin: type = TCP_PB_HTTP_BIN; break;
+ case am_httph_bin: type = TCP_PB_HTTPH_BIN; break;
+ case am_ssl_tls: type = TCP_PB_SSL_TLS; break;
+ default:
+ BIF_ERROR(BIF_P, BADARG);
+ }
+
+ options = BIF_ARG_3;
+ while (!is_nil(options)) {
+ Eterm* cons = list_val(options);
+ if (is_tuple(CAR(cons))) {
+ Eterm* tpl = tuple_val(CAR(cons));
+ Uint val;
+ if (tpl[0] == make_arityval(2) &&
+ term_to_Uint(tpl[2],&val) && val <= UINT_MAX) {
+ switch (tpl[1]) {
+ case am_packet_size:
+ max_plen = val;
+ goto next_option;
+ case am_line_length:
+ trunc_len = val;
+ goto next_option;
+ }
+ }
+ }
+ BIF_ERROR(BIF_P, BADARG);
+
+ next_option:
+ options = CDR(cons);
+ }
+
+
+ bin_sz = binary_size(BIF_ARG_2);
+ ERTS_GET_BINARY_BYTES(BIF_ARG_2, bin_ptr, pca.bin_bitoffs, bin_bitsz);
+ if (pca.bin_bitoffs != 0) {
+ pca.aligned_ptr = erts_alloc(ERTS_ALC_T_TMP, bin_sz);
+ erts_copy_bits(bin_ptr, pca.bin_bitoffs, 1, pca.aligned_ptr, 0, 1, bin_sz*8);
+ }
+ else {
+ pca.aligned_ptr = bin_ptr;
+ }
+ packet_sz = packet_get_length(type, (char*)pca.aligned_ptr, bin_sz,
+ max_plen, trunc_len, &http_state);
+ if (!(packet_sz > 0 && packet_sz <= bin_sz)) {
+ if (packet_sz < 0) {
+ goto error;
+ }
+ else { /* not enough data */
+ Eterm plen = (packet_sz==0) ? am_undefined :
+ erts_make_integer(packet_sz, BIF_P);
+ Eterm* hp = HAlloc(BIF_P,3);
+ res = TUPLE2(hp, am_more, plen);
+ goto done;
+ }
+ }
+ /* We got a whole packet */
+
+ body_ptr = (char*) pca.aligned_ptr;
+ body_sz = packet_sz;
+ packet_get_body(type, (const char**) &body_ptr, &body_sz);
+
+ ERTS_GET_REAL_BIN(BIF_ARG_2, pca.orig, pca.bin_offs, pca.bin_bitoffs, bin_bitsz);
+ pca.p = BIF_P;
+ pca.res = THE_NON_VALUE;
+ pca.string_as_bin = (type == TCP_PB_HTTP_BIN || type == TCP_PB_HTTPH_BIN);
+ code = packet_parse(type, (char*)pca.aligned_ptr, packet_sz, &http_state,
+ &packet_callbacks_erl, &pca);
+ if (code == 0) { /* no special packet parsing, make plain binary */
+ ErlSubBin* body;
+ Uint hsz = 2*ERL_SUB_BIN_SIZE + 4;
+ hp = HAlloc(BIF_P, hsz);
+ hend = hp + hsz;
+
+ body = (ErlSubBin *) hp;
+ body->thing_word = HEADER_SUB_BIN;
+ body->size = body_sz;
+ body->offs = pca.bin_offs + (body_ptr - (char*)pca.aligned_ptr);
+ body->orig = pca.orig;
+ body->bitoffs = pca.bin_bitoffs;
+ body->bitsize = 0;
+ body->is_writable = 0;
+ hp += ERL_SUB_BIN_SIZE;
+ pca.res = make_binary(body);
+ }
+ else if (code > 0) {
+ Uint hsz = ERL_SUB_BIN_SIZE + 4;
+ ASSERT(pca.res != THE_NON_VALUE);
+ hp = HAlloc(BIF_P, hsz);
+ hend = hp + hsz;
+ }
+ else {
+error:
+ hp = HAlloc(BIF_P,3);
+ res = TUPLE2(hp, am_error, am_invalid);
+ goto done;
+ }
+
+ rest = (ErlSubBin *) hp;
+ rest->thing_word = HEADER_SUB_BIN;
+ rest->size = bin_sz - packet_sz;
+ rest->offs = pca.bin_offs + packet_sz;
+ rest->orig = pca.orig;
+ rest->bitoffs = pca.bin_bitoffs;
+ rest->bitsize = bin_bitsz; /* The extra bits go into the rest. */
+ rest->is_writable = 0;
+ hp += ERL_SUB_BIN_SIZE;
+ res = TUPLE3(hp, am_ok, pca.res, make_binary(rest));
+ hp += 4;
+ ASSERT(hp==hend); (void)hend;
+
+done:
+ if (pca.aligned_ptr != bin_ptr) {
+ erts_free(ERTS_ALC_T_TMP, pca.aligned_ptr);
+ }
+ BIF_RET(res);
+}
+
diff --git a/erts/emulator/beam/erl_bif_re.c b/erts/emulator/beam/erl_bif_re.c
new file mode 100644
index 0000000000..16abab65b0
--- /dev/null
+++ b/erts/emulator/beam/erl_bif_re.c
@@ -0,0 +1,1142 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2008-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+#include "sys.h"
+#include "erl_vm.h"
+#include "global.h"
+#include "erl_process.h"
+#include "error.h"
+#include "bif.h"
+#include "erl_binary.h"
+#include "big.h"
+#define ERLANG_INTEGRATION 1
+#define PCRE_STATIC
+#include "pcre.h"
+
+#define PCRE_DEFAULT_COMPILE_OPTS 0
+#define PCRE_DEFAULT_EXEC_OPTS 0
+#define LOOP_FACTOR 10
+
+
+static const unsigned char *default_table;
+static Uint max_loop_limit;
+static Export re_exec_trap_export;
+static Export *grun_trap_exportp = NULL;
+static Export *urun_trap_exportp = NULL;
+static Export *ucompile_trap_exportp = NULL;
+
+static BIF_RETTYPE re_exec_trap(BIF_ALIST_3);
+
+static void *erts_erts_pcre_malloc(size_t size) {
+ return erts_alloc(ERTS_ALC_T_RE_HEAP,size);
+}
+
+static void erts_erts_pcre_free(void *ptr) {
+ erts_free(ERTS_ALC_T_RE_HEAP,ptr);
+}
+
+static void *erts_erts_pcre_stack_malloc(size_t size) {
+ return erts_alloc(ERTS_ALC_T_RE_STACK,size);
+}
+
+static void erts_erts_pcre_stack_free(void *ptr) {
+ erts_free(ERTS_ALC_T_RE_STACK,ptr);
+}
+
+void erts_init_bif_re(void)
+{
+ erts_pcre_malloc = &erts_erts_pcre_malloc;
+ erts_pcre_free = &erts_erts_pcre_free;
+ erts_pcre_stack_malloc = &erts_erts_pcre_stack_malloc;
+ erts_pcre_stack_free = &erts_erts_pcre_stack_free;
+ default_table = NULL; /* ISO8859-1 default, forced into pcre */
+ max_loop_limit = CONTEXT_REDS * LOOP_FACTOR;
+
+ sys_memset((void *) &re_exec_trap_export, 0, sizeof(Export));
+ re_exec_trap_export.address = &re_exec_trap_export.code[3];
+ re_exec_trap_export.code[0] = am_erlang;
+ re_exec_trap_export.code[1] = am_re_run_trap;
+ re_exec_trap_export.code[2] = 3;
+ re_exec_trap_export.code[3] = (Eterm) em_apply_bif;
+ re_exec_trap_export.code[4] = (Eterm) &re_exec_trap;
+
+ grun_trap_exportp = erts_export_put(am_re,am_grun,3);
+ urun_trap_exportp = erts_export_put(am_re,am_urun,3);
+ ucompile_trap_exportp = erts_export_put(am_re,am_ucompile,2);
+
+ return;
+}
+
+Sint erts_re_set_loop_limit(Sint limit)
+{
+ Sint save = (Sint) max_loop_limit;
+ if (limit <= 0) {
+ max_loop_limit = CONTEXT_REDS * LOOP_FACTOR;
+ } else {
+ max_loop_limit = (Uint) limit;
+ }
+ return save;
+}
+
+/*
+ * Deal with plain ints and so on for the library interface
+ */
+
+static int term_to_int(Eterm term, int *sp)
+{
+#ifdef ARCH_64
+
+ if (is_small(term)) {
+ Uint x = signed_val(term);
+ if (x > INT_MAX) {
+ return 0;
+ }
+ *sp = (int) x;
+ return 1;
+ }
+ return 0;
+
+#else
+
+ if (is_small(term)) {
+ *sp = signed_val(term);
+ return 1;
+ } else if (is_big(term)) {
+ ErtsDigit* xr = big_v(term);
+ dsize_t xl = big_size(term);
+ int sign = big_sign(term);
+ unsigned uval = 0;
+ int n = 0;
+
+ if (xl*D_EXP > sizeof(unsigned)*8) {
+ return 0;
+ }
+ while (xl-- > 0) {
+ uval |= ((unsigned)(*xr++)) << n;
+ n += D_EXP;
+ }
+ if (sign) {
+ uval = -uval;
+ if ((int)uval > 0)
+ return 0;
+ } else {
+ if ((int)uval < 0)
+ return 0;
+ }
+ *sp = uval;
+ return 1;
+ } else {
+ return 0;
+ }
+
+#endif
+
+}
+
+static Eterm make_signed_integer(int x, Process *p)
+{
+#ifdef ARCH_64
+ return make_small(x);
+#else
+ Eterm* hp;
+ if (IS_SSMALL(x))
+ return make_small(x);
+ else {
+ hp = HAlloc(p, BIG_UINT_HEAP_SIZE);
+ if (x >= 0) {
+ *hp = make_pos_bignum_header(1);
+ } else {
+ x = -x;
+ *hp = make_neg_bignum_header(1);
+ }
+ BIG_DIGIT(hp, 0) = x;
+ return make_big(hp);
+ }
+#endif
+}
+
+/*
+ * Parse option lists
+ */
+
+#define PARSE_FLAG_UNIQUE_COMPILE_OPT 1
+#define PARSE_FLAG_UNIQUE_EXEC_OPT 2
+#define PARSE_FLAG_UNICODE 4
+#define PARSE_FLAG_STARTOFFSET 8
+#define PARSE_FLAG_CAPTURE_OPT 16
+#define PARSE_FLAG_GLOBAL 32
+
+#define CAPSPEC_VALUES 0
+#define CAPSPEC_TYPE 1
+#define CAPSPEC_SIZE 2
+
+static int /* 0 == ok, < 0 == error */
+parse_options(Eterm listp, /* in */
+ int *compile_options, /* out */
+ int *exec_options, /* out */
+ int *flags,/* out */
+ int *startoffset, /* out */
+ Eterm *capture_spec) /* capture_spec[CAPSPEC_SIZE] */ /* out */
+{
+ int copt,eopt,fl;
+ Eterm item;
+
+ if (listp == NIL) {
+ copt = PCRE_DEFAULT_COMPILE_OPTS;
+ eopt = PCRE_DEFAULT_EXEC_OPTS;
+ fl = 0;
+ } else {
+ copt = 0;
+ eopt = 0;
+ fl = 0;
+ for (;is_list(listp); listp = CDR(list_val(listp))) {
+ item = CAR(list_val(listp));
+ if (is_tuple(item)) {
+ Eterm *tp = tuple_val(item);
+ if (arityval(*tp) != 2 || is_not_atom(tp[1])) {
+ if (arityval(*tp) == 3 && tp[1] == am_capture) {
+ if (capture_spec != NULL) {
+ capture_spec[CAPSPEC_VALUES] = tp[2];
+ capture_spec[CAPSPEC_TYPE] = tp[3];
+ }
+ fl |= (PARSE_FLAG_CAPTURE_OPT |
+ PARSE_FLAG_UNIQUE_EXEC_OPT);
+ continue;
+ } else {
+ return -1;
+ }
+ }
+ switch(tp[1]) {
+ case am_capture:
+ if (capture_spec != NULL) {
+ capture_spec[CAPSPEC_VALUES] = tp[2];
+ capture_spec[CAPSPEC_TYPE] = am_index;
+ }
+ fl |= (PARSE_FLAG_CAPTURE_OPT |
+ PARSE_FLAG_UNIQUE_EXEC_OPT);
+ break;
+ case am_offset:
+ {
+ int tmp;
+ if (!term_to_int(tp[2],&tmp)) {
+ return -1;
+ }
+ if (startoffset != NULL) {
+ *startoffset = tmp;
+ }
+ }
+ fl |= (PARSE_FLAG_UNIQUE_EXEC_OPT|PARSE_FLAG_STARTOFFSET);
+ break;
+ case am_newline:
+ if (!is_atom(tp[2])) {
+ return -1;
+ }
+ switch (tp[2]) {
+ case am_cr:
+ copt |= PCRE_NEWLINE_CR;
+ eopt |= PCRE_NEWLINE_CR;
+ break;
+ case am_crlf:
+ copt |= PCRE_NEWLINE_CRLF;
+ eopt |= PCRE_NEWLINE_CRLF;
+ break;
+ case am_lf:
+ copt |= PCRE_NEWLINE_LF;
+ eopt |= PCRE_NEWLINE_LF;
+ break;
+ case am_anycrlf:
+ copt |= PCRE_NEWLINE_ANYCRLF;
+ eopt |= PCRE_NEWLINE_ANYCRLF;
+ break;
+ case am_any:
+ eopt |= PCRE_NEWLINE_ANY;
+ copt |= PCRE_NEWLINE_ANY;
+ break;
+ default:
+ return -1;
+ break;
+ }
+ break;
+ default:
+ return -1;
+ }
+ }else if (is_not_atom(item)) {
+ return -1;
+ } else {
+ switch(item) {
+ case am_anchored:
+ copt |= PCRE_ANCHORED;
+ eopt |= PCRE_ANCHORED;
+ break;
+ case am_notempty:
+ eopt |= PCRE_NOTEMPTY;
+ fl |= PARSE_FLAG_UNIQUE_EXEC_OPT;
+ break;
+ case am_notbol:
+ eopt |= PCRE_NOTBOL;
+ fl |= PARSE_FLAG_UNIQUE_EXEC_OPT;
+ break;
+ case am_noteol:
+ eopt |= PCRE_NOTEOL;
+ fl |= PARSE_FLAG_UNIQUE_EXEC_OPT;
+ break;
+ case am_caseless:
+ copt |= PCRE_CASELESS;
+ fl |= PARSE_FLAG_UNIQUE_COMPILE_OPT;
+ break;
+ case am_dollar_endonly:
+ copt |= PCRE_DOLLAR_ENDONLY;
+ fl |= PARSE_FLAG_UNIQUE_COMPILE_OPT;
+ break;
+ case am_dotall:
+ copt |= PCRE_DOTALL;
+ fl |= PARSE_FLAG_UNIQUE_COMPILE_OPT;
+ break;
+ case am_extended:
+ copt |= PCRE_EXTENDED;
+ fl |= PARSE_FLAG_UNIQUE_COMPILE_OPT;
+ break;
+ case am_firstline:
+ copt |= PCRE_FIRSTLINE;
+ fl |= PARSE_FLAG_UNIQUE_COMPILE_OPT;
+ break;
+ case am_multiline:
+ copt |= PCRE_MULTILINE;
+ fl |= PARSE_FLAG_UNIQUE_COMPILE_OPT;
+ break;
+ case am_no_auto_capture:
+ copt |= PCRE_NO_AUTO_CAPTURE;
+ fl |= PARSE_FLAG_UNIQUE_COMPILE_OPT;
+ break;
+ case am_dupnames:
+ copt |= PCRE_DUPNAMES;
+ fl |= PARSE_FLAG_UNIQUE_COMPILE_OPT;
+ break;
+ case am_ungreedy:
+ copt |= PCRE_UNGREEDY;
+ fl |= PARSE_FLAG_UNIQUE_COMPILE_OPT;
+ break;
+ case am_unicode:
+ copt |= PCRE_UTF8;
+ fl |= (PARSE_FLAG_UNIQUE_COMPILE_OPT | PARSE_FLAG_UNICODE);
+ break;
+ case am_global:
+ fl |= (PARSE_FLAG_UNIQUE_EXEC_OPT | PARSE_FLAG_GLOBAL);
+ break;
+ case am_bsr_anycrlf:
+ eopt |= PCRE_BSR_ANYCRLF;
+ copt |= PCRE_BSR_ANYCRLF;
+ break;
+ case am_bsr_unicode:
+ eopt |= PCRE_BSR_UNICODE;
+ copt |= PCRE_BSR_UNICODE;
+ break;
+ default:
+ return -1;
+ }
+ }
+ }
+ if (is_not_nil(listp)) {
+ return -1;
+ }
+ }
+ if (compile_options != NULL) {
+ *compile_options = copt;
+ }
+ if (exec_options != NULL) {
+ *exec_options = eopt;
+ }
+ if (flags != NULL) {
+ *flags = fl;
+ }
+ return 0;
+}
+
+/*
+ * Build Erlang term result from compilation
+ */
+
+static Eterm
+build_compile_result(Process *p, Eterm error_tag, pcre *result, int errcode, const char *errstr, int errofset, int unicode, int with_ok)
+{
+ Eterm *hp;
+ Eterm ret;
+ size_t pattern_size;
+ int capture_count;
+ if (!result) {
+ /* Return {error_tag, {Code, String, Offset}} */
+ int elen = sys_strlen(errstr);
+ int need = 3 /* tuple of 2 */ +
+ 3 /* tuple of 2 */ +
+ (2 * elen) /* The error string list */;
+ hp = HAlloc(p, need);
+ ret = buf_to_intlist(&hp, (char *) errstr, elen, NIL);
+ ret = TUPLE2(hp, ret, make_small(errofset));
+ hp += 3;
+ ret = TUPLE2(hp, error_tag, ret);
+ } else {
+ erts_pcre_fullinfo(result, NULL, PCRE_INFO_SIZE, &pattern_size);
+ erts_pcre_fullinfo(result, NULL, PCRE_INFO_CAPTURECOUNT, &capture_count);
+ /* XXX: Optimize - keep in offheap binary to allow this to
+ be kept across traps w/o need of copying */
+ ret = new_binary(p, (byte *) result, pattern_size);
+ erts_pcre_free(result);
+ hp = HAlloc(p, (with_ok) ? (3+5) : 5);
+ ret = TUPLE4(hp,am_re_pattern, make_small(capture_count), make_small(unicode),ret);
+ if (with_ok) {
+ hp += 5;
+ ret = TUPLE2(hp,am_ok,ret);
+ }
+ }
+ return ret;
+}
+
+/*
+ * Compile BIFs
+ */
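+
+/*
+ * Erlang-level sketch (illustrative):
+ *
+ *   {ok, MP} = re:compile("^a.*b$", [caseless]),
+ *   %% MP = {re_pattern, CaptureCount, Unicode, CompiledBinary},
+ *   %% as built by build_compile_result() above
+ */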
+
+BIF_RETTYPE
+re_compile_2(BIF_ALIST_2)
+{
+ int slen;
+ char *expr;
+ pcre *result;
+ int errcode = 0;
+ const char *errstr = "";
+ int errofset = 0;
+ Eterm ret;
+ int options = 0;
+ int pflags = 0;
+ int unicode = 0;
+
+
+ if (parse_options(BIF_ARG_2,&options,NULL,&pflags,NULL,NULL)
+ < 0) {
+ BIF_ERROR(BIF_P,BADARG);
+ }
+
+ if (pflags & PARSE_FLAG_UNIQUE_EXEC_OPT) {
+ BIF_ERROR(BIF_P,BADARG);
+ }
+
+ unicode = (pflags & PARSE_FLAG_UNICODE) ? 1 : 0;
+
+ if (pflags & PARSE_FLAG_UNICODE && !is_binary(BIF_ARG_1)) {
+ BIF_TRAP2(ucompile_trap_exportp, BIF_P, BIF_ARG_1, BIF_ARG_2);
+ }
+
+ if ((slen = io_list_len(BIF_ARG_1)) < 0) {
+ BIF_ERROR(BIF_P,BADARG);
+ }
+ expr = erts_alloc(ERTS_ALC_T_RE_TMP_BUF, slen + 1);
+ if (io_list_to_buf(BIF_ARG_1, expr, slen) != 0) {
+ erts_free(ERTS_ALC_T_RE_TMP_BUF, expr);
+ BIF_ERROR(BIF_P,BADARG);
+ }
+ expr[slen]='\0';
+ result = erts_pcre_compile2(expr, options, &errcode,
+ &errstr, &errofset, default_table);
+
+ ret = build_compile_result(BIF_P, am_error, result, errcode,
+ errstr, errofset, unicode, 1);
+ erts_free(ERTS_ALC_T_RE_TMP_BUF, expr);
+ BIF_RET(ret);
+}
+
+BIF_RETTYPE
+re_compile_1(BIF_ALIST_1)
+{
+ return re_compile_2(BIF_P,BIF_ARG_1,NIL);
+}
+
+/*
+ * Restart contexts for the re:run bif
+ */
+
+/*
+ * When erts_pcre_exec is restarted, only the actual extra structure
+ * with its restart data needs to be kept. The match is then resumed
+ * with whatever was saved. The compiled code is referenced by the
+ * restart data and must not be reallocated or GC'ed, which is why it
+ * is passed along as an off-heap binary, but it is not actually
+ * passed in the erts_pcre_exec restart calls.
+ */
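+/*
+ * Trap/restart control flow, as a sketch of the code further below:
+ *
+ *   rc = erts_pcre_exec(code, &extra, subject, len, offset, opts,
+ *                       ovector, ovsize);
+ *   if (rc == PCRE_ERROR_LOOP_LIMIT) {
+ *       save the RestartContext in a magic binary and BIF_TRAP3 to
+ *       re_exec_trap(), which resumes the match via
+ *       erts_pcre_exec(NULL, &extra, NULL, 0, 0, 0, NULL, 0);
+ *   }
+ */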
+
+typedef enum { RetIndex, RetString, RetBin, RetNone } ReturnType;
+
+typedef struct _return_info {
+ ReturnType type;
+    int num_spec; /* 0 == all, -1 == all_but_first, > 0 == count given in vector v */
+ int v[1];
+} ReturnInfo;
+
+typedef struct _restart_context {
+ pcre_extra extra;
+ void *restart_data;
+ Uint32 flags;
+ char *subject; /* to be able to free it when done */
+ pcre *code; /* Keep a copy */
+ int *ovector; /* Keep until done */
+ ReturnInfo *ret_info;
+} RestartContext;
+
+#define RESTART_FLAG_SUBJECT_IN_BINARY 0x1
+
+static void cleanup_restart_context(RestartContext *rc)
+{
+ if (rc->restart_data != NULL) {
+ erts_pcre_free_restart_data(rc->restart_data);
+ rc->restart_data = NULL;
+ }
+ if (rc->ovector != NULL) {
+ erts_free(ERTS_ALC_T_RE_SUBJECT, rc->ovector);
+ rc->ovector = NULL;
+ }
+ if (rc->subject != NULL && !(rc->flags & RESTART_FLAG_SUBJECT_IN_BINARY)) {
+ erts_free(ERTS_ALC_T_RE_SUBJECT, rc->subject);
+ }
+ rc->subject = NULL;
+ if (rc->code != NULL) {
+ erts_free(ERTS_ALC_T_RE_SUBJECT, rc->code);
+ rc->code = NULL;
+ }
+ if (rc->ret_info != NULL) {
+ erts_free(ERTS_ALC_T_RE_SUBJECT, rc->ret_info);
+ rc->ret_info = NULL;
+ }
+}
+
+static void cleanup_restart_context_bin(Binary *bp)
+{
+ RestartContext *rc = ERTS_MAGIC_BIN_DATA(bp);
+ cleanup_restart_context(rc);
+}
+
+/*
+ * Build the return value for Erlang from result and restart context
+ */
+
+static Eterm build_exec_return(Process *p, int rc, RestartContext *restartp, Eterm orig_subject)
+{
+ Eterm res;
+ Eterm *hp;
+ if (rc <= 0) {
+ res = am_nomatch;
+ } else {
+ ReturnInfo *ri = restartp->ret_info;
+ ReturnInfo defri = {RetIndex,0,{0}};
+ if (ri == NULL) {
+ ri = &defri;
+ }
+ if (ri->type == RetNone) {
+ res = am_match;
+ } else if (ri->type == RetIndex){
+ Eterm *tmp_vect;
+ Eterm tpl;
+ int i;
+ if (ri->num_spec <= 0) {
+ tmp_vect = erts_alloc(ERTS_ALC_T_RE_TMP_BUF,
+ rc * 2 * sizeof(Eterm));
+ for(i = -(ri->num_spec) ;i < rc; ++i) {
+ tmp_vect[i*2] = make_signed_integer(restartp->ovector[i*2],p);
+ tmp_vect[i*2+1] = make_signed_integer(restartp->ovector[i*2+1] - restartp->ovector[i*2],p);
+ }
+ hp = HAlloc(p, 3+(3+2)*(rc + ri->num_spec));
+ res = NIL;
+ for(i = rc-1 ;i >= -(ri->num_spec); --i) {
+ tpl = TUPLE2(hp,tmp_vect[i*2],tmp_vect[i*2+1]);
+ hp += 3;
+ res = CONS(hp,tpl,res);
+ hp += 2;
+ }
+ } else {
+ int n = 0;
+ int x;
+ tmp_vect = erts_alloc(ERTS_ALC_T_RE_TMP_BUF,
+ ri->num_spec * 2 * sizeof(Eterm));
+ for (i = 0; i < ri->num_spec; ++i) {
+ x = ri->v[i];
+ if (x < rc && x >= 0) {
+ tmp_vect[n*2] = make_signed_integer(restartp->ovector[x*2],p);
+ tmp_vect[n*2+1] = make_signed_integer(restartp->ovector[x*2+1]-restartp->ovector[x*2],p);
+ } else {
+ tmp_vect[n*2] = make_small(-1);
+ tmp_vect[n*2+1] = make_small(0);
+ }
+ ++n;
+ }
+ hp = HAlloc(p, 3+(3+2)*n);
+ res = NIL;
+ for(i = n-1 ;i >= 0; --i) {
+ tpl = TUPLE2(hp,tmp_vect[i*2],tmp_vect[i*2+1]);
+ hp += 3;
+ res = CONS(hp,tpl,res);
+ hp += 2;
+ }
+ }
+ res = TUPLE2(hp,am_match,res);
+ erts_free(ERTS_ALC_T_RE_TMP_BUF, tmp_vect);
+ } else {
+ Eterm *tmp_vect;
+ int i;
+ Eterm orig = NIL;
+ Uint offset = 0;
+ Uint bitoffs = 0;
+ Uint bitsize = 0;
+ if (restartp->flags & RESTART_FLAG_SUBJECT_IN_BINARY) {
+ ERTS_GET_REAL_BIN(orig_subject, orig, offset, bitoffs, bitsize);
+ }
+ if (ri->num_spec <= 0) {
+ tmp_vect = erts_alloc(ERTS_ALC_T_RE_TMP_BUF,
+ rc * sizeof(Eterm));
+ for(i = -(ri->num_spec) ;i < rc; ++i) { /* XXX: Unicode */
+ char *cp;
+ int len;
+ if (restartp->ovector[i*2] < 0) {
+ cp = restartp->subject;
+ len = 0;
+ } else {
+ cp = restartp->subject + restartp->ovector[i*2];
+ len = restartp->ovector[i*2+1] - restartp->ovector[i*2];
+ }
+ if (ri->type == RetBin) {
+ if (restartp->flags & RESTART_FLAG_SUBJECT_IN_BINARY) {
+ /* Optimized - if subject was binary to begin
+ with, we can make sub-binaries. */
+ ErlSubBin *sb;
+ Uint virtual_offset = cp - restartp->subject;
+ hp = HAlloc(p, ERL_SUB_BIN_SIZE);
+ sb = (ErlSubBin *) hp;
+ sb->thing_word = HEADER_SUB_BIN;
+ sb->size = len;
+ sb->offs = offset + virtual_offset;
+ sb->orig = orig;
+ sb->bitoffs = bitoffs;
+ sb->bitsize = bitsize;
+ sb->is_writable = 0;
+ tmp_vect[i] = make_binary(sb);
+ } else {
+ tmp_vect[i] = new_binary(p, (byte *) cp, len);
+ }
+ } else {
+ Eterm *hp2;
+ hp2 = HAlloc(p,(2*len));
+ tmp_vect[i] = buf_to_intlist(&hp2, cp, len, NIL);
+ }
+ }
+ hp = HAlloc(p, 3+2*(rc + ri->num_spec));
+ res = NIL;
+ for(i = rc-1 ;i >= -(ri->num_spec); --i) {
+ res = CONS(hp,tmp_vect[i],res);
+ hp += 2;
+ }
+ } else {
+ int n = 0;
+ int x;
+ tmp_vect = erts_alloc(ERTS_ALC_T_RE_TMP_BUF,
+ ri->num_spec * sizeof(Eterm));
+ for (i = 0; i < ri->num_spec; ++i) {
+ x = ri->v[i];
+ if (x < rc && x >= 0) {
+ char *cp;
+ int len;
+ if (restartp->ovector[x*2] < 0) {
+ cp = restartp->subject;
+ len = 0;
+ } else {
+ cp = restartp->subject + restartp->ovector[x*2];
+ len = restartp->ovector[x*2+1] - restartp->ovector[x*2];
+ }
+ if (ri->type == RetBin) {
+ if (restartp->flags & RESTART_FLAG_SUBJECT_IN_BINARY) {
+ /* Optimized - if subject was binary to begin
+    with, we can make sub-binaries. */
+ ErlSubBin *sb;
+ Uint virtual_offset = cp - restartp->subject;
+ hp = HAlloc(p, ERL_SUB_BIN_SIZE);
+ sb = (ErlSubBin *) hp;
+ sb->thing_word = HEADER_SUB_BIN;
+ sb->size = len;
+ sb->offs = offset + virtual_offset;
+ sb->orig = orig;
+ sb->bitoffs = bitoffs;
+ sb->bitsize = bitsize;
+ sb->is_writable = 0;
+ tmp_vect[n] = make_binary(sb);
+ } else {
+ tmp_vect[n] = new_binary(p, (byte *) cp, len);
+ }
+ } else {
+ Eterm *hp2;
+ hp2 = HAlloc(p,(2*len));
+ tmp_vect[n] = buf_to_intlist(&hp2, cp, len, NIL);
+ }
+ } else {
+ if (ri->type == RetBin) {
+ tmp_vect[n] = new_binary(p, (byte *) "", 0);
+ } else {
+ tmp_vect[n] = NIL;
+ }
+ }
+ ++n;
+ }
+ hp = HAlloc(p, 3+2*n);
+ res = NIL;
+ for(i = n-1 ;i >= 0; --i) {
+ res = CONS(hp,tmp_vect[i],res);
+ hp += 2;
+ }
+
+ }
+ res = TUPLE2(hp,am_match,res);
+ erts_free(ERTS_ALC_T_RE_TMP_BUF, tmp_vect);
+ }
+ }
+ return res;
+}
+
+/*
+ * Extra parsing function, build the ReturnInfo structure from
+ * a capture specification in the option list
+ */
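+/*
+ * Example capture specifications at the Erlang level (illustrative):
+ *
+ *   re:run("abc", "(a)(b)", [{capture, all_but_first, list}])
+ *       -> {match,["a","b"]}
+ *   re:run("abc", "(a)(b)", [{capture, none}])
+ *       -> match
+ */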
+
+#define RINFO_SIZ(Num) (sizeof(ReturnInfo) + (sizeof(int) * (Num - 1)))
+
+static ReturnInfo *
+build_capture(Eterm capture_spec[CAPSPEC_SIZE], const pcre *code)
+{
+ ReturnInfo *ri = erts_alloc(ERTS_ALC_T_RE_SUBJECT, RINFO_SIZ(0));
+ int sallocated = 0;
+ char *tmpb = NULL;
+ int tmpbsiz = 0;
+ Eterm l;
+
+ ri->type = RetIndex;
+ ri->num_spec = 0;
+
+
+ switch(capture_spec[CAPSPEC_TYPE]) {
+ case am_index:
+ ri->type = RetIndex;
+ break;
+ case am_list:
+ ri->type = RetString;
+ break;
+ case am_binary:
+ ri->type = RetBin;
+ break;
+ default:
+ goto error;
+ }
+
+ switch(capture_spec[CAPSPEC_VALUES]) {
+ case am_all:
+ ri->num_spec = 0;
+ break;
+ case am_none:
+ case NIL:
+ ri->num_spec = 0;
+ ri->type = RetNone;
+ break;
+ case am_all_but_first:
+ ri->num_spec = -1;
+ break;
+ case am_first:
+ ri->num_spec = 1;
+ if(ri->num_spec > sallocated) {
+ sallocated = ri->num_spec;
+ ri = erts_realloc(ERTS_ALC_T_RE_SUBJECT, ri, RINFO_SIZ(sallocated));
+ }
+ ri->v[ri->num_spec - 1] = 0;
+ break;
+ default:
+ if (is_list(capture_spec[CAPSPEC_VALUES])) {
+ for(l=capture_spec[CAPSPEC_VALUES];is_list(l);l = CDR(list_val(l))) {
+ int x;
+ Eterm val = CAR(list_val(l));
+ if (ri->num_spec < 0)
+ ri->num_spec = 0;
+ ++(ri->num_spec);
+ if(ri->num_spec > sallocated) {
+ sallocated += 10;
+ ri = erts_realloc(ERTS_ALC_T_RE_SUBJECT, ri, RINFO_SIZ(sallocated));
+ }
+ if (term_to_int(val,&x)) {
+ ri->v[ri->num_spec - 1] = x;
+ } else if (is_atom(val) || is_binary(val) || is_list(val)) {
+ if (is_atom(val)) {
+ Atom *ap = atom_tab(atom_val(val));
+ if ((ap->len + 1) > tmpbsiz) {
+ if (!tmpbsiz) {
+ tmpb = erts_alloc(ERTS_ALC_T_RE_TMP_BUF,(tmpbsiz = ap->len + 1));
+ } else {
+ tmpb = erts_realloc(ERTS_ALC_T_RE_TMP_BUF,tmpb,
+ (tmpbsiz = ap->len + 1));
+ }
+ }
+ memcpy(tmpb,ap->name,ap->len);
+ tmpb[ap->len] = '\0';
+ } else {
+ int slen = io_list_len(val);
+ if (slen < 0) {
+ goto error;
+ }
+ if ((slen + 1) > tmpbsiz) {
+ if (!tmpbsiz) {
+ tmpb = erts_alloc(ERTS_ALC_T_RE_TMP_BUF,(tmpbsiz = slen + 1));
+ } else {
+ tmpb = erts_realloc(ERTS_ALC_T_RE_TMP_BUF,tmpb,
+ (tmpbsiz = slen + 1));
+ }
+ }
+ if (io_list_to_buf(val, tmpb, slen) != 0) {
+ goto error;
+ }
+ tmpb[slen] = '\0';
+ }
+ if ((ri->v[ri->num_spec - 1] = erts_pcre_get_stringnumber(code,tmpb)) ==
+ PCRE_ERROR_NOSUBSTRING) {
+ ri->v[ri->num_spec - 1] = -1;
+ }
+ } else {
+ goto error;
+ }
+ }
+ if (l != NIL) {
+ goto error;
+ }
+ } else {
+ goto error;
+ }
+ break;
+ }
+
+ if(tmpb != NULL) {
+ erts_free(ERTS_ALC_T_RE_TMP_BUF,tmpb);
+ }
+ return ri;
+ error:
+ if(tmpb != NULL) {
+ erts_free(ERTS_ALC_T_RE_TMP_BUF,tmpb);
+ }
+ erts_free(ERTS_ALC_T_RE_SUBJECT, ri);
+ return NULL;
+}
+
+
+/*
+ * The actual re:run/2,3 BIFs
+ */
+BIF_RETTYPE
+re_run_3(BIF_ALIST_3)
+{
+ const pcre *code_tmp;
+ RestartContext restart;
+ byte *temp_alloc = NULL;
+ int slength;
+ int startoffset = 0;
+ int options = 0, comp_options = 0;
+ int ovsize;
+ int pflags;
+ Eterm *tp;
+ int rc;
+ Eterm res;
+ size_t code_size;
+ Uint loop_limit_tmp;
+ unsigned long loop_count;
+ Eterm capture[CAPSPEC_SIZE];
+ int is_list_cap;
+
+ if (parse_options(BIF_ARG_3,&comp_options,&options,&pflags,&startoffset,capture)
+ < 0) {
+ BIF_ERROR(BIF_P,BADARG);
+ }
+ is_list_cap = ((pflags & PARSE_FLAG_CAPTURE_OPT) &&
+ (capture[CAPSPEC_TYPE] == am_list));
+
+ if (is_not_tuple(BIF_ARG_2) || (arityval(*tuple_val(BIF_ARG_2)) != 4)) {
+ if (is_binary(BIF_ARG_2) || is_list(BIF_ARG_2) || is_nil(BIF_ARG_2)) {
+ /* Compile from textual RE */
+ int slen;
+ char *expr;
+ pcre *result;
+ int errcode = 0;
+ const char *errstr = "";
+ int errofset = 0;
+ int capture_count;
+
+ if (pflags & PARSE_FLAG_UNICODE &&
+ (!is_binary(BIF_ARG_1) ||
+ (is_list_cap && !(pflags & PARSE_FLAG_GLOBAL)))) {
+ BIF_TRAP3(urun_trap_exportp, BIF_P, BIF_ARG_1, BIF_ARG_2, BIF_ARG_3);
+ }
+
+ if ((slen = io_list_len(BIF_ARG_2)) < 0) {
+ BIF_ERROR(BIF_P,BADARG);
+ }
+
+ expr = erts_alloc(ERTS_ALC_T_RE_TMP_BUF, slen + 1);
+ if (io_list_to_buf(BIF_ARG_2, expr, slen) != 0) {
+ erts_free(ERTS_ALC_T_RE_TMP_BUF, expr);
+ BIF_ERROR(BIF_P,BADARG);
+ }
+ expr[slen]='\0';
+ result = erts_pcre_compile2(expr, comp_options, &errcode,
+ &errstr, &errofset, default_table);
+ if (!result) {
+ erts_free(ERTS_ALC_T_RE_TMP_BUF, expr);
+ /* Compilation error gives badarg except in the compile
+ function */
+ BIF_ERROR(BIF_P,BADARG);
+ }
+ if (pflags & PARSE_FLAG_GLOBAL) {
+ Eterm precompiled =
+ build_compile_result(BIF_P, am_error,
+ result, errcode,
+ errstr, errofset,
+ (pflags &
+ PARSE_FLAG_UNICODE) ? 1 : 0,
+ 0);
+ Eterm *hp,r;
+ erts_free(ERTS_ALC_T_RE_TMP_BUF, expr);
+ hp = HAlloc(BIF_P,4);
+ /* BIF_ARG_2 is in the tuple just to make exceptions right */
+ r = TUPLE3(hp,BIF_ARG_3,
+ ((pflags & PARSE_FLAG_UNIQUE_COMPILE_OPT) ?
+ am_true :
+ am_false), BIF_ARG_2);
+ BIF_TRAP3(grun_trap_exportp, BIF_P, BIF_ARG_1, precompiled, r);
+ }
+
+ erts_pcre_fullinfo(result, NULL, PCRE_INFO_SIZE, &code_size);
+ erts_pcre_fullinfo(result, NULL, PCRE_INFO_CAPTURECOUNT, &capture_count);
+ ovsize = 3*(capture_count+1);
+ restart.code = erts_alloc(ERTS_ALC_T_RE_SUBJECT, code_size);
+ memcpy(restart.code, result, code_size);
+ erts_pcre_free(result);
+ erts_free(ERTS_ALC_T_RE_TMP_BUF, expr);
+ /*unicode = (pflags & PARSE_FLAG_UNICODE) ? 1 : 0;*/
+ } else {
+ BIF_ERROR(BIF_P,BADARG);
+ }
+ } else {
+ if (pflags & PARSE_FLAG_UNIQUE_COMPILE_OPT) {
+ BIF_ERROR(BIF_P,BADARG);
+ }
+
+ tp = tuple_val(BIF_ARG_2);
+ if (tp[1] != am_re_pattern || is_not_small(tp[2]) ||
+ is_not_small(tp[3]) || is_not_binary(tp[4])) {
+ BIF_ERROR(BIF_P,BADARG);
+ }
+
+ if (unsigned_val(tp[3]) &&
+ (!is_binary(BIF_ARG_1) ||
+ (is_list_cap && !(pflags & PARSE_FLAG_GLOBAL)))) { /* unicode */
+ BIF_TRAP3(urun_trap_exportp, BIF_P, BIF_ARG_1, BIF_ARG_2,
+ BIF_ARG_3);
+ }
+
+ if (pflags & PARSE_FLAG_GLOBAL) {
+ Eterm *hp,r;
+ hp = HAlloc(BIF_P,3);
+ r = TUPLE2(hp,BIF_ARG_3,am_false);
+ BIF_TRAP3(grun_trap_exportp, BIF_P, BIF_ARG_1, BIF_ARG_2,
+ r);
+ }
+
+ ovsize = 3*(unsigned_val(tp[2])+1);
+ code_size = binary_size(tp[4]);
+ if ((code_tmp = (const pcre *)
+ erts_get_aligned_binary_bytes(tp[4], &temp_alloc)) == NULL) {
+ erts_free_aligned_binary_bytes(temp_alloc);
+ BIF_ERROR(BIF_P, BADARG);
+ }
+ restart.code = erts_alloc(ERTS_ALC_T_RE_SUBJECT, code_size);
+ memcpy(restart.code, code_tmp, code_size);
+ erts_free_aligned_binary_bytes(temp_alloc);
+
+ }
+
+
+ restart.ovector = erts_alloc(ERTS_ALC_T_RE_SUBJECT, ovsize * sizeof(int));
+ restart.extra.flags = PCRE_EXTRA_TABLES | PCRE_EXTRA_LOOP_LIMIT;
+ restart.extra.tables = default_table;
+ restart.extra.loop_limit = ERTS_BIF_REDS_LEFT(BIF_P) * LOOP_FACTOR;
+ loop_limit_tmp = max_loop_limit; /* To lessen the probability of a race
+                                     in debug situations (erts_debug) */
+ if (restart.extra.loop_limit > loop_limit_tmp) {
+ restart.extra.loop_limit = loop_limit_tmp;
+ }
+ restart.restart_data = NULL;
+ restart.extra.restart_data = &restart.restart_data;
+ restart.extra.restart_flags = 0;
+ restart.extra.loop_counter_return = &loop_count;
+ restart.ret_info = NULL;
+
+ if (pflags & PARSE_FLAG_CAPTURE_OPT) {
+ if ((restart.ret_info = build_capture(capture,restart.code)) == NULL) {
+ erts_free(ERTS_ALC_T_RE_SUBJECT, restart.ovector);
+ erts_free(ERTS_ALC_T_RE_SUBJECT, restart.code);
+ BIF_ERROR(BIF_P,BADARG);
+ }
+ }
+
+ /* Optimization: if the subject is already an off-heap binary, keep it
+    and avoid copying; binary returns can then be sub-binaries of it. */
+
+ restart.flags = 0;
+ if (is_binary(BIF_ARG_1)) {
+ Eterm real_bin;
+ Uint offset;
+ Eterm* bptr;
+ int bitoffs;
+ int bitsize;
+ ProcBin* pb;
+
+ ERTS_GET_REAL_BIN(BIF_ARG_1, real_bin, offset, bitoffs, bitsize);
+
+ slength = binary_size(BIF_ARG_1);
+ bptr = binary_val(real_bin);
+ if (bitsize != 0 || bitoffs != 0 || (*bptr != HEADER_PROC_BIN)) {
+ goto handle_iolist;
+ }
+ pb = (ProcBin *) bptr;
+ restart.subject = (char *) (pb->bytes+offset);
+ restart.flags |= RESTART_FLAG_SUBJECT_IN_BINARY;
+ } else {
+handle_iolist:
+ if ((slength = io_list_len(BIF_ARG_1)) < 0) {
+ erts_free(ERTS_ALC_T_RE_SUBJECT, restart.ovector);
+ erts_free(ERTS_ALC_T_RE_SUBJECT, restart.code);
+ if (restart.ret_info != NULL) {
+ erts_free(ERTS_ALC_T_RE_SUBJECT, restart.ret_info);
+ }
+ BIF_ERROR(BIF_P,BADARG);
+ }
+ restart.subject = erts_alloc(ERTS_ALC_T_RE_SUBJECT, slength);
+
+ if (io_list_to_buf(BIF_ARG_1, restart.subject, slength) != 0) {
+ erts_free(ERTS_ALC_T_RE_SUBJECT, restart.ovector);
+ erts_free(ERTS_ALC_T_RE_SUBJECT, restart.code);
+ erts_free(ERTS_ALC_T_RE_SUBJECT, restart.subject);
+ if (restart.ret_info != NULL) {
+ erts_free(ERTS_ALC_T_RE_SUBJECT, restart.ret_info);
+ }
+ BIF_ERROR(BIF_P,BADARG);
+ }
+ }
+
+
+#ifdef DEBUG
+ loop_count = 0xFFFFFFFF;
+#endif
+
+ rc = erts_pcre_exec(restart.code, &(restart.extra), restart.subject, slength, startoffset,
+ options, restart.ovector, ovsize);
+ ASSERT(loop_count != 0xFFFFFFFF);
+ BUMP_REDS(BIF_P, loop_count / LOOP_FACTOR);
+ if (rc == PCRE_ERROR_LOOP_LIMIT) {
+ /* Trap */
+ Binary *mbp = erts_create_magic_binary(sizeof(RestartContext),
+ cleanup_restart_context_bin);
+ RestartContext *restartp = ERTS_MAGIC_BIN_DATA(mbp);
+ Eterm magic_bin;
+ Eterm *hp;
+ memcpy(restartp,&restart,sizeof(RestartContext));
+ BUMP_ALL_REDS(BIF_P);
+ hp = HAlloc(BIF_P, PROC_BIN_SIZE);
+ magic_bin = erts_mk_magic_binary_term(&hp, &MSO(BIF_P), mbp);
+ BIF_TRAP3(&re_exec_trap_export,
+ BIF_P,
+ BIF_ARG_1,
+ BIF_ARG_2 /* To avoid GC of precompiled code, XXX: not utilized yet */,
+ magic_bin);
+ }
+
+ res = build_exec_return(BIF_P, rc, &restart, BIF_ARG_1);
+
+ cleanup_restart_context(&restart);
+
+ BIF_RET(res);
+}
+
+BIF_RETTYPE
+re_run_2(BIF_ALIST_2)
+{
+ return re_run_3(BIF_P,BIF_ARG_1, BIF_ARG_2, NIL);
+}
+
+/*
+ * The "magic" trap target, continue a re:run
+ */
+
+static BIF_RETTYPE re_exec_trap(BIF_ALIST_3)
+ /* XXX: Optimize - arg 1 and 2 to be utilized for keeping binary
+ code and subject */
+{
+ Binary *mbp;
+ RestartContext *restartp;
+ int rc;
+ unsigned long loop_count;
+ Uint loop_limit_tmp;
+ Eterm res;
+
+ ASSERT(ERTS_TERM_IS_MAGIC_BINARY(BIF_ARG_3));
+
+ mbp = ((ProcBin *) binary_val(BIF_ARG_3))->val;
+
+ ASSERT(ERTS_MAGIC_BIN_DESTRUCTOR(mbp)
+ == cleanup_restart_context_bin);
+
+ restartp = (RestartContext *) ERTS_MAGIC_BIN_DATA(mbp);
+
+ restartp->extra.loop_limit = ERTS_BIF_REDS_LEFT(BIF_P) * LOOP_FACTOR;
+ loop_limit_tmp = max_loop_limit; /* To lessen the probability of a race
+                                     in debug situations (erts_debug) */
+ if (restartp->extra.loop_limit > loop_limit_tmp) {
+ restartp->extra.loop_limit = loop_limit_tmp;
+ }
+ restartp->extra.loop_counter_return = &loop_count;
+ restartp->extra.restart_data = &restartp->restart_data;
+ restartp->extra.restart_flags = 0;
+
+#ifdef DEBUG
+ loop_count = 0xFFFFFFFF;
+#endif
+ rc = erts_pcre_exec(NULL, &(restartp->extra), NULL, 0, 0, 0, NULL, 0);
+ ASSERT(loop_count != 0xFFFFFFFF);
+ BUMP_REDS(BIF_P, loop_count / LOOP_FACTOR);
+ if (rc == PCRE_ERROR_LOOP_LIMIT) {
+ /* Trap */
+ BUMP_ALL_REDS(BIF_P);
+ BIF_TRAP3(&re_exec_trap_export, BIF_P, BIF_ARG_1, BIF_ARG_2, BIF_ARG_3);
+ }
+ res = build_exec_return(BIF_P, rc, restartp, BIF_ARG_1);
+
+ cleanup_restart_context(restartp);
+
+ BIF_RET(res);
+}
+
+
+
+
diff --git a/erts/emulator/beam/erl_bif_timer.c b/erts/emulator/beam/erl_bif_timer.c
new file mode 100644
index 0000000000..172bb37952
--- /dev/null
+++ b/erts/emulator/beam/erl_bif_timer.c
@@ -0,0 +1,701 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2005-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+
+#include "erl_bif_timer.h"
+#include "global.h"
+#include "bif.h"
+#include "error.h"
+#include "big.h"
+
+/****************************************************************************
+** BIF Timer support
+****************************************************************************/
+
+#define BTM_FLG_SL_TIMER (((Uint32) 1) << 0)
+#define BTM_FLG_CANCELED (((Uint32) 1) << 1)
+#define BTM_FLG_HEAD (((Uint32) 1) << 2)
+#define BTM_FLG_BYNAME (((Uint32) 1) << 3)
+#define BTM_FLG_WRAP (((Uint32) 1) << 4)
+
+struct ErtsBifTimer_ {
+ struct {
+ union {
+ ErtsBifTimer **head;
+ ErtsBifTimer *prev;
+ } u;
+ ErtsBifTimer *next;
+ } tab;
+ union {
+ Eterm name;
+ struct {
+ ErtsBifTimer *prev;
+ ErtsBifTimer *next;
+ Process *ess;
+ } proc;
+ } receiver;
+ ErlTimer tm;
+ ErlHeapFragment* bp;
+ Uint32 flags;
+ Eterm message;
+ Uint32 ref_numbers[ERTS_REF_NUMBERS];
+};
+
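+/* Each timer is linked both into the ref-indexed hash table (tab) and,
+   unless BTM_FLG_BYNAME is set, into a doubly linked list rooted in the
+   receiving process (receiver.proc); by-name timers store the registered
+   name in receiver.name and resolve it at timeout. */
+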
+#ifdef SMALL_MEMORY
+#define TIMER_HASH_VEC_SZ 3331
+#define BTM_PREALC_SZ 10
+#else
+#define TIMER_HASH_VEC_SZ 10007
+#define BTM_PREALC_SZ 100
+#endif
+static ErtsBifTimer **bif_timer_tab;
+static Uint no_bif_timers;
+
+
+static erts_smp_rwmtx_t bif_timer_lock;
+
+#define erts_smp_safe_btm_rwlock(P, L) \
+ safe_btm_lock((P), (L), 1)
+#define erts_smp_safe_btm_rlock(P, L) \
+ safe_btm_lock((P), (L), 0)
+#define erts_smp_btm_rwlock() \
+ erts_smp_rwmtx_rwlock(&bif_timer_lock)
+#define erts_smp_btm_tryrwlock() \
+ erts_smp_rwmtx_tryrwlock(&bif_timer_lock)
+#define erts_smp_btm_rwunlock() \
+ erts_smp_rwmtx_rwunlock(&bif_timer_lock)
+#define erts_smp_btm_rlock() \
+ erts_smp_rwmtx_rlock(&bif_timer_lock)
+#define erts_smp_btm_tryrlock() \
+ erts_smp_rwmtx_tryrlock(&bif_timer_lock)
+#define erts_smp_btm_runlock() \
+ erts_smp_rwmtx_runlock(&bif_timer_lock)
+#define erts_smp_btm_lock_init() \
+ erts_smp_rwmtx_init(&bif_timer_lock, "bif_timers")
+
+
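+/* Take the global BIF-timer lock without deadlocking on the process
+   locks we already hold: try-lock first; if that fails, release the
+   process locks, block on the timer lock, and re-take the process
+   locks. A non-zero return means the process started exiting in the
+   meantime and the caller must bail out (see the ERTS_BIF_EXITED
+   checks at the call sites). */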
+static ERTS_INLINE int
+safe_btm_lock(Process *c_p, ErtsProcLocks c_p_locks, int rw_lock)
+{
+ ASSERT(c_p && c_p_locks);
+#ifdef ERTS_SMP
+ if ((rw_lock ? erts_smp_btm_tryrwlock() : erts_smp_btm_tryrlock()) != EBUSY)
+ return 0;
+ erts_smp_proc_unlock(c_p, c_p_locks);
+ if (rw_lock)
+ erts_smp_btm_rwlock();
+ else
+ erts_smp_btm_rlock();
+ erts_smp_proc_lock(c_p, c_p_locks);
+ if (ERTS_PROC_IS_EXITING(c_p)) {
+ if (rw_lock)
+ erts_smp_btm_rwunlock();
+ else
+ erts_smp_btm_runlock();
+ return 1;
+ }
+#endif
+ return 0;
+}
+
+ERTS_SCHED_PREF_PALLOC_IMPL(btm_pre, ErtsBifTimer, BTM_PREALC_SZ)
+
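+/* Hash a ref's numbers into a bucket index. Trailing zero-valued words
+   are dropped first, presumably so that otherwise-equal refs of
+   different lengths hash to the same bucket. */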
+static ERTS_INLINE int
+get_index(Uint32 *ref_numbers, Uint32 len)
+{
+ Uint32 hash;
+    /* len can potentially be larger than ERTS_REF_NUMBERS
+       if the ref has visited another node... */
+ if (len > ERTS_REF_NUMBERS)
+ len = ERTS_REF_NUMBERS;
+
+#if ERTS_REF_NUMBERS != 3
+#error "ERTS_REF_NUMBERS changed. Update me..."
+#endif
+ switch (len) {
+    case 3: if (!ref_numbers[2]) len = 2; /* Fall through */
+    case 2: if (!ref_numbers[1]) len = 1; /* Fall through */
+ default: break;
+ }
+
+ ASSERT(1 <= len && len <= ERTS_REF_NUMBERS);
+
+ hash = block_hash((byte *) ref_numbers, len * sizeof(Uint32), 0x08d12e65);
+ return (int) (hash % ((Uint32) TIMER_HASH_VEC_SZ));
+}
+
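+/* Build an internal ref term at hp from raw ref words. On 64-bit
+   platforms two 32-bit words are packed per Eterm, and the first
+   32-bit data word holds the number count; see the ARCH_64 branch
+   below. */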
+static Eterm
+create_ref(Uint *hp, Uint32 *ref_numbers, Uint32 len)
+{
+ Uint32 *datap;
+ int i;
+
+
+ if (len > ERTS_MAX_REF_NUMBERS) {
+	/* Such large refs should not be able to appear in the emulator */
+ erl_exit(1, "%s:%d: Internal error\n", __FILE__, __LINE__);
+ }
+
+#ifdef ARCH_64
+ hp[0] = make_ref_thing_header(len/2 + 1);
+ datap = (Uint32 *) &hp[1];
+ *(datap++) = len;
+#else
+ hp[0] = make_ref_thing_header(len);
+ datap = (Uint32 *) &hp[1];
+#endif
+
+ for (i = 0; i < len; i++)
+ datap[i] = ref_numbers[i];
+
+ return make_internal_ref(hp);
+}
+
+static int
+eq_non_standard_ref_numbers(Uint32 *rn1, Uint32 len1, Uint32 *rn2, Uint32 len2)
+{
+#ifdef ARCH_64
+#define MAX_REF_HEAP_SZ (1+(ERTS_MAX_REF_NUMBERS/2+1))
+#else
+#define MAX_REF_HEAP_SZ (1+ERTS_MAX_REF_NUMBERS)
+#endif
+ Uint r1_hp[MAX_REF_HEAP_SZ];
+ Uint r2_hp[MAX_REF_HEAP_SZ];
+
+ return eq(create_ref(r1_hp, rn1, len1), create_ref(r2_hp, rn2, len2));
+#undef MAX_REF_HEAP_SZ
+}
+
+static ERTS_INLINE int
+eq_ref_numbers(Uint32 *rn1, Uint32 len1, Uint32 *rn2, Uint32 len2)
+{
+ int res;
+ if (len1 != ERTS_REF_NUMBERS || len2 != ERTS_REF_NUMBERS) {
+	/* Can happen in theory, but never will in practice... */
+ return eq_non_standard_ref_numbers(rn1, len1, rn2, len2);
+ }
+
+#if ERTS_REF_NUMBERS != 3
+#error "ERTS_REF_NUMBERS changed. Update me..."
+#endif
+ res = rn1[0] == rn2[0] && rn1[1] == rn2[1] && rn1[2] == rn2[2];
+
+ ASSERT(res
+ ? eq_non_standard_ref_numbers(rn1, len1, rn2, len2)
+ : !eq_non_standard_ref_numbers(rn1, len1, rn2, len2));
+
+ return res;
+}
+
+static ERTS_INLINE ErtsBifTimer *
+tab_find(Eterm ref)
+{
+ Uint32 *ref_numbers = internal_ref_numbers(ref);
+ Uint32 ref_numbers_len = internal_ref_no_of_numbers(ref);
+ int ix = get_index(ref_numbers, ref_numbers_len);
+ ErtsBifTimer* btm;
+
+ for (btm = bif_timer_tab[ix]; btm; btm = btm->tab.next)
+ if (eq_ref_numbers(ref_numbers, ref_numbers_len,
+ btm->ref_numbers, ERTS_REF_NUMBERS))
+ return btm;
+ return NULL;
+}
+
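+/* Hash chains are doubly linked without a separate list head: the
+   first element of a bucket has BTM_FLG_HEAD set and keeps a pointer
+   to the bucket slot itself in tab.u.head, while later elements keep
+   their predecessor in tab.u.prev. tab_remove below handles both
+   cases. */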
+static ERTS_INLINE void
+tab_remove(ErtsBifTimer* btm)
+{
+ if (btm->flags & BTM_FLG_HEAD) {
+ *btm->tab.u.head = btm->tab.next;
+ if (btm->tab.next) {
+ btm->tab.next->flags |= BTM_FLG_HEAD;
+ btm->tab.next->tab.u.head = btm->tab.u.head;
+ }
+ }
+ else {
+ btm->tab.u.prev->tab.next = btm->tab.next;
+ if (btm->tab.next)
+ btm->tab.next->tab.u.prev = btm->tab.u.prev;
+ }
+ btm->flags |= BTM_FLG_CANCELED;
+ ASSERT(no_bif_timers > 0);
+ no_bif_timers--;
+}
+
+static ERTS_INLINE void
+tab_insert(ErtsBifTimer* btm)
+{
+ int ix = get_index(btm->ref_numbers, ERTS_REF_NUMBERS);
+ ErtsBifTimer* btm_list = bif_timer_tab[ix];
+
+ if (btm_list) {
+ btm_list->flags &= ~BTM_FLG_HEAD;
+ btm_list->tab.u.prev = btm;
+ }
+
+ btm->flags |= BTM_FLG_HEAD;
+ btm->tab.u.head = &bif_timer_tab[ix];
+ btm->tab.next = btm_list;
+ bif_timer_tab[ix] = btm;
+ no_bif_timers++;
+}
+
+static ERTS_INLINE void
+link_proc(Process *p, ErtsBifTimer* btm)
+{
+ btm->receiver.proc.ess = p;
+ btm->receiver.proc.prev = NULL;
+ btm->receiver.proc.next = p->bif_timers;
+ if (p->bif_timers)
+ p->bif_timers->receiver.proc.prev = btm;
+ p->bif_timers = btm;
+}
+
+static ERTS_INLINE void
+unlink_proc(ErtsBifTimer* btm)
+{
+ if (btm->receiver.proc.prev)
+ btm->receiver.proc.prev->receiver.proc.next = btm->receiver.proc.next;
+ else
+ btm->receiver.proc.ess->bif_timers = btm->receiver.proc.next;
+ if (btm->receiver.proc.next)
+ btm->receiver.proc.next->receiver.proc.prev = btm->receiver.proc.prev;
+}
+
+static void
+bif_timer_cleanup(ErtsBifTimer* btm)
+{
+ ASSERT(btm);
+
+ if (btm->bp)
+ free_message_buffer(btm->bp);
+
+ if (!btm_pre_free(btm)) {
+ if (btm->flags & BTM_FLG_SL_TIMER)
+ erts_free(ERTS_ALC_T_SL_BIF_TIMER, (void *) btm);
+ else
+ erts_free(ERTS_ALC_T_LL_BIF_TIMER, (void *) btm);
+ }
+}
+
+static void
+bif_timer_timeout(ErtsBifTimer* btm)
+{
+ ASSERT(btm);
+
+
+ erts_smp_btm_rwlock();
+
+ if (btm->flags & BTM_FLG_CANCELED) {
+ /*
+	 * A concurrent cancel is ongoing. Do not send the timeout message,
+	 * but clean up here, since the cancel callback won't be called.
+ */
+#ifndef ERTS_SMP
+ ASSERT(0);
+#endif
+ }
+ else {
+ ErtsProcLocks rp_locks = 0;
+ Process* rp;
+
+ tab_remove(btm);
+
+ ASSERT(!erts_get_current_process());
+
+ if (btm->flags & BTM_FLG_BYNAME)
+ rp = erts_whereis_process(NULL,0,btm->receiver.name,0,ERTS_P2P_FLG_SMP_INC_REFC);
+ else {
+ rp = btm->receiver.proc.ess;
+ erts_smp_proc_inc_refc(rp);
+ unlink_proc(btm);
+ }
+
+ if (rp) {
+ Eterm message;
+ ErlHeapFragment *bp;
+
+ bp = btm->bp;
+ btm->bp = NULL; /* Prevent cleanup of message buffer... */
+
+ if (!(btm->flags & BTM_FLG_WRAP))
+ message = btm->message;
+ else {
+#if ERTS_REF_NUMBERS != 3
+#error "ERTS_REF_NUMBERS changed. Update me..."
+#endif
+ Eterm ref;
+ Uint *hp;
+ Uint wrap_size = REF_THING_SIZE + 4;
+ message = btm->message;
+
+ if (!bp) {
+ ErlOffHeap *ohp;
+ ASSERT(is_immed(message));
+ hp = erts_alloc_message_heap(wrap_size,
+ &bp,
+ &ohp,
+ rp,
+ &rp_locks);
+ } else {
+ Eterm old_size = bp->size;
+ bp = erts_resize_message_buffer(bp, old_size + wrap_size,
+ &message, 1);
+ hp = &bp->mem[0] + old_size;
+ }
+
+ write_ref_thing(hp,
+ btm->ref_numbers[0],
+ btm->ref_numbers[1],
+ btm->ref_numbers[2]);
+ ref = make_internal_ref(hp);
+ hp += REF_THING_SIZE;
+ message = TUPLE3(hp, am_timeout, ref, message);
+ }
+
+ erts_queue_message(rp, &rp_locks, bp, message, NIL);
+ erts_smp_proc_unlock(rp, rp_locks);
+ erts_smp_proc_dec_refc(rp);
+ }
+ }
+
+ erts_smp_btm_rwunlock();
+
+ bif_timer_cleanup(btm);
+}
+
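+/* Common implementation of send_after/3 and start_timer/3; the caller
+   holds the BIF-timer write lock. With BTM_FLG_WRAP set (start_timer),
+   the timeout handler above wraps the message as {timeout, Ref, Msg}.
+   Returns THE_NON_VALUE on bad arguments. */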
+static Eterm
+setup_bif_timer(Uint32 xflags,
+ Process *c_p,
+ Eterm time,
+ Eterm receiver,
+ Eterm message)
+{
+ Process *rp;
+ ErtsBifTimer* btm;
+ Uint timeout;
+ Eterm ref;
+ Uint32 *ref_numbers;
+
+ if (!term_to_Uint(time, &timeout))
+ return THE_NON_VALUE;
+#ifdef ARCH_64
+ if ((timeout >> 32) != 0)
+ return THE_NON_VALUE;
+#endif
+ if (is_not_internal_pid(receiver) && is_not_atom(receiver))
+ return THE_NON_VALUE;
+
+ ref = erts_make_ref(c_p);
+
+ if (is_atom(receiver))
+ rp = NULL;
+ else {
+ rp = erts_pid2proc(c_p, ERTS_PROC_LOCK_MAIN,
+ receiver, ERTS_PROC_LOCK_MSGQ);
+ if (!rp)
+ return ref;
+ }
+
+ if (timeout < ERTS_ALC_MIN_LONG_LIVED_TIME) {
+ if (timeout < 1000) {
+ btm = btm_pre_alloc();
+ if (!btm)
+ goto sl_timer_alloc;
+ btm->flags = 0;
+ }
+ else {
+ sl_timer_alloc:
+ btm = (ErtsBifTimer *) erts_alloc(ERTS_ALC_T_SL_BIF_TIMER,
+ sizeof(ErtsBifTimer));
+ btm->flags = BTM_FLG_SL_TIMER;
+ }
+ }
+ else {
+ btm = (ErtsBifTimer *) erts_alloc(ERTS_ALC_T_LL_BIF_TIMER,
+ sizeof(ErtsBifTimer));
+ btm->flags = 0;
+ }
+
+ if (rp) {
+ link_proc(rp, btm);
+ erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_MSGQ);
+ }
+ else {
+ ASSERT(is_atom(receiver));
+ btm->receiver.name = receiver;
+ btm->flags |= BTM_FLG_BYNAME;
+ }
+
+ btm->flags |= xflags;
+
+ ref_numbers = internal_ref_numbers(ref);
+ ASSERT(internal_ref_no_of_numbers(ref) == 3);
+#if ERTS_REF_NUMBERS != 3
+#error "ERTS_REF_NUMBERS changed. Update me..."
+#endif
+ btm->ref_numbers[0] = ref_numbers[0];
+ btm->ref_numbers[1] = ref_numbers[1];
+ btm->ref_numbers[2] = ref_numbers[2];
+
+ ASSERT(eq_ref_numbers(btm->ref_numbers, ERTS_REF_NUMBERS,
+ ref_numbers, ERTS_REF_NUMBERS));
+
+ if (is_immed(message)) {
+ btm->bp = NULL;
+ btm->message = message;
+ }
+ else {
+ ErlHeapFragment* bp;
+ Eterm* hp;
+ Uint size;
+
+ size = size_object(message);
+ btm->bp = bp = new_message_buffer(size);
+ hp = bp->mem;
+ btm->message = copy_struct(message, size, &hp, &bp->off_heap);
+ }
+
+ tab_insert(btm);
+ ASSERT(btm == tab_find(ref));
+    btm->tm.active = 0; /* MUST be initialized */
+ erl_set_timer(&btm->tm,
+ (ErlTimeoutProc) bif_timer_timeout,
+ (ErlCancelProc) bif_timer_cleanup,
+ (void *) btm,
+ timeout);
+ return ref;
+}
+
+/* send_after(Time, Pid, Message) -> Ref */
+BIF_RETTYPE send_after_3(BIF_ALIST_3)
+{
+ Eterm res;
+
+ if (erts_smp_safe_btm_rwlock(BIF_P, ERTS_PROC_LOCK_MAIN))
+ ERTS_BIF_EXITED(BIF_P);
+
+ res = setup_bif_timer(0, BIF_P, BIF_ARG_1, BIF_ARG_2, BIF_ARG_3);
+
+ erts_smp_btm_rwunlock();
+
+ if (is_non_value(res)) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+ else {
+ ASSERT(is_internal_ref(res));
+ BIF_RET(res);
+ }
+}
+
+/* start_timer(Time, Pid, Message) -> Ref */
+BIF_RETTYPE start_timer_3(BIF_ALIST_3)
+{
+ Eterm res;
+
+ if (erts_smp_safe_btm_rwlock(BIF_P, ERTS_PROC_LOCK_MAIN))
+ ERTS_BIF_EXITED(BIF_P);
+
+ res = setup_bif_timer(BTM_FLG_WRAP, BIF_P, BIF_ARG_1, BIF_ARG_2, BIF_ARG_3);
+
+ erts_smp_btm_rwunlock();
+
+ if (is_non_value(res)) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+ else {
+ ASSERT(is_internal_ref(res));
+ BIF_RET(res);
+ }
+}
+
+/* cancel_timer(Ref) -> false | RemainingTime */
+BIF_RETTYPE cancel_timer_1(BIF_ALIST_1)
+{
+ Eterm res;
+ ErtsBifTimer *btm;
+
+ if (is_not_internal_ref(BIF_ARG_1)) {
+ if (is_ref(BIF_ARG_1)) {
+ BIF_RET(am_false);
+ }
+ BIF_ERROR(BIF_P, BADARG);
+ }
+
+ if (erts_smp_safe_btm_rwlock(BIF_P, ERTS_PROC_LOCK_MAIN))
+ ERTS_BIF_EXITED(BIF_P);
+
+ btm = tab_find(BIF_ARG_1);
+ if (!btm || btm->flags & BTM_FLG_CANCELED) {
+ erts_smp_btm_rwunlock();
+ res = am_false;
+ }
+ else {
+ Uint left = time_left(&btm->tm);
+ if (!(btm->flags & BTM_FLG_BYNAME)) {
+ erts_smp_proc_lock(btm->receiver.proc.ess, ERTS_PROC_LOCK_MSGQ);
+ unlink_proc(btm);
+ erts_smp_proc_unlock(btm->receiver.proc.ess, ERTS_PROC_LOCK_MSGQ);
+ }
+ tab_remove(btm);
+ ASSERT(!tab_find(BIF_ARG_1));
+ erl_cancel_timer(&btm->tm);
+ erts_smp_btm_rwunlock();
+ res = erts_make_integer(left, BIF_P);
+ }
+
+ BIF_RET(res);
+}
+
+/* read_timer(Ref) -> false | RemainingTime */
+BIF_RETTYPE read_timer_1(BIF_ALIST_1)
+{
+ Eterm res;
+ ErtsBifTimer *btm;
+
+ if (is_not_internal_ref(BIF_ARG_1)) {
+ if (is_ref(BIF_ARG_1)) {
+ BIF_RET(am_false);
+ }
+ BIF_ERROR(BIF_P, BADARG);
+ }
+
+ if (erts_smp_safe_btm_rlock(BIF_P, ERTS_PROC_LOCK_MAIN))
+ ERTS_BIF_EXITED(BIF_P);
+
+ btm = tab_find(BIF_ARG_1);
+ if (!btm || btm->flags & BTM_FLG_CANCELED) {
+ res = am_false;
+ }
+ else {
+ Uint left = time_left(&btm->tm);
+ res = erts_make_integer(left, BIF_P);
+ }
+
+ erts_smp_btm_runlock();
+
+ BIF_RET(res);
+}
+
+void
+erts_print_bif_timer_info(int to, void *to_arg)
+{
+ int i;
+ int lock = !ERTS_IS_CRASH_DUMPING;
+
+ if (lock)
+ erts_smp_btm_rlock();
+
+ for (i = 0; i < TIMER_HASH_VEC_SZ; i++) {
+ ErtsBifTimer *btm;
+ for (btm = bif_timer_tab[i]; btm; btm = btm->tab.next) {
+ Eterm receiver = (btm->flags & BTM_FLG_BYNAME
+ ? btm->receiver.name
+ : btm->receiver.proc.ess->id);
+ erts_print(to, to_arg, "=timer:%T\n", receiver);
+ erts_print(to, to_arg, "Message: %T\n", btm->message);
+ erts_print(to, to_arg, "Time left: %d ms\n", time_left(&btm->tm));
+ }
+ }
+
+ if (lock)
+ erts_smp_btm_runlock();
+}
+
+
+void
+erts_cancel_bif_timers(Process *p, ErtsProcLocks plocks)
+{
+ ErtsBifTimer *btm;
+
+ if (erts_smp_btm_tryrwlock() == EBUSY) {
+ erts_smp_proc_unlock(p, plocks);
+ erts_smp_btm_rwlock();
+ erts_smp_proc_lock(p, plocks);
+ }
+
+ btm = p->bif_timers;
+ while (btm) {
+ ErtsBifTimer *tmp_btm;
+ ASSERT(!(btm->flags & BTM_FLG_CANCELED));
+ tab_remove(btm);
+ tmp_btm = btm;
+ btm = btm->receiver.proc.next;
+ erl_cancel_timer(&tmp_btm->tm);
+ }
+
+ p->bif_timers = NULL;
+
+ erts_smp_btm_rwunlock();
+}
+
+void erts_bif_timer_init(void)
+{
+ int i;
+ no_bif_timers = 0;
+ init_btm_pre_alloc();
+ erts_smp_btm_lock_init();
+ bif_timer_tab = erts_alloc(ERTS_ALC_T_BIF_TIMER_TABLE,
+ sizeof(ErtsBifTimer *)*TIMER_HASH_VEC_SZ);
+ for (i = 0; i < TIMER_HASH_VEC_SZ; ++i)
+ bif_timer_tab[i] = NULL;
+}
+
+Uint
+erts_bif_timer_memory_size(void)
+{
+ Uint res;
+ int lock = !ERTS_IS_CRASH_DUMPING;
+
+ if (lock)
+ erts_smp_btm_rlock();
+
+ res = (sizeof(ErtsBifTimer *)*TIMER_HASH_VEC_SZ
+ + no_bif_timers*sizeof(ErtsBifTimer));
+
+ if (lock)
+ erts_smp_btm_runlock();
+
+ return res;
+}
+
+
+void
+erts_bif_timer_foreach(void (*func)(Eterm, Eterm, ErlHeapFragment *, void *),
+ void *arg)
+{
+ int i;
+
+ ERTS_SMP_LC_ASSERT(erts_smp_is_system_blocked(0));
+
+ for (i = 0; i < TIMER_HASH_VEC_SZ; i++) {
+ ErtsBifTimer *btm;
+ for (btm = bif_timer_tab[i]; btm; btm = btm->tab.next) {
+ (*func)((btm->flags & BTM_FLG_BYNAME
+ ? btm->receiver.name
+ : btm->receiver.proc.ess->id),
+ btm->message,
+ btm->bp,
+ arg);
+ }
+ }
+}
diff --git a/erts/emulator/beam/erl_bif_timer.h b/erts/emulator/beam/erl_bif_timer.h
new file mode 100644
index 0000000000..1197c176f5
--- /dev/null
+++ b/erts/emulator/beam/erl_bif_timer.h
@@ -0,0 +1,36 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2005-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+
+#ifndef ERL_BIF_TIMER_H__
+#define ERL_BIF_TIMER_H__
+
+typedef struct ErtsBifTimer_ ErtsBifTimer;
+
+#include "sys.h"
+#include "erl_process.h"
+#include "erl_message.h"
+
+Uint erts_bif_timer_memory_size(void);
+void erts_print_bif_timer_info(int to, void *to_arg);
+void erts_cancel_bif_timers(Process *p, ErtsProcLocks plocks);
+void erts_bif_timer_init(void);
+void erts_bif_timer_foreach(void (*func)(Eterm,Eterm,ErlHeapFragment *,void *),
+ void *arg);
+#endif
diff --git a/erts/emulator/beam/erl_bif_trace.c b/erts/emulator/beam/erl_bif_trace.c
new file mode 100644
index 0000000000..7dff5e0eeb
--- /dev/null
+++ b/erts/emulator/beam/erl_bif_trace.c
@@ -0,0 +1,2106 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 1999-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Trace BIFs.
+ */
+
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+
+#include "sys.h"
+#include "erl_vm.h"
+#include "global.h"
+#include "erl_process.h"
+#include "error.h"
+#include "erl_driver.h"
+#include "bif.h"
+#include "big.h"
+#include "dist.h"
+#include "erl_version.h"
+#include "beam_bp.h"
+#include "erl_binary.h"
+
+#define DECL_AM(S) Eterm AM_ ## S = am_atom_put(#S, sizeof(#S) - 1)
+
+static erts_smp_mtx_t trace_pattern_mutex;
+const struct trace_pattern_flags erts_trace_pattern_flags_off = {0, 0, 0, 0};
+static int erts_default_trace_pattern_is_on;
+static Binary *erts_default_match_spec;
+static Binary *erts_default_meta_match_spec;
+static struct trace_pattern_flags erts_default_trace_pattern_flags;
+static Eterm erts_default_meta_tracer_pid;
+
+static void new_seq_trace_token(Process* p); /* help func for seq_trace_2*/
+static int already_traced(Process *p, Process *tracee_p, Eterm tracer);
+static int port_already_traced(Process *p, Port *tracee_port, Eterm tracer);
+static Eterm trace_info_pid(Process* p, Eterm pid_spec, Eterm key);
+static Eterm trace_info_func(Process* p, Eterm pid_spec, Eterm key);
+static Eterm trace_info_on_load(Process* p, Eterm key);
+
+static int setup_func_trace(Export* ep, void* match_prog);
+static int reset_func_trace(Export* ep);
+static void reset_bif_trace(int bif_index);
+static void setup_bif_trace(int bif_index);
+static void set_trace_bif(int bif_index, void* match_prog);
+static void clear_trace_bif(int bif_index);
+
+void
+erts_bif_trace_init(void)
+{
+ erts_smp_mtx_init(&trace_pattern_mutex, "trace_pattern");
+ erts_default_trace_pattern_is_on = 0;
+ erts_default_match_spec = NULL;
+ erts_default_meta_match_spec = NULL;
+ erts_default_trace_pattern_flags = erts_trace_pattern_flags_off;
+ erts_default_meta_tracer_pid = NIL;
+}
+
+/*
+ * Turn on/off call tracing for the given function(s).
+ */
+
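+/* Pattern is false (off), true or [] (on, no match spec), the atoms
+   restart or pause (which act on call_count breakpoints), or a match
+   specification, which is compiled below. The flag list selects
+   global, local, meta (optionally {meta, Tracer}) or call_count
+   tracing; MFA is {M,F,A} with '_' wildcards from the right, or the
+   atom on_load for the default pattern. Returns the number of matched
+   functions. */
+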
+Eterm
+trace_pattern_2(Process* p, Eterm MFA, Eterm Pattern)
+{
+ return trace_pattern_3(p,MFA,Pattern,NIL);
+}
+
+Eterm
+trace_pattern_3(Process* p, Eterm MFA, Eterm Pattern, Eterm flaglist)
+{
+ Eterm mfa[3];
+ int i;
+ int matches = 0;
+ int specified = 0;
+ enum erts_break_op on;
+ Binary* match_prog_set;
+ Eterm l;
+ struct trace_pattern_flags flags = erts_trace_pattern_flags_off;
+ int is_global;
+ Process *meta_tracer_proc = p;
+ Eterm meta_tracer_pid = p->id;
+
+ erts_smp_proc_unlock(p, ERTS_PROC_LOCK_MAIN);
+ erts_smp_block_system(0);
+
+ /*
+ * Check and compile the match specification.
+ */
+
+ if (Pattern == am_false) {
+ match_prog_set = NULL;
+ on = 0;
+ } else if (is_nil(Pattern) || Pattern == am_true) {
+ match_prog_set = NULL;
+ on = 1;
+ } else if (Pattern == am_restart) {
+ match_prog_set = NULL;
+ on = erts_break_reset;
+ } else if (Pattern == am_pause) {
+ match_prog_set = NULL;
+ on = erts_break_stop;
+ } else if ((match_prog_set = erts_match_set_compile(p, Pattern)) != NULL) {
+ MatchSetRef(match_prog_set);
+ on = 1;
+ } else{
+ goto error;
+ }
+
+ is_global = 0;
+ for(l = flaglist; is_list(l); l = CDR(list_val(l))) {
+ if (is_tuple(CAR(list_val(l)))) {
+ Eterm *tp = tuple_val(CAR(list_val(l)));
+
+ if (arityval(tp[0]) != 2 || tp[1] != am_meta) {
+ goto error;
+ }
+ meta_tracer_pid = tp[2];
+ if (is_internal_pid(meta_tracer_pid)) {
+ meta_tracer_proc = erts_pid2proc(NULL, 0, meta_tracer_pid, 0);
+ if (!meta_tracer_proc) {
+ goto error;
+ }
+ } else if (is_internal_port(meta_tracer_pid)) {
+ Port *meta_tracer_port;
+ meta_tracer_proc = NULL;
+ if (internal_port_index(meta_tracer_pid) >= erts_max_ports)
+ goto error;
+ meta_tracer_port =
+ &erts_port[internal_port_index(meta_tracer_pid)];
+ if (INVALID_TRACER_PORT(meta_tracer_port, meta_tracer_pid)) {
+ goto error;
+ }
+ } else {
+ goto error;
+ }
+ if (is_global) {
+ goto error;
+ }
+ flags.breakpoint = 1;
+ flags.meta = 1;
+ } else {
+ switch (CAR(list_val(l))) {
+ case am_local:
+ if (is_global) {
+ goto error;
+ }
+ flags.breakpoint = 1;
+ flags.local = 1;
+ break;
+ case am_meta:
+ if (is_global) {
+ goto error;
+ }
+ flags.breakpoint = 1;
+ flags.meta = 1;
+ break;
+ case am_global:
+ if (flags.breakpoint) {
+ goto error;
+ }
+ is_global = !0;
+ break;
+ case am_call_count:
+ if (is_global) {
+ goto error;
+ }
+ flags.breakpoint = 1;
+ flags.call_count = 1;
+ break;
+ default:
+ goto error;
+ }
+ }
+ }
+ if (l != NIL) {
+ goto error;
+ }
+
+ if (match_prog_set && !flags.local && !flags.meta && flags.call_count) {
+ /* A match prog is not allowed with just call_count */
+ goto error;
+ }
+
+ /*
+ * Check the MFA specification.
+ */
+
+ if (MFA == am_on_load) {
+ if (flags.local || (! flags.breakpoint)) {
+ MatchSetUnref(erts_default_match_spec);
+ erts_default_match_spec = match_prog_set;
+ MatchSetRef(erts_default_match_spec);
+ }
+ if (flags.meta) {
+ MatchSetUnref(erts_default_meta_match_spec);
+ erts_default_meta_match_spec = match_prog_set;
+ MatchSetRef(erts_default_meta_match_spec);
+ erts_default_meta_tracer_pid = meta_tracer_pid;
+ if (meta_tracer_proc) {
+ meta_tracer_proc->trace_flags |= F_TRACER;
+ }
+ } else if (! flags.breakpoint) {
+ MatchSetUnref(erts_default_meta_match_spec);
+ erts_default_meta_match_spec = NULL;
+ erts_default_meta_tracer_pid = NIL;
+ }
+ MatchSetUnref(match_prog_set);
+ if (erts_default_trace_pattern_flags.breakpoint &&
+ flags.breakpoint) {
+ /* Breakpoint trace -> breakpoint trace */
+ ASSERT(erts_default_trace_pattern_is_on);
+ if (on) {
+ erts_default_trace_pattern_flags.local
+ |= flags.local;
+ erts_default_trace_pattern_flags.meta
+ |= flags.meta;
+ erts_default_trace_pattern_flags.call_count
+ |= (on == 1) ? flags.call_count : 0;
+ } else {
+ erts_default_trace_pattern_flags.local
+ &= ~flags.local;
+ erts_default_trace_pattern_flags.meta
+ &= ~flags.meta;
+ erts_default_trace_pattern_flags.call_count
+ &= ~flags.call_count;
+ if (! (erts_default_trace_pattern_flags.breakpoint =
+ erts_default_trace_pattern_flags.local |
+ erts_default_trace_pattern_flags.meta |
+ erts_default_trace_pattern_flags.call_count)) {
+		erts_default_trace_pattern_is_on = !!on; /* i.e. off */
+ }
+ }
+ } else if (! erts_default_trace_pattern_flags.breakpoint &&
+ ! flags.breakpoint) {
+ /* Global call trace -> global call trace */
+ erts_default_trace_pattern_is_on = !!on;
+ } else if (erts_default_trace_pattern_flags.breakpoint &&
+ ! flags.breakpoint) {
+ /* Breakpoint trace -> global call trace */
+ if (on) {
+ erts_default_trace_pattern_flags = flags; /* Struct copy */
+ erts_default_trace_pattern_is_on = !!on;
+ }
+ } else {
+ ASSERT(! erts_default_trace_pattern_flags.breakpoint &&
+ flags.breakpoint);
+ /* Global call trace -> breakpoint trace */
+ if (on) {
+ if (on != 1) {
+ flags.call_count = 0;
+ }
+ flags.breakpoint = flags.local | flags.meta | flags.call_count;
+ erts_default_trace_pattern_flags = flags; /* Struct copy */
+ erts_default_trace_pattern_is_on = !!flags.breakpoint;
+ }
+ }
+
+ goto done;
+ } else if (is_tuple(MFA)) {
+ Eterm *tp = tuple_val(MFA);
+ if (tp[0] != make_arityval(3)) {
+ goto error;
+ }
+ mfa[0] = tp[1];
+ mfa[1] = tp[2];
+ mfa[2] = tp[3];
+ if (!is_atom(mfa[0]) || !is_atom(mfa[1]) ||
+ (!is_small(mfa[2]) && mfa[2] != am_Underscore)) {
+ goto error;
+ }
+ for (i = 0; i < 3 && mfa[i] != am_Underscore; i++, specified++) {
+ /* Empty loop body */
+ }
+ for (i = specified; i < 3; i++) {
+ if (mfa[i] != am_Underscore) {
+ goto error;
+ }
+ }
+ if (is_small(mfa[2])) {
+ mfa[2] = signed_val(mfa[2]);
+ }
+ } else {
+ goto error;
+ }
+
+ if (meta_tracer_proc) {
+ meta_tracer_proc->trace_flags |= F_TRACER;
+ }
+
+
+ matches = erts_set_trace_pattern(mfa, specified,
+ match_prog_set, match_prog_set,
+ on, flags, meta_tracer_pid);
+ MatchSetUnref(match_prog_set);
+
+ done:
+
+ erts_smp_release_system();
+ erts_smp_proc_lock(p, ERTS_PROC_LOCK_MAIN);
+
+ return make_small(matches);
+
+ error:
+
+ MatchSetUnref(match_prog_set);
+
+ erts_smp_release_system();
+ erts_smp_proc_lock(p, ERTS_PROC_LOCK_MAIN);
+ BIF_ERROR(p, BADARG);
+}
+
+void
+erts_get_default_trace_pattern(int *trace_pattern_is_on,
+ Binary **match_spec,
+ Binary **meta_match_spec,
+ struct trace_pattern_flags *trace_pattern_flags,
+ Eterm *meta_tracer_pid)
+{
+ erts_smp_mtx_lock(&trace_pattern_mutex);
+ if (trace_pattern_is_on)
+ *trace_pattern_is_on = erts_default_trace_pattern_is_on;
+ if (match_spec)
+ *match_spec = erts_default_match_spec;
+ if (meta_match_spec)
+ *meta_match_spec = erts_default_meta_match_spec;
+ if (trace_pattern_flags)
+ *trace_pattern_flags = erts_default_trace_pattern_flags;
+ if (meta_tracer_pid)
+ *meta_tracer_pid = erts_default_meta_tracer_pid;
+ erts_smp_mtx_unlock(&trace_pattern_mutex);
+}
+
+
+
+
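+/* Map a trace flag atom to its flag bit; 'all' yields the full set of
+   tracee flags, and unknown atoms yield 0. */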
+Uint
+erts_trace_flag2bit(Eterm flag)
+{
+ switch (flag) {
+ case am_all: return TRACEE_FLAGS;
+ case am_send: return F_TRACE_SEND;
+ case am_receive: return F_TRACE_RECEIVE;
+ case am_set_on_spawn: return F_TRACE_SOS;
+ case am_procs: return F_TRACE_PROCS;
+ case am_set_on_first_spawn: return F_TRACE_SOS1;
+ case am_set_on_link: return F_TRACE_SOL;
+ case am_set_on_first_link: return F_TRACE_SOL1;
+ case am_timestamp: return F_TIMESTAMP;
+ case am_running: return F_TRACE_SCHED;
+ case am_exiting: return F_TRACE_SCHED_EXIT;
+ case am_garbage_collection: return F_TRACE_GC;
+ case am_call: return F_TRACE_CALLS;
+ case am_arity: return F_TRACE_ARITY_ONLY;
+ case am_return_to: return F_TRACE_RETURN_TO;
+ case am_silent: return F_TRACE_SILENT;
+ case am_scheduler_id: return F_TRACE_SCHED_NO;
+ case am_running_ports: return F_TRACE_SCHED_PORTS;
+ case am_running_procs: return F_TRACE_SCHED_PROCS;
+ case am_ports: return F_TRACE_PORTS;
+ default: return 0;
+ }
+}
+
+/* Scan the argument list and sort out the trace flags.
+**
+** Returns !0 on success, 0 on failure.
+**
+** Sets the result variables on success, if their flags have
+** occurred in the argument list.
+*/
+int
+erts_trace_flags(Eterm List,
+ Uint *pMask, Eterm *pTracer, int *pCpuTimestamp)
+{
+ Eterm list = List;
+ Uint mask = 0;
+ Eterm tracer = NIL;
+ int cpu_timestamp = 0;
+
+ while (is_list(list)) {
+ Uint bit;
+ Eterm item = CAR(list_val(list));
+ if (is_atom(item) && (bit = erts_trace_flag2bit(item))) {
+ mask |= bit;
+#ifdef HAVE_ERTS_NOW_CPU
+ } else if (item == am_cpu_timestamp) {
+ cpu_timestamp = !0;
+#endif
+ } else if (is_tuple(item)) {
+ Eterm* tp = tuple_val(item);
+
+ if (arityval(tp[0]) != 2 || tp[1] != am_tracer) goto error;
+ if (is_internal_pid(tp[2]) || is_internal_port(tp[2])) {
+ tracer = tp[2];
+ } else goto error;
+ } else goto error;
+ list = CDR(list_val(list));
+ }
+ if (is_not_nil(list)) goto error;
+
+ if (pMask && mask) *pMask = mask;
+ if (pTracer && tracer != NIL) *pTracer = tracer;
+ if (pCpuTimestamp && cpu_timestamp) *pCpuTimestamp = cpu_timestamp;
+ return !0;
+ error:
+ return 0;
+}
+
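+/* trace(PidSpec, How, FlagList) -> MatchCount
+ *
+ * PidSpec is a pid, a port, or one of the atoms all, new and existing;
+ * How is true (set) or false (clear); FlagList is parsed by
+ * erts_trace_flags() above. When tracing all/existing tracees, port
+ * matches are not counted (see below).
+ */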
+Eterm
+trace_3(Process* p, Eterm pid_spec, Eterm how, Eterm list)
+{
+ int on;
+ Eterm tracer = NIL;
+ int matches = 0;
+ Uint mask = 0;
+ int cpu_ts = 0;
+#ifdef ERTS_SMP
+ int system_blocked = 0;
+#endif
+
+ if (! erts_trace_flags(list, &mask, &tracer, &cpu_ts)) {
+ BIF_ERROR(p, BADARG);
+ }
+
+ if (is_nil(tracer) || is_internal_pid(tracer)) {
+ Process *tracer_proc = erts_pid2proc(p,
+ ERTS_PROC_LOCK_MAIN,
+ is_nil(tracer) ? p->id : tracer,
+ ERTS_PROC_LOCKS_ALL);
+ if (!tracer_proc)
+ goto error;
+ tracer_proc->trace_flags |= F_TRACER;
+ erts_smp_proc_unlock(tracer_proc,
+ (tracer_proc == p
+ ? ERTS_PROC_LOCKS_ALL_MINOR
+ : ERTS_PROC_LOCKS_ALL));
+ } else if (is_internal_port(tracer)) {
+ Port *tracer_port = erts_id2port(tracer, p, ERTS_PROC_LOCK_MAIN);
+ if (!erts_is_valid_tracer_port(tracer)) {
+ if (tracer_port)
+ erts_smp_port_unlock(tracer_port);
+ goto error;
+ }
+ tracer_port->trace_flags |= F_TRACER;
+ erts_smp_port_unlock(tracer_port);
+ } else
+ goto error;
+
+ switch (how) {
+ case am_false:
+ on = 0;
+ break;
+ case am_true:
+ on = 1;
+ if (is_nil(tracer))
+ tracer = p->id;
+ break;
+ default:
+ goto error;
+ }
+
+ /*
+ * Set/reset the call trace flag for the given Pids.
+ */
+
+ if (is_port(pid_spec)) {
+ Port *tracee_port;
+
+#ifdef HAVE_ERTS_NOW_CPU
+ if (cpu_ts) {
+ goto error;
+ }
+#endif
+
+ if (pid_spec == tracer)
+ goto error;
+
+ tracee_port = erts_id2port(pid_spec, p, ERTS_PROC_LOCK_MAIN);
+ if (!tracee_port)
+ goto error;
+
+ if (tracer != NIL && port_already_traced(p, tracee_port, tracer)) {
+ erts_smp_port_unlock(tracee_port);
+ goto already_traced;
+ }
+
+ if (on)
+ tracee_port->trace_flags |= mask;
+ else
+ tracee_port->trace_flags &= ~mask;
+
+ if (!tracee_port->trace_flags)
+ tracee_port->tracer_proc = NIL;
+ else if (tracer != NIL)
+ tracee_port->tracer_proc = tracer;
+
+ erts_smp_port_unlock(tracee_port);
+
+ matches = 1;
+ } else if (is_pid(pid_spec)) {
+ Process *tracee_p;
+
+#ifdef HAVE_ERTS_NOW_CPU
+ if (cpu_ts) {
+ goto error;
+ }
+#endif
+	/* Check that the tracee is not dead, not already traced,
+	 * and not about to be traced.
+ */
+
+ if (pid_spec == tracer)
+ goto error;
+
+ tracee_p = erts_pid2proc(p, ERTS_PROC_LOCK_MAIN,
+ pid_spec, ERTS_PROC_LOCKS_ALL);
+ if (!tracee_p)
+ goto error;
+
+ if (tracer != NIL && already_traced(p, tracee_p, tracer)) {
+ erts_smp_proc_unlock(tracee_p,
+ (tracee_p == p
+ ? ERTS_PROC_LOCKS_ALL_MINOR
+ : ERTS_PROC_LOCKS_ALL));
+ goto already_traced;
+ }
+
+ if (on)
+ tracee_p->trace_flags |= mask;
+ else
+ tracee_p->trace_flags &= ~mask;
+
+ if ((tracee_p->trace_flags & TRACEE_FLAGS) == 0)
+ tracee_p->tracer_proc = NIL;
+ else if (tracer != NIL)
+ tracee_p->tracer_proc = tracer;
+
+ erts_smp_proc_unlock(tracee_p,
+ (tracee_p == p
+ ? ERTS_PROC_LOCKS_ALL_MINOR
+ : ERTS_PROC_LOCKS_ALL));
+
+ matches = 1;
+ } else {
+ int ok = 0;
+
+#ifdef HAVE_ERTS_NOW_CPU
+ if (cpu_ts) {
+ if (pid_spec == am_all) {
+ if (on) {
+ if (!erts_cpu_timestamp) {
+#ifdef HAVE_CLOCK_GETTIME
+		    /*
+		      clock_gettime may have been detected during configure
+		      on a different machine than this one. Verify that it
+		      works here and now, so we need not check its return
+		      value for errors later.
+		    */
+ {
+ SysCpuTime start, stop;
+ SysTimespec tp;
+ int i;
+
+ if (sys_get_proc_cputime(start, tp) < 0)
+ goto error;
+ start = ((SysCpuTime)tp.tv_sec * 1000000000LL) +
+ (SysCpuTime)tp.tv_nsec;
+ for (i = 0; i < 100; i++)
+ sys_get_proc_cputime(stop, tp);
+ stop = ((SysCpuTime)tp.tv_sec * 1000000000LL) +
+ (SysCpuTime)tp.tv_nsec;
+ if (start == 0) goto error;
+ if (start == stop) goto error;
+ }
+#else /* HAVE_GETHRVTIME */
+ if (erts_start_now_cpu() < 0) {
+ goto error;
+ }
+#endif /* HAVE_CLOCK_GETTIME */
+ erts_cpu_timestamp = !0;
+ }
+ }
+ } else {
+ goto error;
+ }
+ }
+#endif
+
+ if (pid_spec == am_all || pid_spec == am_existing) {
+ int i;
+ int procs = 0;
+ int ports = 0;
+ int mods = 0;
+
+ if (mask & (ERTS_PROC_TRACEE_FLAGS & ~ERTS_TRACEE_MODIFIER_FLAGS))
+ procs = 1;
+ if (mask & (ERTS_PORT_TRACEE_FLAGS & ~ERTS_TRACEE_MODIFIER_FLAGS))
+ ports = 1;
+ if (mask & ERTS_TRACEE_MODIFIER_FLAGS)
+ mods = 1;
+
+#ifdef ERTS_SMP
+ erts_smp_proc_unlock(p, ERTS_PROC_LOCK_MAIN);
+ erts_smp_block_system(0);
+ system_blocked = 1;
+#endif
+
+ ok = 1;
+ if (procs || mods) {
+ /* tracing of processes */
+ for (i = 0; i < erts_max_processes; i++) {
+ Process* tracee_p = process_tab[i];
+
+ if (! tracee_p)
+ continue;
+ if (tracer != NIL) {
+ if (tracee_p->id == tracer)
+ continue;
+ if (already_traced(NULL, tracee_p, tracer))
+ continue;
+ }
+ if (on) {
+ tracee_p->trace_flags |= mask;
+ } else {
+ tracee_p->trace_flags &= ~mask;
+ }
+ if(!(tracee_p->trace_flags & TRACEE_FLAGS)) {
+ tracee_p->tracer_proc = NIL;
+ } else if (tracer != NIL) {
+ tracee_p->tracer_proc = tracer;
+ }
+ matches++;
+ }
+ }
+ if (ports || mods) {
+ /* tracing of ports */
+ for (i = 0; i < erts_max_ports; i++) {
+ Port *tracee_port = &erts_port[i];
+ if (tracee_port->status & ERTS_PORT_SFLGS_DEAD) continue;
+ if (tracer != NIL) {
+ if (tracee_port->id == tracer) continue;
+ if (port_already_traced(NULL, tracee_port, tracer)) continue;
+ }
+
+ if (on) tracee_port->trace_flags |= mask;
+ else tracee_port->trace_flags &= ~mask;
+
+ if (!(tracee_port->trace_flags & TRACEE_FLAGS)) {
+ tracee_port->tracer_proc = NIL;
+ } else if (tracer != NIL) {
+ tracee_port->tracer_proc = tracer;
+ }
+			/* Matches are not counted for ports, since doing so would break compatibility. */
+			/* This could be a reason to modify this function or to add a new one. */
+ }
+ }
+ }
+
+ if (pid_spec == am_all || pid_spec == am_new) {
+ Uint def_flags = mask;
+ Eterm def_tracer = tracer;
+
+ ok = 1;
+ erts_change_default_tracing(on, &def_flags, &def_tracer);
+
+#ifdef HAVE_ERTS_NOW_CPU
+ if (cpu_ts && !on) {
+ /* cpu_ts => pid_spec == am_all */
+ if (erts_cpu_timestamp) {
+#ifdef HAVE_GETHRVTIME
+ erts_stop_now_cpu();
+#endif
+ erts_cpu_timestamp = 0;
+ }
+ }
+#endif
+ }
+
+ if (!ok)
+ goto error;
+ }
+
+#ifdef ERTS_SMP
+ if (system_blocked) {
+ erts_smp_release_system();
+ erts_smp_proc_lock(p, ERTS_PROC_LOCK_MAIN);
+ }
+#endif
+
+ BIF_RET(make_small(matches));
+
+ already_traced:
+ erts_send_error_to_logger_str(p->group_leader,
+ "** can only have one tracer per process\n");
+
+ error:
+
+#ifdef ERTS_SMP
+ if (system_blocked) {
+ erts_smp_release_system();
+ erts_smp_proc_lock(p, ERTS_PROC_LOCK_MAIN);
+ }
+#endif
+
+ BIF_ERROR(p, BADARG);
+}
+
+/* Check that the process to be traced is not already traced
+ * by a valid other tracer than the tracer to be.
+ */
+static int port_already_traced(Process *c_p, Port *tracee_port, Eterm tracer)
+{
+ /*
+ * SMP build assumes that either system is blocked or:
+ * * main lock is held on c_p
+     * * all locks are held on port tracee_port
+ */
+ if ((tracee_port->trace_flags & TRACEE_FLAGS)
+ && tracee_port->tracer_proc != tracer) {
+ /* This tracee is already being traced, and not by the
+ * tracer to be */
+ if (is_internal_port(tracee_port->tracer_proc)) {
+ if (!erts_is_valid_tracer_port(tracee_port->tracer_proc)) {
+ /* Current trace port now invalid
+ * - discard it and approve the new. */
+ goto remove_tracer;
+ } else
+ return 1;
+ }
+ else if(is_internal_pid(tracee_port->tracer_proc)) {
+ Process *tracer_p = erts_pid2proc(c_p, ERTS_PROC_LOCK_MAIN,
+ tracee_port->tracer_proc, 0);
+ if (!tracer_p) {
+ /* Current trace process now invalid
+ * - discard it and approve the new. */
+ goto remove_tracer;
+ } else
+ return 1;
+ }
+ else {
+ remove_tracer:
+ tracee_port->trace_flags &= ~TRACEE_FLAGS;
+ tracee_port->tracer_proc = NIL;
+ }
+ }
+ return 0;
+}
+
+/* Check that the process to be traced is not already traced
+ * by a valid other tracer than the tracer to be.
+ */
+static int already_traced(Process *c_p, Process *tracee_p, Eterm tracer)
+{
+ /*
+ * SMP build assumes that either system is blocked or:
+ * * main lock is held on c_p
+     * * all locks are held on tracee_p
+ */
+ if ((tracee_p->trace_flags & TRACEE_FLAGS)
+ && tracee_p->tracer_proc != tracer) {
+ /* This tracee is already being traced, and not by the
+ * tracer to be */
+ if (is_internal_port(tracee_p->tracer_proc)) {
+ if (!erts_is_valid_tracer_port(tracee_p->tracer_proc)) {
+ /* Current trace port now invalid
+ * - discard it and approve the new. */
+ goto remove_tracer;
+ } else
+ return 1;
+ }
+ else if(is_internal_pid(tracee_p->tracer_proc)) {
+ Process *tracer_p = erts_pid2proc(c_p, ERTS_PROC_LOCK_MAIN,
+ tracee_p->tracer_proc, 0);
+ if (!tracer_p) {
+ /* Current trace process now invalid
+ * - discard it and approve the new. */
+ goto remove_tracer;
+ } else
+ return 1;
+ }
+ else {
+ remove_tracer:
+ tracee_p->trace_flags &= ~TRACEE_FLAGS;
+ tracee_p->tracer_proc = NIL;
+ }
+ }
+ return 0;
+}
+
+/*
+ * Return information about a process or an external function being traced.
+ */
+
+Eterm
+trace_info_2(Process* p, Eterm What, Eterm Key)
+{
+ Eterm res;
+ if (What == am_on_load) {
+ res = trace_info_on_load(p, Key);
+ } else if (is_atom(What) || is_pid(What)) {
+ res = trace_info_pid(p, What, Key);
+ } else if (is_tuple(What)) {
+ res = trace_info_func(p, What, Key);
+ } else {
+ BIF_ERROR(p, BADARG);
+ }
+ BIF_RET(res);
+}
+
+static Eterm
+trace_info_pid(Process* p, Eterm pid_spec, Eterm key)
+{
+ Eterm tracer;
+ Uint trace_flags;
+ Eterm* hp;
+
+ if (pid_spec == am_new) {
+ erts_get_default_tracing(&trace_flags, &tracer);
+ } else if (is_internal_pid(pid_spec)
+ && internal_pid_index(pid_spec) < erts_max_processes) {
+ Process *tracee;
+ tracee = erts_pid2proc(p, ERTS_PROC_LOCK_MAIN,
+ pid_spec, ERTS_PROC_LOCKS_ALL);
+
+ if (!tracee) {
+ return am_undefined;
+ } else {
+ tracer = tracee->tracer_proc;
+ trace_flags = tracee->trace_flags;
+ }
+
+ if (is_internal_pid(tracer)) {
+ if (!erts_pid2proc(p, ERTS_PROC_LOCK_MAIN, tracer, 0)) {
+ reset_tracer:
+ tracee->trace_flags &= ~TRACEE_FLAGS;
+ trace_flags = tracee->trace_flags;
+ tracer = tracee->tracer_proc = NIL;
+ }
+ }
+ else if (is_internal_port(tracer)) {
+ if (!erts_is_valid_tracer_port(tracer))
+ goto reset_tracer;
+ }
+#ifdef ERTS_SMP
+ erts_smp_proc_unlock(tracee,
+ (tracee == p
+ ? ERTS_PROC_LOCKS_ALL_MINOR
+ : ERTS_PROC_LOCKS_ALL));
+#endif
+ } else if (is_external_pid(pid_spec)
+ && external_pid_dist_entry(pid_spec) == erts_this_dist_entry) {
+ return am_undefined;
+ } else {
+ error:
+ BIF_ERROR(p, BADARG);
+ }
+
+ if (key == am_flags) {
+ int num_flags = 19; /* MAXIMUM number of flags. */
+ Uint needed = 3+2*num_flags;
+ Eterm flag_list = NIL;
+ Eterm* limit;
+
+#define FLAG0(flag_mask,flag) \
+ if (trace_flags & (flag_mask)) { flag_list = CONS(hp, flag, flag_list); hp += 2; } else {}
+
+#if defined(DEBUG)
+ /*
+ * Check num_flags if this assertion fires.
+ */
+# define FLAG ASSERT(num_flags-- > 0); FLAG0
+#else
+# define FLAG FLAG0
+#endif
+ hp = HAlloc(p, needed);
+ limit = hp+needed;
+ FLAG(F_TRACE_SEND, am_send);
+ FLAG(F_TRACE_RECEIVE, am_receive);
+ FLAG(F_TRACE_SOS, am_set_on_spawn);
+ FLAG(F_TRACE_CALLS, am_call);
+ FLAG(F_TRACE_PROCS, am_procs);
+ FLAG(F_TRACE_SOS1, am_set_on_first_spawn);
+ FLAG(F_TRACE_SOL, am_set_on_link);
+ FLAG(F_TRACE_SOL1, am_set_on_first_link);
+ FLAG(F_TRACE_SCHED, am_running);
+ FLAG(F_TRACE_SCHED_EXIT, am_exiting);
+ FLAG(F_TRACE_GC, am_garbage_collection);
+ FLAG(F_TIMESTAMP, am_timestamp);
+ FLAG(F_TRACE_ARITY_ONLY, am_arity);
+ FLAG(F_TRACE_RETURN_TO, am_return_to);
+ FLAG(F_TRACE_SILENT, am_silent);
+ FLAG(F_TRACE_SCHED_NO, am_scheduler_id);
+ FLAG(F_TRACE_PORTS, am_ports);
+ FLAG(F_TRACE_SCHED_PORTS, am_running_ports);
+ FLAG(F_TRACE_SCHED_PROCS, am_running_procs);
+#undef FLAG0
+#undef FLAG
+ HRelease(p,limit,hp+3);
+ return TUPLE2(hp, key, flag_list);
+ } else if (key == am_tracer) {
+ hp = HAlloc(p, 3);
+ return TUPLE2(hp, key, tracer); /* Local pid or port */
+ } else {
+ goto error;
+ }
+}
+
+#define FUNC_TRACE_NOEXIST 0
+#define FUNC_TRACE_UNTRACED (1<<0)
+#define FUNC_TRACE_GLOBAL_TRACE (1<<1)
+#define FUNC_TRACE_LOCAL_TRACE (1<<2)
+#define FUNC_TRACE_META_TRACE (1<<3)
+#define FUNC_TRACE_COUNT_TRACE (1<<4)
+/*
+ * Returns either FUNC_TRACE_NOEXIST, FUNC_TRACE_UNTRACED,
+ * FUNC_TRACE_GLOBAL_TRACE, or,
+ * an or'ed combination of at least one of FUNC_TRACE_LOCAL_TRACE,
+ * FUNC_TRACE_META_TRACE, FUNC_TRACE_COUNT_TRACE.
+ *
+ * If the return value contains FUNC_TRACE_GLOBAL_TRACE
+ * or FUNC_TRACE_LOCAL_TRACE *ms is set.
+ *
+ * If the return value contains FUNC_TRACE_META_TRACE,
+ * *ms_meta or *tracer_pid_meta is set.
+ *
+ * If the return value contains FUNC_TRACE_COUNT_TRACE, *count is set.
+ */
+static int function_is_traced(Eterm mfa[3],
+ Binary **ms, /* out */
+ Binary **ms_meta, /* out */
+ Eterm *tracer_pid_meta, /* out */
+ Sint *count) /* out */
+{
+ Export e;
+ Export* ep;
+ int i;
+ Uint *code;
+
+ /* First look for an export entry */
+ e.code[0] = mfa[0];
+ e.code[1] = mfa[1];
+ e.code[2] = mfa[2];
+ if ((ep = export_get(&e)) != NULL) {
+ if (ep->address == ep->code+3 &&
+ ep->code[3] != (Uint) em_call_error_handler) {
+ if (ep->code[3] == (Uint) em_call_traced_function) {
+ *ms = ep->match_prog_set;
+ return FUNC_TRACE_GLOBAL_TRACE;
+ }
+ if (ep->code[3] == (Uint) em_apply_bif) {
+ for (i = 0; i < BIF_SIZE; ++i) {
+ if (bif_export[i] == ep) {
+ int r = 0;
+
+ if (erts_bif_trace_flags[i] & BIF_TRACE_AS_GLOBAL) {
+ *ms = ep->match_prog_set;
+ return FUNC_TRACE_GLOBAL_TRACE;
+ } else {
+ if (erts_bif_trace_flags[i] & BIF_TRACE_AS_LOCAL) {
+ r |= FUNC_TRACE_LOCAL_TRACE;
+ *ms = ep->match_prog_set;
+ }
+ if (erts_is_mtrace_bif(ep->code+3, ms_meta,
+ tracer_pid_meta)) {
+ r |= FUNC_TRACE_META_TRACE;
+ }
+ }
+ return r ? r : FUNC_TRACE_UNTRACED;
+ }
+ }
+ erl_exit(1,"Impossible ghost bif encountered in trace_info.");
+ }
+ }
+ }
+
+ /* OK, now look for breakpoint tracing */
+ if ((code = erts_find_local_func(mfa)) != NULL) {
+ int r =
+ (erts_is_trace_break(code, ms, NULL)
+ ? FUNC_TRACE_LOCAL_TRACE : 0)
+ | (erts_is_mtrace_break(code, ms_meta, tracer_pid_meta)
+ ? FUNC_TRACE_META_TRACE : 0)
+ | (erts_is_count_break(code, count)
+ ? FUNC_TRACE_COUNT_TRACE : 0);
+
+ return r ? r : FUNC_TRACE_UNTRACED;
+ }
+ return FUNC_TRACE_NOEXIST;
+}
+
+static Eterm
+trace_info_func(Process* p, Eterm func_spec, Eterm key)
+{
+ Eterm* tp;
+ Eterm* hp;
+ Eterm mfa[3];
+ Binary *ms = NULL, *ms_meta = NULL;
+ Sint count = 0;
+ Eterm traced = am_false;
+ Eterm match_spec = am_false;
+ Eterm retval = am_false;
+ Eterm meta = am_false;
+ int r;
+
+ if (!is_tuple(func_spec)) {
+ goto error;
+ }
+ tp = tuple_val(func_spec);
+ if (tp[0] != make_arityval(3)) {
+ goto error;
+ }
+ if (!is_atom(tp[1]) || !is_atom(tp[2]) || !is_small(tp[3])) {
+ goto error;
+ }
+ mfa[0] = tp[1];
+ mfa[1] = tp[2];
+ mfa[2] = signed_val(tp[3]);
+
+ r = function_is_traced(mfa, &ms, &ms_meta, &meta, &count);
+ switch (r) {
+ case FUNC_TRACE_NOEXIST:
+ hp = HAlloc(p, 3);
+ return TUPLE2(hp, key, am_undefined);
+ case FUNC_TRACE_UNTRACED:
+ hp = HAlloc(p, 3);
+ return TUPLE2(hp, key, am_false);
+ case FUNC_TRACE_GLOBAL_TRACE:
+ traced = am_global;
+	match_spec = NIL; /* Fix up later if it's asked for */
+ break;
+ default:
+ if (r & FUNC_TRACE_LOCAL_TRACE) {
+ traced = am_local;
+	    match_spec = NIL; /* Fix up later if it's asked for */
+ }
+ break;
+ }
+
+ switch (key) {
+ case am_traced:
+ retval = traced;
+ break;
+ case am_match_spec:
+ if (ms) {
+ match_spec = MatchSetGetSource(ms);
+ match_spec = copy_object(match_spec, p);
+ }
+ retval = match_spec;
+ break;
+ case am_meta:
+ retval = meta;
+ break;
+ case am_meta_match_spec:
+ if (r & FUNC_TRACE_META_TRACE) {
+ if (ms_meta) {
+ retval = MatchSetGetSource(ms_meta);
+ retval = copy_object(retval, p);
+ } else {
+ retval = NIL;
+ }
+ }
+ break;
+ case am_call_count:
+ if (r & FUNC_TRACE_COUNT_TRACE) {
+ retval = count < 0 ?
+ erts_make_integer(-count-1, p) :
+ erts_make_integer(count, p);
+ }
+ break;
+ case am_all: {
+ Eterm match_spec_meta = am_false, c = am_false, t;
+
+ if (ms) {
+ match_spec = MatchSetGetSource(ms);
+ match_spec = copy_object(match_spec, p);
+ }
+ if (r & FUNC_TRACE_META_TRACE) {
+ if (ms_meta) {
+ match_spec_meta = MatchSetGetSource(ms_meta);
+ match_spec_meta = copy_object(match_spec_meta, p);
+ } else
+ match_spec_meta = NIL;
+ }
+ if (r & FUNC_TRACE_COUNT_TRACE) {
+ c = count < 0 ?
+ erts_make_integer(-count-1, p) :
+ erts_make_integer(count, p);
+ }
+ hp = HAlloc(p, (3+2)*5);
+ retval = NIL;
+ t = TUPLE2(hp, am_call_count, c); hp += 3;
+ retval = CONS(hp, t, retval); hp += 2;
+ t = TUPLE2(hp, am_meta_match_spec, match_spec_meta); hp += 3;
+ retval = CONS(hp, t, retval); hp += 2;
+ t = TUPLE2(hp, am_meta, meta); hp += 3;
+ retval = CONS(hp, t, retval); hp += 2;
+ t = TUPLE2(hp, am_match_spec, match_spec); hp += 3;
+ retval = CONS(hp, t, retval); hp += 2;
+ t = TUPLE2(hp, am_traced, traced); hp += 3;
+ retval = CONS(hp, t, retval); hp += 2;
+ } break;
+ default:
+ goto error;
+ }
+ hp = HAlloc(p, 3);
+ return TUPLE2(hp, key, retval);
+
+ error:
+ BIF_ERROR(p, BADARG);
+}
+
+static Eterm
+trace_info_on_load(Process* p, Eterm key)
+{
+ Eterm* hp;
+
+ if (! erts_default_trace_pattern_is_on) {
+ hp = HAlloc(p, 3);
+ return TUPLE2(hp, key, am_false);
+ }
+ switch (key) {
+ case am_traced:
+ {
+ Eterm traced = am_false;
+
+ if (! erts_default_trace_pattern_flags.breakpoint) {
+ traced = am_global;
+ } else if (erts_default_trace_pattern_flags.local) {
+ traced = am_local;
+ }
+ hp = HAlloc(p, 3);
+ return TUPLE2(hp, key, traced);
+ }
+ case am_match_spec:
+ {
+ Eterm match_spec = am_false;
+
+ if ((! erts_default_trace_pattern_flags.breakpoint) ||
+ erts_default_trace_pattern_flags.local) {
+ if (erts_default_match_spec) {
+ match_spec = MatchSetGetSource(erts_default_match_spec);
+ match_spec = copy_object(match_spec, p);
+ hp = HAlloc(p, 3);
+ } else {
+ match_spec = NIL;
+ hp = HAlloc(p, 3);
+ }
+ } else {
+ hp = HAlloc(p, 3);
+ }
+ return TUPLE2(hp, key, match_spec);
+ }
+ case am_meta:
+ hp = HAlloc(p, 3);
+ if (erts_default_trace_pattern_flags.meta) {
+ return TUPLE2(hp, key, erts_default_meta_tracer_pid);
+ } else {
+ return TUPLE2(hp, key, am_false);
+ }
+ case am_meta_match_spec:
+ {
+ Eterm match_spec = am_false;
+
+ if (erts_default_trace_pattern_flags.meta) {
+ if (erts_default_meta_match_spec) {
+ match_spec =
+ MatchSetGetSource(erts_default_meta_match_spec);
+ match_spec = copy_object(match_spec, p);
+ hp = HAlloc(p, 3);
+ } else {
+ match_spec = NIL;
+ hp = HAlloc(p, 3);
+ }
+ } else {
+ hp = HAlloc(p, 3);
+ }
+ return TUPLE2(hp, key, match_spec);
+ }
+ case am_call_count:
+ hp = HAlloc(p, 3);
+ if (erts_default_trace_pattern_flags.call_count) {
+ return TUPLE2(hp, key, am_true);
+ } else {
+ return TUPLE2(hp, key, am_false);
+ }
+ case am_all:
+ {
+ Eterm match_spec = am_false, meta_match_spec = am_false, r = NIL, t;
+
+ if (erts_default_trace_pattern_flags.local ||
+ (! erts_default_trace_pattern_flags.breakpoint)) {
+ match_spec = NIL;
+ }
+ if (erts_default_match_spec) {
+ match_spec = MatchSetGetSource(erts_default_match_spec);
+ match_spec = copy_object(match_spec, p);
+ }
+ if (erts_default_trace_pattern_flags.meta) {
+ meta_match_spec = NIL;
+ }
+ if (erts_default_meta_match_spec) {
+ meta_match_spec =
+ MatchSetGetSource(erts_default_meta_match_spec);
+ meta_match_spec = copy_object(meta_match_spec, p);
+ }
+ hp = HAlloc(p, (3+2)*5 + 3);
+ t = TUPLE2(hp, am_call_count,
+ (erts_default_trace_pattern_flags.call_count
+ ? am_true : am_false)); hp += 3;
+ r = CONS(hp, t, r); hp += 2;
+ t = TUPLE2(hp, am_meta_match_spec, meta_match_spec); hp += 3;
+ r = CONS(hp, t, r); hp += 2;
+ t = TUPLE2(hp, am_meta,
+ (erts_default_trace_pattern_flags.meta
+ ? erts_default_meta_tracer_pid : am_false)); hp += 3;
+ r = CONS(hp, t, r); hp += 2;
+ t = TUPLE2(hp, am_match_spec, match_spec); hp += 3;
+ r = CONS(hp, t, r); hp += 2;
+ t = TUPLE2(hp, am_traced,
+ (! erts_default_trace_pattern_flags.breakpoint ?
+ am_global : (erts_default_trace_pattern_flags.local ?
+ am_local : am_false))); hp += 3;
+ r = CONS(hp, t, r); hp += 2;
+ return TUPLE2(hp, key, r);
+ }
+ default:
+ BIF_ERROR(p, BADARG);
+ }
+}
+
+#undef FUNC_TRACE_NOEXIST
+#undef FUNC_TRACE_UNTRACED
+#undef FUNC_TRACE_GLOBAL_TRACE
+#undef FUNC_TRACE_LOCAL_TRACE
+#undef FUNC_TRACE_META_TRACE
+#undef FUNC_TRACE_COUNT_TRACE
+
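+/*
+ * Apply a trace pattern: first export entries for global call trace,
+ * then BIFs, then breakpoints for local, meta and call_count tracing.
+ * Returns the number of matching functions.
+ */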
+int
+erts_set_trace_pattern(Eterm* mfa, int specified,
+ Binary* match_prog_set, Binary *meta_match_prog_set,
+ int on, struct trace_pattern_flags flags,
+ Eterm meta_tracer_pid)
+{
+ int matches = 0;
+ int i;
+
+ /*
+ * First work on normal functions (not real BIFs).
+ */
+
+ for (i = 0; i < export_list_size(); i++) {
+ Export* ep = export_list(i);
+ int j;
+
+ if (ExportIsBuiltIn(ep)) {
+ continue;
+ }
+
+ for (j = 0; j < specified && mfa[j] == ep->code[j]; j++) {
+ /* Empty loop body */
+ }
+ if (j == specified) {
+ if (on) {
+ if (! flags.breakpoint)
+ matches += setup_func_trace(ep, match_prog_set);
+ else
+ reset_func_trace(ep);
+ } else if (! flags.breakpoint) {
+ matches += reset_func_trace(ep);
+ }
+ }
+ }
+
+ /*
+    ** OK, now for the BIFs
+ */
+ for (i = 0; i < BIF_SIZE; ++i) {
+ Export *ep = bif_export[i];
+ int j;
+
+ if (!ExportIsBuiltIn(ep)) {
+ continue;
+ }
+
+ if (bif_table[i].f == bif_table[i].traced) {
+ /* Trace wrapper same as regular function - untraceable */
+ continue;
+ }
+
+ for (j = 0; j < specified && mfa[j] == ep->code[j]; j++) {
+ /* Empty loop body */
+ }
+ if (j == specified) {
+ if (! flags.breakpoint) { /* Export entry call trace */
+ if (on) {
+ if (erts_bif_trace_flags[i] & BIF_TRACE_AS_META) {
+ ASSERT(ExportIsBuiltIn(bif_export[i]));
+ erts_clear_mtrace_bif
+ ((Uint *)bif_export[i]->code + 3);
+ erts_bif_trace_flags[i] &= ~BIF_TRACE_AS_META;
+ }
+ set_trace_bif(i, match_prog_set);
+ erts_bif_trace_flags[i] &= ~BIF_TRACE_AS_LOCAL;
+ erts_bif_trace_flags[i] |= BIF_TRACE_AS_GLOBAL;
+ setup_bif_trace(i);
+ } else { /* off */
+ if (erts_bif_trace_flags[i] & BIF_TRACE_AS_GLOBAL) {
+ clear_trace_bif(i);
+ erts_bif_trace_flags[i] &= ~BIF_TRACE_AS_GLOBAL;
+ }
+ if (! erts_bif_trace_flags[i]) {
+ reset_bif_trace(i);
+ }
+ }
+ matches++;
+ } else { /* Breakpoint call trace */
+ int m = 0;
+
+ if (on) {
+ if (flags.local) {
+ set_trace_bif(i, match_prog_set);
+ erts_bif_trace_flags[i] |= BIF_TRACE_AS_LOCAL;
+ erts_bif_trace_flags[i] &= ~BIF_TRACE_AS_GLOBAL;
+ m = 1;
+ }
+ if (flags.meta) {
+ erts_set_mtrace_bif
+ ((Uint *)bif_export[i]->code + 3,
+ meta_match_prog_set, meta_tracer_pid);
+ erts_bif_trace_flags[i] |= BIF_TRACE_AS_META;
+ erts_bif_trace_flags[i] &= ~BIF_TRACE_AS_GLOBAL;
+ m = 1;
+ }
+ if (erts_bif_trace_flags[i]) {
+ setup_bif_trace(i);
+ }
+ } else { /* off */
+ if (flags.local) {
+ if (erts_bif_trace_flags[i] & BIF_TRACE_AS_LOCAL) {
+ clear_trace_bif(i);
+ erts_bif_trace_flags[i] &= ~BIF_TRACE_AS_LOCAL;
+ }
+ m = 1;
+ }
+ if (flags.meta) {
+ if (erts_bif_trace_flags[i] & BIF_TRACE_AS_META) {
+ erts_clear_mtrace_bif
+ ((Uint *)bif_export[i]->code + 3);
+ erts_bif_trace_flags[i] &= ~BIF_TRACE_AS_META;
+ }
+ m = 1;
+ }
+ if (! erts_bif_trace_flags[i]) {
+ reset_bif_trace(i);
+ }
+ }
+ matches += m;
+ }
+ }
+ }
+
+ /*
+ ** So, now for breakpoint tracing
+ */
+ if (on) {
+ if (! flags.breakpoint) {
+ erts_clear_trace_break(mfa, specified);
+ erts_clear_mtrace_break(mfa, specified);
+ erts_clear_count_break(mfa, specified);
+ } else {
+ int m = 0;
+ if (flags.local) {
+ m = erts_set_trace_break(mfa, specified, match_prog_set,
+ am_true);
+ }
+ if (flags.meta) {
+ m = erts_set_mtrace_break(mfa, specified, meta_match_prog_set,
+ meta_tracer_pid);
+ }
+ if (flags.call_count) {
+ m = erts_set_count_break(mfa, specified, on);
+ }
+ /* All assignments to 'm' above should give the same value,
+ * so just use the last */
+ matches += m;
+ }
+ } else {
+ int m = 0;
+ if (flags.local) {
+ m = erts_clear_trace_break(mfa, specified);
+ }
+ if (flags.meta) {
+ m = erts_clear_mtrace_break(mfa, specified);
+ }
+ if (flags.call_count) {
+ m = erts_clear_count_break(mfa, specified);
+ }
+ /* All assignments to 'm' above should give the same value,
+ * so just use the last */
+ matches += m;
+ }
+
+ return matches;
+}
+
+/*
+ * Setup function tracing for the given exported function.
+ *
+ * Return Value: 1 if entry refers to a BIF or loaded function,
+ * 0 if the entry refers to a function not loaded.
+ */
+
+static int
+setup_func_trace(Export* ep, void* match_prog)
+{
+ if (ep->address == ep->code+3) {
+ if (ep->code[3] == (Uint) em_call_error_handler) {
+ return 0;
+ } else if (ep->code[3] == (Uint) em_call_traced_function) {
+ MatchSetUnref(ep->match_prog_set);
+ ep->match_prog_set = match_prog;
+ MatchSetRef(ep->match_prog_set);
+ return 1;
+ } else {
+ /*
+ * We ignore apply/3 and anything else.
+ */
+ return 0;
+ }
+ }
+
+ /*
+ * Currently no trace support for native code.
+ */
+ if (erts_is_native_break(ep->address)) {
+ return 0;
+ }
+
+ ep->code[3] = (Uint) em_call_traced_function;
+ ep->code[4] = (Uint) ep->address;
+ ep->address = ep->code+3;
+ ep->match_prog_set = match_prog;
+ MatchSetRef(ep->match_prog_set);
+ return 1;
+}
+
+static void setup_bif_trace(int bif_index) {
+ Export *ep = bif_export[bif_index];
+
+ ASSERT(ExportIsBuiltIn(ep));
+ ASSERT(ep->code[4]);
+ ep->code[4] = (Uint) bif_table[bif_index].traced;
+}
+
+static void set_trace_bif(int bif_index, void* match_prog) {
+ Export *ep = bif_export[bif_index];
+
+#ifdef HARDDEBUG
+ erts_fprintf(stderr, "set_trace_bif: %T:%T/%bpu\n",
+ ep->code[0], ep->code[1], ep->code[2]);
+#endif
+ ASSERT(ExportIsBuiltIn(ep));
+ MatchSetUnref(ep->match_prog_set);
+ ep->match_prog_set = match_prog;
+ MatchSetRef(ep->match_prog_set);
+}
+
+/*
+ * Reset function tracing for the given exported function.
+ *
+ * Return Value: 1 if entry refers to a BIF or loaded function,
+ * 0 if the entry refers to a function not loaded.
+ */
+
+static int
+reset_func_trace(Export* ep)
+{
+ if (ep->address == ep->code+3) {
+ if (ep->code[3] == (Uint) em_call_error_handler) {
+ return 0;
+ } else if (ep->code[3] == (Uint) em_call_traced_function) {
+ ep->address = (Uint *) ep->code[4];
+ MatchSetUnref(ep->match_prog_set);
+ ep->match_prog_set = NULL;
+ return 1;
+ } else {
+ /*
+ * We ignore apply/3 and anything else.
+ */
+ return 0;
+ }
+ }
+
+ /*
+ * Currently no trace support for native code.
+ */
+ if (erts_is_native_break(ep->address)) {
+ return 0;
+ }
+
+ /*
+ * Nothing to do, but the export entry matches.
+ */
+
+ return 1;
+}
+
+static void reset_bif_trace(int bif_index) {
+ Export *ep = bif_export[bif_index];
+
+ ASSERT(ExportIsBuiltIn(ep));
+ ASSERT(ep->code[4]);
+ ASSERT(! ep->match_prog_set);
+ ASSERT(! erts_is_mtrace_bif((Uint *)ep->code+3, NULL, NULL));
+ ep->code[4] = (Uint) bif_table[bif_index].f;
+}
+
+static void clear_trace_bif(int bif_index) {
+ Export *ep = bif_export[bif_index];
+
+#ifdef HARDDEBUG
+ erts_fprintf(stderr, "clear_trace_bif: %T:%T/%bpu\n",
+ ep->code[0], ep->code[1], ep->code[2]);
+#endif
+ ASSERT(ExportIsBuiltIn(ep));
+ MatchSetUnref(ep->match_prog_set);
+ ep->match_prog_set = NULL;
+}
+
+/*
+ * Sequential tracing
+ *
+ * The sequential trace token is internally implemented as
+ * a tuple
+ * {Flags, Label, Serial, Sender, LastSerial}
+ *
+ * where
+ * - Flags is an integer (using masks 1, 2, and 4, for send,
+ * receive and print, respectively),
+ * - Label is any term, Serial (for now XXX) is an integer (it should
+ * be a list reflecting split traces), and
+ * - Sender is the Pid of the sender (i.e. the current process,
+ * except immediately after a message reception, in case it is
+ * the pid of the process that sent the message).
+ *
+ */
+
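+/* seq_trace(Key, Value) -> OldValue, where Key is one of the flag
+   atoms send | 'receive' | print | timestamp (Value true or false),
+   label (Value a small integer), serial (Value a {Lastcnt, Serial}
+   tuple), or sequential_trace_token (Value must be []). */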
+BIF_RETTYPE seq_trace_2(BIF_ALIST_2)
+{
+ Eterm res;
+ res = erts_seq_trace(BIF_P, BIF_ARG_1, BIF_ARG_2, 1);
+ if (is_non_value(res)) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+ BIF_RET(res);
+}
+
+Eterm erts_seq_trace(Process *p, Eterm arg1, Eterm arg2,
+ int build_result)
+{
+ Eterm flags;
+ Eterm old_value = am_true;
+ Eterm* hp;
+ int current_flag;
+
+ if (!is_atom(arg1)) {
+ return THE_NON_VALUE;
+ }
+
+
+ if (arg1 == am_send) {
+ current_flag = SEQ_TRACE_SEND;
+ } else if (arg1 == am_receive) {
+ current_flag = SEQ_TRACE_RECEIVE;
+ } else if (arg1 == am_print) {
+ current_flag = SEQ_TRACE_PRINT;
+ } else if (arg1 == am_timestamp) {
+ current_flag = SEQ_TRACE_TIMESTAMP;
+ }
+ else
+ current_flag = 0;
+
+ if (current_flag && ( (arg2 == am_true) || (arg2 == am_false)) ) {
+ /* Flags */
+ new_seq_trace_token(p);
+ flags = unsigned_val(SEQ_TRACE_TOKEN_FLAGS(p));
+ if (build_result) {
+ old_value = flags & current_flag ? am_true : am_false;
+ }
+ if (arg2 == am_true)
+ SEQ_TRACE_TOKEN_FLAGS(p) = make_small(flags|current_flag);
+ else if (arg2 == am_false)
+ SEQ_TRACE_TOKEN_FLAGS(p) = make_small(flags&~current_flag);
+ else {
+ return THE_NON_VALUE;
+ }
+ return old_value;
+ }
+ else if (arg1 == am_label) {
+ if (! is_small(arg2)) {
+ return THE_NON_VALUE;
+ }
+ new_seq_trace_token(p);
+ if (build_result) {
+ old_value = SEQ_TRACE_TOKEN_LABEL(p);
+ }
+ SEQ_TRACE_TOKEN_LABEL(p) = arg2;
+ return old_value;
+ }
+ else if (arg1 == am_serial) {
+ Eterm* tp;
+ if (is_not_tuple(arg2)) {
+ return THE_NON_VALUE;
+ }
+ tp = tuple_val(arg2);
+ if ((*tp != make_arityval(2)) || is_not_small(*(tp+1)) || is_not_small(*(tp+2))) {
+ return THE_NON_VALUE;
+ }
+ new_seq_trace_token(p);
+ if (build_result) {
+ hp = HAlloc(p,3);
+ old_value = TUPLE2(hp, SEQ_TRACE_TOKEN_LASTCNT(p),
+ SEQ_TRACE_TOKEN_SERIAL(p));
+ }
+ SEQ_TRACE_TOKEN_LASTCNT(p) = *(tp+1);
+ SEQ_TRACE_TOKEN_SERIAL(p) = *(tp+2);
+ p->seq_trace_clock = unsigned_val(*(tp+2));
+ p->seq_trace_lastcnt = unsigned_val(*(tp+1));
+ return old_value;
+ }
+ else if (arg1 == am_sequential_trace_token) {
+ if (is_not_nil(arg2)) {
+ return THE_NON_VALUE;
+ }
+ if (build_result) {
+ old_value = SEQ_TRACE_TOKEN(p);
+ }
+ SEQ_TRACE_TOKEN(p) = NIL;
+ return old_value;
+ }
+ else {
+ return THE_NON_VALUE;
+ }
+}
+
+void
+new_seq_trace_token(Process* p)
+{
+ Eterm* hp;
+
+ if (SEQ_TRACE_TOKEN(p) == NIL) {
+ hp = HAlloc(p, 6);
+ SEQ_TRACE_TOKEN(p) = TUPLE5(hp, make_small(0), /* Flags */
+ make_small(0), /* Label */
+ make_small(0), /* Serial */
+ p->id, /* Internal pid */ /* From */
+ make_small(p->seq_trace_lastcnt));
+ }
+}
+
+BIF_RETTYPE seq_trace_info_1(BIF_ALIST_1)
+{
+ Eterm item;
+ Eterm res;
+ Eterm* hp;
+ Uint current_flag;
+
+ if (is_not_atom(BIF_ARG_1)) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+
+ item = BIF_ARG_1;
+
+ if (SEQ_TRACE_TOKEN(BIF_P) == NIL) {
+ if ((item == am_send) || (item == am_receive) ||
+ (item == am_print) || (item == am_timestamp)) {
+ hp = HAlloc(BIF_P,3);
+ res = TUPLE2(hp, item, am_false);
+ BIF_RET(res);
+ } else if ((item == am_label) || (item == am_serial)) {
+ BIF_RET(NIL);
+ } else {
+ goto error;
+ }
+ }
+
+ if (BIF_ARG_1 == am_send) {
+ current_flag = SEQ_TRACE_SEND;
+ } else if (BIF_ARG_1 == am_receive) {
+ current_flag = SEQ_TRACE_RECEIVE;
+ } else if (BIF_ARG_1 == am_print) {
+ current_flag = SEQ_TRACE_PRINT;
+ } else if (BIF_ARG_1 == am_timestamp) {
+ current_flag = SEQ_TRACE_TIMESTAMP;
+ } else {
+ current_flag = 0;
+ }
+
+ if (current_flag) {
+ res = unsigned_val(SEQ_TRACE_TOKEN_FLAGS(BIF_P)) & current_flag ?
+ am_true : am_false;
+ } else if (item == am_label) {
+ res = SEQ_TRACE_TOKEN_LABEL(BIF_P);
+ } else if (item == am_serial) {
+ hp = HAlloc(BIF_P, 3);
+ res = TUPLE2(hp, SEQ_TRACE_TOKEN_LASTCNT(BIF_P), SEQ_TRACE_TOKEN_SERIAL(BIF_P));
+ } else {
+ error:
+ BIF_ERROR(BIF_P, BADARG);
+ }
+ hp = HAlloc(BIF_P, 3);
+ res = TUPLE2(hp, item, res);
+ BIF_RET(res);
+}
+
+/*
+ seq_trace_print(Message) -> true | false
+ This function passes Message to the system_tracer
+ if the trace_token is not NIL.
+ Returns true if Message is passed, otherwise false.
+ Note that true is returned whenever the conditions for passing
+ Message are fulfilled, but nothing is actually passed if no
+ system_seq_tracer is set.
+ */
+BIF_RETTYPE seq_trace_print_1(BIF_ALIST_1)
+{
+ if (SEQ_TRACE_TOKEN(BIF_P) == NIL)
+ BIF_RET(am_false);
+ seq_trace_update_send(BIF_P);
+ seq_trace_output(SEQ_TRACE_TOKEN(BIF_P), BIF_ARG_1,
+ SEQ_TRACE_PRINT, NIL, BIF_P);
+ BIF_RET(am_true);
+}
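+
+/*
+ * Illustrative usage sketch (Erlang side, assuming the documented
+ * seq_trace wrapper module for these BIFs):
+ *
+ *     seq_trace:set_token(print, true),
+ *     true = seq_trace:print({debug, State}),
+ *
+ * Nothing reaches a tracer unless a system sequential tracer has
+ * been set, e.g. with seq_trace:set_system_tracer(TracerPid).
+ */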
+
+/*
+ seq_trace_print(Label,Message) -> true | false
+ This function passes Message to the system_tracer
+ if the trace_token is not NIL and the trace_token label is equal to
+ Label. Returns true if Message is passed, otherwise false.
+ Note that true is returned whenever the conditions for passing
+ Message are fulfilled, but nothing is actually passed if no
+ system_seq_tracer is set.
+ */
+BIF_RETTYPE seq_trace_print_2(BIF_ALIST_2)
+{
+ if (SEQ_TRACE_TOKEN(BIF_P) == NIL)
+ BIF_RET(am_false);
+ if (!(is_atom(BIF_ARG_1) || is_small(BIF_ARG_1))) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+ if (SEQ_TRACE_TOKEN_LABEL(BIF_P) != BIF_ARG_1)
+ BIF_RET(am_false);
+ seq_trace_update_send(BIF_P);
+ seq_trace_output(SEQ_TRACE_TOKEN(BIF_P), BIF_ARG_2,
+ SEQ_TRACE_PRINT, NIL, BIF_P);
+ BIF_RET(am_true);
+}
+
+void erts_system_monitor_clear(Process *c_p) {
+#ifdef ERTS_SMP
+ if (c_p) {
+ erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN);
+ erts_smp_block_system(0);
+ }
+#endif
+ erts_set_system_monitor(NIL);
+ erts_system_monitor_long_gc = 0;
+ erts_system_monitor_large_heap = 0;
+ erts_system_monitor_flags.busy_port = 0;
+ erts_system_monitor_flags.busy_dist_port = 0;
+#ifdef ERTS_SMP
+ if (c_p) {
+ erts_smp_release_system();
+ erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MAIN);
+ }
+#endif
+}
+
+
+static Eterm system_monitor_get(Process *p)
+{
+ Eterm *hp;
+ Eterm system_monitor = erts_get_system_monitor();
+
+ if (system_monitor == NIL) {
+ return am_undefined;
+ } else {
+ Eterm res;
+ Uint hsz = 3 + (erts_system_monitor_flags.busy_dist_port ? 2 : 0) +
+ (erts_system_monitor_flags.busy_port ? 2 : 0);
+ Eterm long_gc = NIL;
+ Eterm large_heap = NIL;
+
+ if (erts_system_monitor_long_gc != 0) {
+ hsz += 2+3;
+ (void) erts_bld_uint(NULL, &hsz, erts_system_monitor_long_gc);
+ }
+ if (erts_system_monitor_large_heap != 0) {
+ hsz += 2+3;
+ (void) erts_bld_uint(NULL, &hsz, erts_system_monitor_large_heap);
+ }
+
+ hp = HAlloc(p, hsz);
+ if (erts_system_monitor_long_gc != 0) {
+ long_gc = erts_bld_uint(&hp, NULL, erts_system_monitor_long_gc);
+ }
+ if (erts_system_monitor_large_heap != 0) {
+ large_heap = erts_bld_uint(&hp, NULL, erts_system_monitor_large_heap);
+ }
+ res = NIL;
+ if (long_gc != NIL) {
+ Eterm t = TUPLE2(hp, am_long_gc, long_gc); hp += 3;
+ res = CONS(hp, t, res); hp += 2;
+ }
+ if (large_heap != NIL) {
+ Eterm t = TUPLE2(hp, am_large_heap, large_heap); hp += 3;
+ res = CONS(hp, t, res); hp += 2;
+ }
+ if (erts_system_monitor_flags.busy_port) {
+ res = CONS(hp, am_busy_port, res); hp += 2;
+ }
+ if (erts_system_monitor_flags.busy_dist_port) {
+ res = CONS(hp, am_busy_dist_port, res); hp += 2;
+ }
+ return TUPLE2(hp, system_monitor, res);
+ }
+}
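+
+/*
+ * Illustrative result (hypothetical settings): with monitor pid
+ * <0.42.0>, long_gc = 500, and busy_port enabled, the function
+ * above would return {<0.42.0>, [busy_port, {long_gc, 500}]}.
+ */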
+
+
+BIF_RETTYPE system_monitor_0(Process *p) {
+ BIF_RET(system_monitor_get(p));
+}
+
+BIF_RETTYPE system_monitor_1(Process *p, Eterm spec) {
+ if (spec == am_undefined) {
+ BIF_RET(system_monitor_2(p, spec, NIL));
+ } else if (is_tuple(spec)) {
+ Eterm *tp = tuple_val(spec);
+ if (tp[0] != make_arityval(2)) goto error;
+ BIF_RET(system_monitor_2(p, tp[1], tp[2]));
+ }
+ error:
+ BIF_ERROR(p, BADARG);
+}
+
+BIF_RETTYPE system_monitor_2(Process *p, Eterm monitor_pid, Eterm list) {
+ Eterm prev;
+ int system_blocked = 0;
+
+ if (monitor_pid == am_undefined || list == NIL) {
+ prev = system_monitor_get(p);
+ erts_system_monitor_clear(p);
+ BIF_RET(prev);
+ }
+ if (is_not_list(list)) goto error;
+ else {
+ Uint long_gc, large_heap;
+ int busy_port, busy_dist_port;
+
+ system_blocked = 1;
+ erts_smp_proc_unlock(p, ERTS_PROC_LOCK_MAIN);
+ erts_smp_block_system(0);
+
+ if (!erts_pid2proc(p, ERTS_PROC_LOCK_MAIN, monitor_pid, 0))
+ goto error;
+
+ for (long_gc = 0, large_heap = 0, busy_port = 0, busy_dist_port = 0;
+ is_list(list);
+ list = CDR(list_val(list))) {
+ Eterm t = CAR(list_val(list));
+ if (is_tuple(t)) {
+ Eterm *tp = tuple_val(t);
+ if (arityval(tp[0]) != 2) goto error;
+ if (tp[1] == am_long_gc) {
+ if (! term_to_Uint(tp[2], &long_gc)) goto error;
+ if (long_gc < 1) long_gc = 1;
+ } else if (tp[1] == am_large_heap) {
+ if (! term_to_Uint(tp[2], &large_heap)) goto error;
+ if (large_heap < 16384) large_heap = 16384;
+ /* 16 Kwords is not an unnatural heap size */
+ } else goto error;
+ } else if (t == am_busy_port) {
+ busy_port = !0;
+ } else if (t == am_busy_dist_port) {
+ busy_dist_port = !0;
+ } else goto error;
+ }
+ if (is_not_nil(list)) goto error;
+ prev = system_monitor_get(p);
+ erts_set_system_monitor(monitor_pid);
+ erts_system_monitor_long_gc = long_gc;
+ erts_system_monitor_large_heap = large_heap;
+ erts_system_monitor_flags.busy_port = !!busy_port;
+ erts_system_monitor_flags.busy_dist_port = !!busy_dist_port;
+
+ erts_smp_release_system();
+ erts_smp_proc_lock(p, ERTS_PROC_LOCK_MAIN);
+ BIF_RET(prev);
+ }
+
+ error:
+
+ if (system_blocked) {
+ erts_smp_release_system();
+ erts_smp_proc_lock(p, ERTS_PROC_LOCK_MAIN);
+ }
+
+ BIF_ERROR(p, BADARG);
+}
+
+/* Begin: Trace for System Profiling */
+
+void erts_system_profile_clear(Process *c_p) {
+#ifdef ERTS_SMP
+ if (c_p) {
+ erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN);
+ erts_smp_block_system(0);
+ }
+#endif
+ erts_set_system_profile(NIL);
+ erts_system_profile_flags.scheduler = 0;
+ erts_system_profile_flags.runnable_procs = 0;
+ erts_system_profile_flags.runnable_ports = 0;
+ erts_system_profile_flags.exclusive = 0;
+#ifdef ERTS_SMP
+ if (c_p) {
+ erts_smp_release_system();
+ erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MAIN);
+ }
+#endif
+}
+
+static Eterm system_profile_get(Process *p) {
+ Eterm *hp;
+ Eterm system_profile = erts_get_system_profile();
+ if (system_profile == NIL) {
+ return am_undefined;
+ } else {
+ Eterm res;
+ Uint hsz = 3
+ + (erts_system_profile_flags.scheduler ? 2 : 0)
+ + (erts_system_profile_flags.runnable_ports ? 2 : 0)
+ + (erts_system_profile_flags.exclusive ? 2 : 0)
+ + (erts_system_profile_flags.runnable_procs ? 2 : 0);
+
+ hp = HAlloc(p, hsz);
+ res = NIL;
+ if (erts_system_profile_flags.runnable_ports) {
+ res = CONS(hp, am_runnable_ports, res); hp += 2;
+ }
+ if (erts_system_profile_flags.runnable_procs) {
+ res = CONS(hp, am_runnable_procs, res); hp += 2;
+ }
+ if (erts_system_profile_flags.scheduler) {
+ res = CONS(hp, am_scheduler, res); hp += 2;
+ }
+ if (erts_system_profile_flags.exclusive) {
+ res = CONS(hp, am_exclusive, res); hp += 2;
+ }
+ return TUPLE2(hp, system_profile, res);
+ }
+}
+
+BIF_RETTYPE system_profile_0(Process *p) {
+ BIF_RET(system_profile_get(p));
+}
+
+BIF_RETTYPE system_profile_2(Process *p, Eterm profiler, Eterm list) {
+ Eterm prev;
+ int system_blocked = 0;
+ Process *profiler_p = NULL;
+ Port *profiler_port = NULL;
+
+ if (profiler == am_undefined || list == NIL) {
+ prev = system_profile_get(p);
+ erts_system_profile_clear(p);
+ BIF_RET(prev);
+ }
+ if (is_not_list(list)) {
+ goto error;
+ } else {
+ int scheduler, runnable_procs, runnable_ports, exclusive;
+ system_blocked = 1;
+
+ erts_smp_proc_unlock(p, ERTS_PROC_LOCK_MAIN);
+ erts_smp_block_system(0);
+
+ /* Check if valid process, no locks are taken */
+
+ if (is_internal_pid(profiler)) {
+ if (internal_pid_index(profiler) >= erts_max_processes) goto error;
+ profiler_p = process_tab[internal_pid_index(profiler)];
+ if (INVALID_PID(profiler_p, profiler)) goto error;
+ } else if (is_internal_port(profiler)) {
+ if (internal_port_index(profiler) >= erts_max_ports) goto error;
+ profiler_port = &erts_port[internal_port_index(profiler)];
+ if (INVALID_TRACER_PORT(profiler_port, profiler)) goto error;
+ } else {
+ goto error;
+ }
+
+ for (scheduler = 0, runnable_ports = 0, runnable_procs = 0, exclusive = 0;
+ is_list(list);
+ list = CDR(list_val(list))) {
+
+ Eterm t = CAR(list_val(list));
+ if (t == am_runnable_procs) {
+ runnable_procs = !0;
+ } else if (t == am_runnable_ports) {
+ runnable_ports = !0;
+ } else if (t == am_exclusive) {
+ exclusive = !0;
+ } else if (t == am_scheduler) {
+ scheduler = !0;
+ } else goto error;
+ }
+ if (is_not_nil(list)) goto error;
+ prev = system_profile_get(p);
+ erts_set_system_profile(profiler);
+
+ erts_system_profile_flags.scheduler = !!scheduler;
+ if (erts_system_profile_flags.scheduler)
+ erts_system_profile_setup_active_schedulers();
+ erts_system_profile_flags.runnable_ports = !!runnable_ports;
+ erts_system_profile_flags.runnable_procs = !!runnable_procs;
+ erts_system_profile_flags.exclusive = !!exclusive;
+
+ erts_smp_release_system();
+ erts_smp_proc_lock(p, ERTS_PROC_LOCK_MAIN);
+
+ BIF_RET(prev);
+
+ }
+
+ error:
+ if (system_blocked) {
+ erts_smp_release_system();
+ erts_smp_proc_lock(p, ERTS_PROC_LOCK_MAIN);
+ }
+
+ BIF_ERROR(p, BADARG);
+}
+/* End: Trace for System Profiling */
+
+BIF_RETTYPE
+trace_delivered_1(BIF_ALIST_1)
+{
+ DECL_AM(trace_delivered);
+#ifdef ERTS_SMP
+ ErlHeapFragment *bp;
+#else
+ ErtsProcLocks locks = 0;
+#endif
+ Eterm *hp;
+ Eterm msg, ref, msg_ref;
+ Process *p;
+ if (BIF_ARG_1 == am_all) {
+ p = NULL;
+ } else if (! (p = erts_pid2proc(BIF_P, ERTS_PROC_LOCK_MAIN,
+ BIF_ARG_1, ERTS_PROC_LOCKS_ALL))) {
+ if (is_not_internal_pid(BIF_ARG_1)
+ || internal_pid_index(BIF_ARG_1) >= erts_max_processes) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+ }
+
+ ref = erts_make_ref(BIF_P);
+
+#ifdef ERTS_SMP
+ bp = new_message_buffer(REF_THING_SIZE + 4);
+ hp = &bp->mem[0];
+ msg_ref = STORE_NC(&hp, &bp->off_heap.externals, ref);
+#else
+ hp = HAlloc(BIF_P, 4);
+ msg_ref = ref;
+#endif
+
+ msg = TUPLE3(hp, AM_trace_delivered, BIF_ARG_1, msg_ref);
+
+#ifdef ERTS_SMP
+ erts_send_sys_msg_proc(BIF_P->id, BIF_P->id, msg, bp);
+ if (p)
+ erts_smp_proc_unlock(p,
+ (BIF_P == p
+ ? ERTS_PROC_LOCKS_ALL_MINOR
+ : ERTS_PROC_LOCKS_ALL));
+#else
+ erts_send_message(BIF_P, BIF_P, &locks, msg, ERTS_SND_FLG_NO_SEQ_TRACE);
+#endif
+
+ BIF_RET(ref);
+}
diff --git a/erts/emulator/beam/erl_binary.h b/erts/emulator/beam/erl_binary.h
new file mode 100644
index 0000000000..dc5539faad
--- /dev/null
+++ b/erts/emulator/beam/erl_binary.h
@@ -0,0 +1,282 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2000-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+#ifndef __ERL_BINARY_H
+#define __ERL_BINARY_H
+
+#include "erl_threads.h"
+
+/*
+ * Maximum number of bytes to place in a heap binary.
+ */
+
+#define ERL_ONHEAP_BIN_LIMIT 64
+
+/*
+ * This structure represents a SUB_BINARY.
+ *
+ * Note: The last field (orig) is not counted in arityval in the header.
+ * This simplifies garbage collection.
+ */
+
+typedef struct erl_sub_bin {
+ Eterm thing_word; /* Subtag SUB_BINARY_SUBTAG. */
+ Uint size; /* Binary size in bytes. */
+ Uint offs; /* Offset into original binary. */
+ byte bitsize;
+ byte bitoffs;
+ byte is_writable; /* The underlying binary is writable */
+ Eterm orig; /* Original binary (REFC or HEAP binary). */
+} ErlSubBin;
+
+#define ERL_SUB_BIN_SIZE (sizeof(ErlSubBin)/sizeof(Eterm))
+#define HEADER_SUB_BIN _make_header(ERL_SUB_BIN_SIZE-2,_TAG_HEADER_SUB_BIN)
+
+/*
+ * This structure represents a HEAP_BINARY.
+ */
+
+typedef struct erl_heap_bin {
+ Eterm thing_word; /* Subtag HEAP_BINARY_SUBTAG. */
+ Uint size; /* Binary size in bytes. */
+ Eterm data[1]; /* The data in the binary. */
+} ErlHeapBin;
+
+#define heap_bin_size(num_bytes) \
+ (sizeof(ErlHeapBin)/sizeof(Eterm) - 1 + \
+ ((num_bytes)+sizeof(Eterm)-1)/sizeof(Eterm))
+
+#define header_heap_bin(num_bytes) \
+ _make_header(heap_bin_size(num_bytes)-1,_TAG_HEADER_HEAP_BIN)
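+
+/*
+ * Worked example (assuming a 32-bit word, i.e. sizeof(Eterm) == 4):
+ * heap_bin_size(10) = 3 - 1 + (10+3)/4 = 2 + 3 = 5 words, since
+ * sizeof(ErlHeapBin) is 3 words and 10 bytes of data need 3 words.
+ */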
+
+/*
+ * Get the size in bytes of any type of binary.
+ */
+
+#define binary_size(Bin) (binary_val(Bin)[1])
+
+#define binary_bitsize(Bin) \
+ ((*binary_val(Bin) == HEADER_SUB_BIN) ? \
+ ((ErlSubBin *) binary_val(Bin))->bitsize: \
+ 0)
+
+#define binary_bitoffset(Bin) \
+ ((*binary_val(Bin) == HEADER_SUB_BIN) ? \
+ ((ErlSubBin *) binary_val(Bin))->bitoffs: \
+ 0)
+
+/*
+ * Get the pointer to the actual data bytes in a binary.
+ * Works for any type of binary. Always use binary_bytes() if
+ * you know that the binary cannot be a sub binary.
+ *
+ * Bin: input variable (Eterm)
+ * Bytep: output variable (byte *)
+ * Bitoffs: output variable (Uint)
+ * Bitsize: output variable (Uint)
+ */
+
+#define ERTS_GET_BINARY_BYTES(Bin,Bytep,Bitoffs,Bitsize) \
+do { \
+ Eterm* _real_bin = binary_val(Bin); \
+ Uint _offs = 0; \
+ Bitoffs = Bitsize = 0; \
+ if (*_real_bin == HEADER_SUB_BIN) { \
+ ErlSubBin* _sb = (ErlSubBin *) _real_bin; \
+ _offs = _sb->offs; \
+ Bitoffs = _sb->bitoffs; \
+ Bitsize = _sb->bitsize; \
+ _real_bin = binary_val(_sb->orig); \
+ } \
+ if (*_real_bin == HEADER_PROC_BIN) { \
+ Bytep = ((ProcBin *) _real_bin)->bytes + _offs; \
+ } else { \
+ Bytep = (byte *)(&(((ErlHeapBin *) _real_bin)->data)) + _offs; \
+ } \
+} while (0)
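+
+/*
+ * Illustrative use of the macro above (hypothetical caller code):
+ *
+ *     byte* bytep;
+ *     Uint bitoffs, bitsize;
+ *     ERTS_GET_BINARY_BYTES(bin, bytep, bitoffs, bitsize);
+ *
+ * When bitoffs == 0 and bitsize == 0, bytep points at whole,
+ * byte-aligned data; otherwise the bits must be extracted with
+ * erts_copy_bits().
+ */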
+
+/*
+ * Get the real binary from any binary type, where "real" means
+ * a REFC or HEAP binary. Also get the byte and bit offset into the
+ * real binary. Useful if you want to build a SUB binary from
+ * any binary.
+ *
+ * Bin: Input variable (Eterm)
+ * RealBin: Output variable (Eterm)
+ * ByteOffset: Output variable (Uint)
+ * BitOffset: Output variable, offset in bits (Uint)
+ * BitSize: Output variable, extra bit size (Uint)
+ */
+
+#define ERTS_GET_REAL_BIN(Bin, RealBin, ByteOffset, BitOffset, BitSize) \
+ do { \
+ ErlSubBin* _sb = (ErlSubBin *) binary_val(Bin); \
+ if (_sb->thing_word == HEADER_SUB_BIN) { \
+ RealBin = _sb->orig; \
+ ByteOffset = _sb->offs; \
+ BitOffset = _sb->bitoffs; \
+ BitSize = _sb->bitsize; \
+ } else { \
+ RealBin = Bin; \
+ ByteOffset = BitOffset = BitSize = 0; \
+ } \
+ } while (0)
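+
+/*
+ * Illustrative use (hypothetical caller code): to build a sub binary
+ * covering a slice of any binary Bin, first normalize it:
+ *
+ *     Eterm real_bin;
+ *     Uint byte_offs, bit_offs, bit_size;
+ *     ERTS_GET_REAL_BIN(Bin, real_bin, byte_offs, bit_offs, bit_size);
+ *
+ * real_bin is then guaranteed to be a REFC or HEAP binary.
+ */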
+
+/*
+ * Get a pointer to the binary bytes, for a heap or refc binary
+ * (NOT sub binary).
+ */
+#define binary_bytes(Bin) \
+ (*binary_val(Bin) == HEADER_PROC_BIN ? \
+ ((ProcBin *) binary_val(Bin))->bytes : \
+ (ASSERT_EXPR(thing_subtag(*binary_val(Bin)) == HEAP_BINARY_SUBTAG), \
+ (byte *)(&(((ErlHeapBin *) binary_val(Bin))->data))))
+
+void erts_init_binary(void);
+
+byte* erts_get_aligned_binary_bytes(Eterm, byte**);
+
+#if defined(__i386__) || !defined(__GNUC__)
+/*
+ * Doubles aren't required to be 8-byte aligned on Intel x86.
+ * (With a non-GNU compiler we don't know whether __i386__ is
+ * defined on x86; therefore, assume Intel x86...)
+ */
+# define ERTS_BIN_ALIGNMENT_MASK ((Uint) 3)
+#else
+# define ERTS_BIN_ALIGNMENT_MASK ((Uint) 7)
+#endif
+
+#define ERTS_CHK_BIN_ALIGNMENT(B) \
+ do { ASSERT(!(B) || (((Uint) &((Binary *)(B))->orig_bytes[0]) & ERTS_BIN_ALIGNMENT_MASK) == ((Uint) 0)); } while(0)
+
+ERTS_GLB_INLINE void erts_free_aligned_binary_bytes(byte* buf);
+ERTS_GLB_INLINE Binary *erts_bin_drv_alloc_fnf(Uint size);
+ERTS_GLB_INLINE Binary *erts_bin_drv_alloc(Uint size);
+ERTS_GLB_INLINE Binary *erts_bin_nrml_alloc(Uint size);
+ERTS_GLB_INLINE Binary *erts_bin_realloc_fnf(Binary *bp, Uint size);
+ERTS_GLB_INLINE Binary *erts_bin_realloc(Binary *bp, Uint size);
+ERTS_GLB_INLINE void erts_bin_free(Binary *bp);
+ERTS_GLB_INLINE Binary *erts_create_magic_binary(Uint size,
+ void (*destructor)(Binary *));
+
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+
+ERTS_GLB_INLINE void
+erts_free_aligned_binary_bytes(byte* buf)
+{
+ if (buf) {
+ erts_free(ERTS_ALC_T_TMP, (void *) buf);
+ }
+}
+
+ERTS_GLB_INLINE Binary *
+erts_bin_drv_alloc_fnf(Uint size)
+{
+ Uint bsize = sizeof(Binary) - 1 + size;
+ void *res;
+ res = erts_alloc_fnf(ERTS_ALC_T_DRV_BINARY, bsize);
+ ERTS_CHK_BIN_ALIGNMENT(res);
+ return (Binary *) res;
+}
+
+ERTS_GLB_INLINE Binary *
+erts_bin_drv_alloc(Uint size)
+{
+ Uint bsize = sizeof(Binary) - 1 + size;
+ void *res;
+ res = erts_alloc(ERTS_ALC_T_DRV_BINARY, bsize);
+ ERTS_CHK_BIN_ALIGNMENT(res);
+ return (Binary *) res;
+}
+
+
+ERTS_GLB_INLINE Binary *
+erts_bin_nrml_alloc(Uint size)
+{
+ Uint bsize = sizeof(Binary) - 1 + size;
+ void *res;
+ res = erts_alloc(ERTS_ALC_T_BINARY, bsize);
+ ERTS_CHK_BIN_ALIGNMENT(res);
+ return (Binary *) res;
+}
+
+ERTS_GLB_INLINE Binary *
+erts_bin_realloc_fnf(Binary *bp, Uint size)
+{
+ Binary *nbp;
+ Uint bsize = sizeof(Binary) - 1 + size;
+ ASSERT((bp->flags & BIN_FLAG_MAGIC) == 0);
+ if (bp->flags & BIN_FLAG_DRV)
+ nbp = erts_realloc_fnf(ERTS_ALC_T_DRV_BINARY, (void *) bp, bsize);
+ else
+ nbp = erts_realloc_fnf(ERTS_ALC_T_BINARY, (void *) bp, bsize);
+ ERTS_CHK_BIN_ALIGNMENT(nbp);
+ return nbp;
+}
+
+ERTS_GLB_INLINE Binary *
+erts_bin_realloc(Binary *bp, Uint size)
+{
+ Binary *nbp;
+ Uint bsize = sizeof(Binary) - 1 + size;
+ ASSERT((bp->flags & BIN_FLAG_MAGIC) == 0);
+ if (bp->flags & BIN_FLAG_DRV)
+ nbp = erts_realloc_fnf(ERTS_ALC_T_DRV_BINARY, (void *) bp, bsize);
+ else
+ nbp = erts_realloc_fnf(ERTS_ALC_T_BINARY, (void *) bp, bsize);
+ if (!nbp)
+ erts_realloc_n_enomem(ERTS_ALC_T2N(bp->flags & BIN_FLAG_DRV
+ ? ERTS_ALC_T_DRV_BINARY
+ : ERTS_ALC_T_BINARY),
+ bp,
+ bsize);
+ ERTS_CHK_BIN_ALIGNMENT(nbp);
+ return nbp;
+}
+
+ERTS_GLB_INLINE void
+erts_bin_free(Binary *bp)
+{
+ if (bp->flags & BIN_FLAG_MAGIC)
+ ERTS_MAGIC_BIN_DESTRUCTOR(bp)(bp);
+ if (bp->flags & BIN_FLAG_DRV)
+ erts_free(ERTS_ALC_T_DRV_BINARY, (void *) bp);
+ else
+ erts_free(ERTS_ALC_T_BINARY, (void *) bp);
+}
+
+ERTS_GLB_INLINE Binary *
+erts_create_magic_binary(Uint size, void (*destructor)(Binary *))
+{
+ Uint bsize = sizeof(Binary) - 1 + sizeof(ErtsBinaryMagicPart) - 1 + size;
+ Binary* bptr = erts_alloc_fnf(ERTS_ALC_T_BINARY, bsize);
+ if (!bptr)
+ erts_alloc_n_enomem(ERTS_ALC_T2N(ERTS_ALC_T_BINARY), bsize);
+ ERTS_CHK_BIN_ALIGNMENT(bptr);
+ bptr->flags = BIN_FLAG_MAGIC;
+ bptr->orig_size = sizeof(ErtsBinaryMagicPart) - 1 + size;
+ erts_refc_init(&bptr->refc, 0);
+ ERTS_MAGIC_BIN_DESTRUCTOR(bptr) = destructor;
+ return bptr;
+}
+
+#endif /* #if ERTS_GLB_INLINE_INCL_FUNC_DEF */
+
+#endif
diff --git a/erts/emulator/beam/erl_bits.c b/erts/emulator/beam/erl_bits.c
new file mode 100644
index 0000000000..e4f5d50ddf
--- /dev/null
+++ b/erts/emulator/beam/erl_bits.c
@@ -0,0 +1,1975 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 1999-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+
+#include "sys.h"
+#include "erl_vm.h"
+#include "global.h"
+#include "erl_process.h"
+#include "error.h"
+#include "bif.h"
+#include "big.h"
+#include "erl_bits.h"
+#include "erl_binary.h"
+
+#ifdef MAX
+#undef MAX
+#endif
+#define MAX(x,y) (((x)>(y))?(x):(y))
+#ifdef MIN
+#undef MIN
+#endif
+#define MIN(x,y) (((x)<(y))?(x):(y))
+
+#if defined(WORDS_BIGENDIAN)
+# define BIT_ENDIAN_MACHINE 0
+#else
+# define BIT_ENDIAN_MACHINE BSF_LITTLE
+#endif
+
+#define BIT_IS_MACHINE_ENDIAN(x) (((x)&BSF_LITTLE) == BIT_ENDIAN_MACHINE)
+
+/*
+ * MAKE_MASK(n) constructs a mask with n bits.
+ * Example: MAKE_MASK(3) returns the binary number 00000111.
+ */
+
+#define MAKE_MASK(n) ((((Uint) 1) << (n))-1)
+
+/*
+ * MASK_BITS assigns src to dst, but preserves the dst bits outside the mask.
+ */
+
+#define MASK_BITS(src,dst,mask) (((src) & (mask)) | ((dst) & ~(mask)))
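+
+/*
+ * Example: MASK_BITS(0xAB, 0xCD, 0x0F) yields 0xCB -- the low nibble
+ * comes from src (0xB) and the high nibble is preserved from dst (0xC).
+ */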
+
+static byte get_bit(byte b, size_t a_offs);
+
+#if defined(ERTS_SMP)
+/* the state resides in the current process' scheduler data */
+#elif defined(ERL_BITS_REENTRANT)
+/* reentrant API but with a hidden single global state, for testing only */
+struct erl_bits_state ErlBitsState_;
+#else
+/* non-reentrant API with a single global state */
+struct erl_bits_state ErlBitsState;
+#endif
+
+#define byte_buf (ErlBitsState.byte_buf_)
+#define byte_buf_len (ErlBitsState.byte_buf_len_)
+
+#ifdef ERTS_SMP
+static erts_smp_atomic_t bits_bufs_size;
+#endif
+
+Uint
+erts_bits_bufs_size(void)
+{
+ return 0;
+}
+
+#if !defined(ERTS_SMP)
+static
+#endif
+void
+erts_bits_init_state(ERL_BITS_PROTO_0)
+{
+ byte_buf_len = 1;
+ byte_buf = erts_alloc(ERTS_ALC_T_BITS_BUF, byte_buf_len);
+
+ erts_bin_offset = 0;
+}
+
+#if defined(ERTS_SMP)
+void
+erts_bits_destroy_state(ERL_BITS_PROTO_0)
+{
+ erts_free(ERTS_ALC_T_BITS_BUF, byte_buf);
+}
+#endif
+
+void
+erts_init_bits(void)
+{
+#if defined(ERTS_SMP)
+ erts_smp_atomic_init(&bits_bufs_size, 0);
+ /* erl_process.c calls erts_bits_init_state() on all state instances */
+#else
+ ERL_BITS_DECLARE_STATEP;
+ erts_bits_init_state(ERL_BITS_ARGS_0);
+#endif
+}
+
+/*****************************************************************
+ ***
+ *** New binary matching functions
+ ***
+ *****************************************************************/
+
+#define ReadToVariable(v64, Buffer, x) \
+ do{ \
+ int _i; \
+ v64 = 0; \
+ for(_i = 0; _i < x; _i++) { \
+ v64 = ((Uint)Buffer[_i] <<(8*_i)) + v64; \
+ } \
+ } while(0)
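+
+/*
+ * Example: with Buffer = {0x34, 0x12}, ReadToVariable(v, Buffer, 2)
+ * yields v = 0x1234; the buffer is read as a little-endian integer.
+ */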
+
+Eterm
+erts_bs_start_match_2(Process *p, Eterm Binary, Uint Max)
+{
+ Eterm Orig;
+ Uint offs;
+ Uint* hp;
+ Uint NeededSize;
+ ErlBinMatchState *ms;
+ Uint bitoffs;
+ Uint bitsize;
+ Uint total_bin_size;
+ ProcBin* pb;
+
+ ASSERT(is_binary(Binary));
+ total_bin_size = binary_size(Binary);
+ if ((total_bin_size >> (8*sizeof(Uint)-3)) != 0) {
+ return THE_NON_VALUE;
+ }
+ NeededSize = ERL_BIN_MATCHSTATE_SIZE(Max);
+ hp = HeapOnlyAlloc(p, NeededSize);
+ ms = (ErlBinMatchState *) hp;
+ ERTS_GET_REAL_BIN(Binary, Orig, offs, bitoffs, bitsize);
+ pb = (ProcBin *) boxed_val(Orig);
+ if (pb->thing_word == HEADER_PROC_BIN && pb->flags != 0) {
+ erts_emasculate_writable_binary(pb);
+ }
+ ms->thing_word = HEADER_BIN_MATCHSTATE(Max);
+ (ms->mb).orig = Orig;
+ (ms->mb).base = binary_bytes(Orig);
+ (ms->mb).offset = ms->save_offset[0] = 8 * offs + bitoffs;
+ (ms->mb).size = total_bin_size * 8 + (ms->mb).offset + bitsize;
+ return make_matchstate(ms);
+}
+
+Eterm
+erts_bs_get_integer_2(Process *p, Uint num_bits, unsigned flags, ErlBinMatchBuffer* mb)
+{
+ Uint bytes;
+ Uint bits;
+ Uint offs;
+ byte bigbuf[64];
+ byte* LSB;
+ byte* MSB;
+ Uint* hp;
+ Uint* hp_end;
+ Uint words_needed;
+ Uint actual;
+ Uint v32;
+ int sgn = 0;
+ Eterm res = THE_NON_VALUE;
+
+ if (num_bits == 0) {
+ return SMALL_ZERO;
+ }
+
+ if (mb->size - mb->offset < num_bits) { /* Asked for too many bits. */
+ return THE_NON_VALUE;
+ }
+
+ /*
+ * Special cases for field sizes up to the size of Uint.
+ */
+
+ if (num_bits <= 8-(offs = BIT_OFFSET(mb->offset))) {
+ /*
+ * All bits are in one byte in the binary. We only need
+ * shift them right and mask them.
+ */
+ Uint b = mb->base[BYTE_OFFSET(mb->offset)];
+ Uint mask = MAKE_MASK(num_bits);
+ mb->offset += num_bits;
+ b >>= 8 - offs - num_bits;
+ b &= mask;
+ if ((flags & BSF_SIGNED) && b >> (num_bits-1)) {
+ b |= ~mask;
+ }
+ return make_small(b);
+ } else if (num_bits <= 8) {
+ /*
+ * The bits are in two different bytes. It is easiest to
+ * combine the bytes into a word first, and then shift right and
+ * mask to extract the bits.
+ */
+ Uint byte_offset = BYTE_OFFSET(mb->offset);
+ Uint w = mb->base[byte_offset] << 8 | mb->base[byte_offset+1];
+ Uint mask = MAKE_MASK(num_bits);
+ mb->offset += num_bits;
+ w >>= 16 - offs - num_bits;
+ w &= mask;
+ if ((flags & BSF_SIGNED) && w >> (num_bits-1)) {
+ w |= ~mask;
+ }
+ return make_small(w);
+ } else if (num_bits < SMALL_BITS && (flags & BSF_LITTLE) == 0) {
+ /*
+ * Handle field sizes from 9 up to SMALL_BITS-1 bits, big-endian,
+ * stored in at least two bytes.
+ */
+ byte* bp = mb->base + BYTE_OFFSET(mb->offset);
+ Uint n;
+ Uint w;
+
+ n = num_bits;
+ mb->offset += num_bits;
+
+ /*
+ * Handle the most significant byte if it contains 1 to 7 bits.
+ * It only needs to be masked, not shifted.
+ */
+ if (offs == 0) {
+ w = 0;
+ } else {
+ Uint num_bits_in_msb = 8 - offs;
+ w = *bp++;
+ n -= num_bits_in_msb;
+ w &= MAKE_MASK(num_bits_in_msb);
+ }
+
+ /*
+ * Simply shift whole bytes into the result.
+ */
+ switch (BYTE_OFFSET(n)) {
+#ifdef ARCH_64
+ case 7: w = (w << 8) | *bp++;
+ case 6: w = (w << 8) | *bp++;
+ case 5: w = (w << 8) | *bp++;
+ case 4: w = (w << 8) | *bp++;
+#endif
+ case 3: w = (w << 8) | *bp++;
+ case 2: w = (w << 8) | *bp++;
+ case 1: w = (w << 8) | *bp++;
+ }
+ n = BIT_OFFSET(n);
+
+ /*
+ * Handle the 1 to 7 bits remaining in the last byte (if any).
+ * They need to be shifted right, but there is no need to mask;
+ * then they can be shifted into the word.
+ */
+ if (n > 0) {
+ Uint b = *bp;
+ b >>= 8 - n;
+ w = (w << n) | b;
+ }
+
+ /*
+ * Sign extend the result if the field type is 'signed' and the
+ * most significant bit is 1.
+ */
+ if ((flags & BSF_SIGNED) != 0 && (w >> (num_bits-1) != 0)) {
+ w |= ~MAKE_MASK(num_bits);
+ }
+ return make_small(w);
+ }
+
+ /*
+ * Handle everything else, that is:
+ *
+ * Big-endian fields >= SMALL_BITS (potentially bignums).
+ * Little-endian fields with 9 or more bits.
+ */
+
+ bytes = NBYTES(num_bits);
+ if ((bits = BIT_OFFSET(num_bits)) == 0) { /* number of bits in MSB */
+ bits = 8;
+ }
+ offs = 8 - bits; /* adjusted offset in MSB */
+
+ if (bytes <= sizeof bigbuf) {
+ LSB = bigbuf;
+ } else {
+ LSB = erts_alloc(ERTS_ALC_T_TMP, bytes);
+ }
+ MSB = LSB + bytes - 1;
+
+ /*
+ * Move bits to temporary buffer. We want the buffer to be stored in
+ * little-endian order, since bignums are little-endian.
+ */
+
+ if (flags & BSF_LITTLE) {
+ erts_copy_bits(mb->base, mb->offset, 1, LSB, 0, 1, num_bits);
+ *MSB >>= offs; /* adjust msb */
+ } else {
+ *MSB = 0;
+ erts_copy_bits(mb->base, mb->offset, 1, MSB, offs, -1, num_bits);
+ }
+ mb->offset += num_bits;
+
+ /*
+ * Get the sign bit.
+ */
+ sgn = 0;
+ if ((flags & BSF_SIGNED) && (*MSB & (1<<(bits-1)))) {
+ byte* ptr = LSB;
+ byte c = 1;
+
+ /* sign extend MSB */
+ *MSB |= ~MAKE_MASK(bits);
+
+ /* two's complement */
+ while (ptr <= MSB) {
+ byte pd = ~(*ptr);
+ byte d = pd + c;
+ c = (d < pd);
+ *ptr++ = d;
+ }
+ sgn = 1;
+ }
+
+ /* normalize */
+ while ((*MSB == 0) && (MSB > LSB)) {
+ MSB--;
+ bytes--;
+ }
+
+ /* check for guaranteed small num */
+ switch (bytes) {
+ case 1:
+ v32 = LSB[0];
+ goto big_small;
+ case 2:
+ v32 = LSB[0] + (LSB[1]<<8);
+ goto big_small;
+ case 3:
+ v32 = LSB[0] + (LSB[1]<<8) + (LSB[2]<<16);
+ goto big_small;
+#if !defined(ARCH_64)
+ case 4:
+ v32 = (LSB[0] + (LSB[1]<<8) + (LSB[2]<<16) + (LSB[3]<<24));
+ if (!IS_USMALL(sgn, v32)) {
+ goto make_big;
+ }
+#else
+ case 4:
+ ReadToVariable(v32, LSB, 4);
+ goto big_small;
+ case 5:
+ ReadToVariable(v32, LSB, 5);
+ goto big_small;
+ case 6:
+ ReadToVariable(v32, LSB, 6);
+ goto big_small;
+ case 7:
+ ReadToVariable(v32, LSB, 7);
+ goto big_small;
+ case 8:
+ ReadToVariable(v32, LSB, 8);
+ if (!IS_USMALL(sgn, v32)) {
+ goto make_big;
+ }
+#endif
+ big_small: /* v32 loaded with value which fits in fixnum */
+ if (sgn) {
+ res = make_small(-((Sint)v32));
+ } else {
+ res = make_small(v32);
+ }
+ break;
+ make_big:
+ hp = HeapOnlyAlloc(p, BIG_UINT_HEAP_SIZE);
+ if (sgn) {
+ hp[0] = make_neg_bignum_header(1);
+ } else {
+ hp[0] = make_pos_bignum_header(1);
+ }
+ BIG_DIGIT(hp,0) = v32;
+ res = make_big(hp);
+ break;
+ default:
+ words_needed = 1+WSIZE(bytes);
+ hp = HeapOnlyAlloc(p, words_needed);
+ hp_end = hp + words_needed;
+ res = bytes_to_big(LSB, bytes, sgn, hp);
+ if (is_small(res)) {
+ p->htop = hp;
+ } else if ((actual = bignum_header_arity(*hp)+1) < words_needed) {
+ p->htop = hp + actual;
+ }
+ break;
+ }
+
+ if (LSB != bigbuf) {
+ erts_free(ERTS_ALC_T_TMP, (void *) LSB);
+ }
+ return res;
+}
+
+Eterm
+erts_bs_get_binary_2(Process *p, Uint num_bits, unsigned flags, ErlBinMatchBuffer* mb)
+{
+ ErlSubBin* sb;
+ size_t num_bytes; /* Number of bytes in binary. */
+
+ if (mb->size - mb->offset < num_bits) { /* Asked for too many bits. */
+ return THE_NON_VALUE;
+ }
+
+ /*
+ * From now on, we can't fail.
+ */
+
+ num_bytes = NBYTES(num_bits);
+ sb = (ErlSubBin *) HeapOnlyAlloc(p, ERL_SUB_BIN_SIZE);
+
+ sb->thing_word = HEADER_SUB_BIN;
+ sb->orig = mb->orig;
+ sb->size = BYTE_OFFSET(num_bits);
+ sb->bitsize = BIT_OFFSET(num_bits);
+ sb->offs = BYTE_OFFSET(mb->offset);
+ sb->bitoffs = BIT_OFFSET(mb->offset);
+ sb->is_writable = 0;
+ mb->offset += num_bits;
+
+ return make_binary(sb);
+}
+
+Eterm
+erts_bs_get_float_2(Process *p, Uint num_bits, unsigned flags, ErlBinMatchBuffer* mb)
+{
+ Eterm* hp;
+ float f32;
+ double f64;
+ byte* fptr;
+ FloatDef f;
+
+ if (num_bits == 0) {
+ f.fd = 0.0;
+ hp = HeapOnlyAlloc(p, FLOAT_SIZE_OBJECT);
+ PUT_DOUBLE(f, hp);
+ return make_float(hp);
+ }
+ if (mb->size - mb->offset < num_bits) { /* Asked for too many bits. */
+ return THE_NON_VALUE;
+ }
+ if (num_bits == 32) {
+ fptr = (byte *) &f32;
+ } else if (num_bits == 64) {
+ fptr = (byte *) &f64;
+ } else {
+ return THE_NON_VALUE;
+ }
+
+ if (BIT_IS_MACHINE_ENDIAN(flags)) {
+ erts_copy_bits(mb->base, mb->offset, 1,
+ fptr, 0, 1,
+ num_bits);
+ } else {
+ erts_copy_bits(mb->base, mb->offset, 1,
+ fptr + NBYTES(num_bits) - 1, 0, -1,
+ num_bits);
+ }
+ ERTS_FP_CHECK_INIT(p);
+ if (num_bits == 32) {
+ ERTS_FP_ERROR_THOROUGH(p, f32, return THE_NON_VALUE);
+ f.fd = f32;
+ } else {
+ ERTS_FP_ERROR_THOROUGH(p, f64, return THE_NON_VALUE);
+ f.fd = f64;
+ }
+ mb->offset += num_bits;
+ hp = HeapOnlyAlloc(p, FLOAT_SIZE_OBJECT);
+ PUT_DOUBLE(f, hp);
+ return make_float(hp);
+}
+
+Eterm
+erts_bs_get_binary_all_2(Process *p, ErlBinMatchBuffer* mb)
+{
+ ErlSubBin* sb;
+ Uint size;
+ size = mb->size-mb->offset;
+ sb = (ErlSubBin *) HeapOnlyAlloc(p, ERL_SUB_BIN_SIZE);
+ sb->thing_word = HEADER_SUB_BIN;
+ sb->size = BYTE_OFFSET(size);
+ sb->bitsize = BIT_OFFSET(size);
+ sb->offs = BYTE_OFFSET(mb->offset);
+ sb->bitoffs = BIT_OFFSET(mb->offset);
+ sb->is_writable = 0;
+ sb->orig = mb->orig;
+ mb->offset = mb->size;
+ return make_binary(sb);
+}
+
+/****************************************************************
+ ***
+ *** Building binaries
+ ***
+ ****************************************************************/
+
+
+/* COPY_VAL:
+ * copy sz bytes from val to the dst buffer;
+ * dst and val are updated!
+ */
+
+#define COPY_VAL(dst,ddir,val,sz) do { \
+ Uint __sz = (sz); \
+ while(__sz) { \
+ switch(__sz) { \
+ default: \
+ case 4: *dst = (val&0xff); dst += ddir; val >>= 8; __sz--; \
+ case 3: *dst = (val&0xff); dst += ddir; val >>= 8; __sz--; \
+ case 2: *dst = (val&0xff); dst += ddir; val >>= 8; __sz--; \
+ case 1: *dst = (val&0xff); dst += ddir; val >>= 8; __sz--; \
+ } \
+ } \
+ } while(0)
+
+/* compute one byte of a two's complement: *b = ~(a - *cp); the borrow *cp is updated! */
+#define SUBc(a, cp, b) do { \
+ byte __x = (a); \
+ byte __y = (__x - (*(cp))); \
+ (*cp) = (__y > __x); \
+ *(b) = ~__y; \
+ } while(0)
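+
+/*
+ * Example: negating the little-endian byte sequence {0x01, 0x00}
+ * (the bignum magnitude 1) with an initial carry of 1 gives
+ * {0xFF, 0xFF}, i.e. -1 in two's complement:
+ *     SUBc(0x01, &c, b) with c = 1: *b = ~(0x01 - 1) = 0xFF, c becomes 0
+ *     SUBc(0x00, &c, b) with c = 0: *b = ~(0x00 - 0) = 0xFF, c stays 0
+ */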
+
+static int
+fmt_int(byte *buf, Uint sz, Eterm val, Uint size, Uint flags)
+{
+ unsigned long offs;
+
+ ASSERT(size != 0);
+ offs = BIT_OFFSET(size);
+ if (is_small(val)) {
+ Sint v = signed_val(val);
+ if (flags & BSF_LITTLE) { /* Little endian */
+ sz--;
+ COPY_VAL(buf,1,v,sz);
+ *buf = offs ? ((v << (8-offs)) & 0xff) : (v & 0xff);
+ } else { /* Big endian */
+ buf += (sz - 1);
+ if (offs) {
+ *buf-- = (v << (8-offs)) & 0xff;
+ sz--;
+ v >>= offs;
+ }
+ COPY_VAL(buf,-1,v,sz);
+ }
+ } else if (is_big(val)) {
+ int sign = big_sign(val);
+ Uint ds = big_size(val)*sizeof(ErtsDigit); /* number of digits bytes */
+ ErtsDigit* dp = big_v(val);
+ int n = MIN(sz,ds);
+
+ if (flags & BSF_LITTLE) {
+ sz -= n; /* pad with this amount */
+ if (sign) {
+ int c = 1;
+ while(n >= sizeof(ErtsDigit)) {
+ ErtsDigit d = *dp++;
+ int i;
+ for(i = 0; i < sizeof(ErtsDigit); ++i) {
+ SUBc((d&0xff), &c, buf);
+ buf++;
+ d >>= 8;
+ }
+ n -= sizeof(ErtsDigit);
+ }
+ if (n) {
+ ErtsDigit d = *dp;
+ do {
+ SUBc((d&0xff), &c, buf);
+ buf++;
+ d >>= 8;
+ } while (--n > 0);
+ }
+ /* pad */
+ while(sz--) {
+ SUBc(0, &c, buf);
+ buf++;
+ }
+ }
+ else {
+ while(n >= sizeof(ErtsDigit)) {
+ ErtsDigit d = *dp++;
+ int i;
+ for(i = 0; i < sizeof(ErtsDigit); ++i) {
+ *buf++ = (d & 0xff);
+ d >>= 8;
+ }
+ n -= sizeof(ErtsDigit);
+ }
+ if (n) {
+ ErtsDigit d = *dp;
+ do {
+ *buf++ = (d & 0xff);
+ d >>= 8;
+ } while (--n > 0);
+ }
+ /* pad */
+ while(sz) {
+ *buf++ = 0;
+ sz--;
+ }
+ }
+ /* adjust MSB!!! */
+ if (offs) {
+ buf--;
+ *buf <<= (8 - offs);
+ }
+ }
+ else { /* BIG ENDIAN */
+ ErtsDigit acc = 0;
+ ErtsDigit d;
+
+ buf += (sz - 1); /* end of buffer */
+ sz -= n; /* pad with this amount */
+ offs = offs ? (8-offs) : 0; /* shift offset */
+
+ if (sign) { /* SIGNED */
+ int c = 1;
+
+ while (n >= sizeof(ErtsDigit)) {
+ int i;
+
+ d = *dp++;
+ acc |= d << offs;
+ SUBc((acc&0xff), &c, buf);
+ buf--;
+ acc = d >> (8-offs);
+ for (i = 0; i < sizeof(ErtsDigit)-1; ++i) {
+ SUBc((acc&0xff), &c, buf);
+ buf--;
+ acc >>= 8;
+ }
+ n -= sizeof(ErtsDigit);
+ }
+ if (n) {
+ acc |= ((ErtsDigit)*dp << offs);
+ do {
+ SUBc((acc & 0xff), &c, buf);
+ buf--;
+ acc >>= 8;
+ } while (--n > 0);
+ }
+ /* pad */
+ while(sz--) {
+ SUBc((acc & 0xff), &c, buf);
+ buf--;
+ acc >>= 8;
+ }
+ }
+ else { /* UNSIGNED */
+ while (n >= sizeof(ErtsDigit)) {
+ int i;
+
+ d = *dp++;
+ acc |= d << offs;
+ *buf-- = acc;
+ acc = d >> (8-offs);
+ for (i = 0; i < sizeof(ErtsDigit)-1; ++i) {
+ *buf-- = acc;
+ acc >>= 8;
+ }
+ n -= sizeof(ErtsDigit);
+ }
+ if (n) {
+ acc |= ((ErtsDigit)*dp << offs);
+ do {
+ *buf-- = acc & 0xff;
+ acc >>= 8;
+ } while (--n > 0);
+ }
+ while (sz--) {
+ *buf-- = acc & 0xff;
+ acc >>= 8;
+ }
+ }
+ }
+ } else { /* Neither small nor big */
+ return -1;
+ }
+ return 0;
+}
+
+static void
+ERTS_INLINE need_byte_buf(ERL_BITS_PROTO_1(int need))
+{
+ if (byte_buf_len < need) {
+#ifdef ERTS_SMP
+ erts_smp_atomic_add(&bits_bufs_size, need - byte_buf_len);
+#endif
+ byte_buf_len = need;
+ byte_buf = erts_realloc(ERTS_ALC_T_BITS_BUF, byte_buf, byte_buf_len);
+ }
+}
+
+int
+erts_new_bs_put_integer(ERL_BITS_PROTO_3(Eterm arg, Uint num_bits, unsigned flags))
+{
+ Uint bin_offset = erts_bin_offset;
+ Uint bit_offset;
+ Uint b;
+ byte *iptr;
+
+ if (num_bits == 0) {
+ return 1;
+ }
+
+ bit_offset = BIT_OFFSET(bin_offset);
+ if (is_small(arg)) {
+ Uint rbits = 8 - bit_offset;
+
+ if (bit_offset + num_bits <= 8) {
+ /*
+ * All bits are in the same byte.
+ */
+ iptr = erts_current_bin+BYTE_OFFSET(bin_offset);
+ b = *iptr & (0xff << rbits);
+ b |= (signed_val(arg) & ((1 << num_bits)-1)) << (8-bit_offset-num_bits);
+ *iptr = b;
+ } else if (bit_offset == 0) {
+ /*
+ * More than one bit, starting at a byte boundary.
+ * That will be quite efficiently handled by fmt_int().
+ *
+ * (We know that fmt_int() can't fail here.)
+ */
+ (void) fmt_int(erts_current_bin+BYTE_OFFSET(bin_offset),
+ NBYTES(num_bits), arg, num_bits, flags);
+ } else if (flags & BSF_LITTLE) {
+ /*
+ * Can't handle unaligned little-endian in a simple way.
+ */
+ goto unaligned;
+ } else { /* Big endian */
+ /*
+ * Big-endian, more than one byte, but not aligned on a byte boundary.
+ * Handle the bits up to the next byte boundary specially,
+ * then let fmt_int() handle the rest.
+ */
+ Uint shift_count = num_bits - rbits;
+ Sint val = signed_val(arg);
+ iptr = erts_current_bin+BYTE_OFFSET(bin_offset);
+ b = *iptr & (0xff << rbits);
+
+ /*
+ * Shifting with a shift count greater than or equal to the word
+ * size may be a no-op (instead of 0 the result may be the unshifted
+ * value). Therefore, we only do the shift and the OR when the shift
+ * count is less than the word size; otherwise, if the number is
+ * negative, we simulate the sign extension instead.
+ */
+ if (shift_count < sizeof(Uint)*8) {
+ b |= (val >> shift_count) & ((1 << rbits) - 1);
+ } else if (val < 0) {
+ /* Simulate sign extension. */
+ b |= (-1) & ((1 << rbits) - 1);
+ }
+ *iptr++ = b;
+
+ /* fmt_int() can't fail here. */
+ (void) fmt_int(iptr, NBYTES(num_bits-rbits), arg,
+ num_bits-rbits, flags);
+ }
+ } else if (bit_offset == 0) {
+ /*
+ * Big number, aligned on a byte boundary. We can format the
+ * integer directly into the binary.
+ */
+ if (fmt_int(erts_current_bin+BYTE_OFFSET(bin_offset),
+ NBYTES(num_bits), arg, num_bits, flags) < 0) {
+ return 0;
+ }
+ } else {
+ unaligned:
+ /*
+ * Big number or small little-endian number, not byte-aligned,
+ * or not a number at all.
+ *
+ * We must format the number into a temporary buffer, and then
+ * copy that into the binary.
+ */
+ need_byte_buf(ERL_BITS_ARGS_1(NBYTES(num_bits)));
+ iptr = byte_buf;
+ if (fmt_int(iptr, NBYTES(num_bits), arg, num_bits, flags) < 0) {
+ return 0;
+ }
+ erts_copy_bits(iptr, 0, 1, erts_current_bin, bin_offset, 1, num_bits);
+ }
+ erts_bin_offset = bin_offset + num_bits;
+ return 1;
+}
+
+int
+erts_bs_put_utf8(ERL_BITS_PROTO_1(Eterm arg))
+{
+ Uint bin_offset = erts_bin_offset;
+ Uint bit_offset;
+ Uint num_bits;
+ byte tmp_buf[4];
+ byte* dst;
+ Sint val;
+
+ if (is_not_small(arg)) {
+ return 0;
+ }
+ val = signed_val(arg);
+ if (val < 0) {
+ return 0;
+ }
+
+ if ((bit_offset = BIT_OFFSET(bin_offset)) == 0) {
+ /* We can write directly into the destination binary. */
+ dst = erts_current_bin+BYTE_OFFSET(bin_offset);
+ } else {
+ /* Unaligned destination binary. Must use a temporary buffer. */
+ dst = tmp_buf;
+ }
+ if (val < 0x80) {
+ dst[0] = val;
+ num_bits = 8;
+ } else if (val < 0x800) {
+ dst[0] = 0xC0 | (val >> 6);
+ dst[1] = 0x80 | (val & 0x3F);
+ num_bits = 16;
+ } else if (val < 0x10000UL) {
+ if ((0xD800 <= val && val <= 0xDFFF) ||
+ val == 0xFFFE || val == 0xFFFF) {
+ return 0;
+ }
+ dst[0] = 0xE0 | (val >> 12);
+ dst[1] = 0x80 | ((val >> 6) & 0x3F);
+ dst[2] = 0x80 | (val & 0x3F);
+ num_bits = 24;
+ } else if (val < 0x110000) {
+ dst[0] = 0xF0 | (val >> 18);
+ dst[1] = 0x80 | ((val >> 12) & 0x3F);
+ dst[2] = 0x80 | ((val >> 6) & 0x3F);
+ dst[3] = 0x80 | (val & 0x3F);
+ num_bits = 32;
+ } else {
+ return 0;
+ }
+
+ if (bin_offset != 0) {
+ erts_copy_bits(dst, 0, 1, erts_current_bin, bin_offset, 1, num_bits);
+ }
+
+ erts_bin_offset += num_bits;
+
+ return 1;
+}
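+
+/*
+ * Worked example for the encoder above: the code point 16#20AC
+ * (the Euro sign) falls in the three-byte range and is encoded as
+ * 0xE2 0x82 0xAC:
+ *     0xE0 | (0x20AC >> 12)         = 0xE2
+ *     0x80 | ((0x20AC >> 6) & 0x3F) = 0x82
+ *     0x80 | (0x20AC & 0x3F)        = 0xAC
+ */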
+
+int
+erts_bs_put_utf16(ERL_BITS_PROTO_2(Eterm arg, Uint flags))
+{
+ Uint bin_offset = erts_bin_offset;
+ Uint bit_offset;
+ Uint num_bits;
+ byte tmp_buf[4];
+ byte* dst;
+ Uint val;
+
+ if (is_not_small(arg)) {
+ return 0;
+ }
+ val = unsigned_val(arg);
+ if (val > 0x10FFFF || (0xD800 <= val && val <= 0xDFFF) ||
+ val == 0xFFFE || val == 0xFFFF) {
+ return 0;
+ }
+
+ if ((bit_offset = BIT_OFFSET(bin_offset)) == 0) {
+ /* We can write directly into the destination binary. */
+ dst = erts_current_bin+BYTE_OFFSET(bin_offset);
+ } else {
+ /* Unaligned destination binary. Must use a temporary buffer. */
+ dst = tmp_buf;
+ }
+
+ if (val < 0x10000UL) {
+ num_bits = 16;
+ if (flags & BSF_LITTLE) {
+ dst[0] = val;
+ dst[1] = val >> 8;
+ } else {
+ dst[0] = val >> 8;
+ dst[1] = val;
+ }
+ } else {
+ Uint16 w1, w2;
+
+ num_bits = 32;
+ val = val - 0x10000UL;
+ w1 = 0xD800 | (val >> 10);
+ w2 = 0xDC00 | (val & 0x3FF);
+ if (flags & BSF_LITTLE) {
+ dst[0] = w1;
+ dst[1] = w1 >> 8;
+ dst[2] = w2;
+ dst[3] = w2 >> 8;
+ } else {
+ dst[0] = w1 >> 8;
+ dst[1] = w1;
+ dst[2] = w2 >> 8;
+ dst[3] = w2;
+ }
+ }
+
+ if (bin_offset != 0) {
+ erts_copy_bits(dst, 0, 1, erts_current_bin, bin_offset, 1, num_bits);
+ }
+
+ erts_bin_offset += num_bits;
+ return 1;
+}
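+
+/*
+ * Worked example for the encoder above: the code point 16#10437 is
+ * above 16#FFFF, so it becomes the surrogate pair 0xD801 0xDC37:
+ *     val - 0x10000          = 0x00437
+ *     0xD800 | (val >> 10)   = 0xD801
+ *     0xDC00 | (val & 0x3FF) = 0xDC37
+ */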
+
+
+int
+erts_new_bs_put_binary(ERL_BITS_PROTO_2(Eterm arg, Uint num_bits))
+{
+ byte *bptr;
+ Uint bitoffs;
+ Uint bitsize;
+
+ if (!is_binary(arg)) {
+ return 0;
+ }
+ ERTS_GET_BINARY_BYTES(arg, bptr, bitoffs, bitsize);
+ if (num_bits > 8*binary_size(arg)+bitsize) {
+ return 0;
+ }
+ copy_binary_to_buffer(erts_current_bin, erts_bin_offset, bptr, bitoffs, num_bits);
+ erts_bin_offset += num_bits;
+ return 1;
+}
+
+int
+erts_new_bs_put_binary_all(ERL_BITS_PROTO_2(Eterm arg, Uint unit))
+{
+ byte *bptr;
+ Uint bitoffs;
+ Uint bitsize;
+ Uint num_bits;
+
+ /*
+ * This type test is not needed if the code was compiled with
+ * an R12B or later compiler, since there would have been a
+ * call to bit_size/1 or byte_size/1 that would have failed if
+ * 'arg' was not a binary. However, in R11B and earlier releases,
+ * size/1 was used for calculating the size of the binary, and
+ * therefore 'arg' could be a tuple.
+ */
+ if (!is_binary(arg)) {
+ return 0;
+ }
+
+ ERTS_GET_BINARY_BYTES(arg, bptr, bitoffs, bitsize);
+ num_bits = 8*binary_size(arg)+bitsize;
+ if (unit == 8) {
+ if (bitsize != 0) {
+ return 0;
+ }
+ } else if (unit != 1 && num_bits % unit != 0) {
+ return 0;
+ }
+ copy_binary_to_buffer(erts_current_bin, erts_bin_offset, bptr, bitoffs, num_bits);
+ erts_bin_offset += num_bits;
+ return 1;
+}
+
+int
+erts_new_bs_put_float(Process *c_p, Eterm arg, Uint num_bits, int flags)
+{
+ ERL_BITS_DEFINE_STATEP(c_p);
+
+ if (BIT_OFFSET(erts_bin_offset) == 0) {
+ Uint32 a;
+ Uint32 b;
+
+ if (num_bits == 64) {
+ union {
+ double f64;
+ Uint32 i32[2];
+ } u;
+
+ if (is_float(arg)) {
+ FloatDef *fdp = (FloatDef*)(float_val(arg) + 1);
+ a = fdp->fw[0];
+ b = fdp->fw[1];
+ } else if (is_small(arg)) {
+ u.f64 = (double) signed_val(arg);
+ a = u.i32[0];
+ b = u.i32[1];
+ } else if (is_big(arg)) {
+ if (big_to_double(arg, &u.f64) < 0) {
+ return 0;
+ }
+ a = u.i32[0];
+ b = u.i32[1];
+ } else {
+ return 0;
+ }
+ } else if (num_bits == 32) {
+ union {
+ float f32;
+ Uint32 i32;
+ } u;
+
+ b = 0;
+ if (is_float(arg)) {
+ FloatDef f;
+ GET_DOUBLE(arg, f);
+ ERTS_FP_CHECK_INIT(c_p);
+ u.f32 = f.fd;
+ ERTS_FP_ERROR(c_p,u.f32,;);
+ a = u.i32;
+ } else if (is_small(arg)) {
+ u.f32 = (float) signed_val(arg);
+ a = u.i32;
+ } else if (is_big(arg)) {
+ double f64;
+ if (big_to_double(arg, &f64) < 0) {
+ return 0;
+ }
+ ERTS_FP_CHECK_INIT(c_p);
+ u.f32 = (float) f64;
+ ERTS_FP_ERROR(c_p,u.f32,;);
+ a = u.i32;
+ } else {
+ return 0;
+ }
+ } else {
+ return 0;
+ }
+
+ if (BIT_IS_MACHINE_ENDIAN(flags)) {
+ byte* t = erts_current_bin+BYTE_OFFSET(erts_bin_offset);
+#ifdef WORDS_BIGENDIAN
+ t[0] = a >> 24;
+ t[1] = a >> 16;
+ t[2] = a >> 8;
+ t[3] = a;
+ if (num_bits == 64) {
+ t[4] = b >> 24;
+ t[5] = b >> 16;
+ t[6] = b >> 8;
+ t[7] = b;
+ }
+#else
+ t[3] = a >> 24;
+ t[2] = a >> 16;
+ t[1] = a >> 8;
+ t[0] = a;
+ if (num_bits == 64) {
+ t[7] = b >> 24;
+ t[6] = b >> 16;
+ t[5] = b >> 8;
+ t[4] = b;
+ }
+#endif
+ } else {
+ byte* t = erts_current_bin+BYTE_OFFSET(erts_bin_offset) + NBYTES(num_bits);
+#ifdef WORDS_BIGENDIAN
+ t[-1] = a >> 24;
+ t[-2] = a >> 16;
+ t[-3] = a >> 8;
+ t[-4] = a;
+ if (num_bits == 64) {
+ t[-5] = b >> 24;
+ t[-6] = b >> 16;
+ t[-7] = b >> 8;
+ t[-8] = b;
+ }
+#else
+ t[-1] = a;
+ t[-2] = a >> 8;
+ t[-3] = a >> 16;
+ t[-4] = a >> 24;
+ if (num_bits == 64) {
+ t[-5] = b;
+ t[-6] = b >> 8;
+ t[-7] = b >> 16;
+ t[-8] = b >> 24;
+ }
+#endif
+ }
+ } else {
+ byte *bptr;
+ double f64;
+ float f32;
+
+ if (num_bits == 64) {
+ if (is_float(arg)) {
+ bptr = (byte *) (float_val(arg) + 1);
+ } else if (is_small(arg)) {
+ f64 = (double) signed_val(arg);
+ bptr = (byte *) &f64;
+ } else if (is_big(arg)) {
+ if (big_to_double(arg, &f64) < 0) {
+ return 0;
+ }
+ bptr = (byte *) &f64;
+ } else {
+ return 0;
+ }
+ } else if (num_bits == 32) {
+ if (is_float(arg)) {
+ FloatDef f;
+ GET_DOUBLE(arg, f);
+ ERTS_FP_CHECK_INIT(c_p);
+ f32 = f.fd;
+ ERTS_FP_ERROR(c_p,f32,;);
+ bptr = (byte *) &f32;
+ } else if (is_small(arg)) {
+ f32 = (float) signed_val(arg);
+ bptr = (byte *) &f32;
+ } else if (is_big(arg)) {
+ if (big_to_double(arg, &f64) < 0) {
+ return 0;
+ }
+ ERTS_FP_CHECK_INIT(c_p);
+ f32 = (float) f64;
+ ERTS_FP_ERROR(c_p,f32,;);
+ bptr = (byte *) &f32;
+ } else {
+ return 0;
+ }
+ } else {
+ return 0;
+ }
+ if (BIT_IS_MACHINE_ENDIAN(flags)) {
+ erts_copy_bits(bptr, 0, 1,
+ erts_current_bin,
+ erts_bin_offset, 1, num_bits);
+ } else {
+ erts_copy_bits(bptr+NBYTES(num_bits)-1, 0, -1,
+ erts_current_bin, erts_bin_offset, 1,
+ num_bits);
+ }
+ }
+ erts_bin_offset += num_bits;
+ return 1;
+}
+
+void
+erts_new_bs_put_string(ERL_BITS_PROTO_2(byte* iptr, Uint num_bytes))
+{
+ if (BIT_OFFSET(erts_bin_offset) != 0) {
+ erts_copy_bits(iptr, 0, 1, erts_current_bin, erts_bin_offset, 1, num_bytes*8);
+ } else {
+ sys_memcpy(erts_current_bin+BYTE_OFFSET(erts_bin_offset), iptr, num_bytes);
+ }
+ erts_bin_offset += num_bytes*8;
+}
+
+Eterm
+erts_bs_append(Process* c_p, Eterm* reg, Uint live, Eterm build_size_term,
+ Uint extra_words, Uint unit)
+{
+ Eterm bin; /* Given binary */
+ Eterm* ptr;
+ Eterm hdr;
+ ErlSubBin* sb;
+ ProcBin* pb;
+ Binary* binp;
+ Uint heap_need;
+ Uint build_size_in_bits;
+ Uint used_size_in_bits;
+ Uint unsigned_bits;
+ ERL_BITS_DEFINE_STATEP(c_p);
+
+ /*
+ * Check and untag the requested build size.
+ */
+ if (is_small(build_size_term)) {
+ Sint signed_bits = signed_val(build_size_term);
+ if (signed_bits < 0) {
+ goto badarg;
+ }
+ build_size_in_bits = (Uint) signed_bits;
+ } else if (term_to_Uint(build_size_term, &unsigned_bits)) {
+ build_size_in_bits = unsigned_bits;
+ } else {
+ c_p->freason = unsigned_bits;
+ return THE_NON_VALUE;
+ }
+
+ /*
+ * Check the binary argument.
+ */
+ bin = reg[live];
+ if (!is_boxed(bin)) {
+ badarg:
+ c_p->freason = BADARG;
+ return THE_NON_VALUE;
+ }
+ ptr = boxed_val(bin);
+ hdr = *ptr;
+ if (!is_binary_header(hdr)) {
+ goto badarg;
+ }
+ if (hdr != HEADER_SUB_BIN) {
+ goto not_writable;
+ }
+ sb = (ErlSubBin *) ptr;
+ if (!sb->is_writable) {
+ goto not_writable;
+ }
+ pb = (ProcBin *) boxed_val(sb->orig);
+ ASSERT(pb->thing_word == HEADER_PROC_BIN);
+ if ((pb->flags & PB_IS_WRITABLE) == 0) {
+ goto not_writable;
+ }
+
+ /*
+ * OK, the binary is writable.
+ */
+
+ erts_bin_offset = 8*sb->size + sb->bitsize;
+ used_size_in_bits = erts_bin_offset + build_size_in_bits;
+ sb->is_writable = 0; /* Make sure that no one else can write. */
+ pb->size = NBYTES(used_size_in_bits);
+ pb->flags |= PB_ACTIVE_WRITER;
+
+ /*
+ * Reallocate the binary if it is too small.
+ */
+ binp = pb->val;
+ if (binp->orig_size < pb->size) {
+ Uint new_size = 2*pb->size;
+ binp = erts_bin_realloc(binp, new_size);
+ binp->orig_size = new_size;
+ pb->val = binp;
+ pb->bytes = (byte *) binp->orig_bytes;
+ }
+ erts_current_bin = pb->bytes;
+
+ /*
+ * Allocate heap space and build a new sub binary.
+ */
+ reg[live] = sb->orig;
+ heap_need = ERL_SUB_BIN_SIZE + extra_words;
+ if (c_p->stop - c_p->htop < heap_need) {
+ (void) erts_garbage_collect(c_p, heap_need, reg, live+1);
+ }
+ sb = (ErlSubBin *) c_p->htop;
+ c_p->htop += ERL_SUB_BIN_SIZE;
+ sb->thing_word = HEADER_SUB_BIN;
+ sb->size = BYTE_OFFSET(used_size_in_bits);
+ sb->bitsize = BIT_OFFSET(used_size_in_bits);
+ sb->offs = 0;
+ sb->bitoffs = 0;
+ sb->is_writable = 1;
+ sb->orig = reg[live];
+
+ return make_binary(sb);
+
+ /*
+ * The binary is not writable. We must create a new writable binary and
+ * copy the old contents of the binary.
+ */
+ not_writable:
+ {
+ Uint used_size_in_bytes; /* Size of old binary + data to be built */
+ Uint bin_size;
+ Binary* bptr;
+ byte* src_bytes;
+ Uint bitoffs;
+ Uint bitsize;
+ Eterm* hp;
+
+ /*
+ * Allocate heap space.
+ */
+ heap_need = PROC_BIN_SIZE + ERL_SUB_BIN_SIZE + extra_words;
+ if (c_p->stop - c_p->htop < heap_need) {
+ (void) erts_garbage_collect(c_p, heap_need, reg, live+1);
+ bin = reg[live];
+ }
+ hp = c_p->htop;
+
+ /*
+ * Calculate sizes. The size of the new binary is the sum of the
+ * build size and the size of the old binary. Allow some room
+ * for growing.
+ */
+ ERTS_GET_BINARY_BYTES(bin, src_bytes, bitoffs, bitsize);
+ erts_bin_offset = 8*binary_size(bin) + bitsize;
+ used_size_in_bits = erts_bin_offset + build_size_in_bits;
+ used_size_in_bytes = NBYTES(used_size_in_bits);
+ bin_size = 2*used_size_in_bytes;
+ bin_size = (bin_size < 256) ? 256 : bin_size;
+
+ /*
+ * Allocate the binary data struct itself.
+ */
+ bptr = erts_bin_nrml_alloc(bin_size);
+ bptr->flags = 0;
+ bptr->orig_size = bin_size;
+ erts_refc_init(&bptr->refc, 1);
+ erts_current_bin = (byte *) bptr->orig_bytes;
+
+ /*
+ * Now allocate the ProcBin on the heap.
+ */
+ pb = (ProcBin *) hp;
+ hp += PROC_BIN_SIZE;
+ pb->thing_word = HEADER_PROC_BIN;
+ pb->size = used_size_in_bytes;
+ pb->next = MSO(c_p).mso;
+ MSO(c_p).mso = pb;
+ pb->val = bptr;
+ pb->bytes = (byte*) bptr->orig_bytes;
+ pb->flags = PB_IS_WRITABLE | PB_ACTIVE_WRITER;
+ MSO(c_p).overhead += pb->size / sizeof(Eterm);
+
+ /*
+ * Now allocate the sub binary and set its size to include the
+ * data about to be built.
+ */
+ sb = (ErlSubBin *) hp;
+ hp += ERL_SUB_BIN_SIZE;
+ sb->thing_word = HEADER_SUB_BIN;
+ sb->size = BYTE_OFFSET(used_size_in_bits);
+ sb->bitsize = BIT_OFFSET(used_size_in_bits);
+ sb->offs = 0;
+ sb->bitoffs = 0;
+ sb->is_writable = 1;
+ sb->orig = make_binary(pb);
+
+ c_p->htop = hp;
+
+ /*
+ * Now copy the data into the binary.
+ */
+ if (unit > 1) {
+ if ((unit == 8 && (erts_bin_offset & 7) != 0) ||
+ (erts_bin_offset % unit) != 0) {
+ return THE_NON_VALUE;
+ }
+ }
+ copy_binary_to_buffer(erts_current_bin, 0, src_bytes, bitoffs, erts_bin_offset);
+
+ return make_binary(sb);
+ }
+}
+
+Eterm
+erts_bs_private_append(Process* p, Eterm bin, Eterm build_size_term, Uint unit)
+{
+ Eterm* ptr;
+ ErlSubBin* sb;
+ ProcBin* pb;
+ Binary* binp;
+ Uint build_size_in_bits;
+ Uint pos_in_bits_after_build;
+ Uint unsigned_bits;
+ ERL_BITS_DEFINE_STATEP(p);
+
+ /*
+ * Check and untag the requested build size.
+ */
+ if (is_small(build_size_term)) {
+ Sint signed_bits = signed_val(build_size_term);
+ if (signed_bits < 0) {
+ p->freason = BADARG;
+ return THE_NON_VALUE;
+ }
+ build_size_in_bits = (Uint) signed_bits;
+ } else if (term_to_Uint(build_size_term, &unsigned_bits)) {
+ build_size_in_bits = unsigned_bits;
+ } else {
+ p->freason = unsigned_bits;
+ return THE_NON_VALUE;
+ }
+
+ ptr = boxed_val(bin);
+ ASSERT(*ptr == HEADER_SUB_BIN);
+
+ sb = (ErlSubBin *) ptr;
+ ASSERT(sb->is_writable);
+
+ pb = (ProcBin *) boxed_val(sb->orig);
+ ASSERT(pb->thing_word == HEADER_PROC_BIN);
+
+ /*
+ * Calculate new size in bytes.
+ */
+ erts_bin_offset = 8*sb->size + sb->bitsize;
+ pos_in_bits_after_build = erts_bin_offset + build_size_in_bits;
+ pb->size = (pos_in_bits_after_build+7) >> 3;
+ pb->flags |= PB_ACTIVE_WRITER;
+
+ /*
+ * Reallocate the binary if it is too small.
+ */
+ binp = pb->val;
+ if (binp->orig_size < pb->size) {
+ Uint new_size = 2*pb->size;
+
+ if (pb->flags & PB_IS_WRITABLE) {
+ /*
+ * This is the normal case - the binary is writable.
+ * There are no other references to the binary, so it
+ * is safe to reallocate it.
+ */
+ binp = erts_bin_realloc(binp, new_size);
+ binp->orig_size = new_size;
+ pb->val = binp;
+ pb->bytes = (byte *) binp->orig_bytes;
+ } else {
+ /*
+ * The binary is NOT writable. The only way that is
+ * supposed to happen is if call tracing has been turned
+ * on. That means that a trace process now has (or has
+ * had) a reference to the binary, so we are not allowed
+ * to reallocate the binary. Instead, we must allocate a new
+ * binary and copy the contents of the old binary into it.
+ */
+ Binary* bptr = erts_bin_nrml_alloc(new_size);
+ bptr->flags = 0;
+ bptr->orig_size = new_size;
+ erts_refc_init(&bptr->refc, 1);
+ sys_memcpy(bptr->orig_bytes, binp->orig_bytes, pb->size);
+ pb->flags |= PB_IS_WRITABLE | PB_ACTIVE_WRITER;
+ pb->val = bptr;
+ pb->bytes = (byte *) bptr->orig_bytes;
+ if (erts_refc_dectest(&binp->refc, 0) == 0) {
+ erts_bin_free(binp);
+ }
+ }
+ }
+ erts_current_bin = pb->bytes;
+
+ sb->size = pos_in_bits_after_build >> 3;
+ sb->bitsize = pos_in_bits_after_build & 7;
+ return bin;
+}
+
+Eterm
+erts_bs_init_writable(Process* p, Eterm sz)
+{
+ Uint bin_size = 1024;
+ Uint heap_need;
+ Binary* bptr;
+ ProcBin* pb;
+ ErlSubBin* sb;
+ Eterm* hp;
+
+ if (is_small(sz)) {
+ Sint s = signed_val(sz);
+ if (s >= 0) {
+ bin_size = (Uint) s;
+ }
+ }
+
+ /*
+ * Allocate heap space.
+ */
+ heap_need = PROC_BIN_SIZE + ERL_SUB_BIN_SIZE;
+ if (p->stop - p->htop < heap_need) {
+ (void) erts_garbage_collect(p, heap_need, NULL, 0);
+ }
+ hp = p->htop;
+
+ /*
+ * Allocate the binary data struct itself.
+ */
+ bptr = erts_bin_nrml_alloc(bin_size);
+ bptr->flags = 0;
+ bptr->orig_size = bin_size;
+ erts_refc_init(&bptr->refc, 1);
+
+ /*
+ * Now allocate the ProcBin on the heap.
+ */
+ pb = (ProcBin *) hp;
+ hp += PROC_BIN_SIZE;
+ pb->thing_word = HEADER_PROC_BIN;
+ pb->size = 0;
+ pb->next = MSO(p).mso;
+ MSO(p).mso = pb;
+ pb->val = bptr;
+ pb->bytes = (byte*) bptr->orig_bytes;
+ pb->flags = PB_IS_WRITABLE | PB_ACTIVE_WRITER;
+ MSO(p).overhead += pb->size / sizeof(Eterm);
+
+ /*
+ * Now allocate the sub binary.
+ */
+ sb = (ErlSubBin *) hp;
+ hp += ERL_SUB_BIN_SIZE;
+ sb->thing_word = HEADER_SUB_BIN;
+ sb->size = 0;
+ sb->offs = 0;
+ sb->bitsize = 0;
+ sb->bitoffs = 0;
+ sb->is_writable = 1;
+ sb->orig = make_binary(pb);
+
+ p->htop = hp;
+ return make_binary(sb);
+}
+
+void
+erts_emasculate_writable_binary(ProcBin* pb)
+{
+ Binary* binp;
+ Uint unused;
+
+ pb->flags = 0;
+ binp = pb->val;
+ ASSERT(binp->orig_size >= pb->size);
+ unused = binp->orig_size - pb->size;
+    /* Our allocators are 8-byte aligned, i.e., shrinking by
+       fewer than 8 bytes will have no real effect */
+ if (unused >= 8) {
+ Uint new_size = pb->size;
+ binp = erts_bin_realloc(binp, pb->size);
+ binp->orig_size = new_size;
+ pb->val = binp;
+ pb->bytes = (byte *) binp->orig_bytes;
+ }
+}
+
+Uint32
+erts_bs_get_unaligned_uint32(ErlBinMatchBuffer* mb)
+{
+ Uint bytes;
+ Uint bits;
+ Uint offs;
+ byte bigbuf[4];
+ byte* LSB;
+ byte* MSB;
+
+ ASSERT((mb->offset & 7) != 0);
+ ASSERT(mb->size - mb->offset >= 32);
+
+ bytes = 4;
+ bits = 8;
+ offs = 0;
+
+ LSB = bigbuf;
+ MSB = LSB + bytes - 1;
+
+ *MSB = 0;
+ erts_copy_bits(mb->base, mb->offset, 1, MSB, offs, -1, 32);
+ return LSB[0] | (LSB[1]<<8) | (LSB[2]<<16) | (LSB[3]<<24);
+}
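+
+/*
+ * Illustrative note: the call above copies the 32 bits backwards (ddir ==
+ * -1, starting at MSB), so the bytes land in bigbuf least-significant
+ * byte first; the shift-and-or then yields the same value on any host
+ * endianness. A minimal sketch (not compiled) of that assembly step,
+ * with a hypothetical helper name:
+ */
+#if 0
+static unsigned int u32_from_lsb_first(const unsigned char b[4])
+{
+    return (unsigned int) b[0] | ((unsigned int) b[1] << 8)
+	| ((unsigned int) b[2] << 16) | ((unsigned int) b[3] << 24);
+}
+/* {0x78,0x56,0x34,0x12} -> 0x12345678 on any host. */
+#endif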
+
+void
+erts_align_utf8_bytes(ErlBinMatchBuffer* mb, byte* buf)
+{
+ Uint bits = mb->size - mb->offset;
+
+ /*
+ * Copy up to 4 bytes into the supplied buffer.
+ */
+
+ ASSERT(bits >= 8);
+ if (bits <= 15) {
+ bits = 8;
+ } else if (bits >= 32) {
+ bits = 32;
+ } else if (bits >= 24) {
+ bits = 24;
+ } else {
+ bits = 16;
+ }
+ erts_copy_bits(mb->base, mb->offset, 1, buf, 0, 1, bits);
+}
+
+Eterm
+erts_bs_get_utf8(ErlBinMatchBuffer* mb)
+{
+ Eterm result;
+ Uint remaining_bits;
+ byte* pos;
+ byte tmp_buf[4];
+ Eterm a, b, c;
+
+ /*
+ * Number of trailing bytes for each value of the first byte.
+ */
+ static const byte erts_trailing_bytes_for_utf8[256] = {
+ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+ 9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9, 9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,
+ 9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9, 9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,
+ 9,9,1,1,1,1,1,1,1,1,1,1,1,1,1,1, 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
+ 2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2, 3,3,3,3,3,3,3,3,9,9,9,9,9,9,9,9
+ };
+
+ if ((remaining_bits = mb->size - mb->offset) < 8) {
+ return THE_NON_VALUE;
+ }
+ if (BIT_OFFSET(mb->offset) == 0) {
+ pos = mb->base + BYTE_OFFSET(mb->offset);
+ } else {
+ erts_align_utf8_bytes(mb, tmp_buf);
+ pos = tmp_buf;
+ }
+ result = pos[0];
+ switch (erts_trailing_bytes_for_utf8[result]) {
+ case 0:
+ /* One byte only */
+ mb->offset += 8;
+ break;
+ case 1:
+ /* Two bytes */
+ if (remaining_bits < 16) {
+ return THE_NON_VALUE;
+ }
+ a = pos[1];
+ if ((a & 0xC0) != 0x80) {
+ return THE_NON_VALUE;
+ }
+ result = (result << 6) + a - (Eterm) 0x00003080UL;
+ mb->offset += 16;
+ break;
+ case 2:
+ /* Three bytes */
+ if (remaining_bits < 24) {
+ return THE_NON_VALUE;
+ }
+ a = pos[1];
+ b = pos[2];
+ if ((a & 0xC0) != 0x80 || (b & 0xC0) != 0x80 ||
+ (result == 0xE0 && a < 0xA0)) {
+ return THE_NON_VALUE;
+ }
+ result = (((result << 6) + a) << 6) + b - (Eterm) 0x000E2080UL;
+ if ((0xD800 <= result && result <= 0xDFFF) ||
+ result == 0xFFFE || result == 0xFFFF) {
+ return THE_NON_VALUE;
+ }
+ mb->offset += 24;
+ break;
+ case 3:
+ /* Four bytes */
+ if (remaining_bits < 32) {
+ return THE_NON_VALUE;
+ }
+ a = pos[1];
+ b = pos[2];
+ c = pos[3];
+ if ((a & 0xC0) != 0x80 || (b & 0xC0) != 0x80 ||
+ (c & 0xC0) != 0x80 ||
+ (result == 0xF0 && a < 0x90)) {
+ return THE_NON_VALUE;
+ }
+ result = (((((result << 6) + a) << 6) + b) << 6) +
+ c - (Eterm) 0x03C82080UL;
+ if (result > 0x10FFFF) {
+ return THE_NON_VALUE;
+ }
+ mb->offset += 32;
+ break;
+ default:
+ return THE_NON_VALUE;
+ }
+ return make_small(result);
+}
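+
+/*
+ * Illustrative note: the magic constants above fold the removal of the
+ * UTF-8 tag bits (0xC0 + 0x80, 0xE0 + 0x80 + 0x80, ...) into a single
+ * subtraction per sequence length. Worked examples:
+ *
+ *   2 bytes, U+00E9 = C3 A9:
+ *     ((0xC3 << 6) + 0xA9) - 0x00003080 = 0xE9
+ *   3 bytes, U+20AC = E2 82 AC:
+ *     ((((0xE2 << 6) + 0x82) << 6) + 0xAC) - 0x000E2080 = 0x20AC
+ *   4 bytes, U+1F600 = F0 9F 98 80:
+ *     ((((((0xF0 << 6) + 0x9F) << 6) + 0x98) << 6) + 0x80) - 0x03C82080
+ *       = 0x1F600
+ *
+ * Minimal sketch (not compiled) of the two-byte case, with a hypothetical
+ * helper name:
+ */
+#if 0
+static unsigned decode_utf8_2(unsigned char b0, unsigned char b1)
+{
+    return (((unsigned) b0 << 6) + b1) - 0x3080u;	/* C3 A9 -> 0xE9 */
+}
+#endif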
+
+Eterm
+erts_bs_get_utf16(ErlBinMatchBuffer* mb, Uint flags)
+{
+ Uint bit_offset;
+ Uint num_bits = mb->size - mb->offset;
+ byte* src;
+ byte tmp_buf[4];
+ Uint16 w1;
+ Uint16 w2;
+
+ if (num_bits < 16) {
+ return THE_NON_VALUE;
+ }
+
+ /*
+ * Set up the pointer to the source bytes.
+ */
+ if ((bit_offset = BIT_OFFSET(mb->offset)) == 0) {
+ /* We can access the binary directly because the bytes are aligned. */
+ src = mb->base + BYTE_OFFSET(mb->offset);
+ } else {
+ /*
+ * We must copy the data to a temporary buffer. If possible,
+ * get 4 bytes, otherwise two bytes.
+ */
+ Uint n = num_bits < 32 ? 16 : 32;
+ erts_copy_bits(mb->base, mb->offset, 1, tmp_buf, 0, 1, n);
+ src = tmp_buf;
+ }
+
+ /*
+ * Get the first (and maybe only) 16-bit word. See if we are done.
+ */
+ if (flags & BSF_LITTLE) {
+ w1 = src[0] | (src[1] << 8);
+ } else {
+ w1 = (src[0] << 8) | src[1];
+ }
+ if (w1 < 0xD800 || w1 > 0xDFFF) {
+ if (w1 == 0xFFFE || w1 == 0xFFFF) {
+ return THE_NON_VALUE;
+ }
+ mb->offset += 16;
+ return make_small(w1);
+ } else if (w1 > 0xDBFF) {
+ return THE_NON_VALUE;
+ }
+
+ /*
+ * Get the second 16-bit word and combine it with the first.
+ */
+ if (num_bits < 32) {
+ return THE_NON_VALUE;
+ } else if (flags & BSF_LITTLE) {
+ w2 = src[2] | (src[3] << 8);
+ } else {
+ w2 = (src[2] << 8) | src[3];
+ }
+ if (!(0xDC00 <= w2 && w2 <= 0xDFFF)) {
+ return THE_NON_VALUE;
+ }
+ mb->offset += 32;
+ return make_small((((w1 & 0x3FF) << 10) | (w2 & 0x3FF)) + 0x10000UL);
+}
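+
+/*
+ * Illustrative note: the last line above is the standard surrogate-pair
+ * combination; each word contributes 10 payload bits. For U+1F600 the
+ * pair is D83D DE00: ((0x3D << 10) | 0x200) + 0x10000 = 0x1F600.
+ * Minimal sketch (not compiled), with a hypothetical helper name:
+ */
+#if 0
+static unsigned long combine_surrogates(unsigned w1, unsigned w2)
+{
+    /* Assumes w1 in [D800,DBFF] and w2 in [DC00,DFFF]. */
+    return ((((unsigned long) (w1 & 0x3FF)) << 10) | (w2 & 0x3FF)) + 0x10000UL;
+}
+#endif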
+
+static byte
+get_bit(byte b, size_t offs)
+{
+ return (b >> (7-offs)) & 1;
+}
+
+int
+erts_cmp_bits(byte* a_ptr, size_t a_offs, byte* b_ptr, size_t b_offs, size_t size)
+{
+ byte a;
+ byte b;
+ byte a_bit;
+ byte b_bit;
+ Uint lshift;
+ Uint rshift;
+ int cmp;
+
+ if (((a_offs | b_offs | size) & 7) == 0) {
+ int byte_size = size >> 3;
+ return sys_memcmp(a_ptr, b_ptr, byte_size);
+ }
+
+    /* Compare bit by bit until a_ptr is aligned on a byte boundary */
+ a = *a_ptr++;
+ b = *b_ptr++;
+ while (size > 0) {
+ a_bit = get_bit(a, a_offs);
+ b_bit = get_bit(b, b_offs);
+ if ((cmp = (a_bit-b_bit)) != 0) {
+ return cmp;
+ }
+ size--;
+ b_offs++;
+ if (b_offs == 8) {
+ b_offs = 0;
+ b = *b_ptr++;
+ }
+ a_offs++;
+ if (a_offs == 8) {
+ a_offs = 0;
+ a = *a_ptr++;
+ break;
+ }
+ }
+
+ /* Compare byte by byte as long as at least 8 bits remain */
+ lshift = b_offs;
+ rshift = 8 - lshift;
+ while (size >= 8) {
+ byte b_cmp = (b << lshift);
+ b = *b_ptr++;
+ b_cmp |= b >> rshift;
+ if ((cmp = (a - b_cmp)) != 0) {
+ return cmp;
+ }
+ a = *a_ptr++;
+ size -= 8;
+ }
+
+ /* Compare the remaining bits bit by bit */
+ while (size > 0) {
+ a_bit = get_bit(a, a_offs);
+ b_bit = get_bit(b, b_offs);
+ if ((cmp = (a_bit-b_bit)) != 0) {
+ return cmp;
+ }
+ a_offs++;
+ if (a_offs == 8) {
+ a_offs = 0;
+ a = *a_ptr++;
+ }
+ b_offs++;
+ if (b_offs == 8) {
+ b_offs = 0;
+ b = *b_ptr++;
+ }
+ size--;
+ }
+
+ return 0;
+}
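+
+/*
+ * Illustrative note: get_bit() above indexes bits most-significant first
+ * within a byte. For the byte 0xB4 (binary 1011 0100): get_bit(0xB4, 0)
+ * == 1, get_bit(0xB4, 1) == 0 and get_bit(0xB4, 7) == 0.
+ */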
+
+/*
+ * The basic bit copy operation. Copies n bits from the source buffer to
+ * the destination buffer. Depending on the directions, it can reverse the
+ * copied bits.
+ */
+
+
+void
+erts_copy_bits(byte* src, /* Base pointer to source. */
+ size_t soffs, /* Bit offset for source relative to src. */
+ int sdir, /* Direction: 1 (forward) or -1 (backward). */
+ byte* dst, /* Base pointer to destination. */
+ size_t doffs, /* Bit offset for destination relative to dst. */
+ int ddir, /* Direction: 1 (forward) or -1 (backward). */
+ size_t n) /* Number of bits to copy. */
+{
+ Uint lmask;
+ Uint rmask;
+ Uint count;
+ Uint deoffs;
+
+ if (n == 0) {
+ return;
+ }
+
+ src += sdir*BYTE_OFFSET(soffs);
+ dst += ddir*BYTE_OFFSET(doffs);
+ soffs = BIT_OFFSET(soffs);
+ doffs = BIT_OFFSET(doffs);
+ deoffs = BIT_OFFSET(doffs+n);
+ lmask = (doffs) ? MAKE_MASK(8-doffs) : 0;
+ rmask = (deoffs) ? (MAKE_MASK(deoffs)<<(8-deoffs)) : 0;
+
+ /*
+ * Take care of the case that all bits are in the same byte.
+ */
+
+ if (doffs+n < 8) { /* All bits are in the same byte */
+ lmask = (lmask & rmask) ? (lmask & rmask) : (lmask | rmask);
+
+ if (soffs == doffs) {
+ *dst = MASK_BITS(*src,*dst,lmask);
+ } else if (soffs > doffs) {
+ Uint bits = (*src << (soffs-doffs));
+ if (soffs+n > 8) {
+ src += sdir;
+ bits |= (*src >> (8-(soffs-doffs)));
+ }
+ *dst = MASK_BITS(bits,*dst,lmask);
+ } else {
+ *dst = MASK_BITS((*src >> (doffs-soffs)),*dst,lmask);
+ }
+ return; /* We are done! */
+ }
+
+ /*
+ * At this point, we know that the bits are in 2 or more bytes.
+ */
+
+ count = ((lmask) ? (n - (8 - doffs)) : n) >> 3;
+
+ if (soffs == doffs) {
+ /*
+ * The bits are aligned in the same way. We can just copy the bytes
+ * (except for the first and last bytes). Note that the directions
+ * might be different, so we can't just use memcpy().
+ */
+
+ if (lmask) {
+ *dst = MASK_BITS(*src, *dst, lmask);
+ dst += ddir;
+ src += sdir;
+ }
+
+ while (count--) {
+ *dst = *src;
+ dst += ddir;
+ src += sdir;
+ }
+
+ if (rmask) {
+ *dst = MASK_BITS(*src,*dst,rmask);
+ }
+ } else {
+ Uint bits;
+ Uint bits1;
+ Uint rshift;
+ Uint lshift;
+
+ /*
+ * The tricky case. The bits must be shifted into position.
+ */
+
+ if (soffs > doffs) {
+ lshift = (soffs - doffs);
+ rshift = 8 - lshift;
+ bits = *src;
+ if (soffs + n > 8) {
+ src += sdir;
+ }
+ } else {
+ rshift = (doffs - soffs);
+ lshift = 8 - rshift;
+ bits = 0;
+ }
+
+ if (lmask) {
+ bits1 = bits << lshift;
+ bits = *src;
+ src += sdir;
+ bits1 |= (bits >> rshift);
+ *dst = MASK_BITS(bits1,*dst,lmask);
+ dst += ddir;
+ }
+
+ while (count--) {
+ bits1 = bits << lshift;
+ bits = *src;
+ src += sdir;
+ *dst = bits1 | (bits >> rshift);
+ dst += ddir;
+ }
+
+ if (rmask) {
+ bits1 = bits << lshift;
+ if ((rmask << rshift) & 0xff) {
+ bits = *src;
+ bits1 |= (bits >> rshift);
+ }
+ *dst = MASK_BITS(bits1,*dst,rmask);
+ }
+ }
+}
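+
+/*
+ * Illustrative sketch (not compiled): the edge handling above, assuming
+ * the usual definitions MAKE_MASK(n) == (1 << n) - 1 and
+ * MASK_BITS(src,dst,mask) == ((src & mask) | (dst & ~mask)) (both defined
+ * elsewhere in the emulator). With doffs == 3, lmask == MAKE_MASK(5) ==
+ * 0x1F, so the first destination byte takes its 5 low-order bits from the
+ * source and keeps its 3 high-order bits untouched.
+ */
+#if 0
+static unsigned char merge_bits(unsigned char src, unsigned char dst,
+				unsigned char mask)
+{
+    return (unsigned char) ((src & mask) | (dst & ~mask));	/* MASK_BITS */
+}
+/* merge_bits(0xFF, 0x00, 0x1F) == 0x1F: only the masked bits change. */
+#endif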
+
diff --git a/erts/emulator/beam/erl_bits.h b/erts/emulator/beam/erl_bits.h
new file mode 100644
index 0000000000..e3f8e0b679
--- /dev/null
+++ b/erts/emulator/beam/erl_bits.h
@@ -0,0 +1,212 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 1999-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+#ifndef __ERL_BITS_H__
+#define __ERL_BITS_H__
+
+/*
+ * This structure represents a binary to be matched.
+ */
+
+typedef struct erl_bin_match_buffer {
+ Eterm orig; /* Original binary term. */
+ byte* base; /* Current position in binary. */
+ Uint offset; /* Offset in bits. */
+ size_t size; /* Size of binary in bits. */
+} ErlBinMatchBuffer;
+
+struct erl_bits_state {
+ /*
+ * Used for building binaries.
+ */
+ byte *byte_buf_;
+ int byte_buf_len_;
+ /*
+ * Used for building binaries using the new instruction set.
+ */
+ byte* erts_current_bin_; /* Pointer to beginning of current binary. */
+ /*
+ * Offset in bits into the current binary (new instruction set) or
+ * buffer (old instruction set).
+ */
+ Uint erts_bin_offset_;
+ /*
+ * Whether the current binary is writable.
+ */
+ unsigned erts_writable_bin_;
+};
+
+typedef struct erl_bin_match_struct{
+ Eterm thing_word;
+ ErlBinMatchBuffer mb; /* Present match buffer */
+ Eterm save_offset[1]; /* Saved offsets */
+} ErlBinMatchState;
+
+#define ERL_BIN_MATCHSTATE_SIZE(_Max) ((sizeof(ErlBinMatchState) + (_Max)*sizeof(Eterm))/sizeof(Eterm))
+#define HEADER_BIN_MATCHSTATE(_Max) _make_header(ERL_BIN_MATCHSTATE_SIZE((_Max))-1, _TAG_HEADER_BIN_MATCHSTATE)
+#define HEADER_NUM_SLOTS(hdr) (header_arity(hdr)-sizeof(ErlBinMatchState)/sizeof(Eterm)+1)
+
+#define make_matchstate(_Ms) make_boxed((Eterm*)(_Ms))
+#define ms_matchbuffer(_Ms) &(((ErlBinMatchState*)(_Ms - TAG_PRIMARY_BOXED))->mb)
+
+
+#if defined(ERTS_SMP)
+#define ERL_BITS_REENTRANT
+#else
+/* uncomment to test the reentrant API in the non-SMP runtime system */
+/* #define ERL_BITS_REENTRANT */
+#endif
+
+#ifdef ERL_BITS_REENTRANT
+
+/*
+ * Reentrant API with the state passed as a parameter.
+ * (Except when the current Process* already is a parameter.)
+ */
+#ifdef ERTS_SMP
+/* the state resides in the current process' scheduler data */
+#define ERL_BITS_DECLARE_STATEP struct erl_bits_state *EBS
+#define ERL_BITS_RELOAD_STATEP(P) do{EBS = &(P)->scheduler_data->erl_bits_state;}while(0)
+#define ERL_BITS_DEFINE_STATEP(P) struct erl_bits_state *EBS = &(P)->scheduler_data->erl_bits_state
+#else
+/* reentrant API but with a hidden single global state, for testing only */
+extern struct erl_bits_state ErlBitsState_;
+#define ERL_BITS_DECLARE_STATEP struct erl_bits_state *EBS = &ErlBitsState_
+#define ERL_BITS_RELOAD_STATEP(P) do{}while(0)
+#define ERL_BITS_DEFINE_STATEP(P) ERL_BITS_DECLARE_STATEP
+#endif
+#define ErlBitsState (*EBS)
+
+#define ERL_BITS_PROTO_0 struct erl_bits_state *EBS
+#define ERL_BITS_PROTO_1(PARM1) struct erl_bits_state *EBS, PARM1
+#define ERL_BITS_PROTO_2(PARM1,PARM2) struct erl_bits_state *EBS, PARM1, PARM2
+#define ERL_BITS_PROTO_3(PARM1,PARM2,PARM3) struct erl_bits_state *EBS, PARM1, PARM2, PARM3
+#define ERL_BITS_ARGS_0 EBS
+#define ERL_BITS_ARGS_1(ARG1) EBS, ARG1
+#define ERL_BITS_ARGS_2(ARG1,ARG2) EBS, ARG1, ARG2
+#define ERL_BITS_ARGS_3(ARG1,ARG2,ARG3) EBS, ARG1, ARG2, ARG3
+
+#else /* ERL_BITS_REENTRANT */
+
+/*
+ * Non-reentrant API with a single global state.
+ */
+extern struct erl_bits_state ErlBitsState;
+#define ERL_BITS_DECLARE_STATEP /*empty*/
+#define ERL_BITS_RELOAD_STATEP(P) do{}while(0)
+#define ERL_BITS_DEFINE_STATEP(P) /*empty*/
+
+#define ERL_BITS_PROTO_0 void
+#define ERL_BITS_PROTO_1(PARM1) PARM1
+#define ERL_BITS_PROTO_2(PARM1,PARM2) PARM1, PARM2
+#define ERL_BITS_PROTO_3(PARM1,PARM2,PARM3) PARM1, PARM2, PARM3
+#define ERL_BITS_ARGS_0 /*empty*/
+#define ERL_BITS_ARGS_1(ARG1) ARG1
+#define ERL_BITS_ARGS_2(ARG1,ARG2) ARG1, ARG2
+#define ERL_BITS_ARGS_3(ARG1,ARG2,ARG3) ARG1, ARG2, ARG3
+
+#endif /* ERL_BITS_REENTRANT */
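+
+/*
+ * For example, a prototype written as
+ *     int erts_bs_put_utf8(ERL_BITS_PROTO_1(Eterm Integer));
+ * expands to
+ *     int erts_bs_put_utf8(struct erl_bits_state *EBS, Eterm Integer);
+ * in the reentrant case, and to
+ *     int erts_bs_put_utf8(Eterm Integer);
+ * otherwise.
+ */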
+
+#define erts_bin_offset (ErlBitsState.erts_bin_offset_)
+#define erts_current_bin (ErlBitsState.erts_current_bin_)
+#define erts_writable_bin (ErlBitsState.erts_writable_bin_)
+
+#define copy_binary_to_buffer(DstBuffer, DstBufOffset, SrcBuffer, SrcBufferOffset, NumBits) \
+ do { \
+ if (BIT_OFFSET(DstBufOffset) == 0 && (SrcBufferOffset == 0) && \
+ (BIT_OFFSET(NumBits)==0)) { \
+ sys_memcpy(DstBuffer+BYTE_OFFSET(DstBufOffset), \
+ SrcBuffer, NBYTES(NumBits)); \
+ } else { \
+ erts_copy_bits(SrcBuffer, SrcBufferOffset, 1, \
+ (byte*)DstBuffer, DstBufOffset, 1, NumBits); \
+ } \
+ } while (0)
+
+void erts_init_bits(void); /* Initialization once. */
+#ifdef ERTS_SMP
+void erts_bits_init_state(ERL_BITS_PROTO_0);
+void erts_bits_destroy_state(ERL_BITS_PROTO_0);
+#endif
+
+
+/*
+ * NBYTES(x) returns the number of bytes needed to store x bits.
+ */
+
+#define NBYTES(x) (((x) + 7) >> 3)
+#define BYTE_OFFSET(ofs) ((Uint) (ofs) >> 3)
+#define BIT_OFFSET(ofs) ((ofs) & 7)
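+
+/* For example, for 17 bits: NBYTES(17) == 3, BYTE_OFFSET(17) == 2 and
+ * BIT_OFFSET(17) == 1. */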
+
+/*
+ * Return number of Eterm words needed for allocation with HAlloc(),
+ * given a number of bytes.
+ */
+#define WSIZE(n) ((n + sizeof(Eterm) - 1) / sizeof(Eterm))
+
+/*
+ * Binary matching.
+ */
+
+Eterm erts_bs_start_match_2(Process *p, Eterm Bin, Uint Max);
+Eterm erts_bs_get_integer_2(Process *p, Uint num_bits, unsigned flags, ErlBinMatchBuffer* mb);
+Eterm erts_bs_get_binary_2(Process *p, Uint num_bits, unsigned flags, ErlBinMatchBuffer* mb);
+Eterm erts_bs_get_float_2(Process *p, Uint num_bits, unsigned flags, ErlBinMatchBuffer* mb);
+Eterm erts_bs_get_binary_all_2(Process *p, ErlBinMatchBuffer* mb);
+
+/*
+ * Binary construction, new instruction set.
+ */
+
+int erts_new_bs_put_integer(ERL_BITS_PROTO_3(Eterm Integer, Uint num_bits, unsigned flags));
+int erts_bs_put_utf8(ERL_BITS_PROTO_1(Eterm Integer));
+int erts_bs_put_utf16(ERL_BITS_PROTO_2(Eterm Integer, Uint flags));
+int erts_new_bs_put_binary(ERL_BITS_PROTO_2(Eterm Bin, Uint num_bits));
+int erts_new_bs_put_binary_all(ERL_BITS_PROTO_2(Eterm Bin, Uint unit));
+int erts_new_bs_put_float(Process *c_p, Eterm Float, Uint num_bits, int flags);
+void erts_new_bs_put_string(ERL_BITS_PROTO_2(byte* iptr, Uint num_bytes));
+
+Uint erts_bits_bufs_size(void);
+Uint32 erts_bs_get_unaligned_uint32(ErlBinMatchBuffer* mb);
+void erts_align_utf8_bytes(ErlBinMatchBuffer* mb, byte* buf);
+Eterm erts_bs_get_utf8(ErlBinMatchBuffer* mb);
+Eterm erts_bs_get_utf16(ErlBinMatchBuffer* mb, Uint flags);
+Eterm erts_bs_append(Process* p, Eterm* reg, Uint live, Eterm build_size_term,
+ Uint extra_words, Uint unit);
+Eterm erts_bs_private_append(Process* p, Eterm bin, Eterm sz, Uint unit);
+Eterm erts_bs_init_writable(Process* p, Eterm sz);
+
+/*
+ * Common utilities.
+ */
+void erts_copy_bits(byte* src, size_t soffs, int sdir,
+ byte* dst, size_t doffs,int ddir, size_t n);
+int erts_cmp_bits(byte* a_ptr, size_t a_offs, byte* b_ptr, size_t b_offs, size_t size);
+
+/*
+ * Flags for bs_get_* / bs_put_* / bs_init* instructions.
+ */
+
+#define BSF_ALIGNED 1 /* Field is guaranteed to be byte-aligned. */
+#define BSF_LITTLE 2 /* Field is little-endian (otherwise big-endian). */
+#define BSF_SIGNED 4 /* Field is signed (otherwise unsigned). */
+#define BSF_EXACT 8 /* Size in bs_init is exact. */
+#define BSF_NATIVE 16 /* Native endian. */
+
+#endif /* __ERL_BITS_H__ */
diff --git a/erts/emulator/beam/erl_db.c b/erts/emulator/beam/erl_db.c
new file mode 100644
index 0000000000..b02150008f
--- /dev/null
+++ b/erts/emulator/beam/erl_db.c
@@ -0,0 +1,3631 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 1996-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * This file contains the BIF interface functions and
+ * the handling of the "meta tables", i.e., the tables
+ * of db tables.
+ */
+
+/*
+#ifdef DEBUG
+#define HARDDEBUG 1
+#endif
+*/
+
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+
+#include "sys.h"
+#include "erl_vm.h"
+#include "global.h"
+#include "erl_process.h"
+#include "error.h"
+#define ERTS_WANT_DB_INTERNAL__
+#include "erl_db.h"
+#include "bif.h"
+#include "big.h"
+
+
+erts_smp_atomic_t erts_ets_misc_mem_size;
+
+/*
+** Utility macros
+*/
+
+/* Get a key from any table structure and a tagged object */
+#define TERM_GETKEY(tb, obj) db_getkey((tb)->common.keypos, (obj))
+
+
+/* How safe are we from double-hits or missed objects
+** when iterating without fixation? */
+enum DbIterSafety {
+ ITER_UNSAFE, /* Must fixate to be safe */
+ ITER_SAFE_LOCKED, /* Safe while table is locked, not between trap calls */
+ ITER_SAFE /* No need to fixate at all */
+};
+#ifdef ERTS_SMP
+# define ITERATION_SAFETY(Proc,Tab) \
+ ((IS_TREE_TABLE((Tab)->common.status) || ONLY_WRITER(Proc,Tab)) ? ITER_SAFE \
+ : (((Tab)->common.status & DB_FINE_LOCKED) ? ITER_UNSAFE : ITER_SAFE_LOCKED))
+#else
+# define ITERATION_SAFETY(Proc,Tab) \
+ ((IS_TREE_TABLE((Tab)->common.status) || ONLY_WRITER(Proc,Tab)) \
+ ? ITER_SAFE : ITER_SAFE_LOCKED)
+#endif
+
+#define DID_TRAP(P,Ret) (!is_value(Ret) && ((P)->freason == TRAP))
+
+
+/*
+** The main meta table, containing all ets tables.
+*/
+#ifdef ERTS_SMP
+# define META_MAIN_TAB_LOCK_CNT 16
+static union {
+ erts_smp_spinlock_t lck;
+ byte _cache_line_alignment[64];
+}meta_main_tab_locks[META_MAIN_TAB_LOCK_CNT];
+#endif
+static struct {
+ union {
+ DbTable *tb; /* Only directly readable if slot is ALIVE */
+ Uint next_free; /* (index<<2)|1 if slot is FREE */
+ }u;
+} *meta_main_tab;
+
+/* A slot in meta_main_tab can have three states:
+ * FREE : Free to use for new table. Part of linked free-list.
+ * ALIVE: Contains a table
+ * DEAD : Contains a table that is being removed.
+ */
+#define IS_SLOT_FREE(i) (meta_main_tab[(i)].u.next_free & 1)
+#define IS_SLOT_DEAD(i) (meta_main_tab[(i)].u.next_free & 2)
+#define IS_SLOT_ALIVE(i) (!(meta_main_tab[(i)].u.next_free & (1|2)))
+#define GET_NEXT_FREE_SLOT(i) (meta_main_tab[(i)].u.next_free >> 2)
+#define SET_NEXT_FREE_SLOT(i,next) (meta_main_tab[(i)].u.next_free = ((next)<<2)|1)
+#define MARK_SLOT_DEAD(i) (meta_main_tab[(i)].u.next_free |= 2)
+#define GET_ANY_SLOT_TAB(i) ((DbTable*)(meta_main_tab[(i)].u.next_free & ~(1|2))) /* dead or alive */
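+
+/*
+ * Illustrative sketch (not compiled): the encoding above keeps the slot
+ * state in the two low-order bits of the word, which are always zero in
+ * a pointer to a suitably aligned DbTable. Hypothetical helper:
+ */
+#if 0
+static Uint make_free_slot_word(Uint next_free_index)
+{
+    return (next_free_index << 2) | 1;	/* FREE, linked to next free slot */
+}
+/* make_free_slot_word(7) == 29, and 29 >> 2 == 7 recovers the index. */
+#endif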
+
+static ERTS_INLINE void meta_main_tab_lock(unsigned slot)
+{
+#ifdef ERTS_SMP
+ erts_smp_spin_lock(&meta_main_tab_locks[slot % META_MAIN_TAB_LOCK_CNT].lck);
+#endif
+}
+
+static ERTS_INLINE void meta_main_tab_unlock(unsigned slot)
+{
+#ifdef ERTS_SMP
+ erts_smp_spin_unlock(&meta_main_tab_locks[slot % META_MAIN_TAB_LOCK_CNT].lck);
+#endif
+}
+
+static erts_smp_spinlock_t meta_main_tab_main_lock;
+static Uint meta_main_tab_first_free; /* Index of first free slot */
+static int meta_main_tab_cnt; /* Number of active tables */
+static Uint meta_main_tab_slot_mask; /* The slot index part of an unnamed table id */
+static Uint meta_main_tab_seq_incr;
+static Uint meta_main_tab_seq_cnt = 0; /* To give unique(-ish) table identifiers */
+
+
+
+/*
+** The meta hash table of all NAMED ets tables
+*/
+#ifdef ERTS_SMP
+# define META_NAME_TAB_LOCK_CNT 16
+union {
+ erts_smp_rwmtx_t lck;
+ byte _cache_line_alignment[64];
+}meta_name_tab_rwlocks[META_NAME_TAB_LOCK_CNT];
+#endif
+static struct meta_name_tab_entry {
+ union {
+ Eterm name_atom;
+ Eterm mcnt; /* Length of mvec in multiple tab entry */
+ }u;
+ union {
+ DbTable *tb;
+ struct meta_name_tab_entry* mvec;
+ }pu;
+} *meta_name_tab;
+
+static unsigned meta_name_tab_mask;
+
+static ERTS_INLINE
+struct meta_name_tab_entry* meta_name_tab_bucket(Eterm name,
+ erts_smp_rwmtx_t** lockp)
+{
+ unsigned bix = atom_val(name) & meta_name_tab_mask;
+ struct meta_name_tab_entry* bucket = &meta_name_tab[bix];
+#ifdef ERTS_SMP
+ *lockp = &meta_name_tab_rwlocks[bix % META_NAME_TAB_LOCK_CNT].lck;
+#endif
+ return bucket;
+}
+
+
+typedef enum {
+ LCK_READ=1, /* read only access */
+ LCK_WRITE=2, /* exclusive table write access */
+ LCK_WRITE_REC=3 /* record write access */
+} db_lock_kind_t;
+
+extern DbTableMethod db_hash;
+extern DbTableMethod db_tree;
+
+int user_requested_db_max_tabs;
+int erts_ets_realloc_always_moves;
+static int db_max_tabs;
+static DbTable *meta_pid_to_tab; /* Pid mapped to owned tables */
+static DbTable *meta_pid_to_fixed_tab; /* Pid mapped to fixed tables */
+static Eterm ms_delete_all;
+static Eterm ms_delete_all_buff[8]; /* To compare with for deletion
+ of all objects */
+
+/*
+** Forward decls, static functions
+*/
+
+static void fix_table_locked(Process* p, DbTable* tb);
+static void unfix_table_locked(Process* p, DbTable* tb, db_lock_kind_t* kind);
+static void set_heir(Process* me, DbTable* tb, Eterm heir, Eterm heir_data);
+static void free_heir_data(DbTable*);
+static void free_fixations_locked(DbTable *tb);
+
+static int free_table_cont(Process *p,
+ DbTable *tb,
+ int first,
+ int clean_meta_tab);
+static void print_table(int to, void *to_arg, int show, DbTable* tb);
+static BIF_RETTYPE ets_select_delete_1(Process *p, Eterm a1);
+static BIF_RETTYPE ets_select_count_1(Process *p, Eterm a1);
+static BIF_RETTYPE ets_select_trap_1(Process *p, Eterm a1);
+static BIF_RETTYPE ets_delete_trap(Process *p, Eterm a1);
+static Eterm table_info(Process* p, DbTable* tb, Eterm What);
+
+/*
+ * Exported global
+ */
+Export ets_select_delete_continue_exp;
+Export ets_select_count_continue_exp;
+Export ets_select_continue_exp;
+
+/*
+ * Static traps
+ */
+static Export ets_delete_continue_exp;
+
+static ERTS_INLINE DbTable* db_ref(DbTable* tb)
+{
+ if (tb != NULL) {
+ erts_refc_inc(&tb->common.ref, 2);
+ }
+ return tb;
+}
+
+static ERTS_INLINE DbTable* db_unref(DbTable* tb)
+{
+ if (!erts_refc_dectest(&tb->common.ref, 0)) {
+#ifdef HARDDEBUG
+ if (erts_smp_atomic_read(&tb->common.memory_size) != sizeof(DbTable)) {
+ erts_fprintf(stderr, "ets: db_unref memory remain=%ld fix=%x\n",
+ erts_smp_atomic_read(&tb->common.memory_size)-sizeof(DbTable),
+ tb->common.fixations);
+ }
+ erts_fprintf(stderr, "ets: db_unref(%T) deleted!!!\r\n",
+ tb->common.id);
+
+ erts_fprintf(stderr, "ets: db_unref: meta_pid_to_tab common.memory_size = %ld\n",
+ erts_smp_atomic_read(&meta_pid_to_tab->common.memory_size));
+ print_table(ERTS_PRINT_STDOUT, NULL, 1, meta_pid_to_tab);
+
+
+ erts_fprintf(stderr, "ets: db_unref: meta_pid_to_fixed_tab common.memory_size = %ld\n",
+ erts_smp_atomic_read(&meta_pid_to_fixed_tab->common.memory_size));
+ print_table(ERTS_PRINT_STDOUT, NULL, 1, meta_pid_to_fixed_tab);
+
+#endif
+#ifdef ERTS_SMP
+ erts_smp_rwmtx_destroy(&tb->common.rwlock);
+ erts_smp_mtx_destroy(&tb->common.fixlock);
+#endif
+ ASSERT(is_immed(tb->common.heir_data));
+ erts_db_free(ERTS_ALC_T_DB_TABLE, tb, (void *) tb, sizeof(DbTable));
+ ERTS_ETS_MISC_MEM_ADD(-sizeof(DbTable));
+ return NULL;
+ }
+ return tb;
+}
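+
+/*
+ * Illustrative sketch (not compiled): db_ref()/db_unref() above follow
+ * the classic reference-count pattern in which the dropper of the last
+ * reference frees the object. A reduced single-threaded version, with
+ * hypothetical names:
+ */
+#if 0
+#include <stdlib.h>
+
+typedef struct { long refc; /* ... payload ... */ } obj_t;
+
+static obj_t* obj_ref(obj_t* o) { o->refc++; return o; }
+
+static obj_t* obj_unref(obj_t* o)
+{
+    if (--o->refc == 0) {	/* last reference gone: free */
+	free(o);
+	return NULL;
+    }
+    return o;
+}
+#endif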
+
+static ERTS_INLINE void db_init_lock(DbTable* tb, char *rwname, char* fixname)
+{
+ erts_refc_init(&tb->common.ref, 1);
+ erts_refc_init(&tb->common.fixref, 0);
+#ifdef ERTS_SMP
+# ifdef ERTS_ENABLE_LOCK_COUNT
+ erts_smp_rwmtx_init_x(&tb->common.rwlock, rwname, tb->common.the_name);
+ erts_smp_mtx_init_x(&tb->common.fixlock, fixname, tb->common.the_name);
+# else
+ erts_smp_rwmtx_init(&tb->common.rwlock, rwname);
+ erts_smp_mtx_init(&tb->common.fixlock, fixname);
+# endif
+ tb->common.is_thread_safe = !(tb->common.status & DB_FINE_LOCKED);
+#endif
+}
+
+static ERTS_INLINE void db_lock_take_over_ref(DbTable* tb, db_lock_kind_t kind)
+{
+#ifdef ERTS_SMP
+ ASSERT(tb != meta_pid_to_tab && tb != meta_pid_to_fixed_tab);
+ if (tb->common.type & DB_FINE_LOCKED) {
+ if (kind == LCK_WRITE) {
+ erts_smp_rwmtx_rwlock(&tb->common.rwlock);
+ tb->common.is_thread_safe = 1;
+ } else {
+ erts_smp_rwmtx_rlock(&tb->common.rwlock);
+ ASSERT(!tb->common.is_thread_safe);
+ }
+ }
+ else
+ {
+ switch (kind) {
+ case LCK_WRITE:
+ case LCK_WRITE_REC:
+ erts_smp_rwmtx_rwlock(&tb->common.rwlock);
+ break;
+ default:
+ erts_smp_rwmtx_rlock(&tb->common.rwlock);
+ }
+ ASSERT(tb->common.is_thread_safe);
+ }
+#endif
+}
+
+static ERTS_INLINE void db_lock(DbTable* tb, db_lock_kind_t kind)
+{
+ (void) db_ref(tb);
+#ifdef ERTS_SMP
+ db_lock_take_over_ref(tb, kind);
+#endif
+}
+
+static ERTS_INLINE void db_unlock(DbTable* tb, db_lock_kind_t kind)
+{
+#ifdef ERTS_SMP
+ ASSERT(tb != meta_pid_to_tab && tb != meta_pid_to_fixed_tab);
+
+ if (tb->common.type & DB_FINE_LOCKED) {
+ if (tb->common.is_thread_safe) {
+ ASSERT(kind == LCK_WRITE);
+ tb->common.is_thread_safe = 0;
+ erts_smp_rwmtx_rwunlock(&tb->common.rwlock);
+ }
+ else {
+ ASSERT(kind != LCK_WRITE);
+ erts_smp_rwmtx_runlock(&tb->common.rwlock);
+ }
+ }
+ else {
+ ASSERT(tb->common.is_thread_safe);
+ switch (kind) {
+ case LCK_WRITE:
+ case LCK_WRITE_REC:
+ erts_smp_rwmtx_rwunlock(&tb->common.rwlock);
+ break;
+ default:
+ erts_smp_rwmtx_runlock(&tb->common.rwlock);
+ }
+ }
+#endif
+ (void) db_unref(tb); /* May delete table... */
+}
+
+
+static ERTS_INLINE void db_meta_lock(DbTable* tb, db_lock_kind_t kind)
+{
+ ASSERT(tb == meta_pid_to_tab || tb == meta_pid_to_fixed_tab);
+ ASSERT(kind != LCK_WRITE);
+ /* As long as we only lock for READ we don't have to lock at all. */
+}
+
+static ERTS_INLINE void db_meta_unlock(DbTable* tb, db_lock_kind_t kind)
+{
+ ASSERT(tb == meta_pid_to_tab || tb == meta_pid_to_fixed_tab);
+ ASSERT(kind != LCK_WRITE);
+}
+
+static ERTS_INLINE
+DbTable* db_get_table(Process *p,
+ Eterm id,
+ int what,
+ db_lock_kind_t kind)
+{
+ DbTable *tb = NULL;
+
+ if (is_small(id)) {
+ Uint slot = unsigned_val(id) & meta_main_tab_slot_mask;
+ meta_main_tab_lock(slot);
+ if (slot < db_max_tabs && IS_SLOT_ALIVE(slot)) {
+	    /* SMP: increment the ref to prevent a race between the unlock
+	     * of meta_main_tab_lock and the table locking outside it
+ */
+ tb = db_ref(meta_main_tab[slot].u.tb);
+ }
+ meta_main_tab_unlock(slot);
+ }
+ else if (is_atom(id)) {
+ erts_smp_rwmtx_t* rwlock;
+ struct meta_name_tab_entry* bucket = meta_name_tab_bucket(id,&rwlock);
+ erts_smp_rwmtx_rlock(rwlock);
+ if (bucket->pu.tb != NULL) {
+ if (is_atom(bucket->u.name_atom)) { /* single */
+ if (bucket->u.name_atom == id) {
+ tb = db_ref(bucket->pu.tb);
+ }
+ }
+ else { /* multi */
+ Uint cnt = unsigned_val(bucket->u.mcnt);
+ Uint i;
+ for (i=0; i<cnt; i++) {
+ if (bucket->pu.mvec[i].u.name_atom == id) {
+ tb = db_ref(bucket->pu.mvec[i].pu.tb);
+ break;
+ }
+ }
+ }
+ }
+ erts_smp_rwmtx_runlock(rwlock);
+ }
+ if (tb) {
+ db_lock_take_over_ref(tb, kind);
+ if (tb->common.id == id && ((tb->common.status & what) != 0 ||
+ p->id == tb->common.owner)) {
+ return tb;
+ }
+ db_unlock(tb, kind);
+ }
+ return NULL;
+}
+
+/* Requires meta_main_tab_locks[slot] locked.
+*/
+static ERTS_INLINE void free_slot(int slot)
+{
+ ASSERT(!IS_SLOT_FREE(slot));
+ erts_smp_spin_lock(&meta_main_tab_main_lock);
+ SET_NEXT_FREE_SLOT(slot,meta_main_tab_first_free);
+ meta_main_tab_first_free = slot;
+ meta_main_tab_cnt--;
+ erts_smp_spin_unlock(&meta_main_tab_main_lock);
+}
+
+static int insert_named_tab(Eterm name_atom, DbTable* tb)
+{
+ int ret = 0;
+ erts_smp_rwmtx_t* rwlock;
+ struct meta_name_tab_entry* new_entry;
+ struct meta_name_tab_entry* bucket = meta_name_tab_bucket(name_atom,
+ &rwlock);
+
+ erts_smp_rwmtx_rwlock(rwlock);
+
+ if (bucket->pu.tb == NULL) { /* empty */
+ new_entry = bucket;
+ }
+ else {
+ struct meta_name_tab_entry* entries;
+ Uint cnt;
+ if (is_atom(bucket->u.name_atom)) { /* single */
+ size_t size;
+ if (bucket->u.name_atom == name_atom) {
+ goto done;
+ }
+ cnt = 2;
+ size = sizeof(struct meta_name_tab_entry)*cnt;
+ entries = erts_db_alloc_nt(ERTS_ALC_T_DB_NTAB_ENT, size);
+ ERTS_ETS_MISC_MEM_ADD(size);
+ new_entry = &entries[0];
+ entries[1] = *bucket;
+ }
+ else { /* multi */
+ size_t size, old_size;
+ Uint i;
+ cnt = unsigned_val(bucket->u.mcnt);
+ for (i=0; i<cnt; i++) {
+ if (bucket->pu.mvec[i].u.name_atom == name_atom) {
+ goto done;
+ }
+ }
+ old_size = sizeof(struct meta_name_tab_entry)*cnt;
+ size = sizeof(struct meta_name_tab_entry)*(cnt+1);
+ entries = erts_db_realloc_nt(ERTS_ALC_T_DB_NTAB_ENT,
+ bucket->pu.mvec,
+ old_size,
+ size);
+ ERTS_ETS_MISC_MEM_ADD(size-old_size);
+ new_entry = &entries[cnt];
+ cnt++;
+ }
+ bucket->pu.mvec = entries;
+ bucket->u.mcnt = make_small(cnt);
+ }
+ new_entry->pu.tb = tb;
+ new_entry->u.name_atom = name_atom;
+ ret = 1; /* Ok */
+
+done:
+ erts_smp_rwmtx_rwunlock(rwlock);
+ return ret;
+}
+
+static int remove_named_tab(Eterm name_atom)
+{
+ int ret = 0;
+ erts_smp_rwmtx_t* rwlock;
+ struct meta_name_tab_entry* bucket = meta_name_tab_bucket(name_atom,
+ &rwlock);
+ erts_smp_rwmtx_rwlock(rwlock);
+ if (bucket->pu.tb == NULL) {
+ goto done;
+ }
+ else if (is_atom(bucket->u.name_atom)) { /* single */
+ if (bucket->u.name_atom != name_atom) {
+ goto done;
+ }
+ bucket->pu.tb = NULL;
+ }
+ else { /* multi */
+ Uint cnt = unsigned_val(bucket->u.mcnt);
+ Uint i = 0;
+ for (;;) {
+ if (bucket->pu.mvec[i].u.name_atom == name_atom) {
+ break;
+ }
+ if (++i >= cnt) {
+ goto done;
+ }
+ }
+ if (cnt == 2) { /* multi -> single */
+ size_t size;
+ struct meta_name_tab_entry* entries = bucket->pu.mvec;
+ *bucket = entries[1-i];
+ size = sizeof(struct meta_name_tab_entry)*cnt;
+ erts_db_free_nt(ERTS_ALC_T_DB_NTAB_ENT, entries, size);
+ ERTS_ETS_MISC_MEM_ADD(-size);
+ ASSERT(is_atom(bucket->u.name_atom));
+ }
+ else {
+ size_t size, old_size;
+ ASSERT(cnt > 2);
+ bucket->u.mcnt = make_small(--cnt);
+ if (i != cnt) {
+ /* reposition last one before realloc destroys it */
+ bucket->pu.mvec[i] = bucket->pu.mvec[cnt];
+ }
+ old_size = sizeof(struct meta_name_tab_entry)*(cnt+1);
+ size = sizeof(struct meta_name_tab_entry)*cnt;
+ bucket->pu.mvec = erts_db_realloc_nt(ERTS_ALC_T_DB_NTAB_ENT,
+ bucket->pu.mvec,
+ old_size,
+ size);
+ ERTS_ETS_MISC_MEM_ADD(size - old_size);
+
+ }
+ }
+ ret = 1; /* Ok */
+
+done:
+ erts_smp_rwmtx_rwunlock(rwlock);
+ return ret;
+}
+
+/* Do a fast fixation of a hash table.
+** Must be matched by a local unfix before releasing table lock.
+*/
+static ERTS_INLINE void local_fix_table(DbTable* tb)
+{
+ erts_refc_inc(&tb->common.fixref, 1);
+}
+static ERTS_INLINE void local_unfix_table(DbTable* tb)
+{
+ if (erts_refc_dectest(&tb->common.fixref, 0) == 0) {
+ ASSERT(IS_HASH_TABLE(tb->common.status));
+ db_unfix_table_hash(&(tb->hash));
+ }
+}
+
+
+/*
+ * BIFs.
+ */
+
+BIF_RETTYPE ets_safe_fixtable_2(BIF_ALIST_2)
+{
+ DbTable *tb;
+ db_lock_kind_t kind;
+#ifdef HARDDEBUG
+ erts_fprintf(stderr,
+ "ets:safe_fixtable(%T,%T); Process: %T, initial: %T:%T/%bpu\n",
+ BIF_ARG_1, BIF_ARG_2, BIF_P->id,
+ BIF_P->initial[0], BIF_P->initial[1], BIF_P->initial[2]);
+#endif
+ kind = (BIF_ARG_2 == am_true) ? LCK_READ : LCK_WRITE_REC;
+
+ if ((tb = db_get_table(BIF_P, BIF_ARG_1, DB_READ, kind)) == NULL) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+
+ if (BIF_ARG_2 == am_true) {
+ fix_table_locked(BIF_P, tb);
+ }
+ else if (BIF_ARG_2 == am_false) {
+ if (IS_FIXED(tb)) {
+ unfix_table_locked(BIF_P, tb, &kind);
+ }
+ }
+ else {
+ db_unlock(tb, kind);
+ BIF_ERROR(BIF_P, BADARG);
+ }
+ db_unlock(tb, kind);
+ BIF_RET(am_true);
+}
+
+
+/*
+** Returns the first Key in a table
+*/
+BIF_RETTYPE ets_first_1(BIF_ALIST_1)
+{
+ DbTable* tb;
+ int cret;
+ Eterm ret;
+
+ CHECK_TABLES();
+
+ tb = db_get_table(BIF_P, BIF_ARG_1, DB_READ, LCK_READ);
+
+ if (!tb) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+
+ cret = tb->common.meth->db_first(BIF_P, tb, &ret);
+
+ db_unlock(tb, LCK_READ);
+
+ if (cret != DB_ERROR_NONE) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+ BIF_RET(ret);
+}
+
+/*
+** The next BIF: given a key, returns the "next" key
+*/
+BIF_RETTYPE ets_next_2(BIF_ALIST_2)
+{
+ DbTable* tb;
+ int cret;
+ Eterm ret;
+
+ CHECK_TABLES();
+
+ tb = db_get_table(BIF_P, BIF_ARG_1, DB_READ, LCK_READ);
+
+ if (!tb) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+
+ cret = tb->common.meth->db_next(BIF_P, tb, BIF_ARG_2, &ret);
+
+ db_unlock(tb, LCK_READ);
+
+ if (cret != DB_ERROR_NONE) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+ BIF_RET(ret);
+}
+
+/*
+** Returns the last Key in a table
+*/
+BIF_RETTYPE ets_last_1(BIF_ALIST_1)
+{
+ DbTable* tb;
+ int cret;
+ Eterm ret;
+
+ CHECK_TABLES();
+
+ tb = db_get_table(BIF_P, BIF_ARG_1, DB_READ, LCK_READ);
+
+ if (!tb) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+
+ cret = tb->common.meth->db_last(BIF_P, tb, &ret);
+
+ db_unlock(tb, LCK_READ);
+
+ if (cret != DB_ERROR_NONE) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+ BIF_RET(ret);
+}
+
+/*
+** The prev BIF: given a key, returns the "previous" key
+*/
+BIF_RETTYPE ets_prev_2(BIF_ALIST_2)
+{
+ DbTable* tb;
+ int cret;
+ Eterm ret;
+
+ CHECK_TABLES();
+
+ tb = db_get_table(BIF_P, BIF_ARG_1, DB_READ, LCK_READ);
+
+ if (!tb) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+
+ cret = tb->common.meth->db_prev(BIF_P,tb,BIF_ARG_2,&ret);
+
+ db_unlock(tb, LCK_READ);
+
+ if (cret != DB_ERROR_NONE) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+ BIF_RET(ret);
+}
+
+/*
+** update_element(Tab, Key, {Pos, Value})
+** update_element(Tab, Key, [{Pos, Value}])
+*/
+BIF_RETTYPE ets_update_element_3(BIF_ALIST_3)
+{
+ DbTable* tb;
+ int cret = DB_ERROR_BADITEM;
+ Eterm list;
+ Eterm iter;
+ Eterm cell[2];
+ DbUpdateHandle handle;
+
+ if ((tb = db_get_table(BIF_P, BIF_ARG_1, DB_WRITE, LCK_WRITE_REC)) == NULL) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+ if (!(tb->common.status & (DB_SET | DB_ORDERED_SET))) {
+ goto bail_out;
+ }
+ if (is_tuple(BIF_ARG_3)) {
+ list = CONS(cell, BIF_ARG_3, NIL);
+ }
+ else {
+ list = BIF_ARG_3;
+ }
+
+ if (!tb->common.meth->db_lookup_dbterm(tb, BIF_ARG_2, &handle)) {
+ cret = DB_ERROR_BADKEY;
+ goto bail_out;
+ }
+
+    /* First verify that the list is ok, to avoid nasty rollback scenarios
+ */
+ for (iter=list ; is_not_nil(iter); iter = CDR(list_val(iter))) {
+ Eterm pv;
+ Eterm* pvp;
+ Sint position;
+
+ if (is_not_list(iter)) {
+ goto finalize;
+ }
+ pv = CAR(list_val(iter)); /* {Pos,Value} */
+ if (is_not_tuple(pv)) {
+ goto finalize;
+ }
+ pvp = tuple_val(pv);
+ if (arityval(*pvp) != 2 || !is_small(pvp[1])) {
+ goto finalize;
+ }
+ position = signed_val(pvp[1]);
+ if (position < 1 || position == tb->common.keypos ||
+ position > arityval(handle.dbterm->tpl[0])) {
+ goto finalize;
+ }
+ }
+ /* The point of no return, no failures from here on.
+ */
+ cret = DB_ERROR_NONE;
+
+ for (iter=list ; is_not_nil(iter); iter = CDR(list_val(iter))) {
+ Eterm* pvp = tuple_val(CAR(list_val(iter))); /* {Pos,Value} */
+ db_do_update_element(&handle, signed_val(pvp[1]), pvp[2]);
+ }
+
+finalize:
+ tb->common.meth->db_finalize_dbterm(&handle);
+
+bail_out:
+ db_unlock(tb, LCK_WRITE_REC);
+
+ switch (cret) {
+ case DB_ERROR_NONE:
+ BIF_RET(am_true);
+ case DB_ERROR_BADKEY:
+ BIF_RET(am_false);
+ case DB_ERROR_SYSRES:
+ BIF_ERROR(BIF_P, SYSTEM_LIMIT);
+ default:
+ BIF_ERROR(BIF_P, BADARG);
+ break;
+ }
+}
+
+/*
+** update_counter(Tab, Key, Incr)
+** update_counter(Tab, Key, {Upop})
+** update_counter(Tab, Key, [{Upop}])
+** Upop = {Pos,Incr} | {Pos,Incr,Threshold,WarpTo}
+** Returns new value(s) (integer or [integer])
+*/
+BIF_RETTYPE ets_update_counter_3(BIF_ALIST_3)
+{
+ DbTable* tb;
+ int cret = DB_ERROR_BADITEM;
+ Eterm upop_list;
+ int list_size;
+ Eterm ret; /* int or [int] */
+ Eterm* ret_list_currp = NULL;
+ Eterm* ret_list_prevp = NULL;
+ Eterm iter;
+ Eterm cell[2];
+ Eterm tuple[3];
+ DbUpdateHandle handle;
+ Uint halloc_size = 0; /* overestimated heap usage */
+ Eterm* htop; /* actual heap usage */
+ Eterm* hstart;
+ Eterm* hend;
+
+ if ((tb = db_get_table(BIF_P, BIF_ARG_1, DB_WRITE, LCK_WRITE_REC)) == NULL) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+ if (!(tb->common.status & (DB_SET | DB_ORDERED_SET))) {
+ goto bail_out;
+ }
+ if (is_integer(BIF_ARG_3)) { /* Incr */
+ upop_list = CONS(cell, TUPLE2(tuple, make_small(tb->common.keypos+1),
+ BIF_ARG_3), NIL);
+ }
+ else if (is_tuple(BIF_ARG_3)) { /* {Upop} */
+ upop_list = CONS(cell, BIF_ARG_3, NIL);
+ }
+ else { /* [{Upop}] (probably) */
+ upop_list = BIF_ARG_3;
+ ret_list_prevp = &ret;
+ }
+
+ if (!tb->common.meth->db_lookup_dbterm(tb, BIF_ARG_2, &handle)) {
+ goto bail_out; /* key not found */
+ }
+
+    /* First verify that the list is ok, to avoid nasty rollback scenarios
+ */
+ list_size = 0;
+ for (iter=upop_list ; is_not_nil(iter); iter = CDR(list_val(iter)),
+ list_size += 2) {
+ Eterm upop;
+ Eterm* tpl;
+ Sint position;
+ Eterm incr, warp, oldcnt;
+
+ if (is_not_list(iter)) {
+ goto finalize;
+ }
+ upop = CAR(list_val(iter));
+ if (is_not_tuple(upop)) {
+ goto finalize;
+ }
+ tpl = tuple_val(upop);
+ switch (arityval(*tpl)) {
+ case 4: /* threshold specified */
+ if (is_not_integer(tpl[3])) {
+ goto finalize;
+ }
+ warp = tpl[4];
+ if (is_big(warp)) {
+ halloc_size += BIG_NEED_SIZE(big_arity(warp));
+ }
+ else if (is_not_small(warp)) {
+ goto finalize;
+ }
+ /* Fall through */
+ case 2:
+ if (!is_small(tpl[1])) {
+ goto finalize;
+ }
+ incr = tpl[2];
+ if (is_big(incr)) {
+ halloc_size += BIG_NEED_SIZE(big_arity(incr));
+ }
+ else if (is_not_small(incr)) {
+ goto finalize;
+ }
+ position = signed_val(tpl[1]);
+ if (position < 1 || position == tb->common.keypos ||
+ position > arityval(handle.dbterm->tpl[0])) {
+ goto finalize;
+ }
+ oldcnt = handle.dbterm->tpl[position];
+ if (is_big(oldcnt)) {
+ halloc_size += BIG_NEED_SIZE(big_arity(oldcnt));
+ }
+ else if (is_not_small(oldcnt)) {
+ goto finalize;
+ }
+ break;
+ default:
+ goto finalize;
+ }
+ halloc_size += 2; /* worst growth case: small(0)+small(0)=big(2) */
+ }
+
+ /* The point of no return, no failures from here on.
+ */
+ cret = DB_ERROR_NONE;
+
+ if (ret_list_prevp) { /* Prepare to return a list */
+ ret = NIL;
+ halloc_size += list_size;
+ hstart = HAlloc(BIF_P, halloc_size);
+ ret_list_currp = hstart;
+ htop = hstart + list_size;
+ }
+ else {
+ hstart = htop = HAlloc(BIF_P, halloc_size);
+ }
+ hend = hstart + halloc_size;
+
+ for (iter=upop_list ; is_not_nil(iter); iter = CDR(list_val(iter))) {
+
+ Eterm* tpl = tuple_val(CAR(list_val(iter)));
+ Sint position = signed_val(tpl[1]);
+ Eterm incr = tpl[2];
+ Eterm oldcnt = handle.dbterm->tpl[position];
+ Eterm newcnt = db_add_counter(&htop, oldcnt, incr);
+
+ if (newcnt == NIL) {
+ cret = DB_ERROR_SYSRES; /* Can only happen if BIG_ARITY_MAX */
+	    ret = NIL;              /* is reached, i.e., should not happen */
+ htop = hstart;
+ break;
+ }
+ ASSERT(is_integer(newcnt));
+
+ if (arityval(*tpl) == 4) { /* Maybe warp it */
+ Eterm threshold = tpl[3];
+ if ((cmp(incr,make_small(0)) < 0) ? /* negative increment? */
+ (cmp(newcnt,threshold) < 0) : /* if negative, check if below */
+ (cmp(newcnt,threshold) > 0)) { /* else check if above threshold */
+
+ newcnt = tpl[4];
+ }
+ }
+
+ db_do_update_element(&handle,position,newcnt);
+
+ if (ret_list_prevp) {
+ *ret_list_prevp = CONS(ret_list_currp,newcnt,NIL);
+ ret_list_prevp = &CDR(ret_list_currp);
+ ret_list_currp += 2;
+ }
+ else {
+ ret = newcnt;
+ break;
+ }
+ }
+
+ ASSERT(is_integer(ret) || is_nil(ret) ||
+ (is_list(ret) && (list_val(ret)+list_size)==ret_list_currp));
+ ASSERT(htop <= hend);
+
+ HRelease(BIF_P,hend,htop);
+
+finalize:
+ tb->common.meth->db_finalize_dbterm(&handle);
+
+bail_out:
+ db_unlock(tb, LCK_WRITE_REC);
+
+ switch (cret) {
+ case DB_ERROR_NONE:
+ BIF_RET(ret);
+ case DB_ERROR_SYSRES:
+ BIF_ERROR(BIF_P, SYSTEM_LIMIT);
+ default:
+ BIF_ERROR(BIF_P, BADARG);
+ break;
+ }
+}
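+
+/*
+ * Illustrative sketch (not compiled): the threshold/warp rule implemented
+ * above, reduced to plain C longs (hypothetical helper, small integers
+ * only). A positive increment warps to WarpTo when the new value goes
+ * above Threshold; a negative increment warps when it goes below.
+ */
+#if 0
+static long update_counter_small(long old, long incr,
+				 long threshold, long warp_to)
+{
+    long new_val = old + incr;
+    if (incr < 0 ? (new_val < threshold) : (new_val > threshold)) {
+	new_val = warp_to;
+    }
+    return new_val;
+}
+/* update_counter_small(99, 5, 100, 0) == 0;
+ * update_counter_small(10, 5, 100, 0) == 15. */
+#endif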
+
+/*
+** The put BIF
+*/
+BIF_RETTYPE ets_insert_2(BIF_ALIST_2)
+{
+ DbTable* tb;
+ int cret = DB_ERROR_NONE;
+ Eterm lst;
+ DbTableMethod* meth;
+ db_lock_kind_t kind;
+
+ CHECK_TABLES();
+
+    /* Write-lock the table if there is more than one object, to keep atomicity */
+ kind = ((is_list(BIF_ARG_2) && CDR(list_val(BIF_ARG_2)) != NIL)
+ ? LCK_WRITE : LCK_WRITE_REC);
+
+ if ((tb = db_get_table(BIF_P, BIF_ARG_1, DB_WRITE, kind)) == NULL) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+ if (BIF_ARG_2 == NIL) {
+ db_unlock(tb, kind);
+ BIF_RET(am_true);
+ }
+ meth = tb->common.meth;
+ if (is_list(BIF_ARG_2)) {
+ for (lst = BIF_ARG_2; is_list(lst); lst = CDR(list_val(lst))) {
+ if (is_not_tuple(CAR(list_val(lst))) ||
+ (arityval(*tuple_val(CAR(list_val(lst)))) < tb->common.keypos)) {
+ goto badarg;
+ }
+ }
+ if (lst != NIL) {
+ goto badarg;
+ }
+ for (lst = BIF_ARG_2; is_list(lst); lst = CDR(list_val(lst))) {
+ cret = meth->db_put(tb, CAR(list_val(lst)), 0);
+ if (cret != DB_ERROR_NONE)
+ break;
+ }
+ } else {
+ if (is_not_tuple(BIF_ARG_2) ||
+ (arityval(*tuple_val(BIF_ARG_2)) < tb->common.keypos)) {
+ goto badarg;
+ }
+ cret = meth->db_put(tb, BIF_ARG_2, 0);
+ }
+
+ db_unlock(tb, kind);
+
+ switch (cret) {
+ case DB_ERROR_NONE:
+ BIF_RET(am_true);
+ case DB_ERROR_SYSRES:
+ BIF_ERROR(BIF_P, SYSTEM_LIMIT);
+ default:
+ BIF_ERROR(BIF_P, BADARG);
+ }
+ badarg:
+ db_unlock(tb, kind);
+ BIF_ERROR(BIF_P, BADARG);
+}
+
+
+/*
+** The put-if-not-already-there BIF...
+*/
+BIF_RETTYPE ets_insert_new_2(BIF_ALIST_2)
+{
+ DbTable* tb;
+ int cret = DB_ERROR_NONE;
+ Eterm ret = am_true;
+ Eterm obj;
+ db_lock_kind_t kind;
+
+ CHECK_TABLES();
+
+ if (is_list(BIF_ARG_2)) {
+ if (CDR(list_val(BIF_ARG_2)) != NIL) {
+ Eterm lst;
+ Eterm lookup_ret;
+ DbTableMethod* meth;
+
+	    /* More than one object, use LCK_WRITE to keep atomicity */
+ kind = LCK_WRITE;
+ tb = db_get_table(BIF_P, BIF_ARG_1, DB_WRITE, kind);
+ if (tb == NULL) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+ meth = tb->common.meth;
+ for (lst = BIF_ARG_2; is_list(lst); lst = CDR(list_val(lst))) {
+ if (is_not_tuple(CAR(list_val(lst)))
+ || (arityval(*tuple_val(CAR(list_val(lst))))
+ < tb->common.keypos)) {
+ goto badarg;
+ }
+ }
+ if (lst != NIL) {
+ goto badarg;
+ }
+ for (lst = BIF_ARG_2; is_list(lst); lst = CDR(list_val(lst))) {
+ cret = meth->db_member(tb, TERM_GETKEY(tb,CAR(list_val(lst))),
+ &lookup_ret);
+ if ((cret != DB_ERROR_NONE) || (lookup_ret != am_false)) {
+ ret = am_false;
+ goto done;
+ }
+ }
+
+ for (lst = BIF_ARG_2; is_list(lst); lst = CDR(list_val(lst))) {
+ cret = meth->db_put(tb,CAR(list_val(lst)), 0);
+ if (cret != DB_ERROR_NONE)
+ break;
+ }
+ goto done;
+ }
+ obj = CAR(list_val(BIF_ARG_2));
+ }
+ else {
+ obj = BIF_ARG_2;
+ }
+ /* Only one object (or NIL)
+ */
+ kind = LCK_WRITE_REC;
+ if ((tb = db_get_table(BIF_P, BIF_ARG_1, DB_WRITE, kind)) == NULL) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+ if (BIF_ARG_2 == NIL) {
+ db_unlock(tb, kind);
+ BIF_RET(am_true);
+ }
+ if (is_not_tuple(obj)
+ || (arityval(*tuple_val(obj)) < tb->common.keypos)) {
+ goto badarg;
+ }
+ cret = tb->common.meth->db_put(tb, obj,
+ 1); /* key_clash_fail */
+
+done:
+ db_unlock(tb, kind);
+ switch (cret) {
+ case DB_ERROR_NONE:
+ BIF_RET(ret);
+ case DB_ERROR_BADKEY:
+ BIF_RET(am_false);
+ case DB_ERROR_SYSRES:
+ BIF_ERROR(BIF_P, SYSTEM_LIMIT);
+ default:
+ BIF_ERROR(BIF_P, BADARG);
+ }
+ badarg:
+ db_unlock(tb, kind);
+ BIF_ERROR(BIF_P, BADARG);
+}
+
+/*
+** Rename a (possibly) named table
+*/
+
+BIF_RETTYPE ets_rename_2(BIF_ALIST_2)
+{
+ DbTable* tb;
+ Eterm ret;
+
+#ifdef HARDDEBUG
+ erts_fprintf(stderr,
+ "ets:rename(%T,%T); Process: %T, initial: %T:%T/%bpu\n",
+ BIF_ARG_1, BIF_ARG_2, BIF_P->id,
+ BIF_P->initial[0], BIF_P->initial[1], BIF_P->initial[2]);
+#endif
+
+ if ((tb = db_get_table(BIF_P, BIF_ARG_1, DB_WRITE, LCK_WRITE)) == NULL) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+
+ if (is_not_atom(BIF_ARG_2)) {
+ goto badarg;
+ }
+
+ if (is_not_atom(tb->common.id)) { /* Not a named table */
+ tb->common.the_name = BIF_ARG_2;
+ goto done;
+ }
+
+ if (!insert_named_tab(BIF_ARG_2,tb)) {
+ goto badarg;
+ }
+ if (!remove_named_tab(tb->common.id)) {
+ erl_exit(1,"Could not find named tab %s", tb->common.id);
+ }
+
+ tb->common.id = tb->common.the_name = BIF_ARG_2;
+
+ done:
+ ret = tb->common.id;
+ db_unlock(tb, LCK_WRITE);
+ BIF_RET(ret);
+ badarg:
+ db_unlock(tb, LCK_WRITE);
+ BIF_ERROR(BIF_P, BADARG);
+}
+
+
+/*
+** The create table BIF
+** Args: (Name, Properties)
+*/
+
+BIF_RETTYPE ets_new_2(BIF_ALIST_2)
+{
+ DbTable* tb = NULL;
+ int slot;
+ Eterm list;
+ Eterm val;
+ Eterm ret;
+ Eterm heir;
+ Eterm heir_data;
+ Uint32 status;
+ Sint keypos;
+ int is_named, is_fine_locked;
+ int cret;
+ Eterm meta_tuple[3];
+ DbTableMethod* meth;
+
+ if (is_not_atom(BIF_ARG_1)) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+ if (is_not_nil(BIF_ARG_2) && is_not_list(BIF_ARG_2)) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+
+ status = DB_NORMAL | DB_SET | DB_PROTECTED;
+ keypos = 1;
+ is_named = 0;
+ is_fine_locked = 0;
+ heir = am_none;
+ heir_data = am_undefined;
+
+ list = BIF_ARG_2;
+ while(is_list(list)) {
+ val = CAR(list_val(list));
+ if (val == am_bag) {
+ status |= DB_BAG;
+ status &= ~(DB_SET | DB_DUPLICATE_BAG | DB_ORDERED_SET);
+ }
+ else if (val == am_duplicate_bag) {
+ status |= DB_DUPLICATE_BAG;
+ status &= ~(DB_SET | DB_BAG | DB_ORDERED_SET);
+ }
+ else if (val == am_ordered_set) {
+ status |= DB_ORDERED_SET;
+ status &= ~(DB_SET | DB_BAG | DB_DUPLICATE_BAG);
+ }
+ /*TT*/
+ else if (is_tuple(val)) {
+ Eterm *tp = tuple_val(val);
+ if (arityval(tp[0]) == 2) {
+ if (tp[1] == am_keypos
+ && is_small(tp[2]) && (signed_val(tp[2]) > 0)) {
+ keypos = signed_val(tp[2]);
+ }
+ else if (tp[1] == am_write_concurrency) {
+ if (tp[2] == am_true) {
+ is_fine_locked = 1;
+ } else if (tp[2] == am_false) {
+ is_fine_locked = 0;
+ } else break;
+ }
+ else if (tp[1] == am_heir && tp[2] == am_none) {
+ heir = am_none;
+ heir_data = am_undefined;
+ }
+ else break;
+ }
+ else if (arityval(tp[0]) == 3 && tp[1] == am_heir
+ && is_internal_pid(tp[2])) {
+ heir = tp[2];
+ heir_data = tp[3];
+ }
+ else break;
+ }
+ else if (val == am_public) {
+ status |= DB_PUBLIC;
+ status &= ~(DB_PROTECTED|DB_PRIVATE);
+ }
+ else if (val == am_private) {
+ status |= DB_PRIVATE;
+ status &= ~(DB_PROTECTED|DB_PUBLIC);
+ }
+ else if (val == am_named_table) {
+ is_named = 1;
+ }
+ else if (val == am_set || val == am_protected)
+ ;
+ else break;
+
+ list = CDR(list_val(list));
+ }
+    if (is_not_nil(list)) { /* bad option or not a well-formed list */
+ BIF_ERROR(BIF_P, BADARG);
+ }
+ if (IS_HASH_TABLE(status)) {
+ meth = &db_hash;
+ #ifdef ERTS_SMP
+ if (is_fine_locked && !(status & DB_PRIVATE)) {
+ status |= DB_FINE_LOCKED;
+ }
+ #endif
+ }
+ else if (IS_TREE_TABLE(status)) {
+ meth = &db_tree;
+ }
+ else {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+
+    /* We create the table outside any table lock
+     * and take the unusual cost of destroying the table
+     * if we fail to find a slot
+ */
+ {
+ DbTable init_tb;
+
+ erts_smp_atomic_init(&init_tb.common.memory_size, 0);
+ tb = (DbTable*) erts_db_alloc(ERTS_ALC_T_DB_TABLE,
+ &init_tb, sizeof(DbTable));
+ ERTS_ETS_MISC_MEM_ADD(sizeof(DbTable));
+ erts_smp_atomic_init(&tb->common.memory_size,
+ erts_smp_atomic_read(&init_tb.common.memory_size));
+ }
+
+ tb->common.meth = meth;
+ tb->common.the_name = BIF_ARG_1;
+ tb->common.status = status;
+#ifdef ERTS_SMP
+ tb->common.type = status & ERTS_ETS_TABLE_TYPES;
+ /* Note, 'type' is *read only* from now on... */
+#endif
+ db_init_lock(tb, "db_tab", "db_tab_fix");
+ tb->common.keypos = keypos;
+ tb->common.owner = BIF_P->id;
+ set_heir(BIF_P, tb, heir, heir_data);
+
+ erts_smp_atomic_init(&tb->common.nitems, 0);
+
+ tb->common.fixations = NULL;
+
+ cret = meth->db_create(BIF_P, tb);
+ ASSERT(cret == DB_ERROR_NONE);
+
+ erts_smp_spin_lock(&meta_main_tab_main_lock);
+
+ if (meta_main_tab_cnt >= db_max_tabs) {
+ erts_smp_spin_unlock(&meta_main_tab_main_lock);
+ erts_send_error_to_logger_str(BIF_P->group_leader,
+ "** Too many db tables **\n");
+ free_heir_data(tb);
+ tb->common.meth->db_free_table(tb);
+ erts_db_free(ERTS_ALC_T_DB_TABLE, tb, (void *) tb, sizeof(DbTable));
+ ERTS_ETS_MISC_MEM_ADD(-sizeof(DbTable));
+ BIF_ERROR(BIF_P, SYSTEM_LIMIT);
+ }
+
+ slot = meta_main_tab_first_free;
+ ASSERT(slot>=0 && slot<db_max_tabs);
+ meta_main_tab_first_free = GET_NEXT_FREE_SLOT(slot);
+ meta_main_tab_cnt++;
+
+ if (is_named) {
+ ret = BIF_ARG_1;
+ }
+ else {
+ ret = make_small(slot | meta_main_tab_seq_cnt);
+ meta_main_tab_seq_cnt += meta_main_tab_seq_incr;
+ ASSERT((unsigned_val(ret) & meta_main_tab_slot_mask) == slot);
+ }
+ erts_smp_spin_unlock(&meta_main_tab_main_lock);
+
+ tb->common.id = ret;
+ tb->common.slot = slot; /* store slot for erase */
+
+ meta_main_tab_lock(slot);
+ meta_main_tab[slot].u.tb = tb;
+ ASSERT(IS_SLOT_ALIVE(slot));
+ meta_main_tab_unlock(slot);
+
+ if (is_named && !insert_named_tab(BIF_ARG_1, tb)) {
+ meta_main_tab_lock(slot);
+ free_slot(slot);
+ meta_main_tab_unlock(slot);
+
+ db_lock_take_over_ref(tb,LCK_WRITE);
+ free_heir_data(tb);
+ tb->common.meth->db_free_table(tb);
+ db_unlock(tb,LCK_WRITE);
+ BIF_ERROR(BIF_P, BADARG);
+ }
+
+ BIF_P->flags |= F_USING_DB; /* So we can remove tb if p dies */
+
+#ifdef HARDDEBUG
+ erts_fprintf(stderr,
+ "ets:new(%T,%T)=%T; Process: %T, initial: %T:%T/%bpu\n",
+ BIF_ARG_1, BIF_ARG_2, ret, BIF_P->id,
+ BIF_P->initial[0], BIF_P->initial[1], BIF_P->initial[2]);
+ erts_fprintf(stderr, "ets: new: meta_pid_to_tab common.memory_size = %ld\n",
+ erts_smp_atomic_read(&meta_pid_to_tab->common.memory_size));
+ erts_fprintf(stderr, "ets: new: meta_pid_to_fixed_tab common.memory_size = %ld\n",
+ erts_smp_atomic_read(&meta_pid_to_fixed_tab->common.memory_size));
+#endif
+
+ db_meta_lock(meta_pid_to_tab, LCK_WRITE_REC);
+ if (db_put_hash(meta_pid_to_tab,
+ TUPLE2(meta_tuple, BIF_P->id, make_small(slot)),
+ 0) != DB_ERROR_NONE) {
+ erl_exit(1,"Could not update ets metadata.");
+ }
+ db_meta_unlock(meta_pid_to_tab, LCK_WRITE_REC);
+
+ BIF_RET(ret);
+}
+
+/*
+** The lookup BIF
+*/
+BIF_RETTYPE ets_lookup_2(BIF_ALIST_2)
+{
+ DbTable* tb;
+ int cret;
+ Eterm ret;
+
+ CHECK_TABLES();
+
+ if ((tb = db_get_table(BIF_P, BIF_ARG_1, DB_READ, LCK_READ)) == NULL) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+
+ cret = tb->common.meth->db_get(BIF_P, tb, BIF_ARG_2, &ret);
+
+ db_unlock(tb, LCK_READ);
+
+ switch (cret) {
+ case DB_ERROR_NONE:
+ BIF_RET(ret);
+ case DB_ERROR_SYSRES:
+ BIF_ERROR(BIF_P, SYSTEM_LIMIT);
+ default:
+ BIF_ERROR(BIF_P, BADARG);
+ }
+
+}
+
+/*
+** The member BIF
+*/
+BIF_RETTYPE ets_member_2(BIF_ALIST_2)
+{
+ DbTable* tb;
+ int cret;
+ Eterm ret;
+
+ CHECK_TABLES();
+
+ if ((tb = db_get_table(BIF_P, BIF_ARG_1, DB_READ, LCK_READ)) == NULL) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+
+ cret = tb->common.meth->db_member(tb, BIF_ARG_2, &ret);
+
+ db_unlock(tb, LCK_READ);
+
+ switch (cret) {
+ case DB_ERROR_NONE:
+ BIF_RET(ret);
+ case DB_ERROR_SYSRES:
+ BIF_ERROR(BIF_P, SYSTEM_LIMIT);
+ default:
+ BIF_ERROR(BIF_P, BADARG);
+ }
+
+}
+
+/*
+** Get an element from an object
+** lookup_element_3(Tab, Key, Index)
+** returns the element, or a list of elements if the table is a bag
+*/
+BIF_RETTYPE ets_lookup_element_3(BIF_ALIST_3)
+{
+ DbTable* tb;
+ Sint index;
+ int cret;
+ Eterm ret;
+
+ CHECK_TABLES();
+
+ if ((tb = db_get_table(BIF_P, BIF_ARG_1, DB_READ, LCK_READ)) == NULL) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+
+ if (is_not_small(BIF_ARG_3) || ((index = signed_val(BIF_ARG_3)) < 1)) {
+ db_unlock(tb, LCK_READ);
+ BIF_ERROR(BIF_P, BADARG);
+ }
+
+ cret = tb->common.meth->db_get_element(BIF_P, tb,
+ BIF_ARG_2, index, &ret);
+ db_unlock(tb, LCK_READ);
+ switch (cret) {
+ case DB_ERROR_NONE:
+ BIF_RET(ret);
+ case DB_ERROR_SYSRES:
+ BIF_ERROR(BIF_P, SYSTEM_LIMIT);
+ default:
+ BIF_ERROR(BIF_P, BADARG);
+ }
+}
+
+/*
+ * BIF to erase a whole table and release all memory it holds
+ */
+BIF_RETTYPE ets_delete_1(BIF_ALIST_1)
+{
+ int trap;
+ DbTable* tb;
+
+#ifdef HARDDEBUG
+ erts_fprintf(stderr,
+ "ets:delete(%T); Process: %T, initial: %T:%T/%bpu\n",
+ BIF_ARG_1, BIF_P->id,
+ BIF_P->initial[0], BIF_P->initial[1], BIF_P->initial[2]);
+#endif
+
+ CHECK_TABLES();
+
+ if ((tb = db_get_table(BIF_P, BIF_ARG_1, DB_WRITE, LCK_WRITE)) == NULL) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+
+ /*
+     * Clear all access bits to prevent any ets operation from accessing
+     * the table while it is being deleted.
+ */
+ tb->common.status &= ~(DB_PROTECTED|DB_PUBLIC|DB_PRIVATE);
+ tb->common.status |= DB_DELETE;
+
+ meta_main_tab_lock(tb->common.slot);
+    /* We must keep the slot, to be found by db_proc_dead() if the process dies */
+ MARK_SLOT_DEAD(tb->common.slot);
+ meta_main_tab_unlock(tb->common.slot);
+ if (is_atom(tb->common.id)) {
+ remove_named_tab(tb->common.id);
+ }
+
+ if (tb->common.owner != BIF_P->id) {
+ Eterm meta_tuple[3];
+
+ /*
+ * The table is being deleted by a process other than its owner.
+ * To make sure that the table will be completely deleted if the
+	 * current process is killed (e.g. by an EXIT signal), we will
+ * now transfer the ownership to the current process.
+ */
+ db_meta_lock(meta_pid_to_tab, LCK_WRITE_REC);
+ db_erase_bag_exact2(meta_pid_to_tab, tb->common.owner,
+ make_small(tb->common.slot));
+
+ BIF_P->flags |= F_USING_DB;
+ tb->common.owner = BIF_P->id;
+
+ db_put_hash(meta_pid_to_tab,
+ TUPLE2(meta_tuple,BIF_P->id,make_small(tb->common.slot)),
+ 0);
+ db_meta_unlock(meta_pid_to_tab, LCK_WRITE_REC);
+ }
+ /* disable inheritance */
+ free_heir_data(tb);
+ tb->common.heir = am_none;
+
+ free_fixations_locked(tb);
+
+ trap = free_table_cont(BIF_P, tb, 1, 1);
+ db_unlock(tb, LCK_WRITE);
+ if (trap) {
+ /*
+ * Package the DbTable* pointer into a bignum so that it can be safely
+ * passed through a trap. We used to pass the DbTable* pointer directly
+	 * (it looks like a continuation pointer), but that will crash the
+	 * emulator if this BIF is call traced.
+ */
+ Eterm *hp = HAlloc(BIF_P, 2);
+ hp[0] = make_pos_bignum_header(1);
+ hp[1] = (Eterm) tb;
+ BIF_TRAP1(&ets_delete_continue_exp, BIF_P, make_big(hp));
+ }
+ else {
+ BIF_RET(am_true);
+ }
+}
+
+/*
+** BIF ets:give_away(Tab, Pid, GiftData)
+*/
+BIF_RETTYPE ets_give_away_3(BIF_ALIST_3)
+{
+ Process* to_proc = NULL;
+ ErtsProcLocks to_locks = ERTS_PROC_LOCK_MAIN;
+ Eterm buf[5];
+ Eterm to_pid = BIF_ARG_2;
+ Eterm from_pid;
+ DbTable* tb = NULL;
+
+ if (!is_internal_pid(to_pid)) {
+ goto badarg;
+ }
+ to_proc = erts_pid2proc(BIF_P, ERTS_PROC_LOCK_MAIN, to_pid, to_locks);
+ if (to_proc == NULL) {
+ goto badarg;
+ }
+
+ if ((tb = db_get_table(BIF_P, BIF_ARG_1, DB_WRITE, LCK_WRITE)) == NULL
+ || tb->common.owner != BIF_P->id) {
+ goto badarg;
+ }
+ from_pid = tb->common.owner;
+ if (to_pid == from_pid) {
+ goto badarg; /* or should we be idempotent? return false maybe */
+ }
+
+ db_meta_lock(meta_pid_to_tab, LCK_WRITE_REC);
+ db_erase_bag_exact2(meta_pid_to_tab, tb->common.owner,
+ make_small(tb->common.slot));
+
+ to_proc->flags |= F_USING_DB;
+ tb->common.owner = to_pid;
+
+ db_put_hash(meta_pid_to_tab,
+ TUPLE2(buf,to_pid,make_small(tb->common.slot)),
+ 0);
+ db_meta_unlock(meta_pid_to_tab, LCK_WRITE_REC);
+
+ db_unlock(tb,LCK_WRITE);
+ erts_send_message(BIF_P, to_proc, &to_locks,
+ TUPLE4(buf, am_ETS_TRANSFER, tb->common.id, from_pid, BIF_ARG_3),
+ 0);
+ erts_smp_proc_unlock(to_proc, to_locks);
+ BIF_RET(am_true);
+
+badarg:
+ if (to_proc != NULL && to_proc != BIF_P) erts_smp_proc_unlock(to_proc, to_locks);
+ if (tb != NULL) db_unlock(tb, LCK_WRITE);
+ BIF_ERROR(BIF_P, BADARG);
+}
+
+BIF_RETTYPE ets_setopts_2(BIF_ALIST_2)
+{
+ DbTable* tb = NULL;
+ Eterm* tp;
+ Eterm opt;
+ Eterm heir = THE_NON_VALUE;
+ Eterm heir_data = THE_NON_VALUE;
+ Uint32 protection = 0;
+ Eterm fakelist[2];
+ Eterm tail;
+
+ for (tail = is_tuple(BIF_ARG_2) ? CONS(fakelist, BIF_ARG_2, NIL) : BIF_ARG_2;
+ is_list(tail);
+ tail = CDR(list_val(tail))) {
+
+ opt = CAR(list_val(tail));
+ if (!is_tuple(opt) || (tp = tuple_val(opt), arityval(tp[0]) < 2)) {
+ goto badarg;
+ }
+
+ switch (tp[1]) {
+ case am_heir:
+ if (heir != THE_NON_VALUE) goto badarg;
+ heir = tp[2];
+ if (arityval(tp[0]) == 2 && heir == am_none) {
+ heir_data = am_undefined;
+ }
+ else if (arityval(tp[0]) == 3 && is_internal_pid(heir)) {
+ heir_data = tp[3];
+ }
+ else goto badarg;
+ break;
+
+ case am_protection:
+ if (arityval(tp[0]) != 2 || protection != 0) goto badarg;
+ switch (tp[2]) {
+ case am_private: protection = DB_PRIVATE; break;
+ case am_protected: protection = DB_PROTECTED; break;
+ case am_public: protection = DB_PUBLIC; break;
+ default: goto badarg;
+ }
+ break;
+
+ default: goto badarg;
+ }
+ }
+
+ if (tail != NIL
+ || (tb = db_get_table(BIF_P, BIF_ARG_1, DB_WRITE, LCK_WRITE)) == NULL
+ || tb->common.owner != BIF_P->id) {
+ goto badarg;
+ }
+
+ if (heir_data != THE_NON_VALUE) {
+ free_heir_data(tb);
+ set_heir(BIF_P, tb, heir, heir_data);
+ }
+ if (protection) {
+ tb->common.status &= ~(DB_PRIVATE|DB_PROTECTED|DB_PUBLIC);
+ tb->common.status |= protection;
+ }
+
+ db_unlock (tb,LCK_WRITE);
+ BIF_RET(am_true);
+
+badarg:
+ if (tb != NULL) {
+ db_unlock(tb,LCK_WRITE);
+ }
+ BIF_ERROR(BIF_P, BADARG);
+}
+
+/*
+** BIF to erase all objects in a table, while keeping the table itself
+*/
+BIF_RETTYPE ets_delete_all_objects_1(BIF_ALIST_1)
+{
+ DbTable* tb;
+
+ CHECK_TABLES();
+
+ if ((tb = db_get_table(BIF_P, BIF_ARG_1, DB_WRITE, LCK_WRITE)) == NULL) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+
+ tb->common.meth->db_delete_all_objects(BIF_P, tb);
+
+ db_unlock(tb, LCK_WRITE);
+
+ BIF_RET(am_true);
+}
+
+/*
+** Erase the object with a given key, or possibly several objects for a bag.
+** Called as ets:delete(Tab, Key), where Key is the key element (at keypos)
+** of the object(s) we want to erase.
+*/
+BIF_RETTYPE ets_delete_2(BIF_ALIST_2)
+{
+ DbTable* tb;
+ int cret;
+ Eterm ret;
+
+ CHECK_TABLES();
+
+ if ((tb = db_get_table(BIF_P, BIF_ARG_1, DB_WRITE, LCK_WRITE_REC)) == NULL) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+
+ cret = tb->common.meth->db_erase(tb,BIF_ARG_2,&ret);
+
+ db_unlock(tb, LCK_WRITE_REC);
+
+ switch (cret) {
+ case DB_ERROR_NONE:
+ BIF_RET(ret);
+ case DB_ERROR_SYSRES:
+ BIF_ERROR(BIF_P, SYSTEM_LIMIT);
+ default:
+ BIF_ERROR(BIF_P, BADARG);
+ }
+}
+
+/*
+** Erase a specific object, or maybe several objects if we have a bag
+*/
+BIF_RETTYPE ets_delete_object_2(BIF_ALIST_2)
+{
+ DbTable* tb;
+ int cret;
+ Eterm ret;
+
+ CHECK_TABLES();
+
+ if ((tb = db_get_table(BIF_P, BIF_ARG_1, DB_WRITE, LCK_WRITE_REC)) == NULL) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+ if (is_not_tuple(BIF_ARG_2) ||
+ (arityval(*tuple_val(BIF_ARG_2)) < tb->common.keypos)) {
+ db_unlock(tb, LCK_WRITE_REC);
+ BIF_ERROR(BIF_P, BADARG);
+ }
+
+ cret = tb->common.meth->db_erase_object(tb, BIF_ARG_2, &ret);
+ db_unlock(tb, LCK_WRITE_REC);
+
+ switch (cret) {
+ case DB_ERROR_NONE:
+ BIF_RET(ret);
+ case DB_ERROR_SYSRES:
+ BIF_ERROR(BIF_P, SYSTEM_LIMIT);
+ default:
+ BIF_ERROR(BIF_P, BADARG);
+ }
+}
+
+/*
+** This is for trapping, cannot be called directly.
+*/
+static BIF_RETTYPE ets_select_delete_1(Process *p, Eterm a1)
+{
+ BIF_RETTYPE result;
+ DbTable* tb;
+ int cret;
+ Eterm ret;
+ Eterm *tptr;
+ db_lock_kind_t kind = LCK_WRITE_REC;
+
+ CHECK_TABLES();
+ ASSERT(is_tuple(a1));
+ tptr = tuple_val(a1);
+ ASSERT(arityval(*tptr) >= 1);
+
+ if ((tb = db_get_table(p, tptr[1], DB_WRITE, kind)) == NULL) {
+ BIF_ERROR(p,BADARG);
+ }
+
+ cret = tb->common.meth->db_select_delete_continue(p,tb,a1,&ret);
+
+ if(!DID_TRAP(p,ret) && ITERATION_SAFETY(p,tb) != ITER_SAFE) {
+ unfix_table_locked(p, tb, &kind);
+ }
+
+ db_unlock(tb, kind);
+
+ switch (cret) {
+ case DB_ERROR_NONE:
+ ERTS_BIF_PREP_RET(result, ret);
+ break;
+ default:
+ ERTS_BIF_PREP_ERROR(result, p, BADARG);
+ break;
+ }
+ erts_match_set_release_result(p);
+
+ return result;
+}
+
+
+BIF_RETTYPE ets_select_delete_2(BIF_ALIST_2)
+{
+ BIF_RETTYPE result;
+ DbTable* tb;
+ int cret;
+ Eterm ret;
+ enum DbIterSafety safety;
+
+ CHECK_TABLES();
+
+ if(eq(BIF_ARG_2, ms_delete_all)) {
+ int nitems;
+ if ((tb = db_get_table(BIF_P, BIF_ARG_1, DB_WRITE, LCK_WRITE)) == NULL) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+ nitems = erts_smp_atomic_read(&tb->common.nitems);
+ tb->common.meth->db_delete_all_objects(BIF_P, tb);
+ db_unlock(tb, LCK_WRITE);
+ BIF_RET(erts_make_integer(nitems,BIF_P));
+ }
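+    /*
+    ** Note: ms_delete_all is the constant match spec [{'_',[],[true]}],
+    ** built in init_db(). The fast path above is thus reached by (sketch):
+    **
+    **     N = ets:select_delete(Tab, [{'_',[],[true]}])
+    **
+    ** which deletes every object and returns the old object count.
+    */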
+
+ if ((tb = db_get_table(BIF_P, BIF_ARG_1, DB_WRITE, LCK_WRITE_REC)) == NULL) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+ safety = ITERATION_SAFETY(BIF_P,tb);
+ if (safety == ITER_UNSAFE) {
+ local_fix_table(tb);
+ }
+ cret = tb->common.meth->db_select_delete(BIF_P, tb, BIF_ARG_2, &ret);
+
+ if (DID_TRAP(BIF_P,ret) && safety != ITER_SAFE) {
+ fix_table_locked(BIF_P,tb);
+ }
+ if (safety == ITER_UNSAFE) {
+ local_unfix_table(tb);
+ }
+ db_unlock(tb, LCK_WRITE_REC);
+
+ switch (cret) {
+ case DB_ERROR_NONE:
+ ERTS_BIF_PREP_RET(result, ret);
+ break;
+ case DB_ERROR_SYSRES:
+ ERTS_BIF_PREP_ERROR(result, BIF_P, SYSTEM_LIMIT);
+ break;
+ default:
+ ERTS_BIF_PREP_ERROR(result, BIF_P, BADARG);
+ break;
+ }
+
+ erts_match_set_release_result(BIF_P);
+
+ return result;
+}
+
+/*
+** Return a list of tables on this node
+*/
+BIF_RETTYPE ets_all_0(BIF_ALIST_0)
+{
+ DbTable* tb;
+ Eterm previous;
+ int i, j;
+ Eterm* hp;
+ Eterm* hendp;
+ int t_tabs_cnt;
+ int t_max_tabs;
+
+ erts_smp_spin_lock(&meta_main_tab_main_lock);
+ t_tabs_cnt = meta_main_tab_cnt;
+ t_max_tabs = db_max_tabs;
+ erts_smp_spin_unlock(&meta_main_tab_main_lock);
+
+ hp = HAlloc(BIF_P, 2*t_tabs_cnt);
+ hendp = hp + 2*t_tabs_cnt;
+
+ previous = NIL;
+ j = 0;
+ for(i = 0; (i < t_max_tabs && j < t_tabs_cnt); i++) {
+ meta_main_tab_lock(i);
+ if (IS_SLOT_ALIVE(i)) {
+ j++;
+ tb = meta_main_tab[i].u.tb;
+ previous = CONS(hp, tb->common.id, previous);
+ hp += 2;
+ }
+ meta_main_tab_unlock(i);
+ }
+ HRelease(BIF_P, hendp, hp);
+ BIF_RET(previous);
+}
+
+
+/*
+** db_slot(Db, Slot) -> [Items].
+*/
+BIF_RETTYPE ets_slot_2(BIF_ALIST_2)
+{
+ DbTable* tb;
+ int cret;
+ Eterm ret;
+
+ CHECK_TABLES();
+
+ if ((tb = db_get_table(BIF_P, BIF_ARG_1, DB_READ, LCK_READ)) == NULL) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+ /* The slot number is checked in table specific code. */
+ cret = tb->common.meth->db_slot(BIF_P, tb, BIF_ARG_2, &ret);
+ db_unlock(tb, LCK_READ);
+ switch (cret) {
+ case DB_ERROR_NONE:
+ BIF_RET(ret);
+ case DB_ERROR_SYSRES:
+ BIF_ERROR(BIF_P, SYSTEM_LIMIT);
+ default:
+ BIF_ERROR(BIF_P, BADARG);
+ }
+}
+
+/*
+** The match BIF, called as ets:match(Table, Pattern), ets:match(Continuation) or ets:match(Table,Pattern,ChunkSize).
+*/
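+/*
+** For exposition: ets_match_2/3 below build the match spec
+** [{Pattern, [], ['$$']}] on the C stack, so in Erlang terms (a sketch):
+**
+**     ets:match(Tab, Pattern) =:= ets:select(Tab, [{Pattern, [], ['$$']}])
+*/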
+
+BIF_RETTYPE ets_match_1(BIF_ALIST_1)
+{
+ return ets_select_1(BIF_P, BIF_ARG_1);
+}
+
+BIF_RETTYPE ets_match_2(BIF_ALIST_2)
+{
+ Eterm ms;
+ Eterm buff[8];
+ Eterm *hp = buff;
+ /*hp = HAlloc(BIF_P, 8);*/
+ ms = CONS(hp, am_DollarDollar, NIL);
+ hp += 2;
+ ms = TUPLE3(hp, BIF_ARG_2, NIL, ms);
+ hp += 4;
+ ms = CONS(hp, ms, NIL);
+ return ets_select_2(BIF_P, BIF_ARG_1, ms);
+}
+
+BIF_RETTYPE ets_match_3(BIF_ALIST_3)
+{
+ Eterm ms;
+ Eterm buff[8];
+ Eterm *hp = buff;
+ /*hp = HAlloc(BIF_P, 8);*/
+ ms = CONS(hp, am_DollarDollar, NIL);
+ hp += 2;
+ ms = TUPLE3(hp, BIF_ARG_2, NIL, ms);
+ hp += 4;
+ ms = CONS(hp, ms, NIL);
+ return ets_select_3(BIF_P, BIF_ARG_1, ms, BIF_ARG_3);
+}
+
+
+BIF_RETTYPE ets_select_3(BIF_ALIST_3)
+{
+ BIF_RETTYPE result;
+ DbTable* tb;
+ int cret;
+ Eterm ret;
+ Sint chunk_size;
+ enum DbIterSafety safety;
+
+ CHECK_TABLES();
+
+ /* Chunk size strictly greater than 0 */
+ if (is_not_small(BIF_ARG_3) || (chunk_size = signed_val(BIF_ARG_3)) <= 0) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+ if ((tb = db_get_table(BIF_P, BIF_ARG_1, DB_READ, LCK_READ)) == NULL) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+ safety = ITERATION_SAFETY(BIF_P,tb);
+ if (safety == ITER_UNSAFE) {
+ local_fix_table(tb);
+ }
+ cret = tb->common.meth->db_select_chunk(BIF_P, tb,
+ BIF_ARG_2, chunk_size,
+ 0 /* not reversed */,
+ &ret);
+ if (DID_TRAP(BIF_P,ret) && safety != ITER_SAFE) {
+ fix_table_locked(BIF_P, tb);
+ }
+ if (safety == ITER_UNSAFE) {
+ local_unfix_table(tb);
+ }
+ db_unlock(tb, LCK_READ);
+
+ switch (cret) {
+ case DB_ERROR_NONE:
+ ERTS_BIF_PREP_RET(result, ret);
+ break;
+ case DB_ERROR_SYSRES:
+ ERTS_BIF_PREP_ERROR(result, BIF_P, SYSTEM_LIMIT);
+ break;
+ default:
+ ERTS_BIF_PREP_ERROR(result, BIF_P, BADARG);
+ break;
+ }
+
+ erts_match_set_release_result(BIF_P);
+
+ return result;
+}
+
+
+/* We get here instead of in the real BIF when trapping */
+static BIF_RETTYPE ets_select_trap_1(Process *p, Eterm a1)
+{
+ BIF_RETTYPE result;
+ DbTable* tb;
+ int cret;
+ Eterm ret;
+ Eterm *tptr;
+ db_lock_kind_t kind = LCK_READ;
+
+ CHECK_TABLES();
+
+ tptr = tuple_val(a1);
+    ASSERT(arityval(*tptr) >= 1);
+
+ if ((tb = db_get_table(p, tptr[1], DB_READ, kind)) == NULL) {
+ BIF_ERROR(p, BADARG);
+ }
+
+ cret = tb->common.meth->db_select_continue(p, tb, a1,
+ &ret);
+
+ if (!DID_TRAP(p,ret) && ITERATION_SAFETY(p,tb) != ITER_SAFE) {
+ unfix_table_locked(p, tb, &kind);
+ }
+ db_unlock(tb, kind);
+
+ switch (cret) {
+ case DB_ERROR_NONE:
+ ERTS_BIF_PREP_RET(result, ret);
+ break;
+ case DB_ERROR_SYSRES:
+ ERTS_BIF_PREP_ERROR(result, p, SYSTEM_LIMIT);
+ break;
+ default:
+ ERTS_BIF_PREP_ERROR(result, p, BADARG);
+ break;
+ }
+
+ erts_match_set_release_result(p);
+
+ return result;
+}
+
+
+BIF_RETTYPE ets_select_1(BIF_ALIST_1)
+{
+ BIF_RETTYPE result;
+ DbTable* tb;
+ int cret;
+ Eterm ret;
+ Eterm *tptr;
+ enum DbIterSafety safety;
+
+ CHECK_TABLES();
+
+ /*
+ * Make sure that the table exists.
+ */
+
+ if (!is_tuple(BIF_ARG_1)) {
+ if (BIF_ARG_1 == am_EOT) {
+ BIF_RET(am_EOT);
+ }
+ BIF_ERROR(BIF_P, BADARG);
+ }
+ tptr = tuple_val(BIF_ARG_1);
+ if (arityval(*tptr) < 1 ||
+ (tb = db_get_table(BIF_P, tptr[1], DB_READ, LCK_READ)) == NULL) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+
+ safety = ITERATION_SAFETY(BIF_P,tb);
+ if (safety == ITER_UNSAFE) {
+ local_fix_table(tb);
+ }
+
+ cret = tb->common.meth->db_select_continue(BIF_P,tb,
+ BIF_ARG_1, &ret);
+
+ if (DID_TRAP(BIF_P,ret) && safety != ITER_SAFE) {
+ fix_table_locked(BIF_P, tb);
+ }
+ if (safety == ITER_UNSAFE) {
+ local_unfix_table(tb);
+ }
+ db_unlock(tb, LCK_READ);
+
+ switch (cret) {
+ case DB_ERROR_NONE:
+ ERTS_BIF_PREP_RET(result, ret);
+ break;
+ case DB_ERROR_SYSRES:
+ ERTS_BIF_PREP_ERROR(result, BIF_P, SYSTEM_LIMIT);
+ break;
+ default:
+ ERTS_BIF_PREP_ERROR(result, BIF_P, BADARG);
+ break;
+ }
+
+ erts_match_set_release_result(BIF_P);
+
+ return result;
+}
+
+BIF_RETTYPE ets_select_2(BIF_ALIST_2)
+{
+ BIF_RETTYPE result;
+ DbTable* tb;
+ int cret;
+ enum DbIterSafety safety;
+ Eterm ret;
+
+ CHECK_TABLES();
+
+ /*
+ * Make sure that the table exists.
+ */
+
+ if ((tb = db_get_table(BIF_P, BIF_ARG_1, DB_READ, LCK_READ)) == NULL) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+ safety = ITERATION_SAFETY(BIF_P,tb);
+ if (safety == ITER_UNSAFE) {
+ local_fix_table(tb);
+ }
+
+ cret = tb->common.meth->db_select(BIF_P, tb, BIF_ARG_2,
+ 0, &ret);
+
+ if (DID_TRAP(BIF_P,ret) && safety != ITER_SAFE) {
+ fix_table_locked(BIF_P, tb);
+ }
+ if (safety == ITER_UNSAFE) {
+ local_unfix_table(tb);
+ }
+ db_unlock(tb, LCK_READ);
+
+ switch (cret) {
+ case DB_ERROR_NONE:
+ ERTS_BIF_PREP_RET(result, ret);
+ break;
+ case DB_ERROR_SYSRES:
+ ERTS_BIF_PREP_ERROR(result, BIF_P, SYSTEM_LIMIT);
+ break;
+ default:
+ ERTS_BIF_PREP_ERROR(result, BIF_P, BADARG);
+ break;
+ }
+
+ erts_match_set_release_result(BIF_P);
+
+ return result;
+}
+
+/* We get here instead of in the real BIF when trapping */
+static BIF_RETTYPE ets_select_count_1(Process *p, Eterm a1)
+{
+ BIF_RETTYPE result;
+ DbTable* tb;
+ int cret;
+ Eterm ret;
+ Eterm *tptr;
+ db_lock_kind_t kind = LCK_READ;
+
+ CHECK_TABLES();
+
+ tptr = tuple_val(a1);
+    ASSERT(arityval(*tptr) >= 1);
+ if ((tb = db_get_table(p, tptr[1], DB_READ, kind)) == NULL) {
+ BIF_ERROR(p, BADARG);
+ }
+
+ cret = tb->common.meth->db_select_count_continue(p, tb, a1, &ret);
+
+ if (!DID_TRAP(p,ret) && ITERATION_SAFETY(p,tb) != ITER_SAFE) {
+ unfix_table_locked(p, tb, &kind);
+ }
+ db_unlock(tb, kind);
+
+ switch (cret) {
+ case DB_ERROR_NONE:
+ ERTS_BIF_PREP_RET(result, ret);
+ break;
+ case DB_ERROR_SYSRES:
+ ERTS_BIF_PREP_ERROR(result, p, SYSTEM_LIMIT);
+ break;
+ default:
+ ERTS_BIF_PREP_ERROR(result, p, BADARG);
+ break;
+ }
+
+ erts_match_set_release_result(p);
+
+ return result;
+}
+
+BIF_RETTYPE ets_select_count_2(BIF_ALIST_2)
+{
+ BIF_RETTYPE result;
+ DbTable* tb;
+ int cret;
+ enum DbIterSafety safety;
+ Eterm ret;
+
+ CHECK_TABLES();
+ /*
+ * Make sure that the table exists.
+ */
+
+ if ((tb = db_get_table(BIF_P, BIF_ARG_1, DB_READ, LCK_READ)) == NULL) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+ safety = ITERATION_SAFETY(BIF_P,tb);
+ if (safety == ITER_UNSAFE) {
+ local_fix_table(tb);
+ }
+ cret = tb->common.meth->db_select_count(BIF_P,tb,BIF_ARG_2, &ret);
+
+ if (DID_TRAP(BIF_P,ret) && safety != ITER_SAFE) {
+ fix_table_locked(BIF_P, tb);
+ }
+ if (safety == ITER_UNSAFE) {
+ local_unfix_table(tb);
+ }
+ db_unlock(tb, LCK_READ);
+ switch (cret) {
+ case DB_ERROR_NONE:
+ ERTS_BIF_PREP_RET(result, ret);
+ break;
+ case DB_ERROR_SYSRES:
+ ERTS_BIF_PREP_ERROR(result, BIF_P, SYSTEM_LIMIT);
+ break;
+ default:
+ ERTS_BIF_PREP_ERROR(result, BIF_P, BADARG);
+ break;
+ }
+
+ erts_match_set_release_result(BIF_P);
+
+ return result;
+}
+
+
+BIF_RETTYPE ets_select_reverse_3(BIF_ALIST_3)
+{
+ BIF_RETTYPE result;
+ DbTable* tb;
+ int cret;
+ enum DbIterSafety safety;
+ Eterm ret;
+ Sint chunk_size;
+
+ CHECK_TABLES();
+ /*
+ * Make sure that the table exists.
+ */
+
+ if ((tb = db_get_table(BIF_P, BIF_ARG_1, DB_READ, LCK_READ)) == NULL) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+
+ /* Chunk size strictly greater than 0 */
+ if (is_not_small(BIF_ARG_3) || (chunk_size = signed_val(BIF_ARG_3)) <= 0) {
+ db_unlock(tb, LCK_READ);
+ BIF_ERROR(BIF_P, BADARG);
+ }
+ safety = ITERATION_SAFETY(BIF_P,tb);
+ if (safety == ITER_UNSAFE) {
+ local_fix_table(tb);
+ }
+ cret = tb->common.meth->db_select_chunk(BIF_P,tb,
+ BIF_ARG_2, chunk_size,
+ 1 /* reversed */, &ret);
+ if (DID_TRAP(BIF_P,ret) && safety != ITER_SAFE) {
+ fix_table_locked(BIF_P, tb);
+ }
+ if (safety == ITER_UNSAFE) {
+ local_unfix_table(tb);
+ }
+ db_unlock(tb, LCK_READ);
+ switch (cret) {
+ case DB_ERROR_NONE:
+ ERTS_BIF_PREP_RET(result, ret);
+ break;
+ case DB_ERROR_SYSRES:
+ ERTS_BIF_PREP_ERROR(result, BIF_P, SYSTEM_LIMIT);
+ break;
+ default:
+ ERTS_BIF_PREP_ERROR(result, BIF_P, BADARG);
+ break;
+ }
+ erts_match_set_release_result(BIF_P);
+ return result;
+}
+
+BIF_RETTYPE ets_select_reverse_1(BIF_ALIST_1)
+{
+ return ets_select_1(BIF_P, BIF_ARG_1);
+}
+
+BIF_RETTYPE ets_select_reverse_2(BIF_ALIST_2)
+{
+ BIF_RETTYPE result;
+ DbTable* tb;
+ int cret;
+ enum DbIterSafety safety;
+ Eterm ret;
+
+ CHECK_TABLES();
+ /*
+ * Make sure that the table exists.
+ */
+
+ if ((tb = db_get_table(BIF_P, BIF_ARG_1, DB_READ, LCK_READ)) == NULL) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+ safety = ITERATION_SAFETY(BIF_P,tb);
+ if (safety == ITER_UNSAFE) {
+ local_fix_table(tb);
+ }
+ cret = tb->common.meth->db_select(BIF_P,tb,BIF_ARG_2,
+ 1 /*reversed*/, &ret);
+
+ if (DID_TRAP(BIF_P,ret) && safety != ITER_SAFE) {
+ fix_table_locked(BIF_P, tb);
+ }
+ if (safety == ITER_UNSAFE) {
+ local_unfix_table(tb);
+ }
+ db_unlock(tb, LCK_READ);
+ switch (cret) {
+ case DB_ERROR_NONE:
+ ERTS_BIF_PREP_RET(result, ret);
+ break;
+ case DB_ERROR_SYSRES:
+ ERTS_BIF_PREP_ERROR(result, BIF_P, SYSTEM_LIMIT);
+ break;
+ default:
+ ERTS_BIF_PREP_ERROR(result, BIF_P, BADARG);
+ break;
+ }
+ erts_match_set_release_result(BIF_P);
+ return result;
+}
+
+
+/*
+** ets:match_object(Continuation), ets:match_object(Table, Pattern), ets:match_object(Table,Pattern,ChunkSize)
+*/
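+/*
+** For exposition: the stack-built match spec below is [{Pattern,[],['$_']}],
+** so in Erlang terms (a sketch):
+**
+**     ets:match_object(Tab, Pattern)
+**         =:= ets:select(Tab, [{Pattern, [], ['$_']}])
+*/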
+BIF_RETTYPE ets_match_object_1(BIF_ALIST_1)
+{
+ return ets_select_1(BIF_P, BIF_ARG_1);
+}
+
+BIF_RETTYPE ets_match_object_2(BIF_ALIST_2)
+{
+ Eterm ms;
+ Eterm buff[8];
+ Eterm *hp = buff;
+ /*hp = HAlloc(BIF_P, 8);*/
+ ms = CONS(hp, am_DollarUnderscore, NIL);
+ hp += 2;
+ ms = TUPLE3(hp, BIF_ARG_2, NIL, ms);
+ hp += 4;
+ ms = CONS(hp, ms, NIL);
+ return ets_select_2(BIF_P, BIF_ARG_1, ms);
+}
+
+BIF_RETTYPE ets_match_object_3(BIF_ALIST_3)
+{
+ Eterm ms;
+ Eterm buff[8];
+ Eterm *hp = buff;
+ /*hp = HAlloc(BIF_P, 8);*/
+ ms = CONS(hp, am_DollarUnderscore, NIL);
+ hp += 2;
+ ms = TUPLE3(hp, BIF_ARG_2, NIL, ms);
+ hp += 4;
+ ms = CONS(hp, ms, NIL);
+ return ets_select_3(BIF_P, BIF_ARG_1, ms, BIF_ARG_3);
+}
+
+/*
+ * BIF to extract information about a particular table.
+ */
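+/*
+** Illustrative only (the values are hypothetical): the result is a list of
+** {Field, Value} pairs, consed in fields[] order and hence reversed:
+**
+**     ets:info(Tab) ->
+**         [{memory,301}, {owner,<0.35.0>}, {heir,none}, {name,foo},
+**          {size,0}, {node,nonode@nohost}, {named_table,false},
+**          {type,set}, {keypos,1}, {protection,public}]
+*/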
+
+BIF_RETTYPE ets_info_1(BIF_ALIST_1)
+{
+ static Eterm fields[] = {am_protection, am_keypos, am_type, am_named_table,
+ am_node, am_size, am_name, am_heir, am_owner, am_memory};
+ Eterm results[sizeof(fields)/sizeof(Eterm)];
+ DbTable* tb;
+ Eterm res;
+ int i;
+ Eterm* hp;
+ /*Process* rp = NULL;*/
+ Eterm owner;
+
+ if ((tb = db_get_table(BIF_P, BIF_ARG_1, DB_INFO, LCK_READ)) == NULL) {
+ if (is_atom(BIF_ARG_1) || is_small(BIF_ARG_1)) {
+ BIF_RET(am_undefined);
+ }
+ BIF_ERROR(BIF_P, BADARG);
+ }
+
+ owner = tb->common.owner;
+
+ /* If/when we implement lockless private tables:
+ if ((tb->common.status & DB_PRIVATE) && owner != BIF_P->id) {
+ db_unlock(tb, LCK_READ);
+ rp = erts_pid2proc_not_running(BIF_P, ERTS_PROC_LOCK_MAIN,
+ owner, ERTS_PROC_LOCK_MAIN);
+ if (rp == NULL) {
+ BIF_RET(am_undefined);
+ }
+ if (rp == ERTS_PROC_LOCK_BUSY) {
+ ERTS_BIF_YIELD1(bif_export[BIF_ets_info_1], BIF_P, BIF_ARG_1);
+ }
+ if ((tb = db_get_table(BIF_P, BIF_ARG_1, DB_INFO, LCK_READ)) == NULL
+ || tb->common.owner != owner) {
+ if (BIF_P != rp)
+ erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_MAIN);
+ if (is_atom(BIF_ARG_1) || is_small(BIF_ARG_1)) {
+ BIF_RET(am_undefined);
+ }
+ BIF_ERROR(BIF_P, BADARG);
+ }
+ }*/
+ for (i = 0; i < sizeof(fields)/sizeof(Eterm); i++) {
+ results[i] = table_info(BIF_P, tb, fields[i]);
+ ASSERT(is_value(results[i]));
+ }
+ db_unlock(tb, LCK_READ);
+
+ /*if (rp != NULL && rp != BIF_P)
+ erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_MAIN);*/
+
+ hp = HAlloc(BIF_P, 5*sizeof(fields)/sizeof(Eterm));
+ res = NIL;
+ for (i = 0; i < sizeof(fields)/sizeof(Eterm); i++) {
+ Eterm tuple;
+ tuple = TUPLE2(hp, fields[i], results[i]);
+ hp += 3;
+ res = CONS(hp, tuple, res);
+ hp += 2;
+ }
+ BIF_RET(res);
+}
+
+/*
+ * BIF to extract information about a particular table.
+ */
+
+BIF_RETTYPE ets_info_2(BIF_ALIST_2)
+{
+ DbTable* tb;
+ Eterm ret = THE_NON_VALUE;
+
+ if ((tb = db_get_table(BIF_P, BIF_ARG_1, DB_INFO, LCK_READ)) == NULL) {
+ if (is_atom(BIF_ARG_1) || is_small(BIF_ARG_1)) {
+ BIF_RET(am_undefined);
+ }
+ BIF_ERROR(BIF_P, BADARG);
+ }
+ ret = table_info(BIF_P, tb, BIF_ARG_2);
+ db_unlock(tb, LCK_READ);
+ if (is_non_value(ret)) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+ BIF_RET(ret);
+}
+
+
+BIF_RETTYPE ets_is_compiled_ms_1(BIF_ALIST_1)
+{
+ if (erts_db_is_compiled_ms(BIF_ARG_1)) {
+ BIF_RET(am_true);
+ } else {
+ BIF_RET(am_false);
+ }
+}
+
+BIF_RETTYPE ets_match_spec_compile_1(BIF_ALIST_1)
+{
+ Binary *mp = db_match_set_compile(BIF_P, BIF_ARG_1, DCOMP_TABLE);
+ Eterm *hp;
+ if (mp == NULL) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+
+ hp = HAlloc(BIF_P, PROC_BIN_SIZE);
+
+ BIF_RET(erts_mk_magic_binary_term(&hp, &MSO(BIF_P), mp));
+}
+
+BIF_RETTYPE ets_match_spec_run_r_3(BIF_ALIST_3)
+{
+ Eterm ret = BIF_ARG_3;
+ int i = 0;
+ Eterm *hp;
+ Eterm lst;
+ ProcBin *bp;
+ Binary *mp;
+ Eterm res;
+ Uint32 dummy;
+ Uint sz;
+
+ if (!(is_list(BIF_ARG_1) || BIF_ARG_1 == NIL) || !is_binary(BIF_ARG_2)) {
+ error:
+ BIF_ERROR(BIF_P, BADARG);
+ }
+
+ bp = (ProcBin*) binary_val(BIF_ARG_2);
+ if (thing_subtag(bp->thing_word) != REFC_BINARY_SUBTAG) {
+ goto error;
+ }
+ mp = bp->val;
+ if (!IsMatchProgBinary(mp)) {
+ goto error;
+ }
+
+ if (BIF_ARG_1 == NIL) {
+ BIF_RET(BIF_ARG_3);
+ }
+ for (lst = BIF_ARG_1; is_list(lst); lst = CDR(list_val(lst))) {
+ if (++i > CONTEXT_REDS) {
+ BUMP_ALL_REDS(BIF_P);
+ BIF_TRAP3(bif_export[BIF_ets_match_spec_run_r_3],
+ BIF_P,lst,BIF_ARG_2,ret);
+ }
+ res = db_prog_match(BIF_P, mp, CAR(list_val(lst)), 0, &dummy);
+ if (is_value(res)) {
+ sz = size_object(res);
+ hp = HAlloc(BIF_P, sz + 2);
+ res = copy_struct(res, sz, &hp, &MSO(BIF_P));
+ ret = CONS(hp,res,ret);
+ /*hp += 2;*/
+ }
+ }
+ if (lst != NIL) {
+ goto error;
+ }
+ BIF_RET2(ret,i);
+}
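+/*
+** Note: the "_r" suffix reflects the reversed accumulation above (results
+** are consed onto BIF_ARG_3). A user-level ets:match_spec_run/2 would be
+** expected to wrap this BIF roughly as (a sketch, not the actual library
+** code):
+**
+**     match_spec_run(List, CompiledMS) ->
+**         lists:reverse(ets:match_spec_run_r(List, CompiledMS, [])).
+*/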
+
+
+/*
+** External interface (NOT BIF's)
+*/
+
+
+/* Init the db */
+
+void init_db(void)
+{
+ DbTable init_tb;
+ int i;
+ extern Eterm* em_apply_bif;
+ Eterm *hp;
+ unsigned bits;
+ size_t size;
+
+#ifdef ERTS_SMP
+ for (i=0; i<META_MAIN_TAB_LOCK_CNT; i++) {
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ erts_smp_spinlock_init_x(&meta_main_tab_locks[i].lck, "meta_main_tab_slot", make_small(i));
+#else
+ erts_smp_spinlock_init(&meta_main_tab_locks[i].lck, "meta_main_tab_slot");
+#endif
+ }
+ erts_smp_spinlock_init(&meta_main_tab_main_lock, "meta_main_tab_main");
+ for (i=0; i<META_NAME_TAB_LOCK_CNT; i++) {
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ erts_smp_rwmtx_init_x(&meta_name_tab_rwlocks[i].lck, "meta_name_tab", make_small(i));
+#else
+ erts_smp_rwmtx_init(&meta_name_tab_rwlocks[i].lck, "meta_name_tab");
+#endif
+ }
+#endif
+
+ erts_smp_atomic_init(&erts_ets_misc_mem_size, 0);
+ db_initialize_util();
+
+ if (user_requested_db_max_tabs < DB_DEF_MAX_TABS)
+ db_max_tabs = DB_DEF_MAX_TABS;
+ else
+ db_max_tabs = user_requested_db_max_tabs;
+
+ bits = erts_fit_in_bits(db_max_tabs-1);
+ if (bits > SMALL_BITS) {
+	erl_exit(1,"Max limit for ets tables too high %u (max %u).",
+ db_max_tabs, 1L<<SMALL_BITS);
+ }
+ meta_main_tab_slot_mask = (1L<<bits) - 1;
+ meta_main_tab_seq_incr = (1L<<bits);
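+
+    /*
+    ** Worked example: with the default db_max_tabs = 2053 we get
+    ** erts_fit_in_bits(2052) = 12, so meta_main_tab_slot_mask = 0xfff and
+    ** meta_main_tab_seq_incr = 0x1000; a table id can then keep its slot
+    ** index in the low 12 bits with a reuse sequence number above it.
+    */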
+
+ size = sizeof(*meta_main_tab)*db_max_tabs;
+ meta_main_tab = erts_db_alloc_nt(ERTS_ALC_T_DB_TABLES, size);
+ ERTS_ETS_MISC_MEM_ADD(size);
+
+ meta_main_tab_cnt = 0;
+ for (i=1; i<db_max_tabs; i++) {
+ SET_NEXT_FREE_SLOT(i-1,i);
+ }
+ SET_NEXT_FREE_SLOT(db_max_tabs-1, (Uint)-1);
+ meta_main_tab_first_free = 0;
+
+ meta_name_tab_mask = (1L<<(bits-1)) - 1; /* At least half the size of main tab */
+ size = sizeof(struct meta_name_tab_entry)*(meta_name_tab_mask+1);
+ meta_name_tab = erts_db_alloc_nt(ERTS_ALC_T_DB_TABLES, size);
+ ERTS_ETS_MISC_MEM_ADD(size);
+
+ for (i=0; i<=meta_name_tab_mask; i++) {
+ meta_name_tab[i].pu.tb = NULL;
+ meta_name_tab[i].u.name_atom = NIL;
+ }
+
+ db_initialize_hash();
+ db_initialize_tree();
+
+ /*TT*/
+    /* Create the meta table inverse mapping (pid -> table). */
+ erts_smp_atomic_init(&init_tb.common.memory_size, 0);
+ meta_pid_to_tab = (DbTable*) erts_db_alloc(ERTS_ALC_T_DB_TABLE,
+ &init_tb,
+ sizeof(DbTable));
+ ERTS_ETS_MISC_MEM_ADD(sizeof(DbTable));
+ erts_smp_atomic_init(&meta_pid_to_tab->common.memory_size,
+ erts_smp_atomic_read(&init_tb.common.memory_size));
+
+ meta_pid_to_tab->common.id = NIL;
+ meta_pid_to_tab->common.the_name = am_true;
+ meta_pid_to_tab->common.status = (DB_NORMAL | DB_BAG | DB_PUBLIC | DB_FINE_LOCKED);
+#ifdef ERTS_SMP
+ meta_pid_to_tab->common.type
+ = meta_pid_to_tab->common.status & ERTS_ETS_TABLE_TYPES;
+ /* Note, 'type' is *read only* from now on... */
+ meta_pid_to_tab->common.is_thread_safe = 0;
+#endif
+ meta_pid_to_tab->common.keypos = 1;
+ meta_pid_to_tab->common.owner = NIL;
+ erts_smp_atomic_init(&meta_pid_to_tab->common.nitems, 0);
+ meta_pid_to_tab->common.slot = -1;
+ meta_pid_to_tab->common.meth = &db_hash;
+
+ erts_refc_init(&meta_pid_to_tab->common.ref, 1);
+ erts_refc_init(&meta_pid_to_tab->common.fixref, 0);
+    /* Neither rwlock nor fixlock is used
+    db_init_lock(meta_pid_to_tab, "meta_pid_to_tab", "meta_pid_to_tab_FIX");*/
+
+ if (db_create_hash(NULL, meta_pid_to_tab) != DB_ERROR_NONE) {
+ erl_exit(1,"Unable to create ets metadata tables.");
+ }
+
+ erts_smp_atomic_set(&init_tb.common.memory_size, 0);
+ meta_pid_to_fixed_tab = (DbTable*) erts_db_alloc(ERTS_ALC_T_DB_TABLE,
+ &init_tb,
+ sizeof(DbTable));
+ ERTS_ETS_MISC_MEM_ADD(sizeof(DbTable));
+ erts_smp_atomic_init(&meta_pid_to_fixed_tab->common.memory_size,
+ erts_smp_atomic_read(&init_tb.common.memory_size));
+
+ meta_pid_to_fixed_tab->common.id = NIL;
+ meta_pid_to_fixed_tab->common.the_name = am_true;
+ meta_pid_to_fixed_tab->common.status = (DB_NORMAL | DB_BAG | DB_PUBLIC | DB_FINE_LOCKED);
+#ifdef ERTS_SMP
+ meta_pid_to_fixed_tab->common.type
+ = meta_pid_to_fixed_tab->common.status & ERTS_ETS_TABLE_TYPES;
+ /* Note, 'type' is *read only* from now on... */
+ meta_pid_to_fixed_tab->common.is_thread_safe = 0;
+#endif
+ meta_pid_to_fixed_tab->common.keypos = 1;
+ meta_pid_to_fixed_tab->common.owner = NIL;
+ erts_smp_atomic_init(&meta_pid_to_fixed_tab->common.nitems, 0);
+ meta_pid_to_fixed_tab->common.slot = -1;
+ meta_pid_to_fixed_tab->common.meth = &db_hash;
+
+ erts_refc_init(&meta_pid_to_fixed_tab->common.ref, 1);
+ erts_refc_init(&meta_pid_to_fixed_tab->common.fixref, 0);
+    /* Neither rwlock nor fixlock is used
+    db_init_lock(meta_pid_to_fixed_tab, "meta_pid_to_fixed_tab", "meta_pid_to_fixed_tab_FIX");*/
+
+ if (db_create_hash(NULL, meta_pid_to_fixed_tab) != DB_ERROR_NONE) {
+ erl_exit(1,"Unable to create ets metadata tables.");
+ }
+
+    /* Non-visible BIF to trap to. */
+ memset(&ets_select_delete_continue_exp, 0, sizeof(Export));
+ ets_select_delete_continue_exp.address =
+ &ets_select_delete_continue_exp.code[3];
+ ets_select_delete_continue_exp.code[0] = am_ets;
+ ets_select_delete_continue_exp.code[1] = am_atom_put("delete_trap",11);
+ ets_select_delete_continue_exp.code[2] = 1;
+ ets_select_delete_continue_exp.code[3] =
+ (Eterm) em_apply_bif;
+ ets_select_delete_continue_exp.code[4] =
+ (Eterm) &ets_select_delete_1;
+
+    /* Non-visible BIF to trap to. */
+ memset(&ets_select_count_continue_exp, 0, sizeof(Export));
+ ets_select_count_continue_exp.address =
+ &ets_select_count_continue_exp.code[3];
+ ets_select_count_continue_exp.code[0] = am_ets;
+    ets_select_count_continue_exp.code[1] = am_atom_put("count_trap",10);
+ ets_select_count_continue_exp.code[2] = 1;
+ ets_select_count_continue_exp.code[3] =
+ (Eterm) em_apply_bif;
+ ets_select_count_continue_exp.code[4] =
+ (Eterm) &ets_select_count_1;
+
+    /* Non-visible BIF to trap to. */
+ memset(&ets_select_continue_exp, 0, sizeof(Export));
+ ets_select_continue_exp.address =
+ &ets_select_continue_exp.code[3];
+ ets_select_continue_exp.code[0] = am_ets;
+ ets_select_continue_exp.code[1] = am_atom_put("select_trap",11);
+ ets_select_continue_exp.code[2] = 1;
+ ets_select_continue_exp.code[3] =
+ (Eterm) em_apply_bif;
+ ets_select_continue_exp.code[4] =
+ (Eterm) &ets_select_trap_1;
+
+    /* Non-visible BIF to trap to. */
+ memset(&ets_delete_continue_exp, 0, sizeof(Export));
+ ets_delete_continue_exp.address = &ets_delete_continue_exp.code[3];
+ ets_delete_continue_exp.code[0] = am_ets;
+ ets_delete_continue_exp.code[1] = am_atom_put("delete_trap",11);
+ ets_delete_continue_exp.code[2] = 1;
+ ets_delete_continue_exp.code[3] = (Eterm) em_apply_bif;
+ ets_delete_continue_exp.code[4] = (Eterm) &ets_delete_trap;
+
+ hp = ms_delete_all_buff;
+ ms_delete_all = CONS(hp, am_true, NIL);
+ hp += 2;
+ ms_delete_all = TUPLE3(hp,am_Underscore,NIL,ms_delete_all);
+ hp +=4;
+ ms_delete_all = CONS(hp, ms_delete_all,NIL);
+}
+
+#define ARRAY_CHUNK 100
+
+typedef enum {
+ ErtsDbProcCleanupProgressTables,
+ ErtsDbProcCleanupProgressFixations,
+    ErtsDbProcCleanupProgressDone
+} ErtsDbProcCleanupProgress;
+
+typedef enum {
+ ErtsDbProcCleanupOpGetTables,
+ ErtsDbProcCleanupOpDeleteTables,
+ ErtsDbProcCleanupOpGetFixations,
+ ErtsDbProcCleanupOpDeleteFixations,
+ ErtsDbProcCleanupOpDone
+} ErtsDbProcCleanupOperation;
+
+typedef struct {
+ ErtsDbProcCleanupProgress progress;
+ ErtsDbProcCleanupOperation op;
+ struct {
+ Eterm arr[ARRAY_CHUNK];
+ int size;
+ int ix;
+ int clean_ix;
+ } slots;
+} ErtsDbProcCleanupState;
+
+
+static void
+proc_exit_cleanup_tables_meta_data(Eterm pid, ErtsDbProcCleanupState *state)
+{
+ ASSERT(state->slots.clean_ix <= state->slots.ix);
+ if (state->slots.clean_ix < state->slots.ix) {
+ db_meta_lock(meta_pid_to_tab, LCK_WRITE_REC);
+ if (state->slots.size < ARRAY_CHUNK
+ && state->slots.ix == state->slots.size) {
+ Eterm dummy;
+ db_erase_hash(meta_pid_to_tab,pid,&dummy);
+ }
+ else {
+ int ix;
+ /* Need to erase each explicitly */
+ for (ix = state->slots.clean_ix; ix < state->slots.ix; ix++)
+ db_erase_bag_exact2(meta_pid_to_tab,
+ pid,
+ state->slots.arr[ix]);
+ }
+ db_meta_unlock(meta_pid_to_tab, LCK_WRITE_REC);
+ state->slots.clean_ix = state->slots.ix;
+ }
+}
+
+static void
+proc_exit_cleanup_fixations_meta_data(Eterm pid, ErtsDbProcCleanupState *state)
+{
+ ASSERT(state->slots.clean_ix <= state->slots.ix);
+ if (state->slots.clean_ix < state->slots.ix) {
+ db_meta_lock(meta_pid_to_fixed_tab, LCK_WRITE_REC);
+ if (state->slots.size < ARRAY_CHUNK
+ && state->slots.ix == state->slots.size) {
+ Eterm dummy;
+ db_erase_hash(meta_pid_to_fixed_tab,pid,&dummy);
+ }
+ else {
+ int ix;
+ /* Need to erase each explicitly */
+ for (ix = state->slots.clean_ix; ix < state->slots.ix; ix++)
+ db_erase_bag_exact2(meta_pid_to_fixed_tab,
+ pid,
+ state->slots.arr[ix]);
+ }
+ db_meta_unlock(meta_pid_to_fixed_tab, LCK_WRITE_REC);
+ state->slots.clean_ix = state->slots.ix;
+ }
+}
+
+/* In: the table is LCK_WRITE locked.
+** Returns TRUE : ok, the table is no longer mine and is NOT locked anymore.
+** Returns FALSE: failed, the table is still mine (LCK_WRITE locked).
+*/
+static int give_away_to_heir(Process* p, DbTable* tb)
+{
+ Process* to_proc;
+ ErtsProcLocks to_locks = ERTS_PROC_LOCK_MAIN;
+ Eterm buf[5];
+ Eterm to_pid;
+ Eterm heir_data;
+
+ ASSERT(tb->common.owner == p->id);
+ ASSERT(is_internal_pid(tb->common.heir));
+ ASSERT(tb->common.heir != p->id);
+retry:
+ to_pid = tb->common.heir;
+ to_proc = erts_pid2proc_opt(p, ERTS_PROC_LOCK_MAIN,
+ to_pid, to_locks,
+ ERTS_P2P_FLG_TRY_LOCK);
+ if (to_proc == ERTS_PROC_LOCK_BUSY) {
+ db_ref(tb); /* while unlocked */
+ db_unlock(tb,LCK_WRITE);
+ to_proc = erts_pid2proc(p, ERTS_PROC_LOCK_MAIN,
+ to_pid, to_locks);
+ db_lock(tb,LCK_WRITE);
+ tb = db_unref(tb);
+ ASSERT(tb != NULL);
+
+ if (tb->common.owner != p->id) {
+ if (to_proc != NULL ) {
+ erts_smp_proc_unlock(to_proc, to_locks);
+ }
+ db_unlock(tb,LCK_WRITE);
+ return !0; /* ok, someone already gave my table away */
+ }
+ if (tb->common.heir != to_pid) { /* someone changed the heir */
+ if (to_proc != NULL ) {
+ erts_smp_proc_unlock(to_proc, to_locks);
+ }
+ if (to_pid == p->id || to_pid == am_none) {
+ return 0; /* no real heir, table still mine */
+ }
+ goto retry;
+ }
+ }
+ if (to_proc == NULL) {
+ return 0; /* heir not alive, table still mine */
+ }
+ if (erts_cmp_timeval(&to_proc->started, &tb->common.heir_started) != 0) {
+ erts_smp_proc_unlock(to_proc, to_locks);
+ return 0; /* heir dead and pid reused, table still mine */
+ }
+ db_meta_lock(meta_pid_to_tab, LCK_WRITE_REC);
+ db_erase_bag_exact2(meta_pid_to_tab, tb->common.owner,
+ make_small(tb->common.slot));
+
+ to_proc->flags |= F_USING_DB;
+ tb->common.owner = to_pid;
+
+ db_put_hash(meta_pid_to_tab,
+ TUPLE2(buf,to_pid,make_small(tb->common.slot)),
+ 0);
+ db_meta_unlock(meta_pid_to_tab, LCK_WRITE_REC);
+
+ db_unlock(tb,LCK_WRITE);
+ heir_data = tb->common.heir_data;
+ if (!is_immed(heir_data)) {
+ Eterm* tpv = DBTERM_BUF((DbTerm*)heir_data); /* tuple_val */
+ ASSERT(arityval(*tpv) == 1);
+ heir_data = tpv[1];
+ }
+ erts_send_message(p, to_proc, &to_locks,
+ TUPLE4(buf, am_ETS_TRANSFER, tb->common.id, p->id, heir_data),
+ 0);
+ erts_smp_proc_unlock(to_proc, to_locks);
+ return !0;
+}
+
+/*
+ * erts_db_process_exiting() is called when a process terminates.
+ * It returns 0 when completely done, and !0 when it wants to
+ * yield. c_p->u.exit_data can hold a pointer to a state while
+ * yielding.
+ */
+#define ERTS_DB_INTERNAL_ERROR(LSTR) \
+ erl_exit(ERTS_ABORT_EXIT, "%s:%d:erts_db_process_exiting(): " LSTR "\n", \
+ __FILE__, __LINE__)
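+
+/*
+** A minimal sketch of the intended calling convention (the actual caller
+** lives in the process exit code outside this file):
+**
+**     while (erts_db_process_exiting(c_p, ERTS_PROC_LOCK_MAIN))
+**         ;  (pseudo code: yield here, call again on next schedule)
+**
+** Cleanup state survives between calls in c_p->u.exit_data.
+*/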
+
+int
+erts_db_process_exiting(Process *c_p, ErtsProcLocks c_p_locks)
+{
+ ErtsDbProcCleanupState *state = (ErtsDbProcCleanupState *) c_p->u.exit_data;
+ Eterm pid = c_p->id;
+ ErtsDbProcCleanupState default_state;
+ int ret;
+
+ if (!state) {
+ state = &default_state;
+ state->progress = ErtsDbProcCleanupProgressTables;
+ state->op = ErtsDbProcCleanupOpGetTables;
+ }
+
+ while (!0) {
+ switch (state->op) {
+ case ErtsDbProcCleanupOpGetTables:
+ state->slots.size = ARRAY_CHUNK;
+ db_meta_lock(meta_pid_to_tab, LCK_READ);
+ ret = db_get_element_array(meta_pid_to_tab,
+ pid,
+ 2,
+ state->slots.arr,
+ &state->slots.size);
+ db_meta_unlock(meta_pid_to_tab, LCK_READ);
+ if (ret == DB_ERROR_BADKEY) {
+ /* Done with tables; now fixations */
+ state->progress = ErtsDbProcCleanupProgressFixations;
+ state->op = ErtsDbProcCleanupOpGetFixations;
+ break;
+ } else if (ret != DB_ERROR_NONE) {
+ ERTS_DB_INTERNAL_ERROR("Inconsistent ets table metadata");
+ }
+
+ state->slots.ix = 0;
+ state->slots.clean_ix = 0;
+ state->op = ErtsDbProcCleanupOpDeleteTables;
+ /* Fall through */
+
+ case ErtsDbProcCleanupOpDeleteTables:
+
+ while (state->slots.ix < state->slots.size) {
+ DbTable *tb = NULL;
+ Sint ix = unsigned_val(state->slots.arr[state->slots.ix]);
+ meta_main_tab_lock(ix);
+ if (!IS_SLOT_FREE(ix)) {
+ tb = db_ref(GET_ANY_SLOT_TAB(ix));
+ ASSERT(tb);
+ }
+ meta_main_tab_unlock(ix);
+ if (tb) {
+ int do_yield;
+ db_lock_take_over_ref(tb, LCK_WRITE);
+ /* Ownership may have changed since
+ we looked up the table. */
+ if (tb->common.owner != pid) {
+ do_yield = 0;
+ db_unlock(tb, LCK_WRITE);
+ }
+ else if (tb->common.heir != am_none
+ && tb->common.heir != pid
+ && give_away_to_heir(c_p, tb)) {
+ do_yield = 0;
+ }
+ else {
+ int first_call;
+#ifdef HARDDEBUG
+ erts_fprintf(stderr,
+ "erts_db_process_exiting(); Table: %T, "
+ "Process: %T\n",
+ tb->common.id, pid);
+#endif
+ first_call = (tb->common.status & DB_DELETE) == 0;
+ if (first_call) {
+ /* Clear all access bits. */
+ tb->common.status &= ~(DB_PROTECTED
+ | DB_PUBLIC
+ | DB_PRIVATE);
+ tb->common.status |= DB_DELETE;
+
+ if (is_atom(tb->common.id))
+ remove_named_tab(tb->common.id);
+
+ free_heir_data(tb);
+ free_fixations_locked(tb);
+ }
+
+ do_yield = free_table_cont(c_p, tb, first_call, 0);
+ db_unlock(tb, LCK_WRITE);
+ }
+ if (do_yield)
+ goto yield;
+ }
+ state->slots.ix++;
+ if (ERTS_BIF_REDS_LEFT(c_p) <= 0)
+ goto yield;
+ }
+
+ proc_exit_cleanup_tables_meta_data(pid, state);
+ state->op = ErtsDbProcCleanupOpGetTables;
+ break;
+
+ case ErtsDbProcCleanupOpGetFixations:
+ state->slots.size = ARRAY_CHUNK;
+ db_meta_lock(meta_pid_to_fixed_tab, LCK_READ);
+ ret = db_get_element_array(meta_pid_to_fixed_tab,
+ pid,
+ 2,
+ state->slots.arr,
+ &state->slots.size);
+ db_meta_unlock(meta_pid_to_fixed_tab, LCK_READ);
+
+ if (ret == DB_ERROR_BADKEY) {
+ /* Done */
+ state->progress = ErtsDbProcCleanupProgressDone;
+ state->op = ErtsDbProcCleanupOpDone;
+ break;
+ } else if (ret != DB_ERROR_NONE) {
+ ERTS_DB_INTERNAL_ERROR("Inconsistent ets fix table metadata");
+ }
+
+ state->slots.ix = 0;
+ state->slots.clean_ix = 0;
+ state->op = ErtsDbProcCleanupOpDeleteFixations;
+ /* Fall through */
+
+ case ErtsDbProcCleanupOpDeleteFixations:
+
+ while (state->slots.ix < state->slots.size) {
+ DbTable *tb = NULL;
+ Sint ix = unsigned_val(state->slots.arr[state->slots.ix]);
+ meta_main_tab_lock(ix);
+ if (IS_SLOT_ALIVE(ix)) {
+ tb = db_ref(meta_main_tab[ix].u.tb);
+ ASSERT(tb);
+ }
+ meta_main_tab_unlock(ix);
+ if (tb) {
+ int reds;
+ DbFixation** pp;
+
+ db_lock_take_over_ref(tb, LCK_WRITE_REC);
+ #ifdef ERTS_SMP
+ erts_smp_mtx_lock(&tb->common.fixlock);
+ #endif
+ reds = 10;
+
+ for (pp = &tb->common.fixations; *pp != NULL;
+ pp = &(*pp)->next) {
+ if ((*pp)->pid == pid) {
+ DbFixation* fix = *pp;
+ erts_refc_add(&tb->common.fixref,-fix->counter,0);
+ *pp = fix->next;
+ erts_db_free(ERTS_ALC_T_DB_FIXATION,
+ tb, fix, sizeof(DbFixation));
+ ERTS_ETS_MISC_MEM_ADD(-sizeof(DbFixation));
+ break;
+ }
+ }
+ #ifdef ERTS_SMP
+ erts_smp_mtx_unlock(&tb->common.fixlock);
+ #endif
+ if (!IS_FIXED(tb) && IS_HASH_TABLE(tb->common.status)) {
+ db_unfix_table_hash(&(tb->hash));
+ reds += 40;
+ }
+ db_unlock(tb, LCK_WRITE_REC);
+ BUMP_REDS(c_p, reds);
+ }
+ state->slots.ix++;
+ if (ERTS_BIF_REDS_LEFT(c_p) <= 0)
+ goto yield;
+ }
+
+ proc_exit_cleanup_fixations_meta_data(pid, state);
+ state->op = ErtsDbProcCleanupOpGetFixations;
+ break;
+
+ case ErtsDbProcCleanupOpDone:
+
+ if (state != &default_state)
+ erts_free(ERTS_ALC_T_DB_PROC_CLEANUP, state);
+ c_p->u.exit_data = NULL;
+ return 0;
+
+ default:
+ ERTS_DB_INTERNAL_ERROR("Bad internal state");
+ }
+ }
+
+ yield:
+
+ switch (state->progress) {
+ case ErtsDbProcCleanupProgressTables:
+ proc_exit_cleanup_tables_meta_data(pid, state);
+ break;
+ case ErtsDbProcCleanupProgressFixations:
+ proc_exit_cleanup_fixations_meta_data(pid, state);
+ break;
+ default:
+ break;
+ }
+
+ ASSERT(c_p->u.exit_data == (void *) state
+ || state == &default_state);
+
+ if (state == &default_state) {
+ c_p->u.exit_data = erts_alloc(ERTS_ALC_T_DB_PROC_CLEANUP,
+ sizeof(ErtsDbProcCleanupState));
+ sys_memcpy(c_p->u.exit_data,
+ (void*) state,
+ sizeof(ErtsDbProcCleanupState));
+ }
+
+ return !0;
+}
+
+/* SMP note: the table only needs to be LCK_READ locked */
+static void fix_table_locked(Process* p, DbTable* tb)
+{
+ DbFixation *fix;
+ Eterm meta_tuple[3];
+
+#ifdef ERTS_SMP
+ erts_smp_mtx_lock(&tb->common.fixlock);
+#endif
+ erts_refc_inc(&tb->common.fixref,1);
+ fix = tb->common.fixations;
+ if (fix == NULL) {
+ get_now(&(tb->common.megasec),
+ &(tb->common.sec),
+ &(tb->common.microsec));
+ }
+ else {
+ for (; fix != NULL; fix = fix->next) {
+ if (fix->pid == p->id) {
+ ++(fix->counter);
+#ifdef ERTS_SMP
+ erts_smp_mtx_unlock(&tb->common.fixlock);
+#endif
+ return;
+ }
+ }
+ }
+ fix = (DbFixation *) erts_db_alloc(ERTS_ALC_T_DB_FIXATION,
+ tb, sizeof(DbFixation));
+ ERTS_ETS_MISC_MEM_ADD(sizeof(DbFixation));
+ fix->pid = p->id;
+ fix->counter = 1;
+ fix->next = tb->common.fixations;
+ tb->common.fixations = fix;
+#ifdef ERTS_SMP
+ erts_smp_mtx_unlock(&tb->common.fixlock);
+#endif
+ p->flags |= F_USING_DB;
+ db_meta_lock(meta_pid_to_fixed_tab, LCK_WRITE_REC);
+ if (db_put_hash(meta_pid_to_fixed_tab,
+ TUPLE2(meta_tuple, p->id, make_small(tb->common.slot)),
+ 0) != DB_ERROR_NONE) {
+ erl_exit(1,"Could not insert ets metadata in safe_fixtable.");
+ }
+ db_meta_unlock(meta_pid_to_fixed_tab, LCK_WRITE_REC);
+}
+
+/* SMP note: May re-lock table
+*/
+static void unfix_table_locked(Process* p, DbTable* tb,
+ db_lock_kind_t* kind_p)
+{
+ DbFixation** pp;
+
+#ifdef ERTS_SMP
+ erts_smp_mtx_lock(&tb->common.fixlock);
+#endif
+ for (pp = &tb->common.fixations; *pp != NULL; pp = &(*pp)->next) {
+ if ((*pp)->pid == p->id) {
+ DbFixation* fix = *pp;
+ erts_refc_dec(&tb->common.fixref,0);
+ --(fix->counter);
+ ASSERT(fix->counter >= 0);
+ if (fix->counter > 0) {
+ break;
+ }
+ *pp = fix->next;
+#ifdef ERTS_SMP
+ erts_smp_mtx_unlock(&tb->common.fixlock);
+#endif
+ db_meta_lock(meta_pid_to_fixed_tab, LCK_WRITE_REC);
+ db_erase_bag_exact2(meta_pid_to_fixed_tab,
+ p->id, make_small(tb->common.slot));
+ db_meta_unlock(meta_pid_to_fixed_tab, LCK_WRITE_REC);
+ erts_db_free(ERTS_ALC_T_DB_FIXATION,
+ tb, (void *) fix, sizeof(DbFixation));
+ ERTS_ETS_MISC_MEM_ADD(-sizeof(DbFixation));
+ goto unlocked;
+ }
+ }
+#ifdef ERTS_SMP
+ erts_smp_mtx_unlock(&tb->common.fixlock);
+#endif
+unlocked:
+
+ if (!IS_FIXED(tb) && IS_HASH_TABLE(tb->common.status)
+ && erts_smp_atomic_read(&tb->hash.fixdel) != (long)NULL) {
+#ifdef ERTS_SMP
+ if (*kind_p == LCK_READ && tb->common.is_thread_safe) {
+ /* Must have write lock while purging pseudo-deleted (OTP-8166) */
+ erts_smp_rwmtx_runlock(&tb->common.rwlock);
+ erts_smp_rwmtx_rwlock(&tb->common.rwlock);
+ *kind_p = LCK_WRITE;
+ if (tb->common.status & DB_DELETE) return;
+ }
+#endif
+ db_unfix_table_hash(&(tb->hash));
+ }
+}
+
+/* Assume that tb is WRITE locked */
+static void free_fixations_locked(DbTable *tb)
+{
+ DbFixation *fix;
+ DbFixation *next_fix;
+
+ fix = tb->common.fixations;
+ while (fix != NULL) {
+ next_fix = fix->next;
+ db_meta_lock(meta_pid_to_fixed_tab, LCK_WRITE_REC);
+ db_erase_bag_exact2(meta_pid_to_fixed_tab,
+ fix->pid,
+ make_small(tb->common.slot));
+ db_meta_unlock(meta_pid_to_fixed_tab, LCK_WRITE_REC);
+ erts_db_free(ERTS_ALC_T_DB_FIXATION,
+ tb, (void *) fix, sizeof(DbFixation));
+ ERTS_ETS_MISC_MEM_ADD(-sizeof(DbFixation));
+
+ fix = next_fix;
+ }
+ tb->common.fixations = NULL;
+}
+
+static void set_heir(Process* me, DbTable* tb, Eterm heir, Eterm heir_data)
+{
+ tb->common.heir = heir;
+ if (heir == am_none) {
+ return;
+ }
+ if (heir == me->id) {
+ tb->common.heir_started = me->started;
+ }
+ else {
+ Process* heir_proc= erts_pid2proc_opt(me, ERTS_PROC_LOCK_MAIN, heir,
+ 0, ERTS_P2P_FLG_SMP_INC_REFC);
+ if (heir_proc != NULL) {
+ tb->common.heir_started = heir_proc->started;
+ erts_smp_proc_dec_refc(heir_proc);
+ } else {
+ tb->common.heir = am_none;
+ }
+ }
+
+ if (!is_immed(heir_data)) {
+ Eterm tmp[2];
+ /* Make a dummy 1-tuple around data to use db_get_term() */
+ heir_data = (Eterm) db_get_term(&tb->common, NULL, 0,
+ TUPLE1(tmp,heir_data));
+ ASSERT(!is_immed(heir_data));
+ }
+ tb->common.heir_data = heir_data;
+}
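+
+/*
+** Illustrative usage (hypothetical pid): a heir is declared at table
+** creation or via ets:setopts/2 (see ets_setopts_2() above), both of
+** which end up here:
+**
+**     ets:setopts(Tab, {heir, HeirPid, HeirData})
+**
+** If the owner later dies, give_away_to_heir() above hands the table over
+** and HeirPid receives {'ETS-TRANSFER', Tab, OldOwnerPid, HeirData}.
+*/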
+
+static void free_heir_data(DbTable* tb)
+{
+ if (tb->common.heir != am_none && !is_immed(tb->common.heir_data)) {
+ DbTerm* p = (DbTerm*) tb->common.heir_data;
+ db_free_term_data(p);
+ erts_db_free(ERTS_ALC_T_DB_TERM, tb, (void *)p,
+ sizeof(DbTerm) + (p->size-1)*sizeof(Eterm));
+ }
+ #ifdef DEBUG
+ tb->common.heir_data = am_undefined;
+ #endif
+}
+
+static BIF_RETTYPE ets_delete_trap(Process *p, Eterm cont)
+{
+ int trap;
+ Eterm* ptr = big_val(cont);
+ DbTable *tb = (DbTable *) ptr[1];
+
+ ASSERT(*ptr == make_pos_bignum_header(1));
+
+ db_lock(tb, LCK_WRITE);
+ trap = free_table_cont(p, tb, 0, 1);
+ db_unlock(tb, LCK_WRITE);
+
+ if (trap) {
+ BIF_TRAP1(&ets_delete_continue_exp, p, cont);
+ }
+ else {
+ BIF_RET(am_true);
+ }
+}
+
+
+/*
+ * free_table_cont() returns 0 when done and !0 when more work is needed.
+ */
+static int free_table_cont(Process *p,
+ DbTable *tb,
+ int first,
+ int clean_meta_tab)
+{
+ Eterm result;
+
+#ifdef HARDDEBUG
+ if (!first) {
+ erts_fprintf(stderr,"ets: free_table_cont %T (continue)\r\n",
+ tb->common.id);
+ }
+#endif
+
+ result = tb->common.meth->db_free_table_continue(tb);
+
+ if (result == 0) {
+#ifdef HARDDEBUG
+ erts_fprintf(stderr,"ets: free_table_cont %T (continue begin)\r\n",
+ tb->common.id);
+#endif
+ /* More work to be done. Let other processes work and call us again. */
+ BUMP_ALL_REDS(p);
+ return !0;
+ }
+ else {
+#ifdef HARDDEBUG
+ erts_fprintf(stderr,"ets: free_table_cont %T (continue end)\r\n",
+ tb->common.id);
+#endif
+ /* Completely done - we will not get called again. */
+ meta_main_tab_lock(tb->common.slot);
+ free_slot(tb->common.slot);
+ meta_main_tab_unlock(tb->common.slot);
+
+ if (clean_meta_tab) {
+ db_meta_lock(meta_pid_to_tab, LCK_WRITE_REC);
+ db_erase_bag_exact2(meta_pid_to_tab,tb->common.owner,
+ make_small(tb->common.slot));
+ db_meta_unlock(meta_pid_to_tab, LCK_WRITE_REC);
+ }
+ db_unref(tb);
+ BUMP_REDS(p, 100);
+ return 0;
+ }
+}
+
+static Eterm table_info(Process* p, DbTable* tb, Eterm What)
+{
+ Eterm ret = THE_NON_VALUE;
+
+ if (What == am_size) {
+ ret = make_small(erts_smp_atomic_read(&tb->common.nitems));
+ } else if (What == am_type) {
+ if (tb->common.status & DB_SET) {
+ ret = am_set;
+ } else if (tb->common.status & DB_DUPLICATE_BAG) {
+ ret = am_duplicate_bag;
+ } else if (tb->common.status & DB_ORDERED_SET) {
+ ret = am_ordered_set;
+ } else { /*TT*/
+ ASSERT(tb->common.status & DB_BAG);
+ ret = am_bag;
+ }
+ } else if (What == am_memory) {
+ Uint words = (Uint) ((erts_smp_atomic_read(&tb->common.memory_size)
+ + sizeof(Uint)
+ - 1)
+ / sizeof(Uint));
+ ret = erts_make_integer(words, p);
+ } else if (What == am_owner) {
+ ret = tb->common.owner;
+ } else if (What == am_heir) {
+ ret = tb->common.heir;
+ } else if (What == am_protection) {
+ if (tb->common.status & DB_PRIVATE)
+ ret = am_private;
+ else if (tb->common.status & DB_PROTECTED)
+ ret = am_protected;
+ else if (tb->common.status & DB_PUBLIC)
+ ret = am_public;
+ } else if (What == am_name) {
+ ret = tb->common.the_name;
+ } else if (What == am_keypos) {
+ ret = make_small(tb->common.keypos);
+ } else if (What == am_node) {
+ ret = erts_this_dist_entry->sysname;
+ } else if (What == am_named_table) {
+ ret = is_atom(tb->common.id) ? am_true : am_false;
+ /*
+ * For debugging purposes
+ */
+ } else if (What == am_data) {
+ print_table(ERTS_PRINT_STDOUT, NULL, 1, tb);
+ ret = am_true;
+ } else if (What == am_atom_put("fixed",5)) {
+ if (IS_FIXED(tb))
+ ret = am_true;
+ else
+ ret = am_false;
+ } else if (What == am_atom_put("kept_objects",12)) {
+ ret = make_small(IS_HASH_TABLE(tb->common.status)
+ ? db_kept_items_hash(&tb->hash) : 0);
+ } else if (What == am_atom_put("safe_fixed",10)) {
+#ifdef ERTS_SMP
+ erts_smp_mtx_lock(&tb->common.fixlock);
+#endif
+ if (IS_FIXED(tb)) {
+ Uint need;
+ Eterm *hp;
+ Eterm tpl, lst;
+ DbFixation *fix;
+ need = 7;
+ for (fix = tb->common.fixations; fix != NULL; fix = fix->next) {
+ need += 5;
+ }
+ hp = HAlloc(p, need);
+ lst = NIL;
+ for (fix = tb->common.fixations; fix != NULL; fix = fix->next) {
+ tpl = TUPLE2(hp,fix->pid,make_small(fix->counter));
+ hp += 3;
+ lst = CONS(hp,tpl,lst);
+ hp += 2;
+ }
+ tpl = TUPLE3(hp,
+ make_small(tb->common.megasec),
+ make_small(tb->common.sec),
+ make_small(tb->common.microsec));
+ hp += 4;
+ ret = TUPLE2(hp, tpl, lst);
+ } else {
+ ret = am_false;
+ }
+#ifdef ERTS_SMP
+ erts_smp_mtx_unlock(&tb->common.fixlock);
+#endif
+ } else if (What == am_atom_put("stats",5)) {
+ if (IS_HASH_TABLE(tb->common.status)) {
+ FloatDef f;
+ DbHashStats stats;
+ Eterm avg, std_dev_real, std_dev_exp;
+ Eterm* hp;
+
+ db_calc_stats_hash(&tb->hash, &stats);
+ hp = HAlloc(p, 1 + 6 + FLOAT_SIZE_OBJECT*3);
+ f.fd = stats.avg_chain_len;
+ avg = make_float(hp);
+ PUT_DOUBLE(f, hp);
+ hp += FLOAT_SIZE_OBJECT;
+
+ f.fd = stats.std_dev_chain_len;
+ std_dev_real = make_float(hp);
+ PUT_DOUBLE(f, hp);
+ hp += FLOAT_SIZE_OBJECT;
+
+ f.fd = stats.std_dev_expected;
+ std_dev_exp = make_float(hp);
+ PUT_DOUBLE(f, hp);
+ hp += FLOAT_SIZE_OBJECT;
+ ret = TUPLE6(hp, make_small(erts_smp_atomic_read(&tb->hash.nactive)),
+ avg, std_dev_real, std_dev_exp,
+ make_small(stats.min_chain_len),
+ make_small(stats.max_chain_len));
+ }
+ else {
+ ret = am_false;
+ }
+ }
+ return ret;
+}
+
+static void print_table(int to, void *to_arg, int show, DbTable* tb)
+{
+ erts_print(to, to_arg, "Table: %T\n", tb->common.id);
+ erts_print(to, to_arg, "Name: %T\n", tb->common.the_name);
+
+ tb->common.meth->db_print(to, to_arg, show, tb);
+
+ erts_print(to, to_arg, "Objects: %d\n", (int)erts_smp_atomic_read(&tb->common.nitems));
+ erts_print(to, to_arg, "Words: %bpu\n",
+ (Uint) ((erts_smp_atomic_read(&tb->common.memory_size)
+ + sizeof(Uint)
+ - 1)
+ / sizeof(Uint)));
+}
+
+void db_info(int to, void *to_arg, int show) /* Called by break handler */
+{
+ int i;
+ for (i=0; i < db_max_tabs; i++)
+ if (IS_SLOT_ALIVE(i)) {
+ erts_print(to, to_arg, "=ets:%T\n", meta_main_tab[i].u.tb->common.owner);
+ erts_print(to, to_arg, "Slot: %d\n", i);
+ print_table(to, to_arg, show, meta_main_tab[i].u.tb);
+ }
+#ifdef DEBUG
+ erts_print(to, to_arg, "=internal_ets: Process to table index\n");
+ print_table(to, to_arg, show, meta_pid_to_tab);
+ erts_print(to, to_arg, "=internal_ets: Process to fixation index\n");
+ print_table(to, to_arg, show, meta_pid_to_fixed_tab);
+#endif
+}
+
+Uint
+erts_get_ets_misc_mem_size(void)
+{
+ /* Memory not allocated in ets_alloc */
+ return (Uint) erts_smp_atomic_read(&erts_ets_misc_mem_size);
+}
+
+/* SMP Note: May only be used when system is locked */
+void
+erts_db_foreach_table(void (*func)(DbTable *, void *), void *arg)
+{
+ int i, j;
+ j = 0;
+ for(i = 0; (i < db_max_tabs && j < meta_main_tab_cnt); i++) {
+ if (IS_SLOT_ALIVE(i)) {
+ j++;
+ (*func)(meta_main_tab[i].u.tb, arg);
+ }
+ }
+ ASSERT(j == meta_main_tab_cnt);
+}
+
+/* SMP Note: May only be used when system is locked */
+void
+erts_db_foreach_offheap(DbTable *tb,
+ void (*func)(ErlOffHeap *, void *),
+ void *arg)
+{
+ tb->common.meth->db_foreach_offheap(tb, func, arg);
+}
+
+/*
+ * For testing of meta tables only.
+ *
+ * Given a name atom (as returned from ets:new/2), return a list of 'cnt'
+ * number of other names that will hash to the same bucket in meta_name_tab.
+ *
+ * WARNING: Will bloat the atom table!
+ */
+Eterm
+erts_ets_colliding_names(Process* p, Eterm name, Uint cnt)
+{
+ Eterm list = NIL;
+ Eterm* hp = HAlloc(p,cnt*2);
+ Uint index = atom_val(name) & meta_name_tab_mask;
+
+ while (cnt) {
+ if (index != atom_val(name)) {
+ while (index >= atom_table_size()) {
+ char tmp[20];
+ erts_snprintf(tmp, sizeof(tmp), "am%x", atom_table_size());
+ am_atom_put(tmp,strlen(tmp));
+ }
+ list = CONS(hp, make_atom(index), list);
+ hp += 2;
+ --cnt;
+ }
+ index += meta_name_tab_mask + 1;
+ }
+ return list;
+}
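+
+/*
+** For exposition: two atoms collide in meta_name_tab exactly when their
+** atom indices are congruent modulo (meta_name_tab_mask + 1). E.g. with a
+** (hypothetical) mask of 0x7ff, the atoms with indices I, I + 0x800,
+** I + 0x1000, ... share a bucket; the loop above fabricates atoms ("am%x")
+** until 'cnt' such names exist.
+*/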
+
+
+#ifdef HARDDEBUG /* Here comes some debug functions */
+
+void db_check_tables(void)
+{
+#ifdef ERTS_SMP
+ return;
+#else
+ int i;
+
+ for (i = 0; i < db_max_tabs; i++) {
+ if (IS_SLOT_ALIVE(i)) {
+ DbTable* tb = meta_main_tab[i].t;
+	    DbTable* tb = meta_main_tab[i].u.tb;
+ }
+ }
+#endif
+}
+
+#endif /* HARDDEBUG */
diff --git a/erts/emulator/beam/erl_db.h b/erts/emulator/beam/erl_db.h
new file mode 100644
index 0000000000..7da28fad29
--- /dev/null
+++ b/erts/emulator/beam/erl_db.h
@@ -0,0 +1,247 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 1996-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * This file now contains only the definitions needed for the
+ * meta table.
+ *
+ */
+
+#ifndef __DB_H__
+#define __DB_H__
+
+#include "sys.h"
+#include "bif.h"
+
+#include "erl_db_util.h" /* Flags */
+#include "erl_db_hash.h" /* DbTableHash */
+#include "erl_db_tree.h" /* DbTableTree */
+/*TT*/
+
+Uint erts_get_ets_misc_mem_size(void);
+
+/*
+ * The structure for a database table. NB: this is only
+ * of interest in erl_db.c.
+ */
+union db_table {
+ DbTableCommon common; /* Any type of db table */
+ DbTableHash hash; /* Linear hash array specific data */
+ DbTableTree tree; /* AVL tree specific data */
+ /*TT*/
+};
+
+#define DB_DEF_MAX_TABS 2053	/* Superseded by environment variable
+ "ERL_MAX_ETS_TABLES" */
+#define ERL_MAX_ETS_TABLES_ENV "ERL_MAX_ETS_TABLES"
+
+void init_db(void);
+int erts_db_process_exiting(Process *, ErtsProcLocks);
+void db_info(int, void *, int);
+void erts_db_foreach_table(void (*)(DbTable *, void *), void *);
+void erts_db_foreach_offheap(DbTable *,
+ void (*func)(ErlOffHeap *, void *),
+ void *);
+
+extern int user_requested_db_max_tabs; /* set in erl_init */
+extern int erts_ets_realloc_always_moves; /* set in erl_init */
+extern Export ets_select_delete_continue_exp;
+extern Export ets_select_count_continue_exp;
+extern Export ets_select_continue_exp;
+extern erts_smp_atomic_t erts_ets_misc_mem_size;
+
+Eterm erts_ets_colliding_names(Process*, Eterm name, Uint cnt);
+
+#endif
+
+#if defined(ERTS_WANT_DB_INTERNAL__) && !defined(ERTS_HAVE_DB_INTERNAL__)
+#define ERTS_HAVE_DB_INTERNAL__
+
+#include "erl_alloc.h"
+
+/*
+ * _fnf : Failure Not Fatal (same as for erts_alloc/erts_realloc/erts_free)
+ * _nt : No Table (i.e. memory not associated with a specific table)
+ */
+
+#define ERTS_DB_ALC_MEM_UPDATE_(TAB, FREE_SZ, ALLOC_SZ) \
+do { \
+ long sz__ = ((long) (ALLOC_SZ)) - ((long) (FREE_SZ)); \
+ ASSERT((TAB)); \
+ erts_smp_atomic_add(&(TAB)->common.memory_size, sz__); \
+} while (0)
+
+#define ERTS_ETS_MISC_MEM_ADD(SZ) \
+ erts_smp_atomic_add(&erts_ets_misc_mem_size, (SZ));
+
+ERTS_GLB_INLINE void *erts_db_alloc(ErtsAlcType_t type,
+ DbTable *tab,
+ Uint size);
+ERTS_GLB_INLINE void *erts_db_alloc_fnf(ErtsAlcType_t type,
+ DbTable *tab,
+ Uint size);
+ERTS_GLB_INLINE void *erts_db_alloc_nt(ErtsAlcType_t type, Uint size);
+ERTS_GLB_INLINE void *erts_db_alloc_fnf_nt(ErtsAlcType_t type, Uint size);
+
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+
+ERTS_GLB_INLINE void *
+erts_db_alloc(ErtsAlcType_t type, DbTable *tab, Uint size)
+{
+ void *res = erts_alloc(type, size);
+ ERTS_DB_ALC_MEM_UPDATE_(tab, 0, size);
+ return res;
+}
+
+ERTS_GLB_INLINE void *
+erts_db_alloc_fnf(ErtsAlcType_t type, DbTable *tab, Uint size)
+{
+ void *res = erts_alloc_fnf(type, size);
+ if (!res)
+ return NULL;
+ ERTS_DB_ALC_MEM_UPDATE_(tab, 0, size);
+ return res;
+}
+
+ERTS_GLB_INLINE void *
+erts_db_alloc_nt(ErtsAlcType_t type, Uint size)
+{
+ void *res = erts_alloc(type, size);
+ return res;
+}
+
+ERTS_GLB_INLINE void *
+erts_db_alloc_fnf_nt(ErtsAlcType_t type, Uint size)
+{
+ void *res = erts_alloc_fnf(type, size);
+ if (!res)
+ return NULL;
+ return res;
+}
+
+#endif /* #if ERTS_GLB_INLINE_INCL_FUNC_DEF */
+
+ERTS_GLB_INLINE void *erts_db_realloc(ErtsAlcType_t type,
+ DbTable *tab,
+ void *ptr,
+ Uint old_size,
+ Uint size);
+ERTS_GLB_INLINE void *erts_db_realloc_fnf(ErtsAlcType_t type,
+ DbTable *tab,
+ void *ptr,
+ Uint old_size,
+ Uint size);
+ERTS_GLB_INLINE void *erts_db_realloc_nt(ErtsAlcType_t type,
+ void *ptr,
+ Uint old_size,
+ Uint size);
+ERTS_GLB_INLINE void *erts_db_realloc_fnf_nt(ErtsAlcType_t type,
+ void *ptr,
+ Uint old_size,
+ Uint size);
+
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+
+ERTS_GLB_INLINE void *
+erts_db_realloc(ErtsAlcType_t type, DbTable *tab, void *ptr,
+ Uint old_size, Uint size)
+{
+ void *res;
+ ASSERT(!ptr || old_size == ERTS_ALC_DBG_BLK_SZ(ptr));
+ res = erts_realloc(type, ptr, size);
+ ERTS_DB_ALC_MEM_UPDATE_(tab, old_size, size);
+ return res;
+}
+
+ERTS_GLB_INLINE void *
+erts_db_realloc_fnf(ErtsAlcType_t type, DbTable *tab, void *ptr,
+ Uint old_size, Uint size)
+{
+ void *res;
+ ASSERT(!ptr || old_size == ERTS_ALC_DBG_BLK_SZ(ptr));
+ res = erts_realloc_fnf(type, ptr, size);
+ if (!res)
+ return NULL;
+ ERTS_DB_ALC_MEM_UPDATE_(tab, old_size, size);
+ return res;
+}
+
+ERTS_GLB_INLINE void *
+erts_db_realloc_nt(ErtsAlcType_t type, void *ptr,
+ Uint old_size, Uint size)
+{
+ void *res;
+ ASSERT(!ptr || old_size == ERTS_ALC_DBG_BLK_SZ(ptr));
+ res = erts_realloc(type, ptr, size);
+ return res;
+}
+
+ERTS_GLB_INLINE void *
+erts_db_realloc_fnf_nt(ErtsAlcType_t type, void *ptr,
+ Uint old_size, Uint size)
+{
+ void *res;
+ ASSERT(!ptr || old_size == ERTS_ALC_DBG_BLK_SZ(ptr));
+ res = erts_realloc_fnf(type, ptr, size);
+ if (!res)
+ return NULL;
+ return res;
+}
+
+#endif /* #if ERTS_GLB_INLINE_INCL_FUNC_DEF */
+
+ERTS_GLB_INLINE void erts_db_free(ErtsAlcType_t type,
+ DbTable *tab,
+ void *ptr,
+ Uint size);
+
+ERTS_GLB_INLINE void erts_db_free_nt(ErtsAlcType_t type,
+ void *ptr,
+ Uint size);
+
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+
+ERTS_GLB_INLINE void
+erts_db_free(ErtsAlcType_t type, DbTable *tab, void *ptr, Uint size)
+{
+ ASSERT(ptr != 0);
+ ASSERT(size == ERTS_ALC_DBG_BLK_SZ(ptr));
+ ERTS_DB_ALC_MEM_UPDATE_(tab, size, 0);
+
+ ASSERT(((void *) tab) != ptr
+ || erts_smp_atomic_read(&tab->common.memory_size) == 0);
+
+ erts_free(type, ptr);
+}
+
+ERTS_GLB_INLINE void
+erts_db_free_nt(ErtsAlcType_t type, void *ptr, Uint size)
+{
+ ASSERT(ptr != 0);
+ ASSERT(size == ERTS_ALC_DBG_BLK_SZ(ptr));
+
+ erts_free(type, ptr);
+}
+
+#endif /* #if ERTS_GLB_INLINE_INCL_FUNC_DEF */
+
+#undef ERTS_DB_ALC_MEM_UPDATE_
+
+#endif /* #if defined(ERTS_WANT_DB_INTERNAL__) && !defined(ERTS_HAVE_DB_INTERNAL__) */
+
diff --git a/erts/emulator/beam/erl_db_hash.c b/erts/emulator/beam/erl_db_hash.c
new file mode 100644
index 0000000000..dea45053df
--- /dev/null
+++ b/erts/emulator/beam/erl_db_hash.c
@@ -0,0 +1,2868 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 1998-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+** Implementation of unordered ETS tables.
+** The tables are implemented as linear dynamic hash tables.
+*/
+
+/* SMP:
+** The hash table supports two different locking "modes",
+** coarse grained and fine grained locking.
+**
+** Coarse grained locking relies entirely on the caller (erl_db.c) to obtain
+** the right kind of lock on the entire table depending on operation (reading
+** or writing). No further locking is then done by the table itself.
+**
+** Fine grained locking is supported by this code to allow concurrent updates
+** (and reading) to different parts of the table. This works by keeping one
+** rw-mtx for every N'th bucket. Even dynamic growing and shrinking by
+** rehashing buckets can be done without exclusive table lock.
+**
+** A table will support fine grained locking if it is created with flag
+** DB_FINE_LOCKED set. The table variable is_thread_safe will then indicate
+** if operations need to obtain fine grained locks or not. Some operations
+** will, for example, always use an exclusive table lock to guarantee
+** a higher level of atomicity.
+*/
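+
+/* A minimal sketch (not compiled) of a read operation following the
+** locking protocol above, using helpers defined further down in this
+** file. Under coarse grained locking RLOCK_HASH() returns NULL, as the
+** caller (erl_db.c) already holds the table lock; under fine grained
+** locking it returns the rw-mtx guarding the bucket.
+*/
+#if 0
+static void read_op_sketch(DbTableHash* tb, Eterm key) /* hypothetical */
+{
+    HashValue hval = MAKE_HASH(key);
+    erts_smp_rwmtx_t* lck = RLOCK_HASH(tb, hval); /* NULL in coarse mode */
+    Uint ix = hash_to_ix(tb, hval);               /* requires lock held */
+    HashDbTerm* b = BUCKET(tb, ix);
+    (void) b; /* ... walk the chain 'b' here ... */
+    RUNLOCK_HASH(lck);                            /* no-op when lck == NULL */
+}
+#endif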
+
+/* FIXATION:
+** Fixating the table, by ets:safe_fixtable or as done by select-operations,
+** guarantees two things in current implementation.
+** (1) Keys will not *totally* disappear from the table. A key can thus be used
+** as an iterator to find the next key in iteration sequence. Note however
+** that this does not mean that (pointers to) table objects are guaranteed
+** to be maintained while the table is fixated. A BAG or DBAG may actually
+** remove objects as long as there is at least one object left in the table
+** with the same key (alive or pseudo-deleted).
+** (2) Objects will not be moved between buckets due to table grow/shrink.
+** This will guarantee that iterations do not miss keys or get double-hits.
+**
+** With fine grained locking, a concurrent thread can fixate the table at any
+** time. A "dangerous" operation (delete or move) therefore needs to check
+** if the table is fixated while write-locking the bucket.
+*/
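+
+/* A minimal sketch (not compiled) of the "dangerous" delete described
+** above; the real code is in db_erase_hash() further down, and the
+** variables are as in that function. With the bucket write-locked we
+** re-check fixation: if the table is fixated we only pseudo-delete,
+** keeping the term as an iteration anchor, otherwise we unlink it.
+*/
+#if 0
+    lck = WLOCK_HASH(tb, hval);
+    if (IS_FIXED(tb)) {
+        add_fixed_deletion(tb, ix); /* remember slot for db_unfix_table_hash */
+        b->hvalue = INVALID_HASH;   /* pseudo-deleted; key still findable */
+    } else {
+        *bp = b->next;              /* physically unlink */
+        free_term(tb, b);
+    }
+    WUNLOCK_HASH(lck);
+#endif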
+
+/*
+#ifdef DEBUG
+#define HARDDEBUG 1
+#endif
+*/
+
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+
+#include "sys.h"
+#include "erl_vm.h"
+#include "global.h"
+#include "erl_process.h"
+#include "error.h"
+#define ERTS_WANT_DB_INTERNAL__
+#include "erl_db.h"
+#include "bif.h"
+#include "big.h"
+#include "export.h"
+#include "erl_binary.h"
+
+#include "erl_db_hash.h"
+
+#ifdef MYDEBUG /* Will fail test case ets_SUITE:memory */
+# define IF_DEBUG(x) x
+# define MY_ASSERT(x) ASSERT(x)
+#else
+# define IF_DEBUG(x)
+# define MY_ASSERT(x)
+#endif
+
+/*
+ * The following symbols can be manipulated to "tune" the linear hash array
+ */
+#define CHAIN_LEN 6 /* Medium bucket chain len */
+
+/* Number of slots per segment */
+#define SEGSZ_EXP 8
+#define SEGSZ (1 << SEGSZ_EXP)
+#define SEGSZ_MASK (SEGSZ-1)
+
+#define NSEG_1 2 /* Size of first segment table (must be at least 2) */
+#define NSEG_2 256 /* Size of second segment table */
+#define NSEG_INC 128 /* Number of segments to grow after that */
+
+#define SEGTAB(tb) ((struct segment**)erts_smp_atomic_read(&(tb)->segtab))
+#define NACTIVE(tb) ((int)erts_smp_atomic_read(&(tb)->nactive))
+#define NITEMS(tb) ((int)erts_smp_atomic_read(&(tb)->common.nitems))
+
+#define BUCKET(tb, i) SEGTAB(tb)[(i) >> SEGSZ_EXP]->buckets[(i) & SEGSZ_MASK]
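+
+/* Example: with SEGSZ_EXP == 8, logical bucket 517 is found in segment
+** SEGTAB(tb)[517 >> 8] == SEGTAB(tb)[2], at offset 517 & SEGSZ_MASK == 5.
+*/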
+
+/*
+ * When deleting a table, the number of records to delete.
+ * Approximate number, because we must delete entire buckets.
+ */
+#define DELETE_RECORD_LIMIT 10000
+
+/* Calculate slot index from hash value.
+** RLOCK_HASH or WLOCK_HASH must be done before.
+*/
+static ERTS_INLINE Uint hash_to_ix(DbTableHash* tb, HashValue hval)
+{
+ Uint mask = erts_smp_atomic_read(&tb->szm);
+ Uint ix = hval & mask;
+ if (ix >= erts_smp_atomic_read(&tb->nactive)) {
+ ix &= mask>>1;
+ ASSERT(ix < erts_smp_atomic_read(&tb->nactive));
+ }
+ return ix;
+}
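+
+/* Worked example of the linear-hash mapping above: with szm == 511 and
+** nactive == 300, a hash value with hval & 511 == 400 falls outside the
+** active range and is folded down with the smaller mask, 400 & 255 == 144
+** (and 144 < 300 as required), while hval & 511 == 200 is used as is.
+*/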
+
+/* Remember a slot containing a pseudo-deleted item (INVALID_HASH)
+*/
+static ERTS_INLINE void add_fixed_deletion(DbTableHash* tb, int ix)
+{
+ long was_next;
+ long exp_next;
+ FixedDeletion* fixd = (FixedDeletion*) erts_db_alloc(ERTS_ALC_T_DB_FIX_DEL,
+ (DbTable *) tb,
+ sizeof(FixedDeletion));
+ ERTS_ETS_MISC_MEM_ADD(sizeof(FixedDeletion));
+ fixd->slot = ix;
+ was_next = erts_smp_atomic_read(&tb->fixdel);
+ do { /* Lockless atomic insertion in linked list: */
+ exp_next = was_next;
+ fixd->next = (FixedDeletion*) exp_next;
+ was_next = erts_smp_atomic_cmpxchg(&tb->fixdel, (long)fixd, exp_next);
+ }while (was_next != exp_next);
+}
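+
+/* The retry loop above is the classic lock-free push: erts_smp_atomic_cmpxchg
+** returns the value actually found in tb->fixdel. If another thread pushed a
+** node in between, the returned value differs from exp_next; fixd->next is
+** then relinked to the new head and the exchange is retried until it succeeds.
+*/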
+
+
+#define MAX_HASH 0xEFFFFFFFUL
+#define INVALID_HASH 0xFFFFFFFFUL
+
+/* optimised version of make_hash (normal case? atomic key) */
+#define MAKE_HASH(term) \
+ ((is_atom(term) ? (atom_tab(atom_val(term))->slot.bucket.hvalue) : \
+ make_hash2(term)) % MAX_HASH)
+
+#ifdef ERTS_SMP
+# define DB_HASH_LOCK_MASK (DB_HASH_LOCK_CNT-1)
+# define GET_LOCK(tb,hval) (&(tb)->locks->lck_vec[(hval) & DB_HASH_LOCK_MASK].lck)
+
+/* Fine grained read lock */
+static ERTS_INLINE erts_smp_rwmtx_t* RLOCK_HASH(DbTableHash* tb, HashValue hval)
+{
+ if (tb->common.is_thread_safe) {
+ return NULL;
+ } else {
+ erts_smp_rwmtx_t* lck = GET_LOCK(tb,hval);
+ ASSERT(tb->common.type & DB_FINE_LOCKED);
+ erts_smp_rwmtx_rlock(lck);
+ return lck;
+ }
+}
+/* Fine grained write lock */
+static ERTS_INLINE erts_smp_rwmtx_t* WLOCK_HASH(DbTableHash* tb, HashValue hval)
+{
+ if (tb->common.is_thread_safe) {
+ return NULL;
+ } else {
+ erts_smp_rwmtx_t* lck = GET_LOCK(tb,hval);
+ ASSERT(tb->common.type & DB_FINE_LOCKED);
+ erts_smp_rwmtx_rwlock(lck);
+ return lck;
+ }
+}
+
+static ERTS_INLINE void RUNLOCK_HASH(erts_smp_rwmtx_t* lck)
+{
+ if (lck != NULL) {
+ erts_smp_rwmtx_runlock(lck);
+ }
+}
+
+static ERTS_INLINE void WUNLOCK_HASH(erts_smp_rwmtx_t* lck)
+{
+ if (lck != NULL) {
+ erts_smp_rwmtx_rwunlock(lck);
+ }
+}
+#else /* ERTS_SMP */
+# define RLOCK_HASH(tb,hval) NULL
+# define WLOCK_HASH(tb,hval) NULL
+# define RUNLOCK_HASH(lck) ((void)lck)
+# define WUNLOCK_HASH(lck) ((void)lck)
+#endif /* ERTS_SMP */
+
+
+#ifdef ERTS_ENABLE_LOCK_CHECK
+# define IFN_EXCL(tb,cmd) (((tb)->common.is_thread_safe) || (cmd))
+# define IS_HASH_RLOCKED(tb,hval) IFN_EXCL(tb,erts_smp_lc_rwmtx_is_rlocked(GET_LOCK(tb,hval)))
+# define IS_HASH_WLOCKED(tb,lck) IFN_EXCL(tb,erts_smp_lc_rwmtx_is_rwlocked(lck))
+# define IS_TAB_WLOCKED(tb) erts_smp_lc_rwmtx_is_rwlocked(&(tb)->common.rwlock)
+#else
+# define IS_HASH_RLOCKED(tb,hval) (1)
+# define IS_HASH_WLOCKED(tb,hval) (1)
+# define IS_TAB_WLOCKED(tb) (1)
+#endif
+
+
+/* Iteration helper
+** Returns "next" slot index or 0 if EOT reached.
+** Slot READ locks updated accordingly, unlocked if EOT.
+*/
+static ERTS_INLINE Sint next_slot(DbTableHash* tb, Uint ix,
+ erts_smp_rwmtx_t** lck_ptr)
+{
+#ifdef ERTS_SMP
+ ix += DB_HASH_LOCK_CNT;
+ if (ix < NACTIVE(tb)) return ix;
+ RUNLOCK_HASH(*lck_ptr);
+ ix = (ix + 1) & DB_HASH_LOCK_MASK;
+ if (ix != 0) *lck_ptr = RLOCK_HASH(tb,ix);
+ return ix;
+#else
+ return (++ix < NACTIVE(tb)) ? ix : 0;
+#endif
+}
+/* Same as next_slot but with WRITE locking */
+static ERTS_INLINE Sint next_slot_w(DbTableHash* tb, Uint ix,
+ erts_smp_rwmtx_t** lck_ptr)
+{
+#ifdef ERTS_SMP
+ ix += DB_HASH_LOCK_CNT;
+ if (ix < NACTIVE(tb)) return ix;
+ WUNLOCK_HASH(*lck_ptr);
+ ix = (ix + 1) & DB_HASH_LOCK_MASK;
+ if (ix != 0) *lck_ptr = WLOCK_HASH(tb,ix);
+ return ix;
+#else
+ return next_slot(tb,ix,lck_ptr);
+#endif
+}
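+
+/* Iteration order example (if DB_HASH_LOCK_CNT == 16 and nactive == 40):
+** starting from slot 0 the sequence is 0,16,32 under lock stripe 0, then
+** 1,17,33 under stripe 1, 2,18,34, and so on. Each rw-mtx is thus taken
+** and released once per pass instead of once per slot.
+*/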
+
+
+/*
+ * tplp is an untagged pointer to a tuple we know is large enough
+ * and dth is a pointer to a DbTableHash.
+ */
+#define GETKEY(dth, tplp) (*((tplp) + (dth)->common.keypos))
+
+/*
+ * Some special binary flags
+ */
+#define BIN_FLAG_ALL_OBJECTS BIN_FLAG_USR1
+
+/*
+ * Size calculations
+ */
+#define SIZ_OVERHEAD ((sizeof(HashDbTerm)/sizeof(Eterm)) - 1)
+#define SIZ_DBTERM(HDT) (SIZ_OVERHEAD + (HDT)->dbterm.size)
+
+/*
+ * Local types
+ */
+struct mp_prefound {
+ HashDbTerm** bucket;
+ int ix;
+};
+
+struct mp_info {
+ int all_objects; /* True if complete objects are always
+ * returned from the match_spec (can use
+ * copy_shallow on the return value) */
+ int something_can_match; /* The match_spec is not "impossible" */
+ int key_given;
+ struct mp_prefound dlists[10]; /* Default list of "pre-found" buckets */
+ struct mp_prefound* lists; /* Buckets to search if keys are given,
+ * = dlists initially */
+ unsigned num_lists; /* Number of elements in "lists",
+ * = 0 initially */
+ Binary *mp; /* The compiled match program */
+};
+
+/* A table segment */
+struct segment {
+ HashDbTerm* buckets[SEGSZ];
+#ifdef MYDEBUG
+ int is_ext_segment;
+#endif
+};
+
+/* A segment that also contains a segment table */
+struct ext_segment {
+ struct segment s; /* The segment itself. Must be first */
+
+ struct segment** prev_segtab; /* Used when table is shrinking */
+ int nsegs; /* Size of segtab */
+ struct segment* segtab[1]; /* The segment table */
+};
+#define SIZEOF_EXTSEG(NSEGS) \
+ (sizeof(struct ext_segment) - sizeof(struct segment*) + sizeof(struct segment*)*(NSEGS))
+
+#ifdef DEBUG
+# include <stddef.h> /* offsetof */
+# define EXTSEG(SEGTAB_PTR) \
+ ((struct ext_segment*) (((char*)(SEGTAB_PTR)) - offsetof(struct ext_segment,segtab)))
+#endif
+
+
+/* How the table segments relate to each other:
+
+ ext_segment: ext_segment: "plain" segment
+ #=================# #================# #=============#
+ | bucket[0] |<--+ +------->| bucket[256] | +->| bucket[512] |
+ | bucket[1] | | | | [257] | | | [513] |
+ : : | | : : | : :
+ | bucket[255] | | | | [511] | | | [767] |
+ |-----------------| | | |----------------| | #=============#
+ | prev_segtab=NULL| | | +--<---prev_segtab | |
+ | nsegs = 2 | | | | | nsegs = 256 | |
++->| segtab[0] -->-------+---|---|--<---segtab[0] |<-+ |
+| | segtab[1] -->-----------+---|--<---segtab[1] | | |
+| #=================# | | segtab[2] -->-----|--+ ext_segment:
+| | : : | #================#
++----------------<---------------+ | segtab[255] ->----|----->| bucket[255*256]|
+ #================# | | |
+ | : :
+ | |----------------|
+ +----<---prev_segtab |
+ : :
+*/
+
+
+/*
+** Forward decl's (static functions)
+*/
+static struct ext_segment* alloc_ext_seg(DbTableHash* tb, unsigned seg_ix,
+ struct segment** old_segtab);
+static int alloc_seg(DbTableHash *tb);
+static int free_seg(DbTableHash *tb, int free_records);
+static HashDbTerm* next(DbTableHash *tb, Uint *iptr, erts_smp_rwmtx_t** lck_ptr,
+ HashDbTerm *list);
+static HashDbTerm* search_list(DbTableHash* tb, Eterm key,
+ HashValue hval, HashDbTerm *list);
+static void shrink(DbTableHash* tb, int nactive);
+static void grow(DbTableHash* tb, int nactive);
+static void free_term(DbTableHash *tb, HashDbTerm* p);
+static Eterm put_term_list(Process* p, HashDbTerm* ptr1, HashDbTerm* ptr2);
+static HashDbTerm* get_term(DbTableHash* tb, HashDbTerm* old,
+ Eterm obj, HashValue hval);
+static int analyze_pattern(DbTableHash *tb, Eterm pattern,
+ struct mp_info *mpi);
+
+/*
+ * Method interface functions
+ */
+static int db_first_hash(Process *p,
+ DbTable *tbl,
+ Eterm *ret);
+
+static int db_next_hash(Process *p,
+ DbTable *tbl,
+ Eterm key,
+ Eterm *ret);
+
+static int db_member_hash(DbTable *tbl, Eterm key, Eterm *ret);
+
+static int db_get_element_hash(Process *p, DbTable *tbl,
+ Eterm key, int ndex, Eterm *ret);
+
+static int db_erase_object_hash(DbTable *tbl, Eterm object,Eterm *ret);
+
+static int db_slot_hash(Process *p, DbTable *tbl,
+ Eterm slot_term, Eterm *ret);
+
+static int db_select_chunk_hash(Process *p, DbTable *tbl,
+ Eterm pattern, Sint chunk_size,
+ int reverse, Eterm *ret);
+static int db_select_hash(Process *p, DbTable *tbl,
+ Eterm pattern, int reverse, Eterm *ret);
+static int db_select_count_hash(Process *p, DbTable *tbl,
+ Eterm pattern, Eterm *ret);
+static int db_select_delete_hash(Process *p, DbTable *tbl,
+ Eterm pattern, Eterm *ret);
+
+static int db_select_continue_hash(Process *p, DbTable *tbl,
+ Eterm continuation, Eterm *ret);
+
+static int db_select_count_continue_hash(Process *p, DbTable *tbl,
+ Eterm continuation, Eterm *ret);
+
+static int db_select_delete_continue_hash(Process *p, DbTable *tbl,
+ Eterm continuation, Eterm *ret);
+static void db_print_hash(int to,
+ void *to_arg,
+ int show,
+ DbTable *tbl);
+static int db_free_table_hash(DbTable *tbl);
+
+static int db_free_table_continue_hash(DbTable *tbl);
+
+
+static void db_foreach_offheap_hash(DbTable *,
+ void (*)(ErlOffHeap *, void *),
+ void *);
+
+static int db_delete_all_objects_hash(Process* p, DbTable* tbl);
+#ifdef HARDDEBUG
+static void db_check_table_hash(DbTableHash *tb);
+#endif
+static int db_lookup_dbterm_hash(DbTable *tbl, Eterm key, DbUpdateHandle* handle);
+static void db_finalize_dbterm_hash(DbUpdateHandle* handle);
+
+static ERTS_INLINE void try_shrink(DbTableHash* tb)
+{
+ int nactive = NACTIVE(tb);
+ if (nactive > SEGSZ && NITEMS(tb) < (nactive * CHAIN_LEN)
+ && !IS_FIXED(tb)) {
+ shrink(tb, nactive);
+ }
+}
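+
+/* Example of the hysteresis between growing and shrinking (CHAIN_LEN == 6):
+** with nactive == 512, db_put_hash() triggers grow() once nitems exceeds
+** 512 * (6+1) == 3584, while try_shrink() triggers shrink() only when nitems
+** drops below 512 * 6 == 3072 (and the table is unfixed and larger than one
+** segment). Loads in between leave the number of active buckets unchanged.
+*/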
+
+/* Is this a live object (not pseudo-deleted) with the specified key?
+*/
+static ERTS_INLINE int has_live_key(DbTableHash* tb, HashDbTerm* b,
+ Eterm key, HashValue hval)
+{
+ if (b->hvalue != hval) return 0;
+ else {
+ Eterm itemKey = GETKEY(tb, b->dbterm.tpl);
+ return EQ(key,itemKey);
+ }
+}
+
+/* Does this object have the specified key? It may be pseudo-deleted.
+*/
+static ERTS_INLINE int has_key(DbTableHash* tb, HashDbTerm* b,
+ Eterm key, HashValue hval)
+{
+ if (b->hvalue != hval && b->hvalue != INVALID_HASH) return 0;
+ else {
+ Eterm itemKey = GETKEY(tb, b->dbterm.tpl);
+ return EQ(key,itemKey);
+ }
+}
+
+
+/*
+** External interface
+*/
+DbTableMethod db_hash =
+{
+ db_create_hash,
+ db_first_hash,
+ db_next_hash,
+ db_first_hash, /* last == first */
+ db_next_hash, /* prev == next */
+ db_put_hash,
+ db_get_hash,
+ db_get_element_hash,
+ db_member_hash,
+ db_erase_hash,
+ db_erase_object_hash,
+ db_slot_hash,
+ db_select_chunk_hash,
+ db_select_hash,
+ db_select_delete_hash,
+ db_select_continue_hash, /* hmm continue_hash? */
+ db_select_delete_continue_hash,
+ db_select_count_hash,
+ db_select_count_continue_hash,
+ db_delete_all_objects_hash,
+ db_free_table_hash,
+ db_free_table_continue_hash,
+ db_print_hash,
+ db_foreach_offheap_hash,
+#ifdef HARDDEBUG
+ db_check_table_hash,
+#else
+ NULL,
+#endif
+ db_lookup_dbterm_hash,
+ db_finalize_dbterm_hash
+};
+
+#ifdef DEBUG
+/* Wait a while to provoke race and get code coverage */
+static void DEBUG_WAIT(void)
+{
+ unsigned long spin = 1UL << 20;
+ while (--spin);
+}
+#else
+# define DEBUG_WAIT()
+#endif
+
+/* Rare case of restoring the rest of the fixdel list
+ when "unfixer" gets interrupted by "fixer" */
+static void restore_fixdel(DbTableHash* tb, FixedDeletion* fixdel)
+{
+ /*int tries = 0;*/
+ DEBUG_WAIT();
+ if (erts_smp_atomic_cmpxchg(&tb->fixdel, (long)fixdel,
+ (long)NULL) != (long)NULL) {
+ /* Oboy, must join lists */
+ FixedDeletion* last = fixdel;
+ long was_tail;
+ long exp_tail;
+
+ while (last->next != NULL) last = last->next;
+ was_tail = erts_smp_atomic_read(&tb->fixdel);
+ do { /* Lockless atomic list insertion */
+ exp_tail = was_tail;
+ last->next = (FixedDeletion*) exp_tail;
+ /*++tries;*/
+ DEBUG_WAIT();
+ was_tail = erts_smp_atomic_cmpxchg(&tb->fixdel, (long)fixdel,
+ exp_tail);
+ }while (was_tail != exp_tail);
+ }
+ /*erts_fprintf(stderr,"erl_db_hash: restore_fixdel tries=%d\r\n", tries);*/
+}
+/*
+** Table interface routines ie what's called by the bif's
+*/
+
+void db_unfix_table_hash(DbTableHash *tb)
+{
+ FixedDeletion* fixdel;
+
+ ERTS_SMP_LC_ASSERT(erts_smp_lc_rwmtx_is_rwlocked(&tb->common.rwlock)
+ || (erts_smp_lc_rwmtx_is_rlocked(&tb->common.rwlock)
+ && !tb->common.is_thread_safe));
+restart:
+ fixdel = (FixedDeletion*) erts_smp_atomic_xchg(&tb->fixdel, (long)NULL);
+ while (fixdel != NULL) {
+ FixedDeletion *fx = fixdel;
+ int ix = fx->slot;
+ HashDbTerm **bp;
+ HashDbTerm *b;
+ erts_smp_rwmtx_t* lck = WLOCK_HASH(tb,ix);
+
+ if (IS_FIXED(tb)) { /* interrupted by fixer */
+ WUNLOCK_HASH(lck);
+ restore_fixdel(tb,fixdel);
+ if (!IS_FIXED(tb)) {
+ goto restart; /* unfixed again! */
+ }
+ return;
+ }
+ if (ix < NACTIVE(tb)) {
+ bp = &BUCKET(tb, ix);
+ b = *bp;
+
+ while (b != NULL) {
+ if (b->hvalue == INVALID_HASH) {
+ *bp = b->next;
+ free_term(tb, b);
+ b = *bp;
+ } else {
+ bp = &b->next;
+ b = b->next;
+ }
+ }
+ }
+ /* else slot has been joined and purged by shrink() */
+ WUNLOCK_HASH(lck);
+ fixdel = fx->next;
+ erts_db_free(ERTS_ALC_T_DB_FIX_DEL,
+ (DbTable *) tb,
+ (void *) fx,
+ sizeof(FixedDeletion));
+ ERTS_ETS_MISC_MEM_ADD(-sizeof(FixedDeletion));
+ }
+
+ /* ToDo: Maybe try grow/shrink the table as well */
+}
+
+/* Only used by tests
+*/
+Uint db_kept_items_hash(DbTableHash *tb)
+{
+ Uint kept_items = 0;
+ Uint ix = 0;
+ erts_smp_rwmtx_t* lck = RLOCK_HASH(tb,ix);
+ HashDbTerm* b;
+ do {
+ for (b = BUCKET(tb, ix); b != NULL; b = b->next) {
+ if (b->hvalue == INVALID_HASH) {
+ ++kept_items;
+ }
+ }
+ ix = next_slot(tb, ix, &lck);
+ }while (ix);
+ return kept_items;
+}
+
+int db_create_hash(Process *p, DbTable *tbl)
+{
+ DbTableHash *tb = &tbl->hash;
+
+ erts_smp_atomic_init(&tb->szm, SEGSZ_MASK);
+ erts_smp_atomic_init(&tb->nactive, SEGSZ);
+ erts_smp_atomic_init(&tb->fixdel, (long)NULL);
+ erts_smp_atomic_init(&tb->segtab, (long) alloc_ext_seg(tb,0,NULL)->segtab);
+ tb->nsegs = NSEG_1;
+ tb->nslots = SEGSZ;
+
+ erts_smp_atomic_init(&tb->is_resizing, 0);
+#ifdef ERTS_SMP
+ if (tb->common.type & DB_FINE_LOCKED) {
+ int i;
+ tb->locks = (DbTableHashFineLocks*) erts_db_alloc_fnf(ERTS_ALC_T_DB_SEG, /* Other type maybe? */
+ (DbTable *) tb,
+ sizeof(DbTableHashFineLocks));
+ for (i=0; i<DB_HASH_LOCK_CNT; ++i) {
+ #ifdef ERTS_ENABLE_LOCK_COUNT
+ erts_rwmtx_init_x(&tb->locks->lck_vec[i].lck, "db_hash_slot", tb->common.the_name);
+ #else
+ erts_rwmtx_init(&tb->locks->lck_vec[i].lck, "db_hash_slot");
+ #endif
+ }
+ /* This important property is needed to guarantee that the buckets
+     * involved in a grow/shrink operation are protected by the same lock:
+ */
+ ASSERT(erts_smp_atomic_read(&tb->nactive) % DB_HASH_LOCK_CNT == 0);
+ }
+ else { /* coarse locking */
+ tb->locks = NULL;
+ }
+#endif /* ERTS_SMP */
+ return DB_ERROR_NONE;
+}
+
+static int db_first_hash(Process *p, DbTable *tbl, Eterm *ret)
+{
+ DbTableHash *tb = &tbl->hash;
+ Uint ix = 0;
+ erts_smp_rwmtx_t* lck = RLOCK_HASH(tb,ix);
+ HashDbTerm* list;
+
+ for (;;) {
+ list = BUCKET(tb,ix);
+ if (list != NULL) {
+ if (list->hvalue == INVALID_HASH) {
+ list = next(tb,&ix,&lck,list);
+ }
+ break;
+ }
+ if ((ix=next_slot(tb,ix,&lck)) == 0) {
+ list = NULL;
+ break;
+ }
+ }
+ if (list != NULL) {
+ Eterm key = GETKEY(tb, list->dbterm.tpl);
+
+ COPY_OBJECT(key, p, ret);
+ RUNLOCK_HASH(lck);
+ }
+ else {
+ *ret = am_EOT;
+ }
+ return DB_ERROR_NONE;
+}
+
+
+static int db_next_hash(Process *p, DbTable *tbl, Eterm key, Eterm *ret)
+{
+ DbTableHash *tb = &tbl->hash;
+ HashValue hval;
+ Uint ix;
+ HashDbTerm* b;
+ erts_smp_rwmtx_t* lck;
+
+ hval = MAKE_HASH(key);
+ lck = RLOCK_HASH(tb,hval);
+ ix = hash_to_ix(tb, hval);
+ b = BUCKET(tb, ix);
+
+ for (;;) {
+ if (b == NULL) {
+ RUNLOCK_HASH(lck);
+ return DB_ERROR_BADKEY;
+ }
+ if (has_key(tb, b, key, hval)) {
+ break;
+ }
+ b = b->next;
+ }
+ /* Key found */
+
+ b = next(tb, &ix, &lck, b);
+ if (tb->common.status & (DB_BAG | DB_DUPLICATE_BAG)) {
+ while (b != 0) {
+ if (!has_live_key(tb, b, key, hval)) {
+ break;
+ }
+ b = next(tb, &ix, &lck, b);
+ }
+ }
+ if (b == NULL) {
+ *ret = am_EOT;
+ }
+ else {
+ COPY_OBJECT(GETKEY(tb, b->dbterm.tpl), p, ret);
+ RUNLOCK_HASH(lck);
+ }
+ return DB_ERROR_NONE;
+}
+
+int db_put_hash(DbTable *tbl, Eterm obj, int key_clash_fail)
+{
+ DbTableHash *tb = &tbl->hash;
+ HashValue hval;
+ int ix;
+ Eterm key;
+ HashDbTerm** bp;
+ HashDbTerm* b;
+ HashDbTerm* q;
+ erts_smp_rwmtx_t* lck;
+ int nitems;
+ int ret = DB_ERROR_NONE;
+
+ key = GETKEY(tb, tuple_val(obj));
+ hval = MAKE_HASH(key);
+ lck = WLOCK_HASH(tb, hval);
+ ix = hash_to_ix(tb, hval);
+ bp = &BUCKET(tb, ix);
+ b = *bp;
+
+ for (;;) {
+ if (b == NULL) {
+ goto Lnew;
+ }
+ if (has_key(tb,b,key,hval)) {
+ break;
+ }
+ bp = &b->next;
+ b = b->next;
+ }
+ /* Key found
+ */
+ if (tb->common.status & DB_SET) {
+ HashDbTerm* bnext = b->next;
+ if (b->hvalue == INVALID_HASH) {
+ erts_smp_atomic_inc(&tb->common.nitems);
+ }
+ else if (key_clash_fail) {
+ ret = DB_ERROR_BADKEY;
+ goto Ldone;
+ }
+ q = get_term(tb, b, obj, hval);
+ q->next = bnext;
+ q->hvalue = hval; /* In case of INVALID_HASH */
+ *bp = q;
+ goto Ldone;
+ }
+ else if (key_clash_fail) { /* && (DB_BAG || DB_DUPLICATE_BAG) */
+ q = b;
+ do {
+ if (q->hvalue != INVALID_HASH) {
+ ret = DB_ERROR_BADKEY;
+ goto Ldone;
+ }
+ q = q->next;
+ }while (q != NULL && has_key(tb,q,key,hval));
+ }
+ else if (tb->common.status & DB_BAG) {
+ HashDbTerm** qp = bp;
+ q = b;
+ do {
+ if (eq(make_tuple(q->dbterm.tpl), obj)) {
+ if (q->hvalue == INVALID_HASH) {
+ erts_smp_atomic_inc(&tb->common.nitems);
+ q->hvalue = hval;
+ if (q != b) { /* must move to preserve key insertion order */
+ *qp = q->next;
+ q->next = b;
+ *bp = q;
+ }
+ }
+ goto Ldone;
+ }
+ qp = &q->next;
+ q = *qp;
+ }while (q != NULL && has_key(tb,q,key,hval));
+ }
+ /*else DB_DUPLICATE_BAG */
+
+Lnew:
+ q = get_term(tb, NULL, obj, hval);
+ q->next = b;
+ *bp = q;
+ nitems = erts_smp_atomic_inctest(&tb->common.nitems);
+ WUNLOCK_HASH(lck);
+ {
+ int nactive = NACTIVE(tb);
+ if (nitems > nactive * (CHAIN_LEN+1) && !IS_FIXED(tb)) {
+ grow(tb, nactive);
+ }
+ }
+ CHECK_TABLES();
+ return DB_ERROR_NONE;
+
+Ldone:
+ WUNLOCK_HASH(lck);
+ return ret;
+}
+
+int db_get_hash(Process *p, DbTable *tbl, Eterm key, Eterm *ret)
+{
+ DbTableHash *tb = &tbl->hash;
+ HashValue hval;
+ int ix;
+ HashDbTerm* b1;
+ erts_smp_rwmtx_t* lck;
+
+ hval = MAKE_HASH(key);
+ lck = RLOCK_HASH(tb,hval);
+ ix = hash_to_ix(tb, hval);
+ b1 = BUCKET(tb, ix);
+
+ while(b1 != 0) {
+ if (has_live_key(tb,b1,key,hval)) {
+ HashDbTerm* b2 = b1->next;
+ Eterm copy;
+
+ if (tb->common.status & (DB_BAG | DB_DUPLICATE_BAG)) {
+ while(b2 != NULL && has_key(tb,b2,key,hval))
+ b2 = b2->next;
+ }
+ copy = put_term_list(p, b1, b2);
+ CHECK_TABLES();
+ *ret = copy;
+ goto done;
+ }
+ b1 = b1->next;
+ }
+ *ret = NIL;
+done:
+ RUNLOCK_HASH(lck);
+ return DB_ERROR_NONE;
+}
+
+int db_get_element_array(DbTable *tbl,
+ Eterm key,
+ int ndex,
+ Eterm *ret,
+ int *num_ret)
+{
+ DbTableHash *tb = &tbl->hash;
+ HashValue hval;
+ int ix;
+ HashDbTerm* b1;
+ int num = 0;
+ int retval;
+ erts_smp_rwmtx_t* lck;
+
+ ASSERT(!IS_FIXED(tbl)); /* no support for fixed tables here */
+
+ hval = MAKE_HASH(key);
+ lck = RLOCK_HASH(tb, hval);
+ ix = hash_to_ix(tb, hval);
+ b1 = BUCKET(tb, ix);
+
+ while(b1 != 0) {
+ if (has_live_key(tb,b1,key,hval)) {
+ if (tb->common.status & (DB_BAG | DB_DUPLICATE_BAG)) {
+ HashDbTerm* b;
+ HashDbTerm* b2 = b1->next;
+
+ while(b2 != NULL && has_live_key(tb,b2,key,hval)) {
+ if (ndex > arityval(b2->dbterm.tpl[0])) {
+ retval = DB_ERROR_BADITEM;
+ goto done;
+ }
+ b2 = b2->next;
+ }
+
+ b = b1;
+ while(b != b2) {
+ if (num < *num_ret) {
+ ret[num++] = b->dbterm.tpl[ndex];
+ } else {
+ retval = DB_ERROR_NONE;
+ goto done;
+ }
+ b = b->next;
+ }
+ *num_ret = num;
+ }
+ else {
+ ASSERT(*num_ret > 0);
+ ret[0] = b1->dbterm.tpl[ndex];
+ *num_ret = 1;
+ }
+ retval = DB_ERROR_NONE;
+ goto done;
+ }
+ b1 = b1->next;
+ }
+ retval = DB_ERROR_BADKEY;
+done:
+ RUNLOCK_HASH(lck);
+ return retval;
+}
+
+
+static int db_member_hash(DbTable *tbl, Eterm key, Eterm *ret)
+{
+ DbTableHash *tb = &tbl->hash;
+ HashValue hval;
+ int ix;
+ HashDbTerm* b1;
+ erts_smp_rwmtx_t* lck;
+
+ hval = MAKE_HASH(key);
+    lck = RLOCK_HASH(tb, hval); /* must be taken before hash_to_ix() */
+    ix = hash_to_ix(tb, hval);
+ b1 = BUCKET(tb, ix);
+
+ while(b1 != 0) {
+ if (has_live_key(tb,b1,key,hval)) {
+ *ret = am_true;
+ goto done;
+ }
+ b1 = b1->next;
+ }
+ *ret = am_false;
+done:
+ RUNLOCK_HASH(lck);
+ return DB_ERROR_NONE;
+}
+
+static int db_get_element_hash(Process *p, DbTable *tbl,
+ Eterm key,
+ int ndex,
+ Eterm *ret)
+{
+ DbTableHash *tb = &tbl->hash;
+ HashValue hval;
+ int ix;
+ HashDbTerm* b1;
+ erts_smp_rwmtx_t* lck;
+ int retval;
+
+ hval = MAKE_HASH(key);
+ lck = RLOCK_HASH(tb, hval);
+ ix = hash_to_ix(tb, hval);
+ b1 = BUCKET(tb, ix);
+
+
+ while(b1 != 0) {
+ if (has_live_key(tb,b1,key,hval)) {
+ Eterm copy;
+
+ if (ndex > arityval(b1->dbterm.tpl[0])) {
+ retval = DB_ERROR_BADITEM;
+ goto done;
+ }
+
+ if (tb->common.status & (DB_BAG | DB_DUPLICATE_BAG)) {
+ HashDbTerm* b;
+ HashDbTerm* b2 = b1->next;
+ Eterm elem_list = NIL;
+
+ while(b2 != NULL && has_key(tb,b2,key,hval)) {
+ if (ndex > arityval(b2->dbterm.tpl[0])
+ && b2->hvalue != INVALID_HASH) {
+ retval = DB_ERROR_BADITEM;
+ goto done;
+ }
+ b2 = b2->next;
+ }
+
+ b = b1;
+ while(b != b2) {
+ if (b->hvalue != INVALID_HASH) {
+ Eterm *hp;
+ Uint sz = size_object(b->dbterm.tpl[ndex])+2;
+
+ hp = HAlloc(p, sz);
+ copy = copy_struct(b->dbterm.tpl[ndex], sz-2, &hp, &MSO(p));
+ elem_list = CONS(hp, copy, elem_list);
+ hp += 2;
+ }
+ b = b->next;
+ }
+ *ret = elem_list;
+ }
+ else {
+ COPY_OBJECT(b1->dbterm.tpl[ndex], p, &copy);
+ *ret = copy;
+ }
+ retval = DB_ERROR_NONE;
+ goto done;
+ }
+ b1 = b1->next;
+ }
+ retval = DB_ERROR_BADKEY;
+done:
+ RUNLOCK_HASH(lck);
+ return retval;
+}
+
+/*
+ * Very internal interface, removes elements of arity two from
+ * BAG. Used for the PID meta table
+ */
+int db_erase_bag_exact2(DbTable *tbl, Eterm key, Eterm value)
+{
+ DbTableHash *tb = &tbl->hash;
+ HashValue hval;
+ int ix;
+ HashDbTerm** bp;
+ HashDbTerm* b;
+ erts_smp_rwmtx_t* lck;
+ int found = 0;
+
+ hval = MAKE_HASH(key);
+ lck = WLOCK_HASH(tb,hval);
+ ix = hash_to_ix(tb, hval);
+ bp = &BUCKET(tb, ix);
+ b = *bp;
+
+ ASSERT(!IS_FIXED(tb));
+ ASSERT((tb->common.status & DB_BAG));
+
+ while(b != 0) {
+ if (has_live_key(tb,b,key,hval)) {
+ found = 1;
+ if ((arityval(b->dbterm.tpl[0]) == 2) &&
+ EQ(value, b->dbterm.tpl[2])) {
+ *bp = b->next;
+ free_term(tb, b);
+ erts_smp_atomic_dec(&tb->common.nitems);
+ b = *bp;
+ break;
+ }
+ } else if (found) {
+ break;
+ }
+ bp = &b->next;
+ b = b->next;
+ }
+ WUNLOCK_HASH(lck);
+ if (found) {
+ try_shrink(tb);
+ }
+ return DB_ERROR_NONE;
+}
+
+/*
+** NB, this is for the db_erase/2 bif.
+*/
+int db_erase_hash(DbTable *tbl, Eterm key, Eterm *ret)
+{
+ DbTableHash *tb = &tbl->hash;
+ HashValue hval;
+ int ix;
+ HashDbTerm** bp;
+ HashDbTerm* b;
+ erts_smp_rwmtx_t* lck;
+ int nitems_diff = 0;
+
+ hval = MAKE_HASH(key);
+ lck = WLOCK_HASH(tb,hval);
+ ix = hash_to_ix(tb, hval);
+ bp = &BUCKET(tb, ix);
+ b = *bp;
+
+ while(b != 0) {
+ if (has_live_key(tb,b,key,hval)) {
+ --nitems_diff;
+ if (nitems_diff == -1 && IS_FIXED(tb)) {
+ /* Pseudo remove (no need to keep several of same key) */
+ add_fixed_deletion(tb, ix);
+ b->hvalue = INVALID_HASH;
+ } else {
+ *bp = b->next;
+ free_term(tb, b);
+ b = *bp;
+ continue;
+ }
+ }
+ else {
+ if (nitems_diff && b->hvalue != INVALID_HASH)
+ break;
+ }
+ bp = &b->next;
+ b = b->next;
+ }
+ WUNLOCK_HASH(lck);
+ if (nitems_diff) {
+ erts_smp_atomic_add(&tb->common.nitems, nitems_diff);
+ try_shrink(tb);
+ }
+ *ret = am_true;
+ return DB_ERROR_NONE;
+}
+
+/*
+** This is for the ets:delete_object BIF
+*/
+static int db_erase_object_hash(DbTable *tbl, Eterm object, Eterm *ret)
+{
+ DbTableHash *tb = &tbl->hash;
+ HashValue hval;
+ int ix;
+ HashDbTerm** bp;
+ HashDbTerm* b;
+ erts_smp_rwmtx_t* lck;
+ int nitems_diff = 0;
+ int nkeys = 0;
+ Eterm key;
+
+ key = GETKEY(tb, tuple_val(object));
+ hval = MAKE_HASH(key);
+ lck = WLOCK_HASH(tb,hval);
+ ix = hash_to_ix(tb, hval);
+ bp = &BUCKET(tb, ix);
+ b = *bp;
+
+ while(b != 0) {
+ if (has_live_key(tb,b,key,hval)) {
+ ++nkeys;
+ if (eq(object, make_tuple(b->dbterm.tpl))) {
+ --nitems_diff;
+ if (nkeys==1 && IS_FIXED(tb)) { /* Pseudo remove */
+ add_fixed_deletion(tb,ix);
+ b->hvalue = INVALID_HASH;
+ bp = &b->next;
+ b = b->next;
+ } else {
+ *bp = b->next;
+ free_term(tb, b);
+ b = *bp;
+ }
+ if (tb->common.status & (DB_DUPLICATE_BAG)) {
+ continue;
+ } else {
+ break;
+ }
+ }
+ }
+ else if (nitems_diff && b->hvalue != INVALID_HASH) {
+ break;
+ }
+ bp = &b->next;
+ b = b->next;
+ }
+ WUNLOCK_HASH(lck);
+ if (nitems_diff) {
+ erts_smp_atomic_add(&tb->common.nitems, nitems_diff);
+ try_shrink(tb);
+ }
+ *ret = am_true;
+ return DB_ERROR_NONE;
+}
+
+
+static int db_slot_hash(Process *p, DbTable *tbl, Eterm slot_term, Eterm *ret)
+{
+ DbTableHash *tb = &tbl->hash;
+ erts_smp_rwmtx_t* lck;
+ Sint slot;
+ int retval;
+ int nactive;
+
+ if (is_not_small(slot_term) || ((slot = signed_val(slot_term)) < 0)) {
+ return DB_ERROR_BADPARAM;
+ }
+ lck = RLOCK_HASH(tb, slot);
+ nactive = NACTIVE(tb);
+ if (slot < nactive) {
+ *ret = put_term_list(p, BUCKET(tb, slot), 0);
+ retval = DB_ERROR_NONE;
+ }
+ else if (slot == nactive) {
+ *ret = am_EOT;
+ retval = DB_ERROR_NONE;
+ }
+ else {
+ retval = DB_ERROR_BADPARAM;
+ }
+ RUNLOCK_HASH(lck);
+ return retval;
+}
+
+
+/*
+ * This is just here so I can take care of the return value
+ * that is to be sent during a trap (the BIF_TRAP macros explicitly return)
+ */
+static BIF_RETTYPE bif_trap1(Export *bif,
+ Process *p,
+ Eterm p1)
+{
+ BIF_TRAP1(bif, p, p1);
+}
+
+/*
+ * Continue collecting select matches, this may happen either due to a trap
+ * or when the user calls ets:select/1
+ */
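+/* The continuation decoded below (and built by db_select_chunk_hash and by
+ * this function itself) is a 6-tuple:
+ *    {TableId, SlotIx, ChunkSize, CompiledMatchSpec, MatchList, Got}
+ * accessed as tptr[1]..tptr[6].
+ */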
+static int db_select_continue_hash(Process *p,
+ DbTable *tbl,
+ Eterm continuation,
+ Eterm *ret)
+{
+ DbTableHash *tb = &tbl->hash;
+ Sint slot_ix;
+ Sint save_slot_ix;
+ Sint chunk_size;
+ int all_objects;
+ Binary *mp;
+ int num_left = 1000;
+ HashDbTerm *current = 0;
+ Eterm match_list;
+ Uint32 dummy;
+ unsigned sz;
+ Eterm *hp;
+ Eterm match_res;
+ Sint got;
+ Eterm *tptr;
+ erts_smp_rwmtx_t* lck;
+
+#define RET_TO_BIF(Term, State) do { *ret = (Term); return State; } while(0);
+
+ /* Decode continuation. We know it's a tuple but not the arity or anything else */
+
+ tptr = tuple_val(continuation);
+
+ if (arityval(*tptr) != 6)
+ RET_TO_BIF(NIL,DB_ERROR_BADPARAM);
+
+ if (!is_small(tptr[2]) || !is_small(tptr[3]) || !is_binary(tptr[4]) ||
+ !(is_list(tptr[5]) || tptr[5] == NIL) || !is_small(tptr[6]))
+ RET_TO_BIF(NIL,DB_ERROR_BADPARAM);
+ if ((chunk_size = signed_val(tptr[3])) < 0)
+ RET_TO_BIF(NIL,DB_ERROR_BADPARAM);
+ if (!(thing_subtag(*binary_val(tptr[4])) == REFC_BINARY_SUBTAG))
+ RET_TO_BIF(NIL,DB_ERROR_BADPARAM);
+ mp = ((ProcBin *) binary_val(tptr[4]))->val;
+ if (!IsMatchProgBinary(mp))
+ RET_TO_BIF(NIL,DB_ERROR_BADPARAM);
+ all_objects = mp->flags & BIN_FLAG_ALL_OBJECTS;
+ match_list = tptr[5];
+ if ((got = signed_val(tptr[6])) < 0)
+ RET_TO_BIF(NIL,DB_ERROR_BADPARAM);
+
+ slot_ix = signed_val(tptr[2]);
+ if (slot_ix < 0 /* EOT */
+ || (chunk_size && got >= chunk_size)) {
+ goto done; /* Already got all or enough in the match_list */
+ }
+
+ lck = RLOCK_HASH(tb,slot_ix);
+ if (slot_ix >= NACTIVE(tb)) {
+ RUNLOCK_HASH(lck);
+ RET_TO_BIF(NIL,DB_ERROR_BADPARAM);
+ }
+
+ while ((current = BUCKET(tb,slot_ix)) == NULL) {
+ slot_ix = next_slot(tb, slot_ix, &lck);
+ if (slot_ix == 0) {
+ slot_ix = -1; /* EOT */
+ goto done;
+ }
+ }
+ for(;;) {
+ if (current->hvalue != INVALID_HASH &&
+ (match_res =
+ db_prog_match(p,mp,
+ make_tuple(current->dbterm.tpl),
+ 0,&dummy),
+ is_value(match_res))) {
+ if (all_objects) {
+ hp = HAlloc(p, current->dbterm.size + 2);
+ match_res = copy_shallow(DBTERM_BUF(&current->dbterm),
+ current->dbterm.size,
+ &hp,
+ &MSO(p));
+ } else {
+ sz = size_object(match_res);
+
+ hp = HAlloc(p, sz + 2);
+ match_res = copy_struct(match_res, sz, &hp, &MSO(p));
+ }
+ match_list = CONS(hp, match_res, match_list);
+ ++got;
+ }
+ --num_left;
+ save_slot_ix = slot_ix;
+ if ((current = next(tb, (Uint*)&slot_ix, &lck, current)) == NULL) {
+ slot_ix = -1; /* EOT */
+ break;
+ }
+ if (slot_ix != save_slot_ix) {
+ if (chunk_size && got >= chunk_size) {
+ RUNLOCK_HASH(lck);
+ break;
+ }
+ if (num_left <= 0 || MBUF(p)) {
+ /*
+ * We have either reached our limit, or just created some heap fragments.
+ * Since many heap fragments will make the GC slower, trap and GC now.
+ */
+ RUNLOCK_HASH(lck);
+ goto trap;
+ }
+ }
+ }
+done:
+ BUMP_REDS(p, 1000 - num_left);
+ if (chunk_size) {
+ Eterm continuation;
+ Eterm rest = NIL;
+ Sint rest_size = 0;
+
+	if (got > chunk_size) {   /* Cannot write destructively here;
+				     the list may have been in user space */
+ rest = NIL;
+ hp = HAlloc(p, (got - chunk_size) * 2);
+ while (got-- > chunk_size) {
+ rest = CONS(hp, CAR(list_val(match_list)), rest);
+ hp += 2;
+ match_list = CDR(list_val(match_list));
+ ++rest_size;
+ }
+ }
+ if (rest != NIL || slot_ix >= 0) {
+ hp = HAlloc(p,3+7);
+ continuation = TUPLE6(hp, tptr[1], make_small(slot_ix),
+ tptr[3], tptr[4], rest,
+ make_small(rest_size));
+ hp += 7;
+ RET_TO_BIF(TUPLE2(hp, match_list, continuation),DB_ERROR_NONE);
+ } else {
+ if (match_list != NIL) {
+ hp = HAlloc(p, 3);
+ RET_TO_BIF(TUPLE2(hp, match_list, am_EOT),DB_ERROR_NONE);
+ } else {
+ RET_TO_BIF(am_EOT, DB_ERROR_NONE);
+ }
+ }
+ }
+ RET_TO_BIF(match_list,DB_ERROR_NONE);
+
+trap:
+ BUMP_ALL_REDS(p);
+
+ hp = HAlloc(p,7);
+ continuation = TUPLE6(hp, tptr[1], make_small(slot_ix), tptr[3],
+ tptr[4], match_list, make_small(got));
+ RET_TO_BIF(bif_trap1(&ets_select_continue_exp, p,
+ continuation),
+ DB_ERROR_NONE);
+
+#undef RET_TO_BIF
+
+}
+
+static int db_select_hash(Process *p, DbTable *tbl,
+ Eterm pattern, int reverse,
+ Eterm *ret)
+{
+ return db_select_chunk_hash(p, tbl, pattern, 0, reverse, ret);
+}
+
+static int db_select_chunk_hash(Process *p, DbTable *tbl,
+ Eterm pattern, Sint chunk_size,
+ int reverse, /* not used */
+ Eterm *ret)
+{
+ DbTableHash *tb = &tbl->hash;
+ struct mp_info mpi;
+ Sint slot_ix;
+ HashDbTerm *current = 0;
+ unsigned current_list_pos = 0;
+ Eterm match_list;
+ Uint32 dummy;
+ Eterm match_res;
+ unsigned sz;
+ Eterm *hp;
+ int num_left = 1000;
+ Uint got = 0;
+ Eterm continuation;
+ int errcode;
+ Eterm mpb;
+ erts_smp_rwmtx_t* lck;
+
+
+#define RET_TO_BIF(Term,RetVal) do { \
+ if (mpi.mp != NULL) { \
+ erts_bin_free(mpi.mp); \
+ } \
+ if (mpi.lists != mpi.dlists) { \
+ erts_free(ERTS_ALC_T_DB_SEL_LIST, \
+ (void *) mpi.lists); \
+ } \
+ *ret = (Term); \
+ return RetVal; \
+ } while(0)
+
+
+ if ((errcode = analyze_pattern(tb, pattern, &mpi)) != DB_ERROR_NONE) {
+ RET_TO_BIF(NIL,errcode);
+ }
+
+ if (!mpi.something_can_match) {
+ if (chunk_size) {
+ RET_TO_BIF(am_EOT, DB_ERROR_NONE); /* We're done */
+ }
+ RET_TO_BIF(NIL, DB_ERROR_NONE);
+ /* can't possibly match anything */
+ }
+
+ if (!mpi.key_given) {
+ /* Run this code if pattern is variable or GETKEY(pattern) */
+ /* is a variable */
+ slot_ix = 0;
+ lck = RLOCK_HASH(tb,slot_ix);
+ for (;;) {
+ ASSERT(slot_ix < NACTIVE(tb));
+ if ((current = BUCKET(tb,slot_ix)) != NULL) {
+ break;
+ }
+ slot_ix = next_slot(tb,slot_ix,&lck);
+ if (slot_ix == 0) {
+ if (chunk_size) {
+ RET_TO_BIF(am_EOT, DB_ERROR_NONE); /* We're done */
+ }
+ RET_TO_BIF(NIL,DB_ERROR_NONE);
+ }
+ }
+ } else {
+ /* We have at least one */
+ slot_ix = mpi.lists[current_list_pos].ix;
+ lck = RLOCK_HASH(tb, slot_ix);
+ current = *(mpi.lists[current_list_pos].bucket);
+ ASSERT(current == BUCKET(tb,slot_ix));
+ ++current_list_pos;
+ }
+
+ match_list = NIL;
+
+ for(;;) {
+ if (current != NULL) {
+ if (current->hvalue != INVALID_HASH) {
+ match_res = db_prog_match(p,mpi.mp,
+ make_tuple(current->dbterm.tpl),
+ 0,&dummy);
+ if (is_value(match_res)) {
+ if (mpi.all_objects) {
+ hp = HAlloc(p, current->dbterm.size + 2);
+ match_res = copy_shallow(DBTERM_BUF(&current->dbterm),
+ current->dbterm.size,
+ &hp,
+ &MSO(p));
+ } else {
+ sz = size_object(match_res);
+
+ hp = HAlloc(p, sz + 2);
+ match_res = copy_struct(match_res, sz, &hp, &MSO(p));
+ }
+ match_list = CONS(hp, match_res, match_list);
+ ++got;
+ }
+ }
+ current = current->next;
+ }
+ else if (mpi.key_given) { /* Key is bound */
+ RUNLOCK_HASH(lck);
+ if (current_list_pos == mpi.num_lists) {
+ slot_ix = -1; /* EOT */
+ goto done;
+ } else {
+ slot_ix = mpi.lists[current_list_pos].ix;
+ lck = RLOCK_HASH(tb, slot_ix);
+ current = *(mpi.lists[current_list_pos].bucket);
+ ASSERT(mpi.lists[current_list_pos].bucket == &BUCKET(tb,slot_ix));
+ ++current_list_pos;
+ }
+ }
+ else { /* Key is variable */
+ --num_left;
+
+ if ((slot_ix=next_slot(tb,slot_ix,&lck)) == 0) {
+ slot_ix = -1;
+ break;
+ }
+ if (chunk_size && got >= chunk_size) {
+ RUNLOCK_HASH(lck);
+ break;
+ }
+ if (num_left <= 0 || MBUF(p)) {
+ /*
+ * We have either reached our limit, or just created some heap fragments.
+ * Since many heap fragments will make the GC slower, trap and GC now.
+ */
+ RUNLOCK_HASH(lck);
+ goto trap;
+ }
+ current = BUCKET(tb,slot_ix);
+ }
+ }
+done:
+ BUMP_REDS(p, 1000 - num_left);
+ if (chunk_size) {
+ Eterm continuation;
+ Eterm rest = NIL;
+ Sint rest_size = 0;
+
+ if (mpi.all_objects)
+ (mpi.mp)->flags |= BIN_FLAG_ALL_OBJECTS;
+ if (got > chunk_size) { /* Split list in return value and 'rest' */
+ Eterm tmp = match_list;
+ rest = match_list;
+ while (got-- > chunk_size + 1) {
+ tmp = CDR(list_val(tmp));
+ ++rest_size;
+ }
+ ++rest_size;
+ match_list = CDR(list_val(tmp));
+ CDR(list_val(tmp)) = NIL; /* Destructive, the list has never
+ been in 'user space' */
+ }
+ if (rest != NIL || slot_ix >= 0) { /* Need more calls */
+ hp = HAlloc(p,3+7+PROC_BIN_SIZE);
+ mpb =db_make_mp_binary(p,(mpi.mp),&hp);
+ if (mpi.all_objects)
+ (mpi.mp)->flags |= BIN_FLAG_ALL_OBJECTS;
+ continuation = TUPLE6(hp, tb->common.id,make_small(slot_ix),
+ make_small(chunk_size),
+ mpb, rest,
+ make_small(rest_size));
+ mpi.mp = NULL; /*otherwise the return macro will destroy it */
+ hp += 7;
+ RET_TO_BIF(TUPLE2(hp, match_list, continuation),DB_ERROR_NONE);
+ } else { /* All data is exhausted */
+ if (match_list != NIL) { /* No more data to search but still a
+ result to return to the caller */
+ hp = HAlloc(p, 3);
+ RET_TO_BIF(TUPLE2(hp, match_list, am_EOT),DB_ERROR_NONE);
+	    } else { /* Reached the end of the table with no data to return */
+ RET_TO_BIF(am_EOT, DB_ERROR_NONE);
+ }
+ }
+ }
+ RET_TO_BIF(match_list,DB_ERROR_NONE);
+trap:
+ BUMP_ALL_REDS(p);
+ if (mpi.all_objects)
+ (mpi.mp)->flags |= BIN_FLAG_ALL_OBJECTS;
+ hp = HAlloc(p,7+PROC_BIN_SIZE);
+ mpb =db_make_mp_binary(p,(mpi.mp),&hp);
+ continuation = TUPLE6(hp, tb->common.id, make_small(slot_ix),
+ make_small(chunk_size),
+ mpb, match_list,
+ make_small(got));
+ mpi.mp = NULL; /*otherwise the return macro will destroy it */
+ RET_TO_BIF(bif_trap1(&ets_select_continue_exp, p,
+ continuation),
+ DB_ERROR_NONE);
+
+#undef RET_TO_BIF
+
+}
+
+static int db_select_count_hash(Process *p,
+ DbTable *tbl,
+ Eterm pattern,
+ Eterm *ret)
+{
+ DbTableHash *tb = &tbl->hash;
+ struct mp_info mpi;
+ Uint slot_ix = 0;
+ HashDbTerm* current = NULL;
+ unsigned current_list_pos = 0;
+ Uint32 dummy;
+ Eterm *hp;
+ int num_left = 1000;
+ Uint got = 0;
+ Eterm continuation;
+ int errcode;
+ Eterm egot;
+ Eterm mpb;
+ erts_smp_rwmtx_t* lck;
+
+#define RET_TO_BIF(Term,RetVal) do { \
+ if (mpi.mp != NULL) { \
+ erts_bin_free(mpi.mp); \
+ } \
+ if (mpi.lists != mpi.dlists) { \
+ erts_free(ERTS_ALC_T_DB_SEL_LIST, \
+ (void *) mpi.lists); \
+ } \
+ *ret = (Term); \
+ return RetVal; \
+ } while(0)
+
+
+ if ((errcode = analyze_pattern(tb, pattern, &mpi)) != DB_ERROR_NONE) {
+ RET_TO_BIF(NIL,errcode);
+ }
+
+ if (!mpi.something_can_match) {
+ RET_TO_BIF(make_small(0), DB_ERROR_NONE);
+ /* can't possibly match anything */
+ }
+
+ if (!mpi.key_given) {
+ /* Run this code if pattern is variable or GETKEY(pattern) */
+ /* is a variable */
+ slot_ix = 0;
+ lck = RLOCK_HASH(tb,slot_ix);
+ current = BUCKET(tb,slot_ix);
+ } else {
+ /* We have at least one */
+ slot_ix = mpi.lists[current_list_pos].ix;
+ lck = RLOCK_HASH(tb, slot_ix);
+ current = *(mpi.lists[current_list_pos].bucket);
+ ASSERT(current == BUCKET(tb,slot_ix));
+ ++current_list_pos;
+ }
+
+ for(;;) {
+ if (current != NULL) {
+ if (current->hvalue != INVALID_HASH) {
+ if (db_prog_match(p, mpi.mp, make_tuple(current->dbterm.tpl),
+ 0, &dummy) == am_true) {
+ ++got;
+ }
+ --num_left;
+ }
+ current = current->next;
+ }
+ else { /* next bucket */
+ if (mpi.key_given) { /* Key is bound */
+ RUNLOCK_HASH(lck);
+ if (current_list_pos == mpi.num_lists) {
+ goto done;
+ } else {
+ slot_ix = mpi.lists[current_list_pos].ix;
+ lck = RLOCK_HASH(tb, slot_ix);
+ current = *(mpi.lists[current_list_pos].bucket);
+ ASSERT(mpi.lists[current_list_pos].bucket == &BUCKET(tb,slot_ix));
+ ++current_list_pos;
+ }
+ }
+ else {
+ if ((slot_ix=next_slot(tb,slot_ix,&lck)) == 0) {
+ goto done;
+ }
+ if (num_left <= 0) {
+ RUNLOCK_HASH(lck);
+ goto trap;
+ }
+ current = BUCKET(tb,slot_ix);
+ }
+ }
+ }
+done:
+ BUMP_REDS(p, 1000 - num_left);
+ RET_TO_BIF(erts_make_integer(got,p),DB_ERROR_NONE);
+trap:
+ BUMP_ALL_REDS(p);
+ if (IS_USMALL(0, got)) {
+ hp = HAlloc(p, PROC_BIN_SIZE + 5);
+ egot = make_small(got);
+ }
+ else {
+ hp = HAlloc(p, BIG_UINT_HEAP_SIZE + PROC_BIN_SIZE + 5);
+ egot = uint_to_big(got, hp);
+ hp += BIG_UINT_HEAP_SIZE;
+ }
+ mpb = db_make_mp_binary(p,mpi.mp,&hp);
+ continuation = TUPLE4(hp, tb->common.id, make_small(slot_ix),
+ mpb,
+ egot);
+ mpi.mp = NULL; /*otherwise the return macro will destroy it */
+ RET_TO_BIF(bif_trap1(&ets_select_count_continue_exp, p,
+ continuation),
+ DB_ERROR_NONE);
+
+#undef RET_TO_BIF
+}
+
+static int db_select_delete_hash(Process *p,
+ DbTable *tbl,
+ Eterm pattern,
+ Eterm *ret)
+{
+ DbTableHash *tb = &tbl->hash;
+ struct mp_info mpi;
+ Uint slot_ix = 0;
+ HashDbTerm **current = NULL;
+ unsigned current_list_pos = 0;
+ Uint32 dummy;
+ Eterm *hp;
+ int num_left = 1000;
+ Uint got = 0;
+ Eterm continuation;
+ int errcode;
+ Uint last_pseudo_delete = (Uint)-1;
+ Eterm mpb;
+ Eterm egot;
+#ifdef ERTS_SMP
+ int fixated_by_me = tb->common.is_thread_safe ? 0 : 1; /* ToDo: something nicer */
+#else
+ int fixated_by_me = 0;
+#endif
+ erts_smp_rwmtx_t* lck;
+
+#define RET_TO_BIF(Term,RetVal) do { \
+ if (mpi.mp != NULL) { \
+ erts_bin_free(mpi.mp); \
+ } \
+ if (mpi.lists != mpi.dlists) { \
+ erts_free(ERTS_ALC_T_DB_SEL_LIST, \
+ (void *) mpi.lists); \
+ } \
+ *ret = (Term); \
+ return RetVal; \
+ } while(0)
+
+
+ if ((errcode = analyze_pattern(tb, pattern, &mpi)) != DB_ERROR_NONE) {
+ RET_TO_BIF(NIL,errcode);
+ }
+
+ if (!mpi.something_can_match) {
+ RET_TO_BIF(make_small(0), DB_ERROR_NONE);
+ /* can't possibly match anything */
+ }
+
+ if (!mpi.key_given) {
+ /* Run this code if pattern is variable or GETKEY(pattern) */
+ /* is a variable */
+ lck = WLOCK_HASH(tb,slot_ix);
+ current = &BUCKET(tb,slot_ix);
+ } else {
+ /* We have at least one */
+ slot_ix = mpi.lists[current_list_pos].ix;
+ lck = WLOCK_HASH(tb, slot_ix);
+ current = mpi.lists[current_list_pos++].bucket;
+ ASSERT(*current == BUCKET(tb,slot_ix));
+ }
+
+
+ for(;;) {
+ if ((*current) == NULL) {
+ if (mpi.key_given) { /* Key is bound */
+ WUNLOCK_HASH(lck);
+ if (current_list_pos == mpi.num_lists) {
+ goto done;
+ } else {
+ slot_ix = mpi.lists[current_list_pos].ix;
+ lck = WLOCK_HASH(tb, slot_ix);
+ current = mpi.lists[current_list_pos].bucket;
+ ASSERT(mpi.lists[current_list_pos].bucket == &BUCKET(tb,slot_ix));
+ ++current_list_pos;
+ }
+ } else {
+ if ((slot_ix=next_slot_w(tb,slot_ix,&lck)) == 0) {
+ goto done;
+ }
+ if (num_left <= 0) {
+ WUNLOCK_HASH(lck);
+ goto trap;
+ }
+ current = &BUCKET(tb,slot_ix);
+ }
+ }
+ else if ((*current)->hvalue == INVALID_HASH) {
+ current = &((*current)->next);
+ }
+ else {
+ int did_erase = 0;
+ if ((db_prog_match(p,mpi.mp,
+ make_tuple((*current)->dbterm.tpl),
+ 0,&dummy)) == am_true) {
+ if (NFIXED(tb) > fixated_by_me) { /* fixated by others? */
+ if (slot_ix != last_pseudo_delete) {
+ add_fixed_deletion(tb, slot_ix);
+ last_pseudo_delete = slot_ix;
+ }
+ (*current)->hvalue = INVALID_HASH;
+ } else {
+ HashDbTerm *del = *current;
+ *current = (*current)->next;
+ free_term(tb, del);
+ did_erase = 1;
+ }
+ erts_smp_atomic_dec(&tb->common.nitems);
+ ++got;
+ }
+ --num_left;
+ if (!did_erase) {
+ current = &((*current)->next);
+ }
+ }
+ }
+done:
+ BUMP_REDS(p, 1000 - num_left);
+ if (got) {
+ try_shrink(tb);
+ }
+ RET_TO_BIF(erts_make_integer(got,p),DB_ERROR_NONE);
+trap:
+ BUMP_ALL_REDS(p);
+ if (IS_USMALL(0, got)) {
+ hp = HAlloc(p, PROC_BIN_SIZE + 5);
+ egot = make_small(got);
+ }
+ else {
+ hp = HAlloc(p, BIG_UINT_HEAP_SIZE + PROC_BIN_SIZE + 5);
+ egot = uint_to_big(got, hp);
+ hp += BIG_UINT_HEAP_SIZE;
+ }
+ mpb = db_make_mp_binary(p,mpi.mp,&hp);
+ continuation = TUPLE4(hp, tb->common.id, make_small(slot_ix),
+ mpb,
+ egot);
+ mpi.mp = NULL; /*otherwise the return macro will destroy it */
+ RET_TO_BIF(bif_trap1(&ets_select_delete_continue_exp, p,
+ continuation),
+ DB_ERROR_NONE);
+
+#undef RET_TO_BIF
+
+}
+/*
+** This is called when select_delete traps
+*/
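+/* Its continuation (built by db_select_delete_hash above) is a 4-tuple:
+**   {TableId, SlotIx, CompiledMatchSpec, GotSoFar}
+** where GotSoFar may be a bignum, hence the is_big() test below.
+*/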
+static int db_select_delete_continue_hash(Process *p,
+ DbTable *tbl,
+ Eterm continuation,
+ Eterm *ret)
+{
+ DbTableHash *tb = &tbl->hash;
+ Uint slot_ix;
+ Uint last_pseudo_delete = (Uint)-1;
+ HashDbTerm **current = NULL;
+ Uint32 dummy;
+ Eterm *hp;
+ int num_left = 1000;
+ Uint got;
+ Eterm *tptr;
+ Binary *mp;
+ Eterm egot;
+ int fixated_by_me = ONLY_WRITER(p,tb) ? 0 : 1; /* ToDo: something nicer */
+ erts_smp_rwmtx_t* lck;
+
+#define RET_TO_BIF(Term,RetVal) do { \
+ *ret = (Term); \
+ return RetVal; \
+ } while(0)
+
+
+ tptr = tuple_val(continuation);
+ slot_ix = unsigned_val(tptr[2]);
+ mp = ((ProcBin *) binary_val(tptr[3]))->val;
+ if (is_big(tptr[4])) {
+ got = big_to_uint32(tptr[4]);
+ } else {
+ got = unsigned_val(tptr[4]);
+ }
+
+ lck = WLOCK_HASH(tb,slot_ix);
+ if (slot_ix >= NACTIVE(tb)) {
+ WUNLOCK_HASH(lck);
+ goto done;
+ }
+ current = &BUCKET(tb,slot_ix);
+
+ for(;;) {
+ if ((*current) == NULL) {
+ if ((slot_ix=next_slot_w(tb,slot_ix,&lck)) == 0) {
+ goto done;
+ }
+ if (num_left <= 0) {
+ WUNLOCK_HASH(lck);
+ goto trap;
+ }
+ current = &BUCKET(tb,slot_ix);
+ }
+ else if ((*current)->hvalue == INVALID_HASH) {
+ current = &((*current)->next);
+ }
+ else {
+ int did_erase = 0;
+ if ((db_prog_match(p,mp,make_tuple((*current)->dbterm.tpl),
+ 0,&dummy)) == am_true) {
+ if (NFIXED(tb) > fixated_by_me) { /* fixated by others? */
+ if (slot_ix != last_pseudo_delete) {
+ add_fixed_deletion(tb, slot_ix);
+ last_pseudo_delete = slot_ix;
+ }
+ (*current)->hvalue = INVALID_HASH;
+ } else {
+ HashDbTerm *del = *current;
+ *current = (*current)->next;
+ free_term(tb, del);
+ did_erase = 1;
+ }
+ erts_smp_atomic_dec(&tb->common.nitems);
+ ++got;
+ }
+
+ --num_left;
+ if (!did_erase) {
+ current = &((*current)->next);
+ }
+ }
+ }
+done:
+ BUMP_REDS(p, 1000 - num_left);
+ if (got) {
+ try_shrink(tb);
+ }
+ RET_TO_BIF(erts_make_integer(got,p),DB_ERROR_NONE);
+trap:
+ BUMP_ALL_REDS(p);
+ if (IS_USMALL(0, got)) {
+ hp = HAlloc(p, 5);
+ egot = make_small(got);
+ }
+ else {
+ hp = HAlloc(p, BIG_UINT_HEAP_SIZE + 5);
+ egot = uint_to_big(got, hp);
+ hp += BIG_UINT_HEAP_SIZE;
+ }
+ continuation = TUPLE4(hp, tb->common.id, make_small(slot_ix),
+ tptr[3],
+ egot);
+ RET_TO_BIF(bif_trap1(&ets_select_delete_continue_exp, p,
+ continuation),
+ DB_ERROR_NONE);
+
+#undef RET_TO_BIF
+
+}
+
+/*
+** This is called when select_count traps
+*/
+static int db_select_count_continue_hash(Process *p,
+ DbTable *tbl,
+ Eterm continuation,
+ Eterm *ret)
+{
+ DbTableHash *tb = &tbl->hash;
+ Uint slot_ix;
+ HashDbTerm* current;
+ Uint32 dummy;
+ Eterm *hp;
+ int num_left = 1000;
+ Uint got;
+ Eterm *tptr;
+ Binary *mp;
+ Eterm egot;
+ erts_smp_rwmtx_t* lck;
+
+#define RET_TO_BIF(Term,RetVal) do { \
+ *ret = (Term); \
+ return RetVal; \
+ } while(0)
+
+
+ tptr = tuple_val(continuation);
+ slot_ix = unsigned_val(tptr[2]);
+ mp = ((ProcBin *) binary_val(tptr[3]))->val;
+ if (is_big(tptr[4])) {
+ got = big_to_uint32(tptr[4]);
+ } else {
+ got = unsigned_val(tptr[4]);
+ }
+
+
+ lck = RLOCK_HASH(tb, slot_ix);
+    if (slot_ix >= NACTIVE(tb)) { /* Is this possible? */
+ RUNLOCK_HASH(lck);
+ goto done;
+ }
+ current = BUCKET(tb,slot_ix);
+
+ for(;;) {
+ if (current != NULL) {
+ if (current->hvalue == INVALID_HASH) {
+ current = current->next;
+ continue;
+ }
+ if (db_prog_match(p, mp, make_tuple(current->dbterm.tpl),
+ 0,&dummy) == am_true) {
+ ++got;
+ }
+ --num_left;
+ current = current->next;
+ }
+ else { /* next bucket */
+ if ((slot_ix = next_slot(tb,slot_ix,&lck)) == 0) {
+ goto done;
+ }
+ if (num_left <= 0) {
+ RUNLOCK_HASH(lck);
+ goto trap;
+ }
+ current = BUCKET(tb,slot_ix);
+ }
+ }
+done:
+ BUMP_REDS(p, 1000 - num_left);
+ RET_TO_BIF(erts_make_integer(got,p),DB_ERROR_NONE);
+trap:
+ BUMP_ALL_REDS(p);
+ if (IS_USMALL(0, got)) {
+ hp = HAlloc(p, 5);
+ egot = make_small(got);
+ }
+ else {
+ hp = HAlloc(p, BIG_UINT_HEAP_SIZE + 5);
+ egot = uint_to_big(got, hp);
+ hp += BIG_UINT_HEAP_SIZE;
+ }
+ continuation = TUPLE4(hp, tb->common.id, make_small(slot_ix),
+ tptr[3],
+ egot);
+ RET_TO_BIF(bif_trap1(&ets_select_count_continue_exp, p,
+ continuation),
+ DB_ERROR_NONE);
+
+#undef RET_TO_BIF
+
+}
+
+/*
+** Other interface routines (not directly coupled to one bif)
+*/
+
+void db_initialize_hash(void)
+{
+}
+
+
+int db_mark_all_deleted_hash(DbTable *tbl)
+{
+ DbTableHash *tb = &tbl->hash;
+ HashDbTerm* list;
+ int i;
+
+ ERTS_SMP_LC_ASSERT(IS_TAB_WLOCKED(tb));
+
+ for (i = 0; i < NACTIVE(tb); i++) {
+ if ((list = BUCKET(tb,i)) != NULL) {
+ add_fixed_deletion(tb, i);
+ do {
+ list->hvalue = INVALID_HASH;
+ list = list->next;
+ }while(list != NULL);
+ }
+ }
+ erts_smp_atomic_set(&tb->common.nitems, 0);
+ return DB_ERROR_NONE;
+}
+
+
+/* Display hash table contents (for dump) */
+static void db_print_hash(int to, void *to_arg, int show, DbTable *tbl)
+{
+ DbTableHash *tb = &tbl->hash;
+ int i;
+
+ erts_print(to, to_arg, "Buckets: %d \n", NACTIVE(tb));
+
+ if (show) {
+ for (i = 0; i < NACTIVE(tb); i++) {
+ HashDbTerm* list = BUCKET(tb,i);
+ if (list == NULL)
+ continue;
+ erts_print(to, to_arg, "%d: [", i);
+ while(list != 0) {
+ if (list->hvalue == INVALID_HASH)
+ erts_print(to, to_arg, "*");
+ erts_print(to, to_arg, "%T", make_tuple(list->dbterm.tpl));
+ if (list->next != 0)
+ erts_print(to, to_arg, ",");
+ list = list->next;
+ }
+ erts_print(to, to_arg, "]\n");
+ }
+ }
+}
+
+/* release all memory occupied by a single table */
+static int db_free_table_hash(DbTable *tbl)
+{
+ while (!db_free_table_continue_hash(tbl))
+ ;
+ return 0;
+}
+
+static int db_free_table_continue_hash(DbTable *tbl)
+{
+ DbTableHash *tb = &tbl->hash;
+ int done;
+ FixedDeletion* fixdel = (FixedDeletion*) erts_smp_atomic_read(&tb->fixdel);
+ ERTS_SMP_LC_ASSERT(IS_TAB_WLOCKED(tb));
+
+ done = 0;
+ while (fixdel != NULL) {
+ FixedDeletion *fx = fixdel;
+
+ fixdel = fx->next;
+ erts_db_free(ERTS_ALC_T_DB_FIX_DEL,
+ (DbTable *) tb,
+ (void *) fx,
+ sizeof(FixedDeletion));
+ ERTS_ETS_MISC_MEM_ADD(-sizeof(FixedDeletion));
+ if (++done >= 2*DELETE_RECORD_LIMIT) {
+ erts_smp_atomic_set(&tb->fixdel, (long)fixdel);
+ return 0; /* Not done */
+ }
+ }
+ erts_smp_atomic_set(&tb->fixdel, (long)NULL);
+
+ done /= 2;
+ while(tb->nslots != 0) {
+ free_seg(tb, 1);
+
+ /*
+ * If we have done enough work, get out here.
+ */
+ if (++done >= (DELETE_RECORD_LIMIT / CHAIN_LEN / SEGSZ)) {
+ return 0; /* Not done */
+ }
+ }
+#ifdef ERTS_SMP
+ if (tb->locks != NULL) {
+ int i;
+ for (i=0; i<DB_HASH_LOCK_CNT; ++i) {
+ erts_rwmtx_destroy(GET_LOCK(tb,i));
+ }
+ erts_db_free(ERTS_ALC_T_DB_SEG, (DbTable *)tb,
+ (void*)tb->locks, sizeof(DbTableHashFineLocks));
+ tb->locks = NULL;
+ }
+#endif
+ ASSERT(erts_smp_atomic_read(&tb->common.memory_size) == sizeof(DbTable));
+ return 1; /* Done */
+}
+
+
+
+/*
+** Utility routines. (static)
+*/
+/*
+** For the select functions, analyzes the pattern and determines which
+** slots should be searched. Also compiles the match program
+*/
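+/* Example: for ets:select(T, [{{some_key,'$1'}, [], ['$_']}]) on a table
+** with keypos 1, the key position of the head is bound, so key_given stays
+** set and only the bucket that can contain some_key is searched; the ['$_']
+** body keeps all_objects set, so matches can be returned with copy_shallow().
+*/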
+static int analyze_pattern(DbTableHash *tb, Eterm pattern,
+ struct mp_info *mpi)
+{
+ Eterm *ptpl;
+ Eterm lst, tpl, ttpl;
+ Eterm *matches,*guards, *bodies;
+ Eterm sbuff[30];
+ Eterm *buff = sbuff;
+ Eterm key = NIL;
+ HashValue hval = NIL;
+ int num_heads = 0;
+ int i;
+
+ mpi->lists = mpi->dlists;
+ mpi->num_lists = 0;
+ mpi->key_given = 1;
+ mpi->something_can_match = 0;
+ mpi->all_objects = 1;
+ mpi->mp = NULL;
+
+ for (lst = pattern; is_list(lst); lst = CDR(list_val(lst)))
+ ++num_heads;
+
+    if (lst != NIL) { /* not a proper list */
+ return DB_ERROR_BADPARAM;
+ }
+
+ if (num_heads > 10) {
+ buff = erts_alloc(ERTS_ALC_T_DB_TMP, sizeof(Eterm) * num_heads * 3);
+ mpi->lists = erts_alloc(ERTS_ALC_T_DB_SEL_LIST,
+ sizeof(*(mpi->lists)) * num_heads);
+ }
+
+ matches = buff;
+ guards = buff + num_heads;
+ bodies = buff + (num_heads * 2);
+
+ i = 0;
+ for(lst = pattern; is_list(lst); lst = CDR(list_val(lst))) {
+ Eterm body;
+ ttpl = CAR(list_val(lst));
+ if (!is_tuple(ttpl)) {
+ if (buff != sbuff) {
+ erts_free(ERTS_ALC_T_DB_TMP, buff);
+ }
+ return DB_ERROR_BADPARAM;
+ }
+ ptpl = tuple_val(ttpl);
+ if (ptpl[0] != make_arityval(3U)) {
+ if (buff != sbuff) {
+ erts_free(ERTS_ALC_T_DB_TMP, buff);
+ }
+ return DB_ERROR_BADPARAM;
+ }
+ matches[i] = tpl = ptpl[1];
+ guards[i] = ptpl[2];
+ bodies[i] = body = ptpl[3];
+ if (!is_list(body) || CDR(list_val(body)) != NIL ||
+ CAR(list_val(body)) != am_DollarUnderscore) {
+ mpi->all_objects = 0;
+ }
+ ++i;
+ if (!(mpi->key_given)) {
+ continue;
+ }
+ if (tpl == am_Underscore || db_is_variable(tpl) != -1) {
+ (mpi->key_given) = 0;
+ (mpi->something_can_match) = 1;
+ } else {
+ key = db_getkey(tb->common.keypos, tpl);
+ if (is_value(key)) {
+ if (!db_has_variable(key)) { /* Bound key */
+ int ix, search_slot;
+ HashDbTerm** bp;
+ erts_smp_rwmtx_t* lck;
+ hval = MAKE_HASH(key);
+ lck = RLOCK_HASH(tb,hval);
+ ix = hash_to_ix(tb, hval);
+ bp = &BUCKET(tb,ix);
+ if (lck == NULL) {
+ search_slot = search_list(tb,key,hval,*bp) != NULL;
+ } else {
+			/* No point in verifying whether the key exists now,
+			   as there may be concurrent inserters/deleters anyway */
+ RUNLOCK_HASH(lck);
+ search_slot = 1;
+ }
+ if (search_slot) {
+ int j;
+ for (j=0; ; ++j) {
+ if (j == mpi->num_lists) {
+ mpi->lists[mpi->num_lists].bucket = bp;
+ mpi->lists[mpi->num_lists].ix = ix;
+ ++mpi->num_lists;
+ break;
+ }
+ if (mpi->lists[j].bucket == bp) {
+ ASSERT(mpi->lists[j].ix == ix);
+ break;
+ }
+ ASSERT(mpi->lists[j].ix != ix);
+ }
+ mpi->something_can_match = 1;
+ }
+ } else {
+ mpi->key_given = 0;
+ mpi->something_can_match = 1;
+ }
+ }
+ }
+ }
+
+ /*
+ * It would be nice not to compile the match_spec if nothing could match,
+     * but then the select calls would not fail as they should on bad
+     * match specs that happen to specify non-existent keys etc.
+ */
+ if ((mpi->mp = db_match_compile(matches, guards, bodies,
+ num_heads, DCOMP_TABLE, NULL))
+ == NULL) {
+ if (buff != sbuff) {
+ erts_free(ERTS_ALC_T_DB_TMP, buff);
+ }
+ return DB_ERROR_BADPARAM;
+ }
+ if (buff != sbuff) {
+ erts_free(ERTS_ALC_T_DB_TMP, buff);
+ }
+ return DB_ERROR_NONE;
+}
+
+static struct ext_segment* alloc_ext_seg(DbTableHash* tb, unsigned seg_ix,
+ struct segment** old_segtab)
+{
+ int nsegs;
+ struct ext_segment* eseg;
+
+ switch (seg_ix) {
+ case 0: nsegs = NSEG_1; break;
+ case 1: nsegs = NSEG_2; break;
+ default: nsegs = seg_ix + NSEG_INC; break;
+ }
+ eseg = (struct ext_segment*) erts_db_alloc_fnf(ERTS_ALC_T_DB_SEG,
+ (DbTable *) tb,
+ SIZEOF_EXTSEG(nsegs));
+ ASSERT(eseg != NULL);
+ sys_memset(&eseg->s, 0, sizeof(struct segment));
+ IF_DEBUG(eseg->s.is_ext_segment = 1);
+ eseg->prev_segtab = old_segtab;
+ eseg->nsegs = nsegs;
+ if (old_segtab) {
+ ASSERT(nsegs > tb->nsegs);
+ sys_memcpy(eseg->segtab, old_segtab, tb->nsegs*sizeof(struct segment*));
+ }
+#ifdef DEBUG
+ sys_memset(&eseg->segtab[seg_ix], 0, (nsegs-seg_ix)*sizeof(struct segment*));
+#endif
+ eseg->segtab[seg_ix] = &eseg->s;
+ return eseg;
+}
+
+/* Extend table with one new segment
+*/
+static int alloc_seg(DbTableHash *tb)
+{
+ int seg_ix = tb->nslots >> SEGSZ_EXP;
+
+ if (seg_ix+1 == tb->nsegs) { /* New segtab needed (extended segment) */
+ struct segment** segtab = SEGTAB(tb);
+ struct ext_segment* seg = alloc_ext_seg(tb, seg_ix, segtab);
+ if (seg == NULL) return 0;
+ segtab[seg_ix] = &seg->s;
+ /* We don't use the new segtab until next call (see "shrink race") */
+ }
+ else { /* Just a new plain segment */
+ struct segment** segtab;
+	if (seg_ix == tb->nsegs) { /* Time to start using the segtab from the last call */
+ struct ext_segment* eseg;
+ eseg = (struct ext_segment*) SEGTAB(tb)[seg_ix-1];
+ MY_ASSERT(eseg!=NULL && eseg->s.is_ext_segment);
+ erts_smp_atomic_set(&tb->segtab, (long) eseg->segtab);
+ tb->nsegs = eseg->nsegs;
+ }
+ ASSERT(seg_ix < tb->nsegs);
+ segtab = SEGTAB(tb);
+ ASSERT(segtab[seg_ix] == NULL);
+ segtab[seg_ix] = (struct segment*) erts_db_alloc_fnf(ERTS_ALC_T_DB_SEG,
+ (DbTable *) tb,
+ sizeof(struct segment));
+ if (segtab[seg_ix] == NULL) return 0;
+ sys_memset(segtab[seg_ix], 0, sizeof(struct segment));
+ }
+ tb->nslots += SEGSZ;
+ return 1;
+}
+
+/* Shrink table by freeing the top segment
+** free_records: 1=free any records in segment, 0=assume segment is empty
+*/
+static int free_seg(DbTableHash *tb, int free_records)
+{
+ int seg_ix = (tb->nslots >> SEGSZ_EXP) - 1;
+ int bytes;
+ struct segment** segtab = SEGTAB(tb);
+ struct ext_segment* top = (struct ext_segment*) segtab[seg_ix];
+ int nrecords = 0;
+
+ ASSERT(top != NULL);
+#ifndef DEBUG
+ if (free_records)
+#endif
+ {
+ int i;
+ for (i=0; i<SEGSZ; ++i) {
+ HashDbTerm* p = top->s.buckets[i];
+ while(p != 0) {
+ HashDbTerm* nxt = p->next;
+ ASSERT(free_records); /* segment not empty as assumed? */
+ free_term(tb, p);
+ p = nxt;
+ ++nrecords;
+ }
+ }
+ }
+
+    /* The "shrink race":
+     * We must avoid deallocating an extended segment while its segtab may
+     * still be used by other threads.
+     * The trick is to stop using a segtab one call earlier. That is, stop
+     * using a segtab when the segment above it is deallocated. When the
+     * segtab is later deallocated, it has not been used for a very long
+     * time.
+     * It is even theoretically safe, as by then the entire segment has been
+     * rehashed, seizing *all* locks, so there cannot be any lagging threads
+     * still hanging in the BUCKET macro with an old segtab pointer.
+     * For this to work, we must of course allocate a new segtab one call
+     * earlier in alloc_seg() as well. And this is also the reason why
+     * the minimum size of the first segtab is 2 and not 1 (NSEG_1).
+     */
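+    /* A hypothetical timeline of the trick above (E = index of an
+     * extended segment):
+     *   alloc_seg(E)   : new larger segtab allocated, SEGTAB(tb) unchanged
+     *   alloc_seg(E+1) : SEGTAB(tb) switched to the segtab from call E
+     *   ...
+     *   free_seg(E+1)  : SEGTAB(tb) switched back to the previous segtab
+     *   free_seg(E)    : only now is the extended segment, including its
+     *                    long-unused segtab, deallocated
+     */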
+
+ if (seg_ix == tb->nsegs-1 || seg_ix==0) { /* Dealloc extended segment */
+ MY_ASSERT(top->s.is_ext_segment);
+ ASSERT(segtab != top->segtab || seg_ix==0);
+ bytes = SIZEOF_EXTSEG(top->nsegs);
+ }
+ else { /* Dealloc plain segment */
+ struct ext_segment* newtop = (struct ext_segment*) segtab[seg_ix-1];
+ MY_ASSERT(!top->s.is_ext_segment);
+
+ if (segtab == newtop->segtab) { /* New top segment is extended */
+ MY_ASSERT(newtop->s.is_ext_segment);
+ if (newtop->prev_segtab != NULL) {
+ /* Time to use a smaller segtab */
+ erts_smp_atomic_set(&tb->segtab, (long)newtop->prev_segtab);
+ tb->nsegs = seg_ix;
+ ASSERT(tb->nsegs == EXTSEG(SEGTAB(tb))->nsegs);
+ }
+ else {
+ ASSERT(NSEG_1 > 2 && seg_ix==1);
+ }
+ }
+ bytes = sizeof(struct segment);
+ }
+
+ erts_db_free(ERTS_ALC_T_DB_SEG, (DbTable *)tb,
+ (void*)top, bytes);
+#ifdef DEBUG
+ if (seg_ix > 0) {
+ if (seg_ix < tb->nsegs) SEGTAB(tb)[seg_ix] = NULL;
+ } else {
+ erts_smp_atomic_set(&tb->segtab, (long)NULL);
+ }
+#endif
+ tb->nslots -= SEGSZ;
+ ASSERT(tb->nslots >= 0);
+ return nrecords;
+}
+
+
+static HashDbTerm* get_term(DbTableHash* tb, HashDbTerm* old,
+ Eterm obj, HashValue hval)
+{
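+    /* Note: the third argument to db_get_term() is the byte offset of
+     * dbterm within HashDbTerm; the pointer arithmetic below is an
+     * offsetof-style computation, performed even when old == NULL. */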
+ HashDbTerm* p = db_get_term((DbTableCommon *) tb,
+ (old != NULL) ? &(old->dbterm) : NULL,
+ ((char *) &(old->dbterm)) - ((char *) old),
+ obj);
+ p->hvalue = hval;
+    /* p->next = NULL; */ /* no need, the caller links the term in */
+ return p;
+}
+
+
+/*
+** Copy the terms from ptr1 up to (but not including) ptr2.
+** Works for ptr1 == ptr2 == 0 => []
+** and for ptr2 == 0.
+*/
+static Eterm put_term_list(Process* p, HashDbTerm* ptr1, HashDbTerm* ptr2)
+{
+ int sz = 0;
+ HashDbTerm* ptr;
+ Eterm list = NIL;
+ Eterm copy;
+ Eterm *hp;
+
+ ptr = ptr1;
+ while(ptr != ptr2) {
+
+ if (ptr->hvalue != INVALID_HASH)
+ sz += ptr->dbterm.size + 2;
+
+ ptr = ptr->next;
+ }
+
+ hp = HAlloc(p, sz);
+
+ ptr = ptr1;
+ while(ptr != ptr2) {
+ if (ptr->hvalue != INVALID_HASH) {
+ copy = copy_shallow(DBTERM_BUF(&ptr->dbterm), ptr->dbterm.size, &hp, &MSO(p));
+ list = CONS(hp, copy, list);
+ hp += 2;
+ }
+ ptr = ptr->next;
+ }
+ return list;
+}
+
+static void free_term(DbTableHash *tb, HashDbTerm* p)
+{
+ db_free_term_data(&(p->dbterm));
+ erts_db_free(ERTS_ALC_T_DB_TERM,
+ (DbTable *) tb,
+ (void *) p,
+ SIZ_DBTERM(p)*sizeof(Eterm));
+}
+
+/* Grow table with one new bucket.
+** Allocate new segment if needed.
+*/
+static void grow(DbTableHash* tb, int nactive)
+{
+ HashDbTerm** pnext;
+ HashDbTerm** to_pnext;
+ HashDbTerm* p;
+ erts_smp_rwmtx_t* lck;
+ int from_ix;
+ int szm;
+
+ if (erts_smp_atomic_xchg(&tb->is_resizing, 1)) {
+ return; /* already in progress */
+ }
+ if (NACTIVE(tb) != nactive) {
+ goto abort; /* already done (race) */
+ }
+
+ /* Ensure that the slot nactive exists */
+ if (nactive == tb->nslots) {
+ /* Time to get a new segment */
+ ASSERT((nactive & SEGSZ_MASK) == 0);
+ if (!alloc_seg(tb)) goto abort;
+ }
+ ASSERT(nactive < tb->nslots);
+
+ szm = erts_smp_atomic_read(&tb->szm);
+ if (nactive <= szm) {
+ from_ix = nactive & (szm >> 1);
+ } else {
+ ASSERT(nactive == szm+1);
+ from_ix = 0;
+ szm = (szm<<1) | 1;
+ }
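+    /* Worked example: with szm == 7 and nactive == 5, from_ix = 5 & 3 = 1.
+     * Bucket 1 is split: entries whose hvalue & 7 == 5 move to the new
+     * bucket 5, the rest stay in bucket 1. Once nactive reaches szm+1 (8),
+     * splitting restarts at from_ix 0 and szm doubles to 15.
+     */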
+
+ lck = WLOCK_HASH(tb, from_ix);
+ /* Now a final double check (with the from_ix lock held)
+ * that we did not get raced by a table fixer.
+ */
+ if (IS_FIXED(tb)) {
+ WUNLOCK_HASH(lck);
+ goto abort;
+ }
+ erts_smp_atomic_inc(&tb->nactive);
+ if (from_ix == 0) {
+ erts_smp_atomic_set(&tb->szm, szm);
+ }
+ erts_smp_atomic_set(&tb->is_resizing, 0);
+
+ /* Finally, let's split the bucket. We try to do it in a smart way
+ to keep link order and avoid unnecessary updates of next-pointers */
+ pnext = &BUCKET(tb, from_ix);
+ p = *pnext;
+ to_pnext = &BUCKET(tb, nactive);
+ while (p != NULL) {
+ if (p->hvalue == INVALID_HASH) { /* rare but possible with fine locking */
+ *pnext = p->next;
+ free_term(tb, p);
+ p = *pnext;
+ }
+ else {
+ int ix = p->hvalue & szm;
+ if (ix != from_ix) {
+ ASSERT(ix == (from_ix ^ ((szm+1)>>1)));
+ *to_pnext = p;
+ /* Swap "from" and "to": */
+ from_ix = ix;
+ to_pnext = pnext;
+ }
+ pnext = &p->next;
+ p = *pnext;
+ }
+ }
+ *to_pnext = NULL;
+
+ WUNLOCK_HASH(lck);
+ return;
+
+abort:
+ erts_smp_atomic_set(&tb->is_resizing, 0);
+}
+
+
+/* Shrink table by joining top bucket.
+** Remove top segment if it gets empty.
+*/
+static void shrink(DbTableHash* tb, int nactive)
+{
+ if (erts_smp_atomic_xchg(&tb->is_resizing, 1)) {
+ return; /* already in progress */
+ }
+ if (NACTIVE(tb) == nactive) {
+ erts_smp_rwmtx_t* lck;
+ int src_ix = nactive - 1;
+ int low_szm = erts_smp_atomic_read(&tb->szm) >> 1;
+ int dst_ix = src_ix & low_szm;
+
+ ASSERT(dst_ix < src_ix);
+ ASSERT(nactive > SEGSZ);
+ lck = WLOCK_HASH(tb, dst_ix);
+ /* Double check for racing table fixers */
+ if (!IS_FIXED(tb)) {
+ HashDbTerm** src_bp = &BUCKET(tb, src_ix);
+ HashDbTerm** dst_bp = &BUCKET(tb, dst_ix);
+ HashDbTerm** bp = src_bp;
+
+ /* Q: Why join lists by appending "dst" at the end of "src"?
+ A: Must step through "src" anyway to purge pseudo deleted. */
+ while(*bp != NULL) {
+ if ((*bp)->hvalue == INVALID_HASH) {
+ HashDbTerm* deleted = *bp;
+ *bp = deleted->next;
+ free_term(tb, deleted);
+ } else {
+ bp = &(*bp)->next;
+ }
+ }
+ *bp = *dst_bp;
+ *dst_bp = *src_bp;
+ *src_bp = NULL;
+
+ erts_smp_atomic_set(&tb->nactive, src_ix);
+ if (dst_ix == 0) {
+ erts_smp_atomic_set(&tb->szm, low_szm);
+ }
+ WUNLOCK_HASH(lck);
+
+ if (tb->nslots - src_ix >= SEGSZ) {
+ free_seg(tb, 0);
+ }
+ }
+ else {
+ WUNLOCK_HASH(lck);
+ }
+
+ }
+ /*else already done */
+ erts_smp_atomic_set(&tb->is_resizing, 0);
+}
+
+
+/* Search a list of tuples for a matching key */
+
+static HashDbTerm* search_list(DbTableHash* tb, Eterm key,
+ HashValue hval, HashDbTerm *list)
+{
+ while (list != 0) {
+ if (has_live_key(tb,list,key,hval))
+ return list;
+ list = list->next;
+ }
+ return 0;
+}
+
+
+/* This function is called by the next AND the select BIF */
+/* It returns the next live object in a table, NULL if no more */
+/* In-bucket: RLOCKED */
+/* Out-bucket: RLOCKED unless NULL */
+static HashDbTerm* next(DbTableHash *tb, Uint *iptr, erts_smp_rwmtx_t** lck_ptr,
+ HashDbTerm *list)
+{
+ int i;
+
+ ERTS_SMP_LC_ASSERT(IS_HASH_RLOCKED(tb,*iptr));
+
+ for (list = list->next; list != NULL; list = list->next) {
+ if (list->hvalue != INVALID_HASH)
+ return list;
+ }
+
+ i = *iptr;
+ while ((i=next_slot(tb, i, lck_ptr)) != 0) {
+
+ list = BUCKET(tb,i);
+ while (list != NULL) {
+ if (list->hvalue != INVALID_HASH) {
+ *iptr = i;
+ return list;
+ }
+ list = list->next;
+ }
+ }
+    /* *iptr is not updated when NULL is returned */
+ return NULL;
+}
+
+static int db_lookup_dbterm_hash(DbTable *tbl, Eterm key, DbUpdateHandle* handle)
+{
+ DbTableHash *tb = &tbl->hash;
+ HashDbTerm* b;
+ HashDbTerm** prevp;
+ int ix;
+ HashValue hval;
+ erts_smp_rwmtx_t* lck;
+
+ hval = MAKE_HASH(key);
+ lck = WLOCK_HASH(tb,hval);
+ ix = hash_to_ix(tb, hval);
+ prevp = &BUCKET(tb, ix);
+ b = *prevp;
+
+ while (b != 0) {
+ if (has_live_key(tb,b,key,hval)) {
+ handle->tb = tbl;
+ handle->bp = (void**) prevp;
+ handle->dbterm = &b->dbterm;
+ handle->new_size = b->dbterm.size;
+ handle->mustResize = 0;
+ handle->lck = lck;
+ /* KEEP hval WLOCKED, db_finalize_dbterm_hash will WUNLOCK */
+ return 1;
+ }
+ prevp = &b->next;
+ b = *prevp;
+ }
+ WUNLOCK_HASH(lck);
+ return 0;
+}
+
+/* Must be called after a call to db_lookup_dbterm
+*/
+static void db_finalize_dbterm_hash(DbUpdateHandle* handle)
+{
+ DbTable* tbl = handle->tb;
+ HashDbTerm* oldp = (HashDbTerm*) *(handle->bp);
+ erts_smp_rwmtx_t* lck = (erts_smp_rwmtx_t*) handle->lck;
+
+ ERTS_SMP_LC_ASSERT(IS_HASH_WLOCKED(&tbl->hash,lck)); /* locked by db_lookup_dbterm_hash */
+ ASSERT(&oldp->dbterm == handle->dbterm);
+
+ if (handle->mustResize) {
+ Eterm* top;
+ Eterm copy;
+ DbTerm* newDbTerm;
+ HashDbTerm* newp = erts_db_alloc(ERTS_ALC_T_DB_TERM, tbl,
+ sizeof(HashDbTerm)+sizeof(Eterm)*(handle->new_size-1));
+ sys_memcpy(newp, oldp, sizeof(HashDbTerm)-sizeof(DbTerm)); /* copy only hashtab header */
+ *(handle->bp) = newp;
+ newDbTerm = &newp->dbterm;
+
+ newDbTerm->size = handle->new_size;
+ newDbTerm->off_heap.mso = NULL;
+ newDbTerm->off_heap.externals = NULL;
+ #ifndef HYBRID /* FIND ME! */
+ newDbTerm->off_heap.funs = NULL;
+ #endif
+ newDbTerm->off_heap.overhead = 0;
+
+ /* make a flat copy */
+ top = DBTERM_BUF(newDbTerm);
+ copy = copy_struct(make_tuple(handle->dbterm->tpl),
+ handle->new_size,
+ &top, &newDbTerm->off_heap);
+ DBTERM_SET_TPL(newDbTerm,tuple_val(copy));
+
+ WUNLOCK_HASH(lck);
+
+ db_free_term_data(handle->dbterm);
+ erts_db_free(ERTS_ALC_T_DB_TERM, tbl,
+ (void *) (((char *) handle->dbterm) - (sizeof(HashDbTerm) - sizeof(DbTerm))),
+ sizeof(HashDbTerm) + sizeof(Eterm)*(handle->dbterm->size-1));
+ }
+ else {
+ WUNLOCK_HASH(lck);
+ }
+#ifdef DEBUG
+ handle->dbterm = 0;
+#endif
+ return;
+}
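+
+/* A minimal usage sketch of the update-in-place protocol above (variable
+ * names are illustrative only):
+ *
+ *     DbUpdateHandle handle;
+ *     if (db_lookup_dbterm_hash(tbl, key, &handle)) {
+ *         // mutate handle.dbterm in place, or set handle.new_size and
+ *         // handle.mustResize = 1 if the term must be reallocated
+ *         db_finalize_dbterm_hash(&handle); // also releases the bucket lock
+ *     }
+ */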
+
+static int db_delete_all_objects_hash(Process* p, DbTable* tbl)
+{
+ if (IS_FIXED(tbl)) {
+ db_mark_all_deleted_hash(tbl);
+ } else {
+ db_free_table_hash(tbl);
+ db_create_hash(p, tbl);
+ erts_smp_atomic_set(&tbl->hash.common.nitems, 0);
+ }
+ return 0;
+}
+
+void db_foreach_offheap_hash(DbTable *tbl,
+ void (*func)(ErlOffHeap *, void *),
+ void * arg)
+{
+ DbTableHash *tb = &tbl->hash;
+ HashDbTerm* list;
+ int i;
+ int nactive = NACTIVE(tb);
+
+ for (i = 0; i < nactive; i++) {
+ list = BUCKET(tb,i);
+ while(list != 0) {
+ (*func)(&(list->dbterm.off_heap), arg);
+ list = list->next;
+ }
+ }
+}
+
+void db_calc_stats_hash(DbTableHash* tb, DbHashStats* stats)
+{
+ HashDbTerm* b;
+ erts_smp_rwmtx_t* lck;
+ int sum = 0;
+ int sq_sum = 0;
+ int ix;
+ int len;
+
+ stats->min_chain_len = INT_MAX;
+ stats->max_chain_len = 0;
+ ix = 0;
+ lck = RLOCK_HASH(tb,ix);
+ do {
+ len = 0;
+ for (b = BUCKET(tb,ix); b!=NULL; b=b->next) {
+ len++;
+ }
+ sum += len;
+ sq_sum += len*len;
+ if (len < stats->min_chain_len) stats->min_chain_len = len;
+ if (len > stats->max_chain_len) stats->max_chain_len = len;
+ ix = next_slot(tb,ix,&lck);
+ }while (ix);
+ stats->avg_chain_len = (float)sum / NACTIVE(tb);
+ stats->std_dev_chain_len = sqrt((sq_sum - stats->avg_chain_len*sum) / NACTIVE(tb));
+    /* Expected standard deviation from a good uniform hash function,
+       i.e. a binomial distribution (not taking the linear hashing into account) */
+ stats->std_dev_expected = sqrt(stats->avg_chain_len * (1 - 1.0/NACTIVE(tb)));
+}
+#ifdef HARDDEBUG
+
+void db_check_table_hash(DbTable *tbl)
+{
+ DbTableHash *tb = &tbl->hash;
+ HashDbTerm* list;
+ int j;
+
+ for (j = 0; j < tb->nactive; j++) {
+ if ((list = BUCKET(tb,j)) != 0) {
+ while (list != 0) {
+ if (!is_tuple(make_tuple(list->dbterm.tpl))) {
+ erl_exit(1, "Bad term in slot %d of ets table", j);
+ }
+ list = list->next;
+ }
+ }
+ }
+}
+
+#endif
diff --git a/erts/emulator/beam/erl_db_hash.h b/erts/emulator/beam/erl_db_hash.h
new file mode 100644
index 0000000000..e0285fa5ed
--- /dev/null
+++ b/erts/emulator/beam/erl_db_hash.h
@@ -0,0 +1,103 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 1998-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+#ifndef _DB_HASH_H
+#define _DB_HASH_H
+
+#include "erl_db_util.h" /* DbTerm & DbTableCommon */
+
+typedef struct fixed_deletion {
+ int slot;
+ struct fixed_deletion *next;
+} FixedDeletion;
+
+typedef struct hash_db_term {
+ struct hash_db_term* next; /* next bucket */
+ HashValue hvalue; /* stored hash value */
+ DbTerm dbterm; /* The actual term */
+} HashDbTerm;
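+
+/* While a table is fixed, erased terms are only marked as deleted by
+   setting hvalue to INVALID_HASH; the affected slots are recorded in the
+   fixdel list below and the terms are physically unlinked later (see
+   db_unfix_table_hash()). */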
+
+#define DB_HASH_LOCK_CNT 16
+typedef struct db_table_hash_fine_locks {
+ union {
+ erts_smp_rwmtx_t lck;
+ byte _cache_line_alignment[64];
+ }lck_vec[DB_HASH_LOCK_CNT];
+} DbTableHashFineLocks;
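+
+/* Each lock is padded to 64 bytes so that no two locks share a cache
+   line, avoiding false sharing between concurrently used buckets. */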
+
+typedef struct db_table_hash {
+ DbTableCommon common;
+
+ erts_smp_atomic_t segtab; /* The segment table (struct segment**) */
+ erts_smp_atomic_t szm; /* current size mask. */
+
+ /* SMP: nslots and nsegs are protected by is_resizing or table write lock */
+ int nslots; /* Total number of slots */
+ int nsegs; /* Size of segment table */
+
+ /* List of slots where elements have been deleted while table was fixed */
+ erts_smp_atomic_t fixdel; /* (FixedDeletion*) */
+ erts_smp_atomic_t nactive; /* Number of "active" slots */
+ erts_smp_atomic_t is_resizing; /* grow/shrink in progress */
+#ifdef ERTS_SMP
+ DbTableHashFineLocks* locks;
+#endif
+} DbTableHash;
+
+
+/*
+** Function prototypes; they look the same (except for the suffix) for all
+** table types. The process is always an [in out] parameter.
+*/
+void db_initialize_hash(void);
+void db_unfix_table_hash(DbTableHash *tb /* [in out] */);
+Uint db_kept_items_hash(DbTableHash *tb);
+
+/* Interface for meta pid table */
+int db_create_hash(Process *p,
+ DbTable *tbl /* [in out] */);
+
+int db_put_hash(DbTable *tbl, Eterm obj, int key_clash_fail);
+
+int db_get_hash(Process *p, DbTable *tbl, Eterm key, Eterm *ret);
+
+int db_erase_hash(DbTable *tbl, Eterm key, Eterm *ret);
+
+int db_get_element_array(DbTable *tbl,
+ Eterm key,
+ int ndex,
+ Eterm *ret,
+ int *num_ret);
+
+int db_erase_bag_exact2(DbTable *tbl, Eterm key, Eterm value);
+
+/* not yet in method table */
+int db_mark_all_deleted_hash(DbTable *tbl);
+
+typedef struct {
+ float avg_chain_len;
+ float std_dev_chain_len;
+ float std_dev_expected;
+ int max_chain_len;
+ int min_chain_len;
+}DbHashStats;
+
+void db_calc_stats_hash(DbTableHash* tb, DbHashStats*);
+
+#endif /* _DB_HASH_H */
diff --git a/erts/emulator/beam/erl_db_tree.c b/erts/emulator/beam/erl_db_tree.c
new file mode 100644
index 0000000000..d3a916d2d9
--- /dev/null
+++ b/erts/emulator/beam/erl_db_tree.c
@@ -0,0 +1,3289 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 1998-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+** Implementation of ordered ETS tables.
+** The tables are implemented as AVL trees (published by Adelson-Velskii
+** and Landis). A nice source for learning about these trees is
+** Wirth's Algorithms + Datastructures = Programs.
+** The implementation here, however, does not use recursion
+** as the examples in Wirth's book do.
+*/
+
+/*
+#ifdef DEBUG
+#define HARDDEBUG 1
+#endif
+*/
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+
+#include "sys.h"
+#include "erl_vm.h"
+#include "global.h"
+#include "erl_process.h"
+#include "error.h"
+#define ERTS_WANT_DB_INTERNAL__
+#include "erl_db.h"
+#include "bif.h"
+#include "big.h"
+#include "erl_binary.h"
+
+#include "erl_db_tree.h"
+
+
+
+#define GETKEY(dtt, tplp) (*((tplp) + (dtt)->common.keypos))
+#define GETKEY_WITH_POS(Keypos, Tplp) (*((Tplp) + Keypos))
+#define NITEMS(tb) ((int)erts_smp_atomic_read(&(tb)->common.nitems))
+
+/*
+** A stack of this size is enough for an AVL tree with more than
+** 0xFFFFFFFF elements. It may be subject to change if
+** the datatype of the element counter is changed to a 64 bit integer.
+** The maximal height of an AVL tree is calculated as:
+** h(n) <= 1.4404 * log2(n + 2) - 0.328
+** where n denotes the number of nodes, h(n) the height of the tree
+** with n nodes, and log2 is the binary logarithm.
+*/
+
+#define STACK_NEED 50
+#define TREE_MAX_ELEMENTS 0xFFFFFFFFUL
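+
+/* Sanity check of STACK_NEED: for n = 0xFFFFFFFF nodes,
+** h(n) <= 1.4404 * log2(2^32 + 2) - 0.328 ~= 1.4404 * 32 - 0.328 ~= 45.8,
+** leaving some margin below the 50 stack entries. */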
+
+#define PUSH_NODE(Dtt, Tdt) \
+ ((Dtt)->array[(Dtt)->pos++] = Tdt)
+
+#define POP_NODE(Dtt) \
+ (((Dtt)->pos) ? \
+ (Dtt)->array[--((Dtt)->pos)] : NULL)
+
+#define TOP_NODE(Dtt) \
+ ((Dtt->pos) ? \
+ (Dtt)->array[(Dtt)->pos - 1] : NULL)
+
+#define EMPTY_NODE(Dtt) (TOP_NODE(Dtt) == NULL)
+
+
+
+/* Obtain table static stack if available. NULL if not.
+** Must be released with release_stack()
+*/
+static DbTreeStack* get_static_stack(DbTableTree* tb)
+{
+ if (!erts_smp_atomic_xchg(&tb->is_stack_busy, 1)) {
+ return &tb->static_stack;
+ }
+ return NULL;
+}
+
+/* Obtain static stack if available, otherwise empty dynamic stack.
+** Must be released with release_stack()
+*/
+static DbTreeStack* get_any_stack(DbTableTree* tb)
+{
+ DbTreeStack* stack;
+ if (!erts_smp_atomic_xchg(&tb->is_stack_busy, 1)) {
+ return &tb->static_stack;
+ }
+ stack = erts_db_alloc(ERTS_ALC_T_DB_STK, (DbTable *) tb,
+ sizeof(DbTreeStack) + sizeof(TreeDbTerm*) * STACK_NEED);
+ stack->pos = 0;
+ stack->slot = 0;
+ stack->array = (TreeDbTerm**) (stack + 1);
+ return stack;
+}
+
+static void release_stack(DbTableTree* tb, DbTreeStack* stack)
+{
+ if (stack == &tb->static_stack) {
+ ASSERT(erts_smp_atomic_read(&tb->is_stack_busy) == 1);
+ erts_smp_atomic_set(&tb->is_stack_busy, 0);
+ }
+ else {
+ erts_db_free(ERTS_ALC_T_DB_STK, (DbTable *) tb,
+ (void *) stack, sizeof(DbTreeStack) + sizeof(TreeDbTerm*) * STACK_NEED);
+ }
+}
+
+static void reset_static_stack(DbTableTree* tb)
+{
+ tb->static_stack.pos = 0;
+ tb->static_stack.slot = 0;
+}
+
+
+/*
+** Some macros for "direction stacks"
+*/
+#define DIR_LEFT 0
+#define DIR_RIGHT 1
+#define DIR_END 2
+
+/*
+ * Special binary flag
+ */
+#define BIN_FLAG_ALL_OBJECTS BIN_FLAG_USR1
+
+/*
+ * Number of records to delete before trapping.
+ */
+#define DELETE_RECORD_LIMIT 12000
+
+/*
+** Debugging
+*/
+#ifdef HARDDEBUG
+static TreeDbTerm *traverse_until(TreeDbTerm *t, int *current, int to);
+static void check_slot_pos(DbTableTree *tb);
+static void check_saved_stack(DbTableTree *tb);
+static int check_table_tree(TreeDbTerm *t);
+
+#define TREE_DEBUG
+#endif
+
+#ifdef TREE_DEBUG
+/*
+** Primitive trace macro
+*/
+#define DBG erts_fprintf(stderr,"%d\n",__LINE__)
+
+/*
+** Debugging dump
+*/
+
+static void do_dump_tree2(int to, void *to_arg, int show, TreeDbTerm *t,
+ int offset);
+
+#else
+
+#define DBG /* nothing */
+
+#endif
+
+/*
+ * Size calculations
+ */
+#define SIZ_OVERHEAD ((sizeof(TreeDbTerm)/sizeof(Eterm)) - 1)
+#define SIZ_DBTERM(TDT) (SIZ_OVERHEAD + (TDT)->dbterm.size)
+
+/*
+** Datatypes
+*/
+
+/*
+ * This structure is filled in by analyze_pattern() for the select
+ * functions.
+ */
+struct mp_info {
+ int all_objects; /* True if complete objects are always
+ * returned from the match_spec (can use
+ * copy_shallow on the return value) */
+ int something_can_match; /* The match_spec is not "impossible" */
+ int some_limitation; /* There is some limitation on the search
+ * area, i. e. least and/or most is set.*/
+ int got_partial; /* The limitation has a partially bound
+ * key */
+ Eterm least; /* The lowest matching key (possibly
+ * partially bound expression) */
+ Eterm most; /* The highest matching key (possibly
+ * partially bound expression) */
+
+ TreeDbTerm *save_term; /* If the key is completely bound, this
+ * will be the Tree node we're searching
+ * for, otherwise it will be useless */
+ Binary *mp; /* The compiled match program */
+};
+
+/*
+ * Used by doit_select(_chunk)
+ */
+struct select_context {
+ Process *p;
+ Eterm accum;
+ Binary *mp;
+ Eterm end_condition;
+ Eterm *lastobj;
+ Sint32 max;
+ int keypos;
+ int all_objects;
+ Sint got;
+ Sint chunk_size;
+};
+
+/*
+ * Used by doit_select_count
+ */
+struct select_count_context {
+ Process *p;
+ Binary *mp;
+ Eterm end_condition;
+ Eterm *lastobj;
+ Sint32 max;
+ int keypos;
+ int all_objects;
+ Sint got;
+};
+
+/*
+ * Used by doit_select_delete
+ */
+struct select_delete_context {
+ Process *p;
+ DbTableTree *tb;
+ Uint accum;
+ Binary *mp;
+ Eterm end_condition;
+ int erase_lastterm;
+ TreeDbTerm *lastterm;
+ Sint32 max;
+ int keypos;
+};
+
+/*
+** Forward declarations
+*/
+static TreeDbTerm *linkout_tree(DbTableTree *tb, Eterm key);
+static TreeDbTerm *linkout_object_tree(DbTableTree *tb,
+ Eterm object);
+static int do_free_tree_cont(DbTableTree *tb, int num_left);
+static TreeDbTerm* get_term(DbTableTree *tb,
+ TreeDbTerm* old,
+ Eterm obj);
+static void free_term(DbTableTree *tb, TreeDbTerm* p);
+static int balance_left(TreeDbTerm **this);
+static int balance_right(TreeDbTerm **this);
+static int delsub(TreeDbTerm **this);
+static TreeDbTerm *slot_search(Process *p, DbTableTree *tb, Sint slot);
+static TreeDbTerm *find_node(DbTableTree *tb, Eterm key);
+static TreeDbTerm **find_node2(DbTableTree *tb, Eterm key);
+static TreeDbTerm *find_next(DbTableTree *tb, DbTreeStack*, Eterm key);
+static TreeDbTerm *find_prev(DbTableTree *tb, DbTreeStack*, Eterm key);
+static TreeDbTerm *find_next_from_pb_key(DbTableTree *tb, DbTreeStack*,
+ Eterm key);
+static TreeDbTerm *find_prev_from_pb_key(DbTableTree *tb, DbTreeStack*,
+ Eterm key);
+static void traverse_backwards(DbTableTree *tb,
+ DbTreeStack*,
+ Eterm lastkey,
+ int (*doit)(DbTableTree *tb,
+ TreeDbTerm *,
+ void *,
+ int),
+ void *context);
+static void traverse_forward(DbTableTree *tb,
+ DbTreeStack*,
+ Eterm lastkey,
+ int (*doit)(DbTableTree *tb,
+ TreeDbTerm *,
+ void *,
+ int),
+ void *context);
+static int key_given(DbTableTree *tb, Eterm pattern, TreeDbTerm **ret,
+ Eterm *partly_bound_key);
+static Sint cmp_partly_bound(Eterm partly_bound_key, Eterm bound_key);
+static Sint do_cmp_partly_bound(Eterm a, Eterm b, int *done);
+
+static int analyze_pattern(DbTableTree *tb, Eterm pattern,
+ struct mp_info *mpi);
+static int doit_select(DbTableTree *tb,
+ TreeDbTerm *this,
+ void *ptr,
+ int forward);
+static int doit_select_count(DbTableTree *tb,
+ TreeDbTerm *this,
+ void *ptr,
+ int forward);
+static int doit_select_chunk(DbTableTree *tb,
+ TreeDbTerm *this,
+ void *ptr,
+ int forward);
+static int doit_select_delete(DbTableTree *tb,
+ TreeDbTerm *this,
+ void *ptr,
+ int forward);
+static void do_dump_tree(int to, void *to_arg, TreeDbTerm *t);
+
+static int partly_bound_can_match_lesser(Eterm partly_bound_1,
+ Eterm partly_bound_2);
+static int partly_bound_can_match_greater(Eterm partly_bound_1,
+ Eterm partly_bound_2);
+static int do_partly_bound_can_match_lesser(Eterm a, Eterm b,
+ int *done);
+static int do_partly_bound_can_match_greater(Eterm a, Eterm b,
+ int *done);
+static BIF_RETTYPE ets_select_reverse(Process *p, Eterm a1,
+ Eterm a2, Eterm a3);
+
+/* Method interface functions */
+static int db_first_tree(Process *p, DbTable *tbl,
+ Eterm *ret);
+static int db_next_tree(Process *p, DbTable *tbl,
+ Eterm key, Eterm *ret);
+static int db_last_tree(Process *p, DbTable *tbl,
+ Eterm *ret);
+static int db_prev_tree(Process *p, DbTable *tbl,
+ Eterm key,
+ Eterm *ret);
+static int db_put_tree(DbTable *tbl, Eterm obj, int key_clash_fail);
+static int db_get_tree(Process *p, DbTable *tbl,
+ Eterm key, Eterm *ret);
+static int db_member_tree(DbTable *tbl, Eterm key, Eterm *ret);
+static int db_get_element_tree(Process *p, DbTable *tbl,
+ Eterm key,int ndex,
+ Eterm *ret);
+static int db_erase_tree(DbTable *tbl, Eterm key, Eterm *ret);
+static int db_erase_object_tree(DbTable *tbl, Eterm object,Eterm *ret);
+static int db_slot_tree(Process *p, DbTable *tbl,
+ Eterm slot_term, Eterm *ret);
+static int db_select_tree(Process *p, DbTable *tbl,
+ Eterm pattern, int reversed, Eterm *ret);
+static int db_select_count_tree(Process *p, DbTable *tbl,
+ Eterm pattern, Eterm *ret);
+static int db_select_chunk_tree(Process *p, DbTable *tbl,
+ Eterm pattern, Sint chunk_size,
+ int reversed, Eterm *ret);
+static int db_select_continue_tree(Process *p, DbTable *tbl,
+ Eterm continuation, Eterm *ret);
+static int db_select_count_continue_tree(Process *p, DbTable *tbl,
+ Eterm continuation, Eterm *ret);
+static int db_select_delete_tree(Process *p, DbTable *tbl,
+ Eterm pattern, Eterm *ret);
+static int db_select_delete_continue_tree(Process *p, DbTable *tbl,
+ Eterm continuation, Eterm *ret);
+static void db_print_tree(int to, void *to_arg,
+ int show, DbTable *tbl);
+static int db_free_table_tree(DbTable *tbl);
+
+static int db_free_table_continue_tree(DbTable *tbl);
+
+static void db_foreach_offheap_tree(DbTable *,
+ void (*)(ErlOffHeap *, void *),
+ void *);
+
+static int db_delete_all_objects_tree(Process* p, DbTable* tbl);
+
+#ifdef HARDDEBUG
+static void db_check_table_tree(DbTable *tbl);
+#endif
+static int db_lookup_dbterm_tree(DbTable *, Eterm key, DbUpdateHandle*);
+static void db_finalize_dbterm_tree(DbUpdateHandle*);
+
+/*
+** Static variables
+*/
+
+Export ets_select_reverse_exp;
+
+/*
+** External interface
+*/
+DbTableMethod db_tree =
+{
+ db_create_tree,
+ db_first_tree,
+ db_next_tree,
+ db_last_tree,
+ db_prev_tree,
+ db_put_tree,
+ db_get_tree,
+ db_get_element_tree,
+ db_member_tree,
+ db_erase_tree,
+ db_erase_object_tree,
+ db_slot_tree,
+ db_select_chunk_tree,
+ db_select_tree, /* why not chunk size=0 ??? */
+ db_select_delete_tree,
+ db_select_continue_tree,
+ db_select_delete_continue_tree,
+ db_select_count_tree,
+ db_select_count_continue_tree,
+ db_delete_all_objects_tree,
+ db_free_table_tree,
+ db_free_table_continue_tree,
+ db_print_tree,
+ db_foreach_offheap_tree,
+#ifdef HARDDEBUG
+ db_check_table_tree,
+#else
+ NULL,
+#endif
+ db_lookup_dbterm_tree,
+ db_finalize_dbterm_tree
+
+};
+
+
+
+
+
+void db_initialize_tree(void)
+{
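+    /* Hand-build an Export entry for ets_select_reverse so that select
+     * traversals can trap to it (see bif_trap3() further down):
+     * code[0..2] = module, function and arity; code[3] = the apply_bif
+     * instruction; code[4] = the C function itself. */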
+ memset(&ets_select_reverse_exp, 0, sizeof(Export));
+ ets_select_reverse_exp.address =
+ &ets_select_reverse_exp.code[3];
+ ets_select_reverse_exp.code[0] = am_ets;
+ ets_select_reverse_exp.code[1] = am_reverse;
+ ets_select_reverse_exp.code[2] = 3;
+ ets_select_reverse_exp.code[3] =
+ (Eterm) em_apply_bif;
+ ets_select_reverse_exp.code[4] =
+ (Eterm) &ets_select_reverse;
+ return;
+}
+
+/*
+** Table interface routines, i.e. what's called by the BIFs
+*/
+
+int db_create_tree(Process *p, DbTable *tbl)
+{
+ DbTableTree *tb = &tbl->tree;
+ tb->root = NULL;
+ tb->static_stack.array = erts_db_alloc(ERTS_ALC_T_DB_STK,
+ (DbTable *) tb,
+ sizeof(TreeDbTerm *) * STACK_NEED);
+ tb->static_stack.pos = 0;
+ tb->static_stack.slot = 0;
+ erts_smp_atomic_init(&tb->is_stack_busy, 0);
+ tb->deletion = 0;
+ return DB_ERROR_NONE;
+}
+
+static int db_first_tree(Process *p, DbTable *tbl, Eterm *ret)
+{
+ DbTableTree *tb = &tbl->tree;
+ DbTreeStack* stack;
+ TreeDbTerm *this;
+ Eterm e;
+ Eterm *hp;
+ Uint sz;
+
+ if (( this = tb->root ) == NULL) {
+ *ret = am_EOT;
+ return DB_ERROR_NONE;
+ }
+    /* Walk down the tree to the left */
+ if ((stack = get_static_stack(tb)) != NULL) {
+ stack->pos = stack->slot = 0;
+ }
+ while (this->left != NULL) {
+ if (stack) PUSH_NODE(stack, this);
+ this = this->left;
+ }
+ if (stack) {
+ PUSH_NODE(stack, this);
+ stack->slot = 1;
+ release_stack(tb,stack);
+ }
+ e = GETKEY(tb, this->dbterm.tpl);
+ sz = size_object(e);
+
+ hp = HAlloc(p, sz);
+
+ *ret = copy_struct(e,sz,&hp,&MSO(p));
+
+ return DB_ERROR_NONE;
+}
+
+static int db_next_tree(Process *p, DbTable *tbl, Eterm key, Eterm *ret)
+{
+ DbTableTree *tb = &tbl->tree;
+ DbTreeStack* stack;
+ TreeDbTerm *this;
+ Eterm e;
+ Eterm *hp;
+ Uint sz;
+
+ if (is_atom(key) && key == am_EOT)
+ return DB_ERROR_BADKEY;
+ stack = get_any_stack(tb);
+ this = find_next(tb, stack, key);
+ release_stack(tb,stack);
+ if (this == NULL) {
+ *ret = am_EOT;
+ return DB_ERROR_NONE;
+ }
+ e = GETKEY(tb, this->dbterm.tpl);
+ sz = size_object(e);
+
+ hp = HAlloc(p, sz);
+
+ *ret = copy_struct(e,sz,&hp,&MSO(p));
+
+ return DB_ERROR_NONE;
+}
+
+static int db_last_tree(Process *p, DbTable *tbl, Eterm *ret)
+{
+ DbTableTree *tb = &tbl->tree;
+ TreeDbTerm *this;
+ DbTreeStack* stack;
+ Eterm e;
+ Eterm *hp;
+ Uint sz;
+
+ if (( this = tb->root ) == NULL) {
+ *ret = am_EOT;
+ return DB_ERROR_NONE;
+ }
+    /* Walk down the tree to the right */
+ if ((stack = get_static_stack(tb)) != NULL) {
+ stack->pos = stack->slot = 0;
+ }
+ while (this->right != NULL) {
+ if (stack) PUSH_NODE(stack, this);
+ this = this->right;
+ }
+ if (stack) {
+ PUSH_NODE(stack, this);
+ stack->slot = NITEMS(tb);
+ release_stack(tb,stack);
+ }
+ e = GETKEY(tb, this->dbterm.tpl);
+ sz = size_object(e);
+
+ hp = HAlloc(p, sz);
+
+ *ret = copy_struct(e,sz,&hp,&MSO(p));
+
+ return DB_ERROR_NONE;
+}
+
+static int db_prev_tree(Process *p, DbTable *tbl, Eterm key, Eterm *ret)
+{
+ DbTableTree *tb = &tbl->tree;
+ TreeDbTerm *this;
+ DbTreeStack* stack;
+ Eterm e;
+ Eterm *hp;
+ Uint sz;
+
+ if (is_atom(key) && key == am_EOT)
+ return DB_ERROR_BADKEY;
+ stack = get_any_stack(tb);
+ this = find_prev(tb, stack, key);
+ release_stack(tb,stack);
+ if (this == NULL) {
+ *ret = am_EOT;
+ return DB_ERROR_NONE;
+ }
+ e = GETKEY(tb, this->dbterm.tpl);
+ sz = size_object(e);
+
+ hp = HAlloc(p, sz);
+
+ *ret = copy_struct(e,sz,&hp,&MSO(p));
+
+ return DB_ERROR_NONE;
+}
+
+static int db_put_tree(DbTable *tbl, Eterm obj, int key_clash_fail)
+{
+ DbTableTree *tb = &tbl->tree;
+    /* Non-recursive insertion in AVL tree, building our own stack */
+ TreeDbTerm **tstack[STACK_NEED];
+ int tpos = 0;
+ int dstack[STACK_NEED+1];
+ int dpos = 0;
+ int state = 0;
+ TreeDbTerm **this = &tb->root;
+ Sint c;
+ Eterm key;
+ int dir;
+ TreeDbTerm *p1, *p2, *p;
+
+ key = GETKEY(tb, tuple_val(obj));
+
+ reset_static_stack(tb);
+
+ dstack[dpos++] = DIR_END;
+ for (;;)
+ if (!*this) { /* Found our place */
+ state = 1;
+ if (erts_smp_atomic_inctest(&tb->common.nitems) >= TREE_MAX_ELEMENTS) {
+ erts_smp_atomic_dec(&tb->common.nitems);
+ return DB_ERROR_SYSRES;
+ }
+ *this = get_term(tb, NULL, obj);
+ (*this)->balance = 0;
+ (*this)->left = (*this)->right = NULL;
+ break;
+ } else if ((c = cmp(key,GETKEY(tb,(*this)->dbterm.tpl))) < 0) {
+ /* go left */
+ dstack[dpos++] = DIR_LEFT;
+ tstack[tpos++] = this;
+ this = &((*this)->left);
+ } else if (c > 0) { /* go right */
+ dstack[dpos++] = DIR_RIGHT;
+ tstack[tpos++] = this;
+ this = &((*this)->right);
+ } else if (!key_clash_fail) { /* Equal key and this is a set, replace. */
+ *this = get_term(tb, *this, obj);
+ break;
+ } else {
+ return DB_ERROR_BADKEY; /* key already exists */
+ }
+
+ while (state && ( dir = dstack[--dpos] ) != DIR_END) {
+ this = tstack[--tpos];
+ p = *this;
+ if (dir == DIR_LEFT) {
+ switch (p->balance) {
+ case 1:
+ p->balance = 0;
+ state = 0;
+ break;
+ case 0:
+ p->balance = -1;
+ break;
+ case -1: /* The icky case */
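+		/* Rebalancing sketch: p was already left-heavy (-1) and the
+		 * insertion went into its left subtree. Single LL rotation:
+		 *
+		 *         p                  p1
+		 *        / \                /  \
+		 *      p1   C     ==>      A    p
+		 *     /  \                     / \
+		 *    A    B                   B   C
+		 *
+		 * The double (LR) case instead rotates p2 = p1->right up
+		 * to the top.
+		 */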
+ p1 = p->left;
+ if (p1->balance == -1) { /* Single LL rotation */
+ p->left = p1->right;
+ p1->right = p;
+ p->balance = 0;
+ (*this) = p1;
+		} else { /* Double LR rotation */
+ p2 = p1->right;
+ p1->right = p2->left;
+ p2->left = p1;
+ p->left = p2->right;
+ p2->right = p;
+ p->balance = (p2->balance == -1) ? +1 : 0;
+ p1->balance = (p2->balance == 1) ? -1 : 0;
+ (*this) = p2;
+ }
+ (*this)->balance = 0;
+ state = 0;
+ break;
+ }
+ } else { /* dir == DIR_RIGHT */
+ switch (p->balance) {
+ case -1:
+ p->balance = 0;
+ state = 0;
+ break;
+ case 0:
+ p->balance = 1;
+ break;
+ case 1:
+ p1 = p->right;
+ if (p1->balance == 1) { /* Single RR rotation */
+ p->right = p1->left;
+ p1->left = p;
+ p->balance = 0;
+ (*this) = p1;
+ } else { /* Double RL rotation */
+ p2 = p1->left;
+ p1->left = p2->right;
+ p2->right = p1;
+ p->right = p2->left;
+ p2->left = p;
+ p->balance = (p2->balance == 1) ? -1 : 0;
+ p1->balance = (p2->balance == -1) ? 1 : 0;
+ (*this) = p2;
+ }
+ (*this)->balance = 0;
+ state = 0;
+ break;
+ }
+ }
+ }
+ return DB_ERROR_NONE;
+}
+
+static int db_get_tree(Process *p, DbTable *tbl, Eterm key, Eterm *ret)
+{
+ DbTableTree *tb = &tbl->tree;
+ Eterm copy;
+ Eterm *hp;
+ TreeDbTerm *this;
+
+ /*
+ * This is always a set, so we know exactly how large
+ * the data is when we have found it.
+ * The list created around it is purely for interface conformance.
+ */
+
+ this = find_node(tb,key);
+ if (this == NULL) {
+ *ret = NIL;
+ } else {
+ hp = HAlloc(p, this->dbterm.size + 2);
+ copy = copy_shallow(DBTERM_BUF(&this->dbterm),
+ this->dbterm.size,
+ &hp,
+ &MSO(p));
+ *ret = CONS(hp, copy, NIL);
+ }
+ return DB_ERROR_NONE;
+}
+
+static int db_member_tree(DbTable *tbl, Eterm key, Eterm *ret)
+{
+ DbTableTree *tb = &tbl->tree;
+
+ *ret = (find_node(tb,key) == NULL) ? am_false : am_true;
+ return DB_ERROR_NONE;
+}
+
+static int db_get_element_tree(Process *p, DbTable *tbl,
+ Eterm key, int ndex, Eterm *ret)
+{
+ DbTableTree *tb = &tbl->tree;
+ /*
+ * Look the node up:
+ */
+ Eterm *hp;
+ TreeDbTerm *this;
+
+ /*
+ * This is always a set, so we know exactly how large
+ * the data is when we have found it.
+     * No list is created around elements in sets, so there is no list
+     * around the element here either.
+ */
+
+ this = find_node(tb,key);
+ if (this == NULL) {
+ return DB_ERROR_BADKEY;
+ } else {
+ Eterm element;
+ Uint sz;
+ if (ndex > arityval(this->dbterm.tpl[0])) {
+ return DB_ERROR_BADPARAM;
+ }
+ element = this->dbterm.tpl[ndex];
+ sz = size_object(element);
+ hp = HAlloc(p, sz);
+ *ret = copy_struct(element,
+ sz,
+ &hp,
+ &MSO(p));
+ }
+ return DB_ERROR_NONE;
+}
+
+static int db_erase_tree(DbTable *tbl, Eterm key, Eterm *ret)
+{
+ DbTableTree *tb = &tbl->tree;
+ TreeDbTerm *res;
+
+ *ret = am_true;
+
+ if ((res = linkout_tree(tb, key)) != NULL) {
+ free_term(tb, res);
+ }
+ return DB_ERROR_NONE;
+}
+
+static int db_erase_object_tree(DbTable *tbl, Eterm object, Eterm *ret)
+{
+ DbTableTree *tb = &tbl->tree;
+ TreeDbTerm *res;
+
+ *ret = am_true;
+
+ if ((res = linkout_object_tree(tb, object)) != NULL) {
+ free_term(tb, res);
+ }
+ return DB_ERROR_NONE;
+}
+
+
+static int db_slot_tree(Process *p, DbTable *tbl,
+ Eterm slot_term, Eterm *ret)
+{
+ DbTableTree *tb = &tbl->tree;
+ Sint slot;
+ TreeDbTerm *st;
+ Eterm *hp;
+ Eterm copy;
+
+ /*
+ * The notion of a "slot" is not natural in a tree, but we try to
+ * simulate it by giving the n'th node in the tree instead.
+ * Traversing a tree in this way is not very convenient, but by
+     * using the saved stack we will at least sometimes get acceptable
+ * performance.
+ */
+
+ if (is_not_small(slot_term) ||
+ ((slot = signed_val(slot_term)) < 0) ||
+ (slot > NITEMS(tb)))
+ return DB_ERROR_BADPARAM;
+
+ if (slot == NITEMS(tb)) {
+ *ret = am_EOT;
+ return DB_ERROR_NONE;
+ }
+
+ /*
+ * We use the slot position and search from there, slot positions
+ * are counted from 1 and up.
+ */
+ ++slot;
+ st = slot_search(p, tb, slot);
+ if (st == NULL) {
+ *ret = am_false;
+ return DB_ERROR_UNSPEC;
+ }
+ hp = HAlloc(p, st->dbterm.size + 2);
+ copy = copy_shallow(DBTERM_BUF(&st->dbterm),
+ st->dbterm.size,
+ &hp,
+ &MSO(p));
+ *ret = CONS(hp, copy, NIL);
+ return DB_ERROR_NONE;
+}
+
+
+
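+/*
+** Trappable list reverse, reached through the hand-built export
+** ets_select_reverse_exp. Reverses a1 onto a2 and returns {Result, a3},
+** trapping to itself after CONTEXT_REDS * 10 list cells so that a long
+** result list cannot block the scheduler.
+*/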
+static BIF_RETTYPE ets_select_reverse(Process *p, Eterm a1, Eterm a2, Eterm a3)
+{
+ Eterm list;
+ Eterm result;
+ Eterm* hp;
+ Eterm* hend;
+
+ int max_iter = CONTEXT_REDS * 10;
+
+ if (is_nil(a1)) {
+ hp = HAlloc(p, 3);
+ BIF_RET(TUPLE2(hp,a2,a3));
+ } else if (is_not_list(a1)) {
+ error:
+ BIF_ERROR(p, BADARG);
+ }
+
+ list = a1;
+ result = a2;
+ hp = hend = NULL;
+ while (is_list(list)) {
+ Eterm* pair = list_val(list);
+ if (--max_iter == 0) {
+ BUMP_ALL_REDS(p);
+ HRelease(p, hend, hp);
+ BIF_TRAP3(&ets_select_reverse_exp, p, list, result, a3);
+ }
+ if (hp == hend) {
+ hp = HAlloc(p, 64);
+ hend = hp + 64;
+ }
+ result = CONS(hp, CAR(pair), result);
+ hp += 2;
+ list = CDR(pair);
+ }
+ if (is_not_nil(list)) {
+ goto error;
+ }
+ HRelease(p, hend, hp);
+ BUMP_REDS(p,CONTEXT_REDS - max_iter / 10);
+ hp = HAlloc(p,3);
+ BIF_RET(TUPLE2(hp, result, a3));
+}
+
+static BIF_RETTYPE bif_trap1(Export *bif,
+ Process *p,
+ Eterm p1)
+{
+ BIF_TRAP1(bif, p, p1);
+}
+
+static BIF_RETTYPE bif_trap3(Export *bif,
+ Process *p,
+ Eterm p1,
+ Eterm p2,
+ Eterm p3)
+{
+ BIF_TRAP3(bif, p, p1, p2, p3);
+}
+
+/*
+** This is called either when the select bif traps or when ets:select/1
+** is called. It does mostly the same thing as db_select_tree and may in
+** either case trap to itself again (via the ets:select/1 bif).
+** Note that this is common for db_select_tree and db_select_chunk_tree.
+*/
+static int db_select_continue_tree(Process *p,
+ DbTable *tbl,
+ Eterm continuation,
+ Eterm *ret)
+{
+ DbTableTree *tb = &tbl->tree;
+ DbTreeStack* stack;
+ struct select_context sc;
+ unsigned sz;
+ Eterm *hp;
+ Eterm lastkey;
+ Eterm end_condition;
+ Binary *mp;
+ Eterm key;
+ Eterm *tptr;
+ Sint chunk_size;
+ Sint reverse;
+
+
+#define RET_TO_BIF(Term, State) do { *ret = (Term); return State; } while(0)
+
+ /* Decode continuation. We know it's a tuple but not the arity or
+ anything else */
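+
+    /* continuation:
+       {TableId, LastKey, EndCondition, ChunkSize, MatchProgBin,
+        Accumulator, Reverse, HowManyGot} */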
+
+ tptr = tuple_val(continuation);
+
+ if (arityval(*tptr) != 8)
+ RET_TO_BIF(NIL,DB_ERROR_BADPARAM);
+
+ if (!is_small(tptr[4]) || !is_binary(tptr[5]) ||
+ !(is_list(tptr[6]) || tptr[6] == NIL) || !is_small(tptr[7]) ||
+ !is_small(tptr[8]))
+ RET_TO_BIF(NIL,DB_ERROR_BADPARAM);
+
+ lastkey = tptr[2];
+ end_condition = tptr[3];
+    if (thing_subtag(*binary_val(tptr[5])) != REFC_BINARY_SUBTAG)
+ RET_TO_BIF(NIL,DB_ERROR_BADPARAM);
+ mp = ((ProcBin *) binary_val(tptr[5]))->val;
+ if (!IsMatchProgBinary(mp))
+ RET_TO_BIF(NIL,DB_ERROR_BADPARAM);
+ chunk_size = signed_val(tptr[4]);
+
+ sc.p = p;
+ sc.accum = tptr[6];
+ sc.mp = mp;
+ sc.end_condition = NIL;
+ sc.lastobj = NULL;
+ sc.max = 1000;
+ sc.keypos = tb->common.keypos;
+ sc.all_objects = mp->flags & BIN_FLAG_ALL_OBJECTS;
+ sc.chunk_size = chunk_size;
+ reverse = unsigned_val(tptr[7]);
+ sc.got = signed_val(tptr[8]);
+
+ stack = get_any_stack(tb);
+ if (chunk_size) {
+ if (reverse) {
+ traverse_backwards(tb, stack, lastkey, &doit_select_chunk, &sc);
+ } else {
+ traverse_forward(tb, stack, lastkey, &doit_select_chunk, &sc);
+ }
+ } else {
+ if (reverse) {
+ traverse_forward(tb, stack, lastkey, &doit_select, &sc);
+ } else {
+ traverse_backwards(tb, stack, lastkey, &doit_select, &sc);
+ }
+ }
+ release_stack(tb,stack);
+
+ BUMP_REDS(p, 1000 - sc.max);
+
+ if (sc.max > 0 || (chunk_size && sc.got == chunk_size)) {
+ if (chunk_size) {
+ Eterm *hp;
+ unsigned sz;
+
+ if (sc.got < chunk_size || sc.lastobj == NULL) {
+ /* end of table, sc.lastobj may be NULL as we may have been
+ at the very last object in the table when trapping. */
+ if (!sc.got) {
+ RET_TO_BIF(am_EOT, DB_ERROR_NONE);
+ } else {
+ RET_TO_BIF(bif_trap3(&ets_select_reverse_exp, p,
+ sc.accum, NIL, am_EOT),
+ DB_ERROR_NONE);
+ }
+ }
+
+ key = GETKEY(tb, sc.lastobj);
+
+ sz = size_object(key);
+ hp = HAlloc(p, 9 + sz);
+ key = copy_struct(key, sz, &hp, &MSO(p));
+ continuation = TUPLE8
+ (hp,
+ tptr[1],
+ key,
+ tptr[3],
+ tptr[4],
+ tptr[5],
+ NIL,
+ tptr[7],
+ make_small(0));
+ RET_TO_BIF(bif_trap3(&ets_select_reverse_exp, p,
+ sc.accum, NIL, continuation),
+ DB_ERROR_NONE);
+ } else {
+ RET_TO_BIF(sc.accum, DB_ERROR_NONE);
+ }
+ }
+ key = GETKEY(tb, sc.lastobj);
+ if (chunk_size) {
+ if (end_condition != NIL &&
+ ((!reverse && cmp_partly_bound(end_condition,key) < 0) ||
+ (reverse && cmp_partly_bound(end_condition,key) > 0))) {
+ /* done anyway */
+ if (!sc.got) {
+ RET_TO_BIF(am_EOT, DB_ERROR_NONE);
+ } else {
+ RET_TO_BIF(bif_trap3(&ets_select_reverse_exp, p,
+ sc.accum, NIL, am_EOT),
+ DB_ERROR_NONE);
+ }
+ }
+ } else {
+ if (end_condition != NIL &&
+ ((!reverse && cmp_partly_bound(end_condition,key) > 0) ||
+ (reverse && cmp_partly_bound(end_condition,key) < 0))) {
+ /* done anyway */
+ RET_TO_BIF(sc.accum,DB_ERROR_NONE);
+ }
+ }
+ /* Not done yet, let's trap. */
+ sz = size_object(key);
+ hp = HAlloc(p, 9 + sz);
+ key = copy_struct(key, sz, &hp, &MSO(p));
+ continuation = TUPLE8
+ (hp,
+ tptr[1],
+ key,
+ tptr[3],
+ tptr[4],
+ tptr[5],
+ sc.accum,
+ tptr[7],
+ make_small(sc.got));
+ RET_TO_BIF(bif_trap1(bif_export[BIF_ets_select_1], p, continuation),
+ DB_ERROR_NONE);
+
+#undef RET_TO_BIF
+}
+
+
+static int db_select_tree(Process *p, DbTable *tbl,
+ Eterm pattern, int reverse, Eterm *ret)
+{
+ DbTableTree *tb = &tbl->tree;
+ DbTreeStack* stack;
+ struct select_context sc;
+ struct mp_info mpi;
+ Eterm lastkey = NIL;
+ Eterm key;
+ Eterm continuation;
+ unsigned sz;
+ Eterm *hp;
+ TreeDbTerm *this;
+ int errcode;
+ Eterm mpb;
+
+
+#define RET_TO_BIF(Term,RetVal) do { \
+ if (mpi.mp != NULL) { \
+ erts_bin_free(mpi.mp); \
+ } \
+ *ret = (Term); \
+ return RetVal; \
+ } while(0)
+
+ mpi.mp = NULL;
+
+ sc.accum = NIL;
+ sc.lastobj = NULL;
+ sc.p = p;
+ sc.max = 1000;
+ sc.end_condition = NIL;
+ sc.keypos = tb->common.keypos;
+ sc.got = 0;
+ sc.chunk_size = 0;
+
+ if ((errcode = analyze_pattern(tb, pattern, &mpi)) != DB_ERROR_NONE) {
+ RET_TO_BIF(NIL,errcode);
+ }
+
+ if (!mpi.something_can_match) {
+ RET_TO_BIF(NIL,DB_ERROR_NONE);
+ /* can't possibly match anything */
+ }
+
+ sc.mp = mpi.mp;
+ sc.all_objects = mpi.all_objects;
+
+ if (!mpi.got_partial && mpi.some_limitation &&
+ cmp(mpi.least,mpi.most) == 0) {
+ doit_select(tb,mpi.save_term,&sc,0 /* direction doesn't matter */);
+ RET_TO_BIF(sc.accum,DB_ERROR_NONE);
+ }
+
+ stack = get_any_stack(tb);
+ if (reverse) {
+ if (mpi.some_limitation) {
+ if ((this = find_prev_from_pb_key(tb, stack, mpi.least)) != NULL) {
+ lastkey = GETKEY(tb, this->dbterm.tpl);
+ }
+ sc.end_condition = mpi.most;
+ }
+
+ traverse_forward(tb, stack, lastkey, &doit_select, &sc);
+ } else {
+ if (mpi.some_limitation) {
+ if ((this = find_next_from_pb_key(tb, stack, mpi.most)) != NULL) {
+ lastkey = GETKEY(tb, this->dbterm.tpl);
+ }
+ sc.end_condition = mpi.least;
+ }
+
+ traverse_backwards(tb, stack, lastkey, &doit_select, &sc);
+ }
+ release_stack(tb,stack);
+#ifdef HARDDEBUG
+ erts_fprintf(stderr,"Least: %T\n", mpi.least);
+ erts_fprintf(stderr,"Most: %T\n", mpi.most);
+#endif
+ BUMP_REDS(p, 1000 - sc.max);
+ if (sc.max > 0) {
+ RET_TO_BIF(sc.accum,DB_ERROR_NONE);
+ }
+
+ key = GETKEY(tb, sc.lastobj);
+ sz = size_object(key);
+ hp = HAlloc(p, 9 + sz + PROC_BIN_SIZE);
+ key = copy_struct(key, sz, &hp, &MSO(p));
+ if (mpi.all_objects)
+ (mpi.mp)->flags |= BIN_FLAG_ALL_OBJECTS;
+ mpb=db_make_mp_binary(p,mpi.mp,&hp);
+
+ continuation = TUPLE8
+ (hp,
+ tb->common.id,
+ key,
+ sc.end_condition, /* From the match program, needn't be copied */
+ make_small(0), /* Chunk size of zero means not chunked to the
+ continuation BIF */
+ mpb,
+ sc.accum,
+ make_small(reverse),
+ make_small(sc.got));
+
+ /* Don't free mpi.mp, so don't use macro */
+ *ret = bif_trap1(bif_export[BIF_ets_select_1], p, continuation);
+ return DB_ERROR_NONE;
+
+#undef RET_TO_BIF
+
+}
+
+
+/*
+** This is called when the select_count bif traps.
+*/
+static int db_select_count_continue_tree(Process *p,
+ DbTable *tbl,
+ Eterm continuation,
+ Eterm *ret)
+{
+ DbTableTree *tb = &tbl->tree;
+ DbTreeStack* stack;
+ struct select_count_context sc;
+ unsigned sz;
+ Eterm *hp;
+ Eterm lastkey;
+ Eterm end_condition;
+ Binary *mp;
+ Eterm key;
+ Eterm *tptr;
+ Eterm egot;
+
+
+#define RET_TO_BIF(Term, State) do { *ret = (Term); return State; } while(0)
+
+    /* Decode continuation. We know it's a tuple and everything else too,
+       as this is only called by ourselves */
+
+ /* continuation:
+ {Table, Lastkey, EndCondition, MatchProgBin, HowManyGot}*/
+
+ tptr = tuple_val(continuation);
+
+ if (arityval(*tptr) != 5)
+ erl_exit(1,"Internal error in ets:select_count/1");
+
+ lastkey = tptr[2];
+ end_condition = tptr[3];
+    if (thing_subtag(*binary_val(tptr[4])) != REFC_BINARY_SUBTAG)
+ RET_TO_BIF(NIL,DB_ERROR_BADPARAM);
+ mp = ((ProcBin *) binary_val(tptr[4]))->val;
+ if (!IsMatchProgBinary(mp))
+ RET_TO_BIF(NIL,DB_ERROR_BADPARAM);
+
+ sc.p = p;
+ sc.mp = mp;
+ sc.end_condition = NIL;
+ sc.lastobj = NULL;
+ sc.max = 1000;
+ sc.keypos = tb->common.keypos;
+ if (is_big(tptr[5])) {
+ sc.got = big_to_uint32(tptr[5]);
+ } else {
+ sc.got = unsigned_val(tptr[5]);
+ }
+
+ stack = get_any_stack(tb);
+ traverse_backwards(tb, stack, lastkey, &doit_select_count, &sc);
+ release_stack(tb,stack);
+
+ BUMP_REDS(p, 1000 - sc.max);
+
+ if (sc.max > 0) {
+ RET_TO_BIF(erts_make_integer(sc.got,p), DB_ERROR_NONE);
+ }
+ key = GETKEY(tb, sc.lastobj);
+ if (end_condition != NIL &&
+ (cmp_partly_bound(end_condition,key) > 0)) {
+ /* done anyway */
+ RET_TO_BIF(make_small(sc.got),DB_ERROR_NONE);
+ }
+ /* Not done yet, let's trap. */
+ sz = size_object(key);
+ if (IS_USMALL(0, sc.got)) {
+ hp = HAlloc(p, sz + 6);
+ egot = make_small(sc.got);
+ }
+ else {
+ hp = HAlloc(p, BIG_UINT_HEAP_SIZE + sz + 6);
+ egot = uint_to_big(sc.got, hp);
+ hp += BIG_UINT_HEAP_SIZE;
+ }
+ key = copy_struct(key, sz, &hp, &MSO(p));
+ continuation = TUPLE5
+ (hp,
+ tptr[1],
+ key,
+ tptr[3],
+ tptr[4],
+ egot);
+ RET_TO_BIF(bif_trap1(&ets_select_count_continue_exp, p, continuation),
+ DB_ERROR_NONE);
+
+#undef RET_TO_BIF
+}
+
+
+static int db_select_count_tree(Process *p, DbTable *tbl,
+ Eterm pattern, Eterm *ret)
+{
+ DbTableTree *tb = &tbl->tree;
+ DbTreeStack* stack;
+ struct select_count_context sc;
+ struct mp_info mpi;
+ Eterm lastkey = NIL;
+ Eterm key;
+ Eterm continuation;
+ unsigned sz;
+ Eterm *hp;
+ TreeDbTerm *this;
+ int errcode;
+ Eterm egot;
+ Eterm mpb;
+
+
+#define RET_TO_BIF(Term,RetVal) do { \
+ if (mpi.mp != NULL) { \
+ erts_bin_free(mpi.mp); \
+ } \
+ *ret = (Term); \
+ return RetVal; \
+ } while(0)
+
+ mpi.mp = NULL;
+
+ sc.lastobj = NULL;
+ sc.p = p;
+ sc.max = 1000;
+ sc.end_condition = NIL;
+ sc.keypos = tb->common.keypos;
+ sc.got = 0;
+
+ if ((errcode = analyze_pattern(tb, pattern, &mpi)) != DB_ERROR_NONE) {
+ RET_TO_BIF(NIL,errcode);
+ }
+
+ if (!mpi.something_can_match) {
+ RET_TO_BIF(make_small(0),DB_ERROR_NONE);
+ /* can't possibly match anything */
+ }
+
+ sc.mp = mpi.mp;
+ sc.all_objects = mpi.all_objects;
+
+ if (!mpi.got_partial && mpi.some_limitation &&
+ cmp(mpi.least,mpi.most) == 0) {
+ doit_select_count(tb,mpi.save_term,&sc,0 /* dummy */);
+ RET_TO_BIF(erts_make_integer(sc.got,p),DB_ERROR_NONE);
+ }
+
+ stack = get_any_stack(tb);
+ if (mpi.some_limitation) {
+ if ((this = find_next_from_pb_key(tb, stack, mpi.most)) != NULL) {
+ lastkey = GETKEY(tb, this->dbterm.tpl);
+ }
+ sc.end_condition = mpi.least;
+ }
+
+ traverse_backwards(tb, stack, lastkey, &doit_select_count, &sc);
+ release_stack(tb,stack);
+ BUMP_REDS(p, 1000 - sc.max);
+ if (sc.max > 0) {
+ RET_TO_BIF(erts_make_integer(sc.got,p),DB_ERROR_NONE);
+ }
+
+ key = GETKEY(tb, sc.lastobj);
+ sz = size_object(key);
+ if (IS_USMALL(0, sc.got)) {
+ hp = HAlloc(p, sz + PROC_BIN_SIZE + 6);
+ egot = make_small(sc.got);
+ }
+ else {
+ hp = HAlloc(p, BIG_UINT_HEAP_SIZE + sz + PROC_BIN_SIZE + 6);
+ egot = uint_to_big(sc.got, hp);
+ hp += BIG_UINT_HEAP_SIZE;
+ }
+ key = copy_struct(key, sz, &hp, &MSO(p));
+ if (mpi.all_objects)
+ (mpi.mp)->flags |= BIN_FLAG_ALL_OBJECTS;
+ mpb = db_make_mp_binary(p,mpi.mp,&hp);
+
+ continuation = TUPLE5
+ (hp,
+ tb->common.id,
+ key,
+ sc.end_condition, /* From the match program, needn't be copied */
+ mpb,
+ egot);
+
+ /* Don't free mpi.mp, so don't use macro */
+ *ret = bif_trap1(&ets_select_count_continue_exp, p, continuation);
+ return DB_ERROR_NONE;
+
+#undef RET_TO_BIF
+
+}
+
+static int db_select_chunk_tree(Process *p, DbTable *tbl,
+ Eterm pattern, Sint chunk_size,
+ int reverse,
+ Eterm *ret)
+{
+ DbTableTree *tb = &tbl->tree;
+ DbTreeStack* stack;
+ struct select_context sc;
+ struct mp_info mpi;
+ Eterm lastkey = NIL;
+ Eterm key;
+ Eterm continuation;
+ unsigned sz;
+ Eterm *hp;
+ TreeDbTerm *this;
+ int errcode;
+ Eterm mpb;
+
+
+#define RET_TO_BIF(Term,RetVal) do { \
+ if (mpi.mp != NULL) { \
+ erts_bin_free(mpi.mp); \
+ } \
+ *ret = (Term); \
+ return RetVal; \
+ } while(0)
+
+ mpi.mp = NULL;
+
+ sc.accum = NIL;
+ sc.lastobj = NULL;
+ sc.p = p;
+ sc.max = 1000;
+ sc.end_condition = NIL;
+ sc.keypos = tb->common.keypos;
+ sc.got = 0;
+ sc.chunk_size = chunk_size;
+
+ if ((errcode = analyze_pattern(tb, pattern, &mpi)) != DB_ERROR_NONE) {
+ RET_TO_BIF(NIL,errcode);
+ }
+
+ if (!mpi.something_can_match) {
+ RET_TO_BIF(am_EOT,DB_ERROR_NONE);
+ /* can't possibly match anything */
+ }
+
+ sc.mp = mpi.mp;
+ sc.all_objects = mpi.all_objects;
+
+ if (!mpi.got_partial && mpi.some_limitation &&
+ cmp(mpi.least,mpi.most) == 0) {
+ doit_select(tb,mpi.save_term,&sc, 0 /* direction doesn't matter */);
+ if (sc.accum != NIL) {
+ hp=HAlloc(p, 3);
+ RET_TO_BIF(TUPLE2(hp,sc.accum,am_EOT),DB_ERROR_NONE);
+ } else {
+ RET_TO_BIF(am_EOT,DB_ERROR_NONE);
+ }
+ }
+
+ stack = get_any_stack(tb);
+ if (reverse) {
+ if (mpi.some_limitation) {
+ if ((this = find_next_from_pb_key(tb, stack, mpi.most)) != NULL) {
+ lastkey = GETKEY(tb, this->dbterm.tpl);
+ }
+ sc.end_condition = mpi.least;
+ }
+
+ traverse_backwards(tb, stack, lastkey, &doit_select_chunk, &sc);
+ } else {
+ if (mpi.some_limitation) {
+ if ((this = find_prev_from_pb_key(tb, stack, mpi.least)) != NULL) {
+ lastkey = GETKEY(tb, this->dbterm.tpl);
+ }
+ sc.end_condition = mpi.most;
+ }
+
+ traverse_forward(tb, stack, lastkey, &doit_select_chunk, &sc);
+ }
+ release_stack(tb,stack);
+
+ BUMP_REDS(p, 1000 - sc.max);
+ if (sc.max > 0 || sc.got == chunk_size) {
+ Eterm *hp;
+ unsigned sz;
+
+	if (sc.got < chunk_size ||
+	    sc.lastobj == NULL) {
+	    /* We haven't got them all and we haven't trapped, which should
+	       mean that we are at the end of the table; sc.lastobj may be
+	       NULL if the table was empty */
+
+ if (!sc.got) {
+ RET_TO_BIF(am_EOT, DB_ERROR_NONE);
+ } else {
+ RET_TO_BIF(bif_trap3(&ets_select_reverse_exp, p,
+ sc.accum, NIL, am_EOT),
+ DB_ERROR_NONE);
+ }
+ }
+
+ key = GETKEY(tb, sc.lastobj);
+ sz = size_object(key);
+ hp = HAlloc(p, 9 + sz + PROC_BIN_SIZE);
+ key = copy_struct(key, sz, &hp, &MSO(p));
+ if (mpi.all_objects)
+ (mpi.mp)->flags |= BIN_FLAG_ALL_OBJECTS;
+ mpb = db_make_mp_binary(p,mpi.mp,&hp);
+
+ continuation = TUPLE8
+ (hp,
+ tb->common.id,
+ key,
+ sc.end_condition, /* From the match program,
+ needn't be copied */
+ make_small(chunk_size),
+ mpb,
+ NIL,
+ make_small(reverse),
+ make_small(0));
+ /* Don't let RET_TO_BIF macro free mpi.mp*/
+ *ret = bif_trap3(&ets_select_reverse_exp, p,
+ sc.accum, NIL, continuation);
+ return DB_ERROR_NONE;
+ }
+
+ key = GETKEY(tb, sc.lastobj);
+ sz = size_object(key);
+ hp = HAlloc(p, 9 + sz + PROC_BIN_SIZE);
+ key = copy_struct(key, sz, &hp, &MSO(p));
+
+ if (mpi.all_objects)
+ (mpi.mp)->flags |= BIN_FLAG_ALL_OBJECTS;
+ mpb = db_make_mp_binary(p,mpi.mp,&hp);
+ continuation = TUPLE8
+ (hp,
+ tb->common.id,
+ key,
+ sc.end_condition, /* From the match program, needn't be copied */
+ make_small(chunk_size),
+ mpb,
+ sc.accum,
+ make_small(reverse),
+ make_small(sc.got));
+ /* Don't let RET_TO_BIF macro free mpi.mp*/
+ *ret = bif_trap1(bif_export[BIF_ets_select_1], p, continuation);
+ return DB_ERROR_NONE;
+
+#undef RET_TO_BIF
+
+}
+
+/*
+** This is called when select_delete traps
+*/
+static int db_select_delete_continue_tree(Process *p,
+ DbTable *tbl,
+ Eterm continuation,
+ Eterm *ret)
+{
+ DbTableTree *tb = &tbl->tree;
+ struct select_delete_context sc;
+ unsigned sz;
+ Eterm *hp;
+ Eterm lastkey;
+ Eterm end_condition;
+ Binary *mp;
+ Eterm key;
+ Eterm *tptr;
+ Eterm eaccsum;
+
+
+#define RET_TO_BIF(Term, State) do { \
+ if (sc.erase_lastterm) { \
+ free_term(tb, sc.lastterm); \
+ } \
+ *ret = (Term); \
+ return State; \
+    } while(0)
+
+    /* Decode continuation. We know it's correct, as this can only be
+       called through trapping */
+
+ tptr = tuple_val(continuation);
+
+ lastkey = tptr[2];
+ end_condition = tptr[3];
+
+ sc.erase_lastterm = 0; /* Before first RET_TO_BIF */
+ sc.lastterm = NULL;
+
+ mp = ((ProcBin *) binary_val(tptr[4]))->val;
+ sc.p = p;
+ sc.tb = tb;
+ if (is_big(tptr[5])) {
+ sc.accum = big_to_uint32(tptr[5]);
+ } else {
+ sc.accum = unsigned_val(tptr[5]);
+ }
+ sc.mp = mp;
+ sc.end_condition = NIL;
+ sc.max = 1000;
+ sc.keypos = tb->common.keypos;
+
+ ASSERT(!erts_smp_atomic_read(&tb->is_stack_busy));
+ traverse_backwards(tb, &tb->static_stack, lastkey, &doit_select_delete, &sc);
+
+ BUMP_REDS(p, 1000 - sc.max);
+
+ if (sc.max > 0) {
+ RET_TO_BIF(erts_make_integer(sc.accum, p), DB_ERROR_NONE);
+ }
+ key = GETKEY(tb, (sc.lastterm)->dbterm.tpl);
+ if (end_condition != NIL &&
+ cmp_partly_bound(end_condition,key) > 0) { /* done anyway */
+ RET_TO_BIF(erts_make_integer(sc.accum,p),DB_ERROR_NONE);
+ }
+ /* Not done yet, let's trap. */
+ sz = size_object(key);
+ if (IS_USMALL(0, sc.accum)) {
+ hp = HAlloc(p, sz + 6);
+ eaccsum = make_small(sc.accum);
+ }
+ else {
+ hp = HAlloc(p, BIG_UINT_HEAP_SIZE + sz + 6);
+ eaccsum = uint_to_big(sc.accum, hp);
+ hp += BIG_UINT_HEAP_SIZE;
+ }
+ key = copy_struct(key, sz, &hp, &MSO(p));
+ continuation = TUPLE5
+ (hp,
+ tptr[1],
+ key,
+ tptr[3],
+ tptr[4],
+ eaccsum);
+ RET_TO_BIF(bif_trap1(&ets_select_delete_continue_exp, p, continuation),
+ DB_ERROR_NONE);
+
+#undef RET_TO_BIF
+}
+
+static int db_select_delete_tree(Process *p, DbTable *tbl,
+ Eterm pattern, Eterm *ret)
+{
+ DbTableTree *tb = &tbl->tree;
+ struct select_delete_context sc;
+ struct mp_info mpi;
+ Eterm lastkey = NIL;
+ Eterm key;
+ Eterm continuation;
+ unsigned sz;
+ Eterm *hp;
+ TreeDbTerm *this;
+ int errcode;
+ Eterm mpb;
+ Eterm eaccsum;
+
+#define RET_TO_BIF(Term,RetVal) do { \
+ if (mpi.mp != NULL) { \
+ erts_bin_free(mpi.mp); \
+ } \
+ if (sc.erase_lastterm) { \
+ free_term(tb, sc.lastterm); \
+ } \
+ *ret = (Term); \
+ return RetVal; \
+ } while(0)
+
+ mpi.mp = NULL;
+
+ sc.accum = 0;
+ sc.erase_lastterm = 0;
+ sc.lastterm = NULL;
+ sc.p = p;
+ sc.max = 1000;
+ sc.end_condition = NIL;
+ sc.keypos = tb->common.keypos;
+ sc.tb = tb;
+
+ if ((errcode = analyze_pattern(tb, pattern, &mpi)) != DB_ERROR_NONE) {
+ RET_TO_BIF(0,errcode);
+ }
+
+ if (!mpi.something_can_match) {
+ RET_TO_BIF(make_small(0),DB_ERROR_NONE);
+ /* can't possibly match anything */
+ }
+
+ sc.mp = mpi.mp;
+
+ if (!mpi.got_partial && mpi.some_limitation &&
+ cmp(mpi.least,mpi.most) == 0) {
+ doit_select_delete(tb,mpi.save_term,&sc, 0 /* direction doesn't
+ matter */);
+ RET_TO_BIF(erts_make_integer(sc.accum,p),DB_ERROR_NONE);
+ }
+
+ if (mpi.some_limitation) {
+ if ((this = find_next_from_pb_key(tb, &tb->static_stack, mpi.most)) != NULL) {
+ lastkey = GETKEY(tb, this->dbterm.tpl);
+ }
+ sc.end_condition = mpi.least;
+ }
+
+ traverse_backwards(tb, &tb->static_stack, lastkey, &doit_select_delete, &sc);
+ BUMP_REDS(p, 1000 - sc.max);
+
+ if (sc.max > 0) {
+ RET_TO_BIF(erts_make_integer(sc.accum,p), DB_ERROR_NONE);
+ }
+
+ key = GETKEY(tb, (sc.lastterm)->dbterm.tpl);
+ sz = size_object(key);
+ if (IS_USMALL(0, sc.accum)) {
+ hp = HAlloc(p, sz + PROC_BIN_SIZE + 6);
+ eaccsum = make_small(sc.accum);
+ }
+ else {
+ hp = HAlloc(p, BIG_UINT_HEAP_SIZE + sz + PROC_BIN_SIZE + 6);
+ eaccsum = uint_to_big(sc.accum, hp);
+ hp += BIG_UINT_HEAP_SIZE;
+ }
+ key = copy_struct(key, sz, &hp, &MSO(p));
+ mpb = db_make_mp_binary(p,mpi.mp,&hp);
+
+ continuation = TUPLE5
+ (hp,
+ tb->common.id,
+ key,
+ sc.end_condition, /* From the match program, needn't be copied */
+ mpb,
+ eaccsum);
+
+ /* Don't free mpi.mp, so don't use macro */
+ if (sc.erase_lastterm) {
+ free_term(tb, sc.lastterm);
+ }
+ *ret = bif_trap1(&ets_select_delete_continue_exp, p, continuation);
+ return DB_ERROR_NONE;
+
+#undef RET_TO_BIF
+
+}
+
+/*
+** Other interface routines (not directly coupled to one bif)
+*/
+
+/* Display tree contents (for dump) */
+static void db_print_tree(int to, void *to_arg,
+ int show,
+ DbTable *tbl)
+{
+ DbTableTree *tb = &tbl->tree;
+#ifdef TREE_DEBUG
+ if (show)
+ erts_print(to, to_arg, "\nTree data dump:\n"
+ "------------------------------------------------\n");
+ do_dump_tree2(to, to_arg, show, tb->root, 0);
+ if (show)
+ erts_print(to, to_arg, "\n"
+ "------------------------------------------------\n");
+#else
+ erts_print(to, to_arg, "Ordered set (AVL tree), Elements: %d\n", NITEMS(tb));
+ do_dump_tree(to, to_arg, tb->root);
+#endif
+}
+
+/* release all memory occupied by a single table */
+static int db_free_table_tree(DbTable *tbl)
+{
+ while (!db_free_table_continue_tree(tbl))
+ ;
+ return 1;
+}
+
+static int db_free_table_continue_tree(DbTable *tbl)
+{
+ DbTableTree *tb = &tbl->tree;
+ int result;
+
+ if (!tb->deletion) {
+ tb->static_stack.pos = 0;
+ tb->deletion = 1;
+ PUSH_NODE(&tb->static_stack, tb->root);
+ }
+ result = do_free_tree_cont(tb, DELETE_RECORD_LIMIT);
+ if (result) { /* Completely done. */
+ erts_db_free(ERTS_ALC_T_DB_STK,
+ (DbTable *) tb,
+ (void *) tb->static_stack.array,
+ sizeof(TreeDbTerm *) * STACK_NEED);
+ ASSERT(erts_smp_atomic_read(&tb->common.memory_size)
+ == sizeof(DbTable));
+ }
+ return result;
+}
+
+static int db_delete_all_objects_tree(Process* p, DbTable* tbl)
+{
+ db_free_table_tree(tbl);
+ db_create_tree(p, tbl);
+ erts_smp_atomic_set(&tbl->tree.common.nitems, 0);
+ return 0;
+}
+
+static void do_db_tree_foreach_offheap(TreeDbTerm *,
+ void (*)(ErlOffHeap *, void *),
+ void *);
+
+static void db_foreach_offheap_tree(DbTable *tbl,
+ void (*func)(ErlOffHeap *, void *),
+ void * arg)
+{
+ do_db_tree_foreach_offheap(tbl->tree.root, func, arg);
+}
+
+
+/*
+** Functions for internal use
+*/
+
+
+static void
+do_db_tree_foreach_offheap(TreeDbTerm *tdbt,
+ void (*func)(ErlOffHeap *, void *),
+ void * arg)
+{
+ if(!tdbt)
+ return;
+ do_db_tree_foreach_offheap(tdbt->left, func, arg);
+ (*func)(&(tdbt->dbterm.off_heap), arg);
+ do_db_tree_foreach_offheap(tdbt->right, func, arg);
+}
+
+static TreeDbTerm *linkout_tree(DbTableTree *tb,
+ Eterm key)
+{
+ TreeDbTerm **tstack[STACK_NEED];
+ int tpos = 0;
+ int dstack[STACK_NEED+1];
+ int dpos = 0;
+ int state = 0;
+ TreeDbTerm **this = &tb->root;
+ Sint c;
+ int dir;
+ TreeDbTerm *q = NULL;
+
+ /*
+ * Somewhat complicated, deletion in an AVL tree,
+ * The two helpers balance_left and balance_right are used to
+ * keep the balance. As in insert, we do the stacking ourselves.
+ */
+
+ reset_static_stack(tb);
+ dstack[dpos++] = DIR_END;
+ for (;;) {
+ if (!*this) { /* Failure */
+ return NULL;
+ } else if ((c = cmp(key,GETKEY(tb,(*this)->dbterm.tpl))) < 0) {
+ dstack[dpos++] = DIR_LEFT;
+ tstack[tpos++] = this;
+ this = &((*this)->left);
+ } else if (c > 0) { /* go right */
+ dstack[dpos++] = DIR_RIGHT;
+ tstack[tpos++] = this;
+ this = &((*this)->right);
+ } else { /* Equal key, found the one to delete*/
+ q = (*this);
+ if (q->right == NULL) {
+ (*this) = q->left;
+ state = 1;
+ } else if (q->left == NULL) {
+ (*this) = q->right;
+ state = 1;
+ } else {
+ dstack[dpos++] = DIR_LEFT;
+ tstack[tpos++] = this;
+ state = delsub(this);
+ }
+ erts_smp_atomic_dec(&tb->common.nitems);
+ break;
+ }
+ }
+ while (state && ( dir = dstack[--dpos] ) != DIR_END) {
+ this = tstack[--tpos];
+ if (dir == DIR_LEFT) {
+ state = balance_left(this);
+ } else {
+ state = balance_right(this);
+ }
+ }
+ return q;
+}
+
+static TreeDbTerm *linkout_object_tree(DbTableTree *tb,
+ Eterm object)
+{
+ TreeDbTerm **tstack[STACK_NEED];
+ int tpos = 0;
+ int dstack[STACK_NEED+1];
+ int dpos = 0;
+ int state = 0;
+ TreeDbTerm **this = &tb->root;
+ Sint c;
+ int dir;
+ TreeDbTerm *q = NULL;
+ Eterm key;
+
+ /*
+ * Somewhat complicated, deletion in an AVL tree,
+ * The two helpers balance_left and balance_right are used to
+ * keep the balance. As in insert, we do the stacking ourselves.
+ */
+
+
+ key = GETKEY(tb, tuple_val(object));
+
+ reset_static_stack(tb);
+ dstack[dpos++] = DIR_END;
+ for (;;) {
+ if (!*this) { /* Failure */
+ return NULL;
+ } else if ((c = cmp(key,GETKEY(tb,(*this)->dbterm.tpl))) < 0) {
+ dstack[dpos++] = DIR_LEFT;
+ tstack[tpos++] = this;
+ this = &((*this)->left);
+ } else if (c > 0) { /* go right */
+ dstack[dpos++] = DIR_RIGHT;
+ tstack[tpos++] = this;
+ this = &((*this)->right);
+ } else { /* Equal key, found the only possible matching object*/
+ if (!eq(object,make_tuple((*this)->dbterm.tpl))) {
+ return NULL;
+ }
+ q = (*this);
+ if (q->right == NULL) {
+ (*this) = q->left;
+ state = 1;
+ } else if (q->left == NULL) {
+ (*this) = q->right;
+ state = 1;
+ } else {
+ dstack[dpos++] = DIR_LEFT;
+ tstack[tpos++] = this;
+ state = delsub(this);
+ }
+ erts_smp_atomic_dec(&tb->common.nitems);
+ break;
+ }
+ }
+ while (state && ( dir = dstack[--dpos] ) != DIR_END) {
+ this = tstack[--tpos];
+ if (dir == DIR_LEFT) {
+ state = balance_left(this);
+ } else {
+ state = balance_right(this);
+ }
+ }
+ return q;
+}
+
+/*
+** For the select functions, analyzes the pattern and determines which
+** part of the tree should be searched. Also compiles the match program
+*/
+static int analyze_pattern(DbTableTree *tb, Eterm pattern,
+ struct mp_info *mpi)
+{
+ Eterm lst, tpl, ttpl;
+ Eterm *matches,*guards, *bodies;
+ Eterm sbuff[30];
+ Eterm *buff = sbuff;
+ Eterm *ptpl;
+ int i;
+ int num_heads = 0;
+ Eterm key;
+ Eterm partly_bound;
+ int res;
+ Eterm least = 0;
+ Eterm most = 0;
+
+ mpi->some_limitation = 1;
+ mpi->got_partial = 0;
+ mpi->something_can_match = 0;
+ mpi->mp = NULL;
+ mpi->all_objects = 1;
+ mpi->save_term = NULL;
+
+ for (lst = pattern; is_list(lst); lst = CDR(list_val(lst)))
+ ++num_heads;
+
+    if (lst != NIL) { /* not a proper list... */
+ return DB_ERROR_BADPARAM;
+ }
+ if (num_heads > 10) {
+ buff = erts_alloc(ERTS_ALC_T_DB_TMP, sizeof(Eterm) * num_heads * 3);
+ }
+
+ matches = buff;
+ guards = buff + num_heads;
+ bodies = buff + (num_heads * 2);
+
+ i = 0;
+ for(lst = pattern; is_list(lst); lst = CDR(list_val(lst))) {
+ Eterm body;
+ ttpl = CAR(list_val(lst));
+ if (!is_tuple(ttpl)) {
+ if (buff != sbuff) {
+ erts_free(ERTS_ALC_T_DB_TMP, buff);
+ }
+ return DB_ERROR_BADPARAM;
+ }
+ ptpl = tuple_val(ttpl);
+ if (ptpl[0] != make_arityval(3U)) {
+ if (buff != sbuff) {
+ erts_free(ERTS_ALC_T_DB_TMP, buff);
+ }
+ return DB_ERROR_BADPARAM;
+ }
+ matches[i] = tpl = ptpl[1];
+ guards[i] = ptpl[2];
+ bodies[i] = body = ptpl[3];
+ if (!is_list(body) || CDR(list_val(body)) != NIL ||
+ CAR(list_val(body)) != am_DollarUnderscore) {
+ mpi->all_objects = 0;
+ }
+ ++i;
+
+ partly_bound = NIL;
+ res = key_given(tb, tpl, &mpi->save_term, &partly_bound);
+ if ( res >= 0 ) { /* Can match something */
+ key = 0;
+ mpi->something_can_match = 1;
+ if (res > 0) {
+ key = GETKEY(tb,tuple_val(tpl));
+ } else if (partly_bound != NIL) {
+ mpi->got_partial = 1;
+ key = partly_bound;
+ } else {
+ mpi->some_limitation = 0;
+ }
+ if (key != 0) {
+ if (least == 0 ||
+ partly_bound_can_match_lesser(key,least)) {
+ least = key;
+ }
+ if (most == 0 ||
+ partly_bound_can_match_greater(key,most)) {
+ most = key;
+ }
+ }
+ }
+ }
+ mpi->least = least;
+ mpi->most = most;
+
+ /*
+ * It would be nice not to compile the match_spec if nothing could match,
+ * but then the select calls would not fail like they should on bad
+     * match specs that happen to specify non-existent keys etc.
+ */
+ if ((mpi->mp = db_match_compile(matches, guards, bodies,
+ num_heads, DCOMP_TABLE, NULL))
+ == NULL) {
+ if (buff != sbuff) {
+ erts_free(ERTS_ALC_T_DB_TMP, buff);
+ }
+ return DB_ERROR_BADPARAM;
+ }
+ if (buff != sbuff) {
+ erts_free(ERTS_ALC_T_DB_TMP, buff);
+ }
+ return DB_ERROR_NONE;
+}
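+
+/*
+** Illustrative example (added for clarity, not in the original source):
+** for a table with keypos 1, the pattern [{{foo,'$1'}, [], ['$_']}] has
+** the fully bound key foo, so least == most and at most one node needs
+** to be visited, while [{{{foo,'$1'},'$2'}, [], ['$_']}] has the partly
+** bound key {foo,'$1'}, which only limits the range of the traversal.
+*/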
+
+static void do_dump_tree(int to, void *to_arg, TreeDbTerm *t)
+{
+ if (t != NULL) {
+ do_dump_tree(to, to_arg, t->left);
+ erts_print(to, to_arg, "%T\n", make_tuple(t->dbterm.tpl));
+ do_dump_tree(to, to_arg, t->right);
+ }
+}
+
+static void free_term(DbTableTree *tb, TreeDbTerm* p)
+{
+ db_free_term_data(&(p->dbterm));
+ erts_db_free(ERTS_ALC_T_DB_TERM,
+ (DbTable *) tb,
+ (void *) p,
+ SIZ_DBTERM(p)*sizeof(Uint));
+}
+
+static int do_free_tree_cont(DbTableTree *tb, int num_left)
+{
+ TreeDbTerm *root;
+ TreeDbTerm *p;
+
+ for (;;) {
+ root = POP_NODE(&tb->static_stack);
+ if (root == NULL) break;
+ for (;;) {
+ if ((p = root->left) != NULL) {
+ root->left = NULL;
+ PUSH_NODE(&tb->static_stack, root);
+ root = p;
+ } else if ((p = root->right) != NULL) {
+ root->right = NULL;
+ PUSH_NODE(&tb->static_stack, root);
+ root = p;
+ } else {
+ free_term(tb, root);
+ if (--num_left > 0) {
+ break;
+ } else {
+ return 0; /* Done enough for now */
+ }
+ }
+ }
+ }
+ return 1;
+}
+
+static TreeDbTerm* get_term(DbTableTree *tb,
+ TreeDbTerm* old,
+ Eterm obj)
+{
+ TreeDbTerm* p = db_get_term((DbTableCommon *) tb,
+ (old != NULL) ? &(old->dbterm) : NULL,
+ ((char *) &(old->dbterm)) - ((char *) old),
+ obj);
+ return p;
+}
+
+/*
+ * Deletion helpers
+ */
+static int balance_left(TreeDbTerm **this)
+{
+ TreeDbTerm *p, *p1, *p2;
+ int b1, b2, h = 1;
+
+ p = *this;
+ switch (p->balance) {
+ case -1:
+ p->balance = 0;
+ break;
+ case 0:
+ p->balance = 1;
+ h = 0;
+ break;
+ case 1:
+ p1 = p->right;
+ b1 = p1->balance;
+ if (b1 >= 0) { /* Single RR rotation */
+ p->right = p1->left;
+ p1->left = p;
+ if (b1 == 0) {
+ p->balance = 1;
+ p1->balance = -1;
+ h = 0;
+ } else {
+ p->balance = p1->balance = 0;
+ }
+ (*this) = p1;
+ } else { /* Double RL rotation */
+ p2 = p1->left;
+ b2 = p2->balance;
+ p1->left = p2->right;
+ p2->right = p1;
+ p->right = p2->left;
+ p2->left = p;
+ p->balance = (b2 == 1) ? -1 : 0;
+ p1->balance = (b2 == -1) ? 1 : 0;
+ p2->balance = 0;
+ (*this) = p2;
+ }
+ break;
+ }
+ return h;
+}
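+
+/*
+** Illustration (added for clarity, not in the original source) of the
+** single RR rotation performed above, with p the node that became
+** unbalanced and p1 its right child:
+**
+**        p                     p1
+**       / \                   /  \
+**      A   p1       ==>      p    C
+**         /  \              / \
+**        B    C            A   B
+*/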
+
+static int balance_right(TreeDbTerm **this)
+{
+ TreeDbTerm *p, *p1, *p2;
+ int b1, b2, h = 1;
+
+ p = *this;
+ switch (p->balance) {
+ case 1:
+ p->balance = 0;
+ break;
+ case 0:
+ p->balance = -1;
+ h = 0;
+ break;
+ case -1:
+ p1 = p->left;
+ b1 = p1->balance;
+ if (b1 <= 0) { /* Single LL rotation */
+ p->left = p1->right;
+ p1->right = p;
+ if (b1 == 0) {
+ p->balance = -1;
+ p1->balance = 1;
+ h = 0;
+ } else {
+ p->balance = p1->balance = 0;
+ }
+ (*this) = p1;
+ } else { /* Double LR rotation */
+ p2 = p1->right;
+ b2 = p2->balance;
+ p1->right = p2->left;
+ p2->left = p1;
+ p->left = p2->right;
+ p2->right = p;
+ p->balance = (b2 == -1) ? 1 : 0;
+ p1->balance = (b2 == 1) ? -1 : 0;
+ p2->balance = 0;
+ (*this) = p2;
+ }
+ }
+ return h;
+}
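+
+/*
+** Mirror illustration (added for clarity) of the single LL rotation in
+** balance_right above, with p the unbalanced node and p1 its left child:
+**
+**          p                  p1
+**         / \                /  \
+**        p1  C      ==>     A    p
+**       /  \                    / \
+**      A    B                  B   C
+*/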
+
+static int delsub(TreeDbTerm **this)
+{
+ TreeDbTerm **tstack[STACK_NEED];
+ int tpos = 0;
+ TreeDbTerm *q = (*this);
+ TreeDbTerm **r = &(q->left);
+ int h;
+
+    /*
+     * Walk down the deleted node's left subtree to its rightmost
+     * node, unlink that node and return it to be put in the
+     * deleted object's place.
+     */
+
+ while ((*r)->right != NULL) {
+ tstack[tpos++] = r;
+ r = &((*r)->right);
+ }
+ *this = *r;
+ *r = (*r)->left;
+ (*this)->left = q->left;
+ (*this)->right = q->right;
+ (*this)->balance = q->balance;
+ tstack[0] = &((*this)->left);
+ h = 1;
+ while (tpos && h) {
+ r = tstack[--tpos];
+ h = balance_right(r);
+ }
+ return h;
+}
+
+/*
+ * Helper for db_slot
+ */
+
+static TreeDbTerm *slot_search(Process *p, DbTableTree *tb, Sint slot)
+{
+ TreeDbTerm *this;
+ TreeDbTerm *tmp;
+ DbTreeStack* stack = get_any_stack(tb);
+ ASSERT(stack != NULL);
+
+ if (slot == 1) { /* Don't search from where we are if we are
+ looking for the first slot */
+ stack->slot = 0;
+ }
+
+ if (stack->slot == 0) { /* clear stack if slot positions
+ are not recorded */
+ stack->pos = 0;
+ }
+ if (EMPTY_NODE(stack)) {
+ this = tb->root;
+ if (this == NULL)
+ goto done;
+ while (this->left != NULL){
+ PUSH_NODE(stack, this);
+ this = this->left;
+ }
+ PUSH_NODE(stack, this);
+ stack->slot = 1;
+ }
+ this = TOP_NODE(stack);
+ while (stack->slot != slot && this != NULL) {
+ if (slot > stack->slot) {
+ if (this->right != NULL) {
+ this = this->right;
+ while (this->left != NULL) {
+ PUSH_NODE(stack, this);
+ this = this->left;
+ }
+ PUSH_NODE(stack, this);
+ } else {
+ for (;;) {
+ tmp = POP_NODE(stack);
+ this = TOP_NODE(stack);
+ if (this == NULL || this->left == tmp)
+ break;
+ }
+ }
+ ++(stack->slot);
+ } else {
+ if (this->left != NULL) {
+ this = this->left;
+ while (this->right != NULL) {
+ PUSH_NODE(stack, this);
+ this = this->right;
+ }
+ PUSH_NODE(stack, this);
+ } else {
+ for (;;) {
+ tmp = POP_NODE(stack);
+ this = TOP_NODE(stack);
+ if (this == NULL || this->right == tmp)
+ break;
+ }
+ }
+ --(stack->slot);
+ }
+ }
+done:
+ release_stack(tb,stack);
+ return this;
+}
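+
+/*
+** Design note (added for clarity): stack->slot caches the slot number of
+** the element on top of the stack, so a run of ets:slot/2 calls with
+** increasing slot numbers moves one step per call from the cached
+** position instead of descending from the root every time.
+*/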
+
+/*
+ * Find next and previous in sort order
+ */
+
+static TreeDbTerm *find_next(DbTableTree *tb, DbTreeStack* stack, Eterm key)
+{
+ TreeDbTerm *this;
+ TreeDbTerm *tmp;
+ Sint c;
+
+ if(( this = TOP_NODE(stack)) != NULL) {
+ if (!CMP_EQ(GETKEY(tb, this->dbterm.tpl),key)) {
+ /* Start from the beginning */
+ stack->pos = stack->slot = 0;
+ }
+ }
+ if (EMPTY_NODE(stack)) { /* Have to rebuild the stack */
+ if (( this = tb->root ) == NULL)
+ return NULL;
+ for (;;) {
+ PUSH_NODE(stack, this);
+ if (( c = cmp(GETKEY(tb, this->dbterm.tpl),key) ) < 0) {
+	    if (this->right == NULL) /* We are at the previous
+					element and the one we are
+					looking for does not exist */
+ break;
+ else
+ this = this->right;
+ } else if (c > 0) {
+ if (this->left == NULL) /* Done */
+ return this;
+ else
+ this = this->left;
+ } else
+ break;
+ }
+ }
+ /* The next element from this... */
+ if (this->right != NULL) {
+ this = this->right;
+ PUSH_NODE(stack,this);
+ while (this->left != NULL) {
+ this = this->left;
+ PUSH_NODE(stack, this);
+ }
+ if (stack->slot > 0)
+ ++(stack->slot);
+ } else {
+ do {
+ tmp = POP_NODE(stack);
+ if (( this = TOP_NODE(stack)) == NULL) {
+ stack->slot = 0;
+ return NULL;
+ }
+ } while (this->right == tmp);
+ if (stack->slot > 0)
+ ++(stack->slot);
+ }
+ return this;
+}
+
+static TreeDbTerm *find_prev(DbTableTree *tb, DbTreeStack* stack, Eterm key)
+{
+ TreeDbTerm *this;
+ TreeDbTerm *tmp;
+ Sint c;
+
+ if(( this = TOP_NODE(stack)) != NULL) {
+ if (!CMP_EQ(GETKEY(tb, this->dbterm.tpl),key)) {
+ /* Start from the beginning */
+ stack->pos = stack->slot = 0;
+ }
+ }
+ if (EMPTY_NODE(stack)) { /* Have to rebuild the stack */
+ if (( this = tb->root ) == NULL)
+ return NULL;
+ for (;;) {
+ PUSH_NODE(stack, this);
+ if (( c = cmp(GETKEY(tb, this->dbterm.tpl),key) ) > 0) {
+	    if (this->left == NULL) /* We are at the next
+				       element and the one we are
+				       looking for does not exist */
+ break;
+ else
+ this = this->left;
+ } else if (c < 0) {
+ if (this->right == NULL) /* Done */
+ return this;
+ else
+ this = this->right;
+ } else
+ break;
+ }
+ }
+ /* The previous element from this... */
+ if (this->left != NULL) {
+ this = this->left;
+ PUSH_NODE(stack,this);
+ while (this->right != NULL) {
+ this = this->right;
+ PUSH_NODE(stack, this);
+ }
+ if (stack->slot > 0)
+ --(stack->slot);
+ } else {
+ do {
+ tmp = POP_NODE(stack);
+ if (( this = TOP_NODE(stack)) == NULL) {
+ stack->slot = 0;
+ return NULL;
+ }
+ } while (this->left == tmp);
+ if (stack->slot > 0)
+ --(stack->slot);
+ }
+ return this;
+}
+
+static TreeDbTerm *find_next_from_pb_key(DbTableTree *tb, DbTreeStack* stack,
+ Eterm key)
+{
+ TreeDbTerm *this;
+ TreeDbTerm *tmp;
+ Sint c;
+
+    /* empty the stack, we have to "re-search" from the root */
+ stack->pos = stack->slot = 0;
+ if (( this = tb->root ) == NULL)
+ return NULL;
+ for (;;) {
+ PUSH_NODE(stack, this);
+ if (( c = cmp_partly_bound(key,GETKEY(tb, this->dbterm.tpl)) ) >= 0) {
+ if (this->right == NULL) {
+ do {
+ tmp = POP_NODE(stack);
+ if (( this = TOP_NODE(stack)) == NULL) {
+ return NULL;
+ }
+ } while (this->right == tmp);
+ return this;
+ } else
+ this = this->right;
+ } else /*if (c < 0)*/ {
+ if (this->left == NULL) /* Done */
+ return this;
+ else
+ this = this->left;
+ }
+ }
+}
+
+static TreeDbTerm *find_prev_from_pb_key(DbTableTree *tb, DbTreeStack* stack,
+ Eterm key)
+{
+ TreeDbTerm *this;
+ TreeDbTerm *tmp;
+ Sint c;
+
+    /* empty the stack, we have to "re-search" from the root */
+ stack->pos = stack->slot = 0;
+ if (( this = tb->root ) == NULL)
+ return NULL;
+ for (;;) {
+ PUSH_NODE(stack, this);
+ if (( c = cmp_partly_bound(key,GETKEY(tb, this->dbterm.tpl)) ) <= 0) {
+ if (this->left == NULL) {
+ do {
+ tmp = POP_NODE(stack);
+ if (( this = TOP_NODE(stack)) == NULL) {
+ return NULL;
+ }
+ } while (this->left == tmp);
+ return this;
+ } else
+ this = this->left;
+	} else /*if (c > 0)*/ {
+ if (this->right == NULL) /* Done */
+ return this;
+ else
+ this = this->right;
+ }
+ }
+}
+
+
+/*
+ * Just lookup a node
+ */
+static TreeDbTerm *find_node(DbTableTree *tb, Eterm key)
+{
+ TreeDbTerm *this;
+ Sint res;
+ DbTreeStack* stack = get_static_stack(tb);
+
+ if(!stack || EMPTY_NODE(stack)
+ || !CMP_EQ(GETKEY(tb, ( this = TOP_NODE(stack) )->dbterm.tpl), key)) {
+
+ this = tb->root;
+ while (this != NULL &&
+ ( res = cmp(key, GETKEY(tb, this->dbterm.tpl)) ) != 0) {
+ if (res < 0)
+ this = this->left;
+ else
+ this = this->right;
+ }
+ }
+ release_stack(tb,stack);
+ return this;
+}
+
+/*
+ * Lookup a node and return the address of the node pointer in the tree
+ */
+static TreeDbTerm **find_node2(DbTableTree *tb, Eterm key)
+{
+ TreeDbTerm **this;
+ Sint res;
+
+ this = &tb->root;
+ while ((*this) != NULL &&
+ ( res = cmp(key, GETKEY(tb, (*this)->dbterm.tpl)) ) != 0) {
+ if (res < 0)
+ this = &((*this)->left);
+ else
+ this = &((*this)->right);
+ }
+ if (*this == NULL)
+ return NULL;
+ return this;
+}
+
+static int db_lookup_dbterm_tree(DbTable *tbl, Eterm key, DbUpdateHandle* handle)
+{
+ DbTableTree *tb = &tbl->tree;
+ TreeDbTerm **pp = find_node2(tb, key);
+
+ if (pp == NULL) return 0;
+
+ handle->tb = tbl;
+ handle->dbterm = &(*pp)->dbterm;
+ handle->bp = (void**) pp;
+ handle->new_size = (*pp)->dbterm.size;
+ handle->mustResize = 0;
+ return 1;
+}
+
+static void db_finalize_dbterm_tree(DbUpdateHandle* handle)
+{
+ if (handle->mustResize) {
+ Eterm* top;
+ Eterm copy;
+ DbTerm* newDbTerm;
+ DbTableTree *tb = &handle->tb->tree;
+ TreeDbTerm* oldp = (TreeDbTerm*) *handle->bp;
+ TreeDbTerm* newp = erts_db_alloc(ERTS_ALC_T_DB_TERM,
+ handle->tb,
+ sizeof(TreeDbTerm)+sizeof(Eterm)*(handle->new_size-1));
+ memcpy(newp, oldp, sizeof(TreeDbTerm)-sizeof(DbTerm)); /* copy only tree header */
+ *(handle->bp) = newp;
+ reset_static_stack(tb);
+ newDbTerm = &newp->dbterm;
+
+ newDbTerm->size = handle->new_size;
+ newDbTerm->off_heap.mso = NULL;
+ newDbTerm->off_heap.externals = NULL;
+ #ifndef HYBRID /* FIND ME! */
+ newDbTerm->off_heap.funs = NULL;
+ #endif
+ newDbTerm->off_heap.overhead = 0;
+
+ /* make a flat copy */
+ top = DBTERM_BUF(newDbTerm);
+ copy = copy_struct(make_tuple(handle->dbterm->tpl),
+ handle->new_size,
+ &top, &newDbTerm->off_heap);
+ DBTERM_SET_TPL(newDbTerm,tuple_val(copy));
+
+ db_free_term_data(handle->dbterm);
+ erts_db_free(ERTS_ALC_T_DB_TERM,
+ handle->tb,
+ (void *) (((char *) handle->dbterm) - (sizeof(TreeDbTerm) - sizeof(DbTerm))),
+ sizeof(TreeDbTerm) + sizeof(Eterm)*(handle->dbterm->size-1));
+ }
+#ifdef DEBUG
+ handle->dbterm = 0;
+#endif
+ return;
+}
+
+/*
+ * Traverse the tree with a callback function, used by db_match_xxx
+ */
+static void traverse_backwards(DbTableTree *tb,
+ DbTreeStack* stack,
+ Eterm lastkey,
+ int (*doit)(DbTableTree *,
+ TreeDbTerm *,
+ void *,
+ int),
+ void *context)
+{
+ TreeDbTerm *this, *next;
+
+ if (lastkey == NIL) {
+ stack->pos = stack->slot = 0;
+ if (( this = tb->root ) == NULL) {
+ return;
+ }
+ while (this != NULL) {
+ PUSH_NODE(stack, this);
+ this = this->right;
+ }
+ this = TOP_NODE(stack);
+ next = find_prev(tb, stack, GETKEY(tb, this->dbterm.tpl));
+ if (!((*doit)(tb, this, context, 0)))
+ return;
+ } else {
+ next = find_prev(tb, stack, lastkey);
+ }
+
+ while ((this = next) != NULL) {
+ next = find_prev(tb, stack, GETKEY(tb, this->dbterm.tpl));
+ if (!((*doit)(tb, this, context, 0)))
+ return;
+ }
+}
+
+/*
+ * Traverse the tree with a callback function, used by db_match_xxx
+ */
+static void traverse_forward(DbTableTree *tb,
+ DbTreeStack* stack,
+ Eterm lastkey,
+ int (*doit)(DbTableTree *,
+ TreeDbTerm *,
+ void *,
+ int),
+ void *context)
+{
+ TreeDbTerm *this, *next;
+
+ if (lastkey == NIL) {
+ stack->pos = stack->slot = 0;
+ if (( this = tb->root ) == NULL) {
+ return;
+ }
+ while (this != NULL) {
+ PUSH_NODE(stack, this);
+ this = this->left;
+ }
+ this = TOP_NODE(stack);
+ next = find_next(tb, stack, GETKEY(tb, this->dbterm.tpl));
+ if (!((*doit)(tb, this, context, 1)))
+ return;
+ } else {
+ next = find_next(tb, stack, lastkey);
+ }
+
+ while ((this = next) != NULL) {
+ next = find_next(tb, stack, GETKEY(tb, this->dbterm.tpl));
+ if (!((*doit)(tb, this, context, 1)))
+ return;
+ }
+}
+
+/*
+ * Returns 1 if the key is fully bound (in which case *ret is set to
+ * point to the object concerned), 0 if the key is not bound, and -1
+ * if nothing can possibly match.
+ */
+static int key_given(DbTableTree *tb, Eterm pattern, TreeDbTerm **ret,
+ Eterm *partly_bound)
+{
+ TreeDbTerm *this;
+ Eterm key;
+
+ ASSERT(ret != NULL);
+ if (pattern == am_Underscore || db_is_variable(pattern) != -1)
+ return 0;
+ key = db_getkey(tb->common.keypos, pattern);
+ if (is_non_value(key))
+ return -1; /* can't possibly match anything */
+ if (!db_has_variable(key)) { /* Bound key */
+ if (( this = find_node(tb, key) ) == NULL) {
+ return -1;
+ }
+ *ret = this;
+ return 1;
+ } else if (partly_bound != NULL && key != am_Underscore &&
+ db_is_variable(key) < 0)
+ *partly_bound = key;
+
+ return 0;
+}
+
+
+
+static Sint do_cmp_partly_bound(Eterm a, Eterm b, int *done)
+{
+ Eterm* aa;
+ Eterm* bb;
+ Eterm a_hdr;
+ Eterm b_hdr;
+ int i;
+ Sint j;
+
+ /* A variable matches anything */
+ if (is_atom(a) && (a == am_Underscore || (db_is_variable(a) >= 0))) {
+ *done = 1;
+ return 0;
+ }
+ if (a == b)
+ return 0;
+
+ switch (a & _TAG_PRIMARY_MASK) {
+ case TAG_PRIMARY_LIST:
+ if (!is_list(b)) {
+ return cmp(a,b);
+ }
+ aa = list_val(a);
+ bb = list_val(b);
+ while (1) {
+ if ((j = do_cmp_partly_bound(*aa++, *bb++, done)) != 0 || *done)
+ return j;
+ if (*aa==*bb)
+ return 0;
+ if (is_not_list(*aa) || is_not_list(*bb))
+ return do_cmp_partly_bound(*aa, *bb, done);
+ aa = list_val(*aa);
+ bb = list_val(*bb);
+ }
+ case TAG_PRIMARY_BOXED:
+ if ((b & _TAG_PRIMARY_MASK) != TAG_PRIMARY_BOXED) {
+ return cmp(a,b);
+ }
+ a_hdr = ((*boxed_val(a)) & _TAG_HEADER_MASK) >> _TAG_PRIMARY_SIZE;
+ b_hdr = ((*boxed_val(b)) & _TAG_HEADER_MASK) >> _TAG_PRIMARY_SIZE;
+ if (a_hdr != b_hdr) {
+ return cmp(a, b);
+ }
+ if (a_hdr == (_TAG_HEADER_ARITYVAL >> _TAG_PRIMARY_SIZE)) {
+ aa = tuple_val(a);
+ bb = tuple_val(b);
+ /* compare the arities */
+ i = arityval(*aa); /* get the arity*/
+ if (i < arityval(*bb)) return(-1);
+ if (i > arityval(*bb)) return(1);
+ while (i--) {
+ if ((j = do_cmp_partly_bound(*++aa, *++bb, done)) != 0
+ || *done)
+ return j;
+ }
+ return 0;
+ }
+ /* Drop through */
+ default:
+ return cmp(a, b);
+ }
+}
+
+static Sint cmp_partly_bound(Eterm partly_bound_key, Eterm bound_key)
+{
+ int done = 0;
+ Sint ret = do_cmp_partly_bound(partly_bound_key, bound_key, &done);
+#ifdef HARDDEBUG
+ erts_fprintf(stderr,"\ncmp_partly_bound: %T", partly_bound_key);
+ if (ret < 0)
+ erts_fprintf(stderr," < ");
+ else if (ret > 0)
+ erts_fprintf(stderr," > ");
+ else
+ erts_fprintf(stderr," == ");
+ erts_fprintf(stderr,"%T\n",bound_key);
+#endif
+ return ret;
+}
+
+/*
+** For partly_bound debugging....
+**
+BIF_RETTYPE ets_testnisse_2(BIF_ALIST_2)
+BIF_ADECL_2
+{
+ Eterm r1 = make_small(partly_bound_can_match_lesser(BIF_ARG_1,
+ BIF_ARG_2));
+ Eterm r2 = make_small(partly_bound_can_match_greater(BIF_ARG_1,
+ BIF_ARG_2));
+ Eterm *hp = HAlloc(BIF_P,3);
+ Eterm ret;
+
+ ret = TUPLE2(hp,r1,r2);
+ BIF_RET(ret);
+}
+**
+*/
+static int partly_bound_can_match_lesser(Eterm partly_bound_1,
+ Eterm partly_bound_2)
+{
+ int done = 0;
+ int ret = do_partly_bound_can_match_lesser(partly_bound_1,
+ partly_bound_2,
+ &done);
+#ifdef HARDDEBUG
+ erts_fprintf(stderr,"\npartly_bound_can_match_lesser: %T",partly_bound_1);
+ if (ret)
+ erts_fprintf(stderr," can match lesser than ");
+ else
+ erts_fprintf(stderr," can not match lesser than ");
+ erts_fprintf(stderr,"%T\n",partly_bound_2);
+#endif
+ return ret;
+}
+
+static int partly_bound_can_match_greater(Eterm partly_bound_1,
+ Eterm partly_bound_2)
+{
+ int done = 0;
+ int ret = do_partly_bound_can_match_greater(partly_bound_1,
+ partly_bound_2,
+ &done);
+#ifdef HARDDEBUG
+ erts_fprintf(stderr,"\npartly_bound_can_match_greater: %T",partly_bound_1);
+ if (ret)
+ erts_fprintf(stderr," can match greater than ");
+ else
+ erts_fprintf(stderr," can not match greater than ");
+ erts_fprintf(stderr,"%T\n",partly_bound_2);
+#endif
+ return ret;
+}
+
+static int do_partly_bound_can_match_lesser(Eterm a, Eterm b,
+ int *done)
+{
+ Eterm* aa;
+ Eterm* bb;
+ Sint i;
+ int j;
+
+ if (is_atom(a) && (a == am_Underscore ||
+ (db_is_variable(a) >= 0))) {
+ *done = 1;
+ if (is_atom(b) && (b == am_Underscore ||
+ (db_is_variable(b) >= 0))) {
+ return 0;
+ } else {
+ return 1;
+ }
+ } else if (is_atom(b) && (b == am_Underscore ||
+ (db_is_variable(b) >= 0))) {
+ *done = 1;
+ return 0;
+ }
+
+ if (a == b)
+ return 0;
+
+ if (not_eq_tags(a,b)) {
+ *done = 1;
+ return (cmp(a, b) < 0) ? 1 : 0;
+ }
+
+ /* we now know that tags are the same */
+ switch (tag_val_def(a)) {
+ case TUPLE_DEF:
+ aa = tuple_val(a);
+ bb = tuple_val(b);
+ /* compare the arities */
+ if (arityval(*aa) < arityval(*bb)) return 1;
+ if (arityval(*aa) > arityval(*bb)) return 0;
+ i = arityval(*aa); /* get the arity*/
+ while (i--) {
+ if ((j = do_partly_bound_can_match_lesser(*++aa, *++bb,
+ done)) != 0
+ || *done)
+ return j;
+ }
+ return 0;
+ case LIST_DEF:
+ aa = list_val(a);
+ bb = list_val(b);
+ while (1) {
+ if ((j = do_partly_bound_can_match_lesser(*aa++, *bb++,
+ done)) != 0
+ || *done)
+ return j;
+ if (*aa==*bb)
+ return 0;
+ if (is_not_list(*aa) || is_not_list(*bb))
+ return do_partly_bound_can_match_lesser(*aa, *bb,
+ done);
+ aa = list_val(*aa);
+ bb = list_val(*bb);
+ }
+ default:
+ if((i = cmp(a, b)) != 0) {
+ *done = 1;
+ }
+ return (i < 0) ? 1 : 0;
+ }
+}
+
+static int do_partly_bound_can_match_greater(Eterm a, Eterm b,
+ int *done)
+{
+ Eterm* aa;
+ Eterm* bb;
+ Sint i;
+ int j;
+
+ if (is_atom(a) && (a == am_Underscore ||
+ (db_is_variable(a) >= 0))) {
+ *done = 1;
+ if (is_atom(b) && (b == am_Underscore ||
+ (db_is_variable(b) >= 0))) {
+ return 0;
+ } else {
+ return 1;
+ }
+ } else if (is_atom(b) && (b == am_Underscore ||
+ (db_is_variable(b) >= 0))) {
+ *done = 1;
+ return 0;
+ }
+
+ if (a == b)
+ return 0;
+
+ if (not_eq_tags(a,b)) {
+ *done = 1;
+ return (cmp(a, b) > 0) ? 1 : 0;
+ }
+
+ /* we now know that tags are the same */
+ switch (tag_val_def(a)) {
+ case TUPLE_DEF:
+ aa = tuple_val(a);
+ bb = tuple_val(b);
+ /* compare the arities */
+ if (arityval(*aa) < arityval(*bb)) return 0;
+ if (arityval(*aa) > arityval(*bb)) return 1;
+ i = arityval(*aa); /* get the arity*/
+ while (i--) {
+ if ((j = do_partly_bound_can_match_greater(*++aa, *++bb,
+ done)) != 0
+ || *done)
+ return j;
+ }
+ return 0;
+ case LIST_DEF:
+ aa = list_val(a);
+ bb = list_val(b);
+ while (1) {
+ if ((j = do_partly_bound_can_match_greater(*aa++, *bb++,
+ done)) != 0
+ || *done)
+ return j;
+ if (*aa==*bb)
+ return 0;
+ if (is_not_list(*aa) || is_not_list(*bb))
+ return do_partly_bound_can_match_greater(*aa, *bb,
+ done);
+ aa = list_val(*aa);
+ bb = list_val(*bb);
+ }
+ default:
+ if((i = cmp(a, b)) != 0) {
+ *done = 1;
+ }
+ return (i > 0) ? 1 : 0;
+ }
+}
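+
+/*
+** Illustrative examples (added for clarity, not in the original source):
+** with a = {foo,'_'} and b = {foo,1}, both ..._can_match_lesser(a, b) and
+** ..._can_match_greater(a, b) return 1, since '_' can stand for terms
+** ordered both below and above 1. With a = {bar,'_'} and b = {foo,1},
+** only ..._can_match_lesser(a, b) returns 1, because bar < foo already
+** decides the order.
+*/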
+
+/*
+ * Callback functions for the different match functions
+ */
+
+static int doit_select(DbTableTree *tb, TreeDbTerm *this, void *ptr,
+ int forward)
+{
+ struct select_context *sc = (struct select_context *) ptr;
+ Eterm ret;
+ Uint32 dummy;
+
+ sc->lastobj = this->dbterm.tpl;
+
+ if (sc->end_condition != NIL &&
+ ((forward &&
+ cmp_partly_bound(sc->end_condition,
+ GETKEY_WITH_POS(sc->keypos,
+ this->dbterm.tpl)) < 0) ||
+ (!forward &&
+ cmp_partly_bound(sc->end_condition,
+ GETKEY_WITH_POS(sc->keypos,
+ this->dbterm.tpl)) > 0))) {
+ return 0;
+ }
+ ret = db_prog_match(sc->p, sc->mp,
+ make_tuple(this->dbterm.tpl),
+ 0, &dummy);
+ if (is_value(ret)) {
+ Uint sz;
+ Eterm *hp;
+ if (sc->all_objects) {
+ hp = HAlloc(sc->p, this->dbterm.size + 2);
+ ret = copy_shallow(DBTERM_BUF(&this->dbterm),
+ this->dbterm.size,
+ &hp,
+ &MSO(sc->p));
+ } else {
+ sz = size_object(ret);
+ hp = HAlloc(sc->p, sz + 2);
+ ret = copy_struct(ret, sz,
+ &hp, &MSO(sc->p));
+ }
+ sc->accum = CONS(hp, ret, sc->accum);
+ }
+ if (MBUF(sc->p)) {
+ /*
+ * Force a trap and GC if a heap fragment was created. Many heap fragments
+ * make the GC slow.
+ */
+ sc->max = 0;
+ }
+ if (--(sc->max) <= 0) {
+ return 0;
+ }
+ return 1;
+}
+
+static int doit_select_count(DbTableTree *tb, TreeDbTerm *this, void *ptr,
+ int forward)
+{
+ struct select_count_context *sc = (struct select_count_context *) ptr;
+ Eterm ret;
+ Uint32 dummy;
+
+ sc->lastobj = this->dbterm.tpl;
+
+ /* Always backwards traversing */
+ if (sc->end_condition != NIL &&
+ (cmp_partly_bound(sc->end_condition,
+ GETKEY_WITH_POS(sc->keypos,
+ this->dbterm.tpl)) > 0)) {
+ return 0;
+ }
+ ret = db_prog_match(sc->p, sc->mp,
+ make_tuple(this->dbterm.tpl),
+ 0, &dummy);
+ if (ret == am_true) {
+ ++(sc->got);
+ }
+ if (--(sc->max) <= 0) {
+ return 0;
+ }
+ return 1;
+}
+
+static int doit_select_chunk(DbTableTree *tb, TreeDbTerm *this, void *ptr,
+ int forward)
+{
+ struct select_context *sc = (struct select_context *) ptr;
+ Eterm ret;
+ Uint32 dummy;
+
+ sc->lastobj = this->dbterm.tpl;
+
+ if (sc->end_condition != NIL &&
+ ((forward &&
+ cmp_partly_bound(sc->end_condition,
+ GETKEY_WITH_POS(sc->keypos,
+ this->dbterm.tpl)) < 0) ||
+ (!forward &&
+ cmp_partly_bound(sc->end_condition,
+ GETKEY_WITH_POS(sc->keypos,
+ this->dbterm.tpl)) > 0))) {
+ return 0;
+ }
+
+ ret = db_prog_match(sc->p, sc->mp,
+ make_tuple(this->dbterm.tpl),
+ 0, &dummy);
+ if (is_value(ret)) {
+ Uint sz;
+ Eterm *hp;
+
+ ++(sc->got);
+ if (sc->all_objects) {
+ hp = HAlloc(sc->p, this->dbterm.size + 2);
+ ret = copy_shallow(DBTERM_BUF(&this->dbterm),
+ this->dbterm.size,
+ &hp,
+ &MSO(sc->p));
+ } else {
+ sz = size_object(ret);
+ hp = HAlloc(sc->p, sz + 2);
+ ret = copy_struct(ret, sz, &hp, &MSO(sc->p));
+ }
+ sc->accum = CONS(hp, ret, sc->accum);
+ }
+ if (MBUF(sc->p)) {
+ /*
+ * Force a trap and GC if a heap fragment was created. Many heap fragments
+ * make the GC slow.
+ */
+ sc->max = 0;
+ }
+ if (--(sc->max) <= 0 || sc->got == sc->chunk_size) {
+ return 0;
+ }
+ return 1;
+}
+
+
+static int doit_select_delete(DbTableTree *tb, TreeDbTerm *this, void *ptr,
+ int forward)
+{
+ struct select_delete_context *sc = (struct select_delete_context *) ptr;
+ Eterm ret;
+ Uint32 dummy;
+ Eterm key;
+
+ if (sc->erase_lastterm)
+ free_term(tb, sc->lastterm);
+ sc->erase_lastterm = 0;
+ sc->lastterm = this;
+
+ if (sc->end_condition != NIL &&
+ cmp_partly_bound(sc->end_condition,
+ GETKEY_WITH_POS(sc->keypos,
+ this->dbterm.tpl)) > 0)
+ return 0;
+ ret = db_prog_match(sc->p, sc->mp,
+ make_tuple(this->dbterm.tpl),
+ 0, &dummy);
+ if (ret == am_true) {
+ key = GETKEY(sc->tb, this->dbterm.tpl);
+ linkout_tree(sc->tb, key);
+ sc->erase_lastterm = 1;
+ ++sc->accum;
+ }
+ if (--(sc->max) <= 0) {
+ return 0;
+ }
+ return 1;
+}
+
+#ifdef TREE_DEBUG
+static void do_dump_tree2(int to, void *to_arg, int show, TreeDbTerm *t,
+                          int offset)
+{
+    if (t == NULL)
+        return;
+    do_dump_tree2(to, to_arg, show, t->right, offset + 4);
+    if (show) {
+        erts_print(to, to_arg, "%*s%T (addr = %p, bal = %d)\n",
+                   offset, "", make_tuple(t->dbterm.tpl),
+                   t, t->balance);
+    }
+    do_dump_tree2(to, to_arg, show, t->left, offset + 4);
+}
+
+#endif
+
+#ifdef HARDDEBUG
+
+void db_check_table_tree(DbTable *tbl)
+{
+ DbTableTree *tb = &tbl->tree;
+ check_table_tree(tb->root);
+ check_saved_stack(tb);
+ check_slot_pos(tb);
+}
+
+static TreeDbTerm *traverse_until(TreeDbTerm *t, int *current, int to)
+{
+ TreeDbTerm *tmp;
+ if (t == NULL)
+ return NULL;
+ tmp = traverse_until(t->left, current, to);
+ if (tmp != NULL)
+ return tmp;
+ ++(*current);
+ if (*current == to)
+ return t;
+ return traverse_until(t->right, current, to);
+}
+
+static void check_slot_pos(DbTableTree *tb)
+{
+ int pos = 0;
+ TreeDbTerm *t;
+    if (tb->static_stack.slot == 0 || tb->static_stack.pos == 0)
+        return;
+    t = traverse_until(tb->root, &pos, tb->static_stack.slot);
+    if (t != tb->static_stack.array[tb->static_stack.pos - 1]) {
+        erts_fprintf(stderr, "Slot position does not correspond with stack, "
+                     "element position %d is really 0x%08X, when stack says "
+                     "it's 0x%08X\n", tb->static_stack.slot, t,
+                     tb->static_stack.array[tb->static_stack.pos - 1]);
+ do_dump_tree2(ERTS_PRINT_STDERR, NULL, 1, tb->root, 0);
+ }
+}
+
+
+static void check_saved_stack(DbTableTree *tb)
+{
+ TreeDbTerm *t = tb->root;
+ DbTreeStack* stack = &tb->static_stack;
+ int n = 0;
+ if (stack->pos == 0)
+ return;
+ if (t != stack->array[0]) {
+ erts_fprintf(stderr,"tb->stack[0] is 0x%08X, should be 0x%08X\n",
+ stack->array[0], t);
+ do_dump_tree2(ERTS_PRINT_STDERR, NULL, 1, tb->root, 0);
+ return;
+ }
+ while (n < stack->pos) {
+ if (t == NULL) {
+ erts_fprintf(stderr, "NULL pointer in tree when stack not empty,"
+ " stack depth is %d\n", n);
+ do_dump_tree2(ERTS_PRINT_STDERR, NULL, 1, tb->root, 0);
+ return;
+ }
+ n++;
+ if (n < stack->pos) {
+ if (stack->array[n] == t->left)
+ t = t->left;
+ else if (stack->array[n] == t->right)
+ t = t->right;
+	    else {
+		erts_fprintf(stderr, "stack->array[%d] == 0x%08X does not "
+			     "represent a child pointer in the tree! "
+			     "(left == 0x%08X, right == 0x%08X)\n",
+			     n, stack->array[n], t->left, t->right);
+ do_dump_tree2(ERTS_PRINT_STDERR, NULL, 1, tb->root, 0);
+ return;
+ }
+ }
+ }
+}
+
+static int check_table_tree(TreeDbTerm *t)
+{
+ int lh, rh;
+ if (t == NULL)
+ return 0;
+ lh = check_table_tree(t->left);
+ rh = check_table_tree(t->right);
+ if ((rh - lh) != t->balance) {
+ erts_fprintf(stderr, "Invalid tree balance for this node:\n");
+ erts_fprintf(stderr,"balance = %d, left = 0x%08X, right = 0x%08X\n"
+ "data = %T",
+ t->balance, t->left, t->right,
+ make_tuple(t->dbterm.tpl));
+ erts_fprintf(stderr,"\nDump:\n---------------------------------\n");
+ do_dump_tree2(ERTS_PRINT_STDERR, NULL, 1, t, 0);
+ erts_fprintf(stderr,"\n---------------------------------\n");
+ }
+ return ((rh > lh) ? rh : lh) + 1;
+}
+
+#endif
diff --git a/erts/emulator/beam/erl_db_tree.h b/erts/emulator/beam/erl_db_tree.h
new file mode 100644
index 0000000000..7bc235e135
--- /dev/null
+++ b/erts/emulator/beam/erl_db_tree.h
@@ -0,0 +1,55 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 1998-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+#ifndef _DB_TREE_H
+#define _DB_TREE_H
+
+#include "erl_db_util.h"
+
+typedef struct tree_db_term {
+ struct tree_db_term *left, *right; /* left and right child */
+ int balance; /* tree balancing value */
+ DbTerm dbterm; /* The actual term */
+} TreeDbTerm;
+
+typedef struct {
+ Uint pos; /* Current position on stack */
+ Uint slot; /* "Slot number" of top element or 0 if not set */
+ TreeDbTerm** array; /* The stack */
+} DbTreeStack;
+
+typedef struct db_table_tree {
+ DbTableCommon common;
+
+ /* Tree-specific fields */
+ TreeDbTerm *root; /* The tree root */
+ Uint deletion; /* Being deleted */
+ erts_smp_atomic_t is_stack_busy;
+ DbTreeStack static_stack;
+} DbTableTree;
+
+/*
+** Function prototypes; they look the same (except for the suffix) for
+** all table types. The process is always an [in out] parameter.
+*/
+void db_initialize_tree(void);
+
+int db_create_tree(Process *p, DbTable *tbl);
+
+#endif /* _DB_TREE_H */
diff --git a/erts/emulator/beam/erl_db_util.c b/erts/emulator/beam/erl_db_util.c
new file mode 100644
index 0000000000..8c373451fd
--- /dev/null
+++ b/erts/emulator/beam/erl_db_util.c
@@ -0,0 +1,4651 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 1998-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Common utilities for the different types of db tables.
+ * Mostly matching etc.
+ */
+
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+
+#include "sys.h"
+#include "erl_vm.h"
+#include "global.h"
+#include "erl_process.h"
+#include "error.h"
+#define ERTS_WANT_DB_INTERNAL__
+#include "erl_db.h"
+#include "bif.h"
+#include "big.h"
+#include "erl_binary.h"
+
+#include "erl_db_util.h"
+
+
+/*
+** Flags for the guard bif's
+*/
+
+/* These are offsets from the DCOMP_* value */
+#define DBIF_GUARD 1
+#define DBIF_BODY 0
+
+/* These are the DBIF flag bits corresponding to the DCOMP_* value.
+ * If a bit is set, the BIF is allowed in that context. */
+#define DBIF_TABLE_GUARD (1 << (DCOMP_TABLE + DBIF_GUARD))
+#define DBIF_TABLE_BODY (1 << (DCOMP_TABLE + DBIF_BODY))
+#define DBIF_TRACE_GUARD (1 << (DCOMP_TRACE + DBIF_GUARD))
+#define DBIF_TRACE_BODY (1 << (DCOMP_TRACE + DBIF_BODY))
+#define DBIF_ALL \
+    (DBIF_TABLE_GUARD | DBIF_TABLE_BODY | DBIF_TRACE_GUARD | DBIF_TRACE_BODY)
+
+
+
+/*
+** Some convenience macros for stacks (DMC == db_match_compile)
+*/
+
+#define DMC_DEFAULT_SIZE 25
+
+#define DMC_STACK_TYPE(Type) DMC_##Type##_stack
+
+#define DMC_DECLARE_STACK_TYPE(Type) \
+typedef struct DMC_STACK_TYPE(Type) { \
+ int pos; \
+ int siz; \
+ Type def[DMC_DEFAULT_SIZE]; \
+ Type *data; \
+} DMC_STACK_TYPE(Type)
+
+#define DMC_INIT_STACK(Name) \
+ (Name).pos = 0; (Name).siz = DMC_DEFAULT_SIZE; (Name).data = (Name).def
+
+#define DMC_STACK_DATA(Name) (Name).data
+
+#define DMC_STACK_NUM(Name) (Name).pos
+
+#define DMC_PUSH(On, What) \
+do { \
+ if ((On).pos >= (On).siz) { \
+ (On).siz *= 2; \
+ (On).data \
+ = (((On).def == (On).data) \
+ ? memcpy(erts_alloc(ERTS_ALC_T_DB_MC_STK, \
+ (On).siz*sizeof(*((On).data))), \
+ (On).def, \
+ DMC_DEFAULT_SIZE*sizeof(*((On).data))) \
+ : erts_realloc(ERTS_ALC_T_DB_MC_STK, \
+ (void *) (On).data, \
+ (On).siz*sizeof(*((On).data)))); \
+ } \
+ (On).data[(On).pos++] = What; \
+} while (0)
+
+#define DMC_POP(From) (From).data[--(From).pos]
+
+#define DMC_TOP(From) (From).data[(From).pos - 1]
+
+#define DMC_EMPTY(Name) ((Name).pos == 0)
+
+#define DMC_PEEK(On, At) (On).data[At]
+
+#define DMC_POKE(On, At, Value) ((On).data[At] = (Value))
+
+#define DMC_CLEAR(Name) (Name).pos = 0
+
+#define DMC_FREE(Name) \
+do { \
+ if ((Name).def != (Name).data) \
+ erts_free(ERTS_ALC_T_DB_MC_STK, (Name).data); \
+} while (0)
+
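+/*
+** Usage sketch (added for illustration, not part of the original source;
+** the guard macro and function name below are made up, and the block is
+** never compiled). It shows the intended life cycle of a DMC stack: init
+** on the inline def[] buffer, transparent growth to heap memory inside
+** DMC_PUSH, and DMC_FREE releasing heap memory only if the stack
+** actually outgrew def[].
+*/
+#ifdef DMC_STACK_USAGE_EXAMPLE /* never defined; example only */
+static Uint dmc_stack_example(void)
+{
+    DMC_STACK_TYPE(Uint) text;
+    Uint sum = 0;
+    int i;
+
+    DMC_INIT_STACK(text);                  /* pos = 0, data = def */
+    for (i = 0; i < 4 * DMC_DEFAULT_SIZE; i++)
+        DMC_PUSH(text, (Uint) i);          /* reallocates past def[] */
+    while (!DMC_EMPTY(text))
+        sum += DMC_POP(text);
+    DMC_FREE(text);                        /* frees the heap copy only */
+    return sum;
+}
+#endif
+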
+static ERTS_INLINE Process *
+get_proc(Process *cp, Uint32 cp_locks, Eterm id, Uint32 id_locks)
+{
+ Process *proc = erts_pid2proc(cp, cp_locks, id, id_locks);
+ if (!proc && is_atom(id))
+ proc = erts_whereis_process(cp, cp_locks, id, id_locks, 0);
+ return proc;
+}
+
+
+static Eterm
+set_tracee_flags(Process *tracee_p, Eterm tracer, Uint d_flags, Uint e_flags) {
+ Eterm ret;
+ Uint flags;
+
+ if (tracer == NIL) {
+ flags = tracee_p->trace_flags & ~TRACEE_FLAGS;
+ } else {
+ flags = ((tracee_p->trace_flags & ~d_flags) | e_flags);
+ if (! flags) tracer = NIL;
+ }
+ ret = tracee_p->tracer_proc != tracer || tracee_p->trace_flags != flags
+ ? am_true : am_false;
+ tracee_p->tracer_proc = tracer;
+ tracee_p->trace_flags = flags;
+ return ret;
+}
+/*
+** Assuming all locks on tracee_p on entry
+**
+** Changes tracee_p->trace_flags and tracee_p->tracer_proc
+** according to input disable/enable flags and tracer.
+**
+** Returns am_true|am_false on success, am_true if value changed,
+** returns fail_term on failure. Fails if tracer pid or port is invalid.
+*/
+static Eterm
+set_match_trace(Process *tracee_p, Eterm fail_term, Eterm tracer,
+ Uint d_flags, Uint e_flags) {
+ Eterm ret = fail_term;
+ Process *tracer_p;
+
+ ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCKS_ALL ==
+ erts_proc_lc_my_proc_locks(tracee_p));
+
+ if (is_internal_pid(tracer)
+ && (tracer_p =
+ erts_pid2proc(tracee_p, ERTS_PROC_LOCKS_ALL,
+ tracer, ERTS_PROC_LOCKS_ALL))) {
+ if (tracee_p != tracer_p) {
+ ret = set_tracee_flags(tracee_p, tracer, d_flags, e_flags);
+ tracer_p->trace_flags |= tracee_p->trace_flags ? F_TRACER : 0;
+ erts_smp_proc_unlock(tracer_p, ERTS_PROC_LOCKS_ALL);
+ }
+ } else if (is_internal_port(tracer)) {
+ Port *tracer_port =
+ erts_id2port(tracer, tracee_p, ERTS_PROC_LOCKS_ALL);
+ if (tracer_port) {
+ if (! INVALID_TRACER_PORT(tracer_port, tracer)) {
+ ret = set_tracee_flags(tracee_p, tracer, d_flags, e_flags);
+ }
+ erts_smp_port_unlock(tracer_port);
+ }
+ } else {
+ ASSERT(is_nil(tracer));
+ ret = set_tracee_flags(tracee_p, tracer, d_flags, e_flags);
+ }
+ return ret;
+}
+
+
+/* Type checking... */
+
+#define BOXED_IS_TUPLE(Boxed) is_arity_value(*boxed_val((Boxed)))
+
+/*
+**
+** Types and enum's (compiled matches)
+**
+*/
+
+/*
+** match VM instructions
+*/
+typedef enum {
+ matchArray, /* Only when parameter is an array (DCOMP_TRACE) */
+ matchArrayBind, /* ------------- " ------------ */
+ matchTuple,
+ matchPushT,
+ matchPushL,
+ matchPop,
+ matchBind,
+ matchCmp,
+ matchEqBin,
+ matchEqFloat,
+ matchEqBig,
+ matchEqRef,
+ matchEq,
+ matchList,
+ matchSkip,
+ matchPushC,
+ matchConsA, /* Car is below Cdr */
+ matchConsB, /* Cdr is below Car (unusual) */
+ matchMkTuple,
+ matchCall0,
+ matchCall1,
+ matchCall2,
+ matchCall3,
+ matchPushV,
+ matchPushExpr, /* Push the whole expression we're matching ('$_') */
+ matchPushArrayAsList, /* Only when parameter is an Array and
+ not an erlang term (DCOMP_TRACE) */
+ matchPushArrayAsListU, /* As above but unknown size */
+ matchTrue,
+ matchOr,
+ matchAnd,
+ matchOrElse,
+ matchAndAlso,
+ matchJump,
+ matchSelf,
+ matchWaste,
+ matchReturn,
+ matchProcessDump,
+ matchDisplay,
+ matchIsSeqTrace,
+ matchSetSeqToken,
+ matchGetSeqToken,
+ matchSetReturnTrace,
+ matchSetExceptionTrace,
+ matchCatch,
+ matchEnableTrace,
+ matchDisableTrace,
+ matchEnableTrace2,
+ matchDisableTrace2,
+ matchTryMeElse,
+ matchCaller,
+ matchHalt,
+ matchSilent,
+ matchSetSeqTokenFake,
+ matchTrace2,
+ matchTrace3
+} MatchOps;
+
+/*
+** Guard bif's
+*/
+
+typedef struct dmc_guard_bif {
+ Eterm name; /* atom */
+ void *biff;
+ /* BIF_RETTYPE (*biff)(); */
+ int arity;
+ Uint32 flags;
+} DMCGuardBif;
+
+/*
+** Error information (for lint)
+*/
+
+/*
+** Type declarations for stacks
+*/
+DMC_DECLARE_STACK_TYPE(Eterm);
+
+DMC_DECLARE_STACK_TYPE(Uint);
+
+DMC_DECLARE_STACK_TYPE(unsigned);
+
+/*
+** Data about the heap during compilation
+*/
+
+typedef struct DMCHeap {
+ int size;
+ unsigned def[DMC_DEFAULT_SIZE];
+ unsigned *data;
+ int used;
+} DMCHeap;
+
+/*
+** Return values from sub compilation steps (guard compilation)
+*/
+
+typedef enum dmc_ret {
+ retOk,
+ retFail,
+ retRestart
+} DMCRet;
+
+/*
+** Diverse context information
+*/
+
+typedef struct dmc_context {
+ int stack_need;
+ int stack_used;
+ ErlHeapFragment *save;
+ ErlHeapFragment *copy;
+ Eterm *matchexpr;
+ Eterm *guardexpr;
+ Eterm *bodyexpr;
+ int num_match;
+ int current_match;
+ int eheap_need;
+ Uint cflags;
+ int is_guard; /* 1 if in guard, 0 if in body */
+ int special; /* 1 if the head in the match was a single expression */
+ DMCErrInfo *err_info;
+} DMCContext;
+
+/*
+**
+** Global variables
+**
+*/
+
+/*
+** Internal
+*/
+
+/*
+** The pseudo process used by the VM (pam).
+*/
+
+#define ERTS_DEFAULT_MS_HEAP_SIZE 128
+
+typedef struct {
+ Process process;
+ Eterm *heap;
+ Eterm default_heap[ERTS_DEFAULT_MS_HEAP_SIZE];
+} ErtsMatchPseudoProcess;
+
+
+#ifdef ERTS_SMP
+static erts_smp_tsd_key_t match_pseudo_process_key;
+#else
+static ErtsMatchPseudoProcess *match_pseudo_process;
+#endif
+
+static ERTS_INLINE void
+cleanup_match_pseudo_process(ErtsMatchPseudoProcess *mpsp, int keep_heap)
+{
+ if (mpsp->process.mbuf
+ || mpsp->process.off_heap.mso
+#ifndef HYBRID /* FIND ME! */
+ || mpsp->process.off_heap.funs
+#endif
+ || mpsp->process.off_heap.externals) {
+ erts_cleanup_empty_process(&mpsp->process);
+ }
+#ifdef DEBUG
+ else {
+ erts_debug_verify_clean_empty_process(&mpsp->process);
+ }
+#endif
+ if (!keep_heap) {
+ if (mpsp->heap != &mpsp->default_heap[0]) {
+ /* Have to be done *after* call to erts_cleanup_empty_process() */
+ erts_free(ERTS_ALC_T_DB_MS_RUN_HEAP, (void *) mpsp->heap);
+ mpsp->heap = &mpsp->default_heap[0];
+ }
+#ifdef DEBUG
+ else {
+ int i;
+ for (i = 0; i < ERTS_DEFAULT_MS_HEAP_SIZE; i++) {
+#ifdef ARCH_64
+ mpsp->default_heap[i] = (Eterm) 0xdeadbeefdeadbeef;
+#else
+ mpsp->default_heap[i] = (Eterm) 0xdeadbeef;
+#endif
+ }
+ }
+#endif
+ }
+}
+
+static ErtsMatchPseudoProcess *
+create_match_pseudo_process(void)
+{
+ ErtsMatchPseudoProcess *mpsp;
+ mpsp = (ErtsMatchPseudoProcess *)erts_alloc(ERTS_ALC_T_DB_MS_PSDO_PROC,
+ sizeof(ErtsMatchPseudoProcess));
+ erts_init_empty_process(&mpsp->process);
+ mpsp->heap = &mpsp->default_heap[0];
+ return mpsp;
+}
+
+static ERTS_INLINE ErtsMatchPseudoProcess *
+get_match_pseudo_process(Process *c_p, Uint heap_size)
+{
+ ErtsMatchPseudoProcess *mpsp;
+#ifdef ERTS_SMP
+ mpsp = (ErtsMatchPseudoProcess *) c_p->scheduler_data->match_pseudo_process;
+ if (mpsp)
+ cleanup_match_pseudo_process(mpsp, 0);
+ else {
+ ASSERT(erts_smp_tsd_get(match_pseudo_process_key) == NULL);
+ mpsp = create_match_pseudo_process();
+ c_p->scheduler_data->match_pseudo_process = (void *) mpsp;
+ erts_smp_tsd_set(match_pseudo_process_key, (void *) mpsp);
+ }
+ ASSERT(mpsp == erts_smp_tsd_get(match_pseudo_process_key));
+ mpsp->process.scheduler_data = c_p->scheduler_data;
+#else
+ mpsp = match_pseudo_process;
+ cleanup_match_pseudo_process(mpsp, 0);
+#endif
+ if (heap_size > ERTS_DEFAULT_MS_HEAP_SIZE)
+ mpsp->heap = (Eterm *) erts_alloc(ERTS_ALC_T_DB_MS_RUN_HEAP,
+ heap_size*sizeof(Uint));
+ else {
+ ASSERT(mpsp->heap == &mpsp->default_heap[0]);
+ }
+ return mpsp;
+}
+
+#ifdef ERTS_SMP
+static void
+destroy_match_pseudo_process(void)
+{
+ ErtsMatchPseudoProcess *mpsp;
+ mpsp = (ErtsMatchPseudoProcess *)erts_smp_tsd_get(match_pseudo_process_key);
+ if (mpsp) {
+ cleanup_match_pseudo_process(mpsp, 0);
+ erts_free(ERTS_ALC_T_DB_MS_PSDO_PROC, (void *) mpsp);
+ erts_smp_tsd_set(match_pseudo_process_key, (void *) NULL);
+ }
+}
+#endif
+
+static
+void
+match_pseudo_process_init(void)
+{
+#ifdef ERTS_SMP
+ erts_smp_tsd_key_create(&match_pseudo_process_key);
+ erts_smp_install_exit_handler(destroy_match_pseudo_process);
+#else
+ match_pseudo_process = create_match_pseudo_process();
+#endif
+}
+
+void
+erts_match_set_release_result(Process* c_p)
+{
+ (void) get_match_pseudo_process(c_p, 0); /* Clean it up */
+}
+
+/* The trace control word. */
+
+static erts_smp_atomic_t trace_control_word;
+
+
+Eterm
+erts_ets_copy_object(Eterm obj, Process* to)
+{
+ Uint size = size_object(obj);
+ Eterm* hp = HAlloc(to, size);
+ Eterm res;
+
+ res = copy_struct(obj, size, &hp, &MSO(to));
+#ifdef DEBUG
+ if (eq(obj, res) == 0) {
+ erl_exit(1, "copy not equal to source\n");
+ }
+#endif
+ return res;
+}
+
+/* This needs to be here, before the bif table... */
+
+static Eterm db_set_trace_control_word_fake_1(Process *p, Eterm val);
+
+/*
+** The table of callable bif's, i.e. guard bif's and
+** some special animals that can provide us with trace
+** information. This array is sorted on init.
+*/
+static DMCGuardBif guard_tab[] =
+{
+ {
+ am_is_atom,
+ &is_atom_1,
+ 1,
+ DBIF_ALL
+ },
+ {
+ am_is_float,
+ &is_float_1,
+ 1,
+ DBIF_ALL
+ },
+ {
+ am_is_integer,
+ &is_integer_1,
+ 1,
+ DBIF_ALL
+ },
+ {
+ am_is_list,
+ &is_list_1,
+ 1,
+ DBIF_ALL
+ },
+ {
+ am_is_number,
+ &is_number_1,
+ 1,
+ DBIF_ALL
+ },
+ {
+ am_is_pid,
+ &is_pid_1,
+ 1,
+ DBIF_ALL
+ },
+ {
+ am_is_port,
+ &is_port_1,
+ 1,
+ DBIF_ALL
+ },
+ {
+ am_is_reference,
+ &is_reference_1,
+ 1,
+ DBIF_ALL
+ },
+ {
+ am_is_tuple,
+ &is_tuple_1,
+ 1,
+ DBIF_ALL
+ },
+ {
+ am_is_binary,
+ &is_binary_1,
+ 1,
+ DBIF_ALL
+ },
+ {
+ am_is_function,
+ &is_function_1,
+ 1,
+ DBIF_ALL
+ },
+ {
+ am_is_record,
+ &is_record_3,
+ 3,
+ DBIF_ALL
+ },
+ {
+ am_abs,
+ &abs_1,
+ 1,
+ DBIF_ALL
+ },
+ {
+ am_element,
+ &element_2,
+ 2,
+ DBIF_ALL
+ },
+ {
+ am_hd,
+ &hd_1,
+ 1,
+ DBIF_ALL
+ },
+ {
+ am_length,
+ &length_1,
+ 1,
+ DBIF_ALL
+ },
+ {
+ am_node,
+ &node_1,
+ 1,
+ DBIF_ALL
+ },
+ {
+ am_node,
+ &node_0,
+ 0,
+ DBIF_ALL
+ },
+ {
+ am_round,
+ &round_1,
+ 1,
+ DBIF_ALL
+ },
+ {
+ am_size,
+ &size_1,
+ 1,
+ DBIF_ALL
+ },
+ {
+ am_bit_size,
+ &bit_size_1,
+ 1,
+ DBIF_ALL
+ },
+ {
+ am_tl,
+ &tl_1,
+ 1,
+ DBIF_ALL
+ },
+ {
+ am_trunc,
+ &trunc_1,
+ 1,
+ DBIF_ALL
+ },
+ {
+ am_float,
+ &float_1,
+ 1,
+ DBIF_ALL
+ },
+ {
+ am_Plus,
+ &splus_1,
+ 1,
+ DBIF_ALL
+ },
+ {
+ am_Minus,
+ &sminus_1,
+ 1,
+ DBIF_ALL
+ },
+ {
+ am_Plus,
+ &splus_2,
+ 2,
+ DBIF_ALL
+ },
+ {
+ am_Minus,
+ &sminus_2,
+ 2,
+ DBIF_ALL
+ },
+ {
+ am_Times,
+ &stimes_2,
+ 2,
+ DBIF_ALL
+ },
+ {
+ am_Div,
+ &div_2,
+ 2,
+ DBIF_ALL
+ },
+ {
+ am_div,
+ &intdiv_2,
+ 2,
+ DBIF_ALL
+ },
+ {
+ am_rem,
+ &rem_2,
+ 2,
+ DBIF_ALL
+ },
+ {
+ am_band,
+ &band_2,
+ 2,
+ DBIF_ALL
+ },
+ {
+ am_bor,
+ &bor_2,
+ 2,
+ DBIF_ALL
+ },
+ {
+ am_bxor,
+ &bxor_2,
+ 2,
+ DBIF_ALL
+ },
+ {
+ am_bnot,
+ &bnot_1,
+ 1,
+ DBIF_ALL
+ },
+ {
+ am_bsl,
+ &bsl_2,
+ 2,
+ DBIF_ALL
+ },
+ {
+ am_bsr,
+ &bsr_2,
+ 2,
+ DBIF_ALL
+ },
+ {
+ am_Gt,
+ &sgt_2,
+ 2,
+ DBIF_ALL
+ },
+ {
+ am_Ge,
+ &sge_2,
+ 2,
+ DBIF_ALL
+ },
+ {
+ am_Lt,
+ &slt_2,
+ 2,
+ DBIF_ALL
+ },
+ {
+ am_Le,
+ &sle_2,
+ 2,
+ DBIF_ALL
+ },
+ {
+ am_Eq,
+ &seq_2,
+ 2,
+ DBIF_ALL
+ },
+ {
+ am_Eqeq,
+ &seqeq_2,
+ 2,
+ DBIF_ALL
+ },
+ {
+ am_Neq,
+ &sneq_2,
+ 2,
+ DBIF_ALL
+ },
+ {
+ am_Neqeq,
+ &sneqeq_2,
+ 2,
+ DBIF_ALL
+ },
+ {
+ am_not,
+ &not_1,
+ 1,
+ DBIF_ALL
+ },
+ {
+ am_xor,
+ &xor_2,
+ 2,
+ DBIF_ALL
+ },
+ {
+ am_get_tcw,
+ &db_get_trace_control_word_0,
+ 0,
+ DBIF_TRACE_GUARD | DBIF_TRACE_BODY
+ },
+ {
+ am_set_tcw,
+ &db_set_trace_control_word_1,
+ 1,
+ DBIF_TRACE_BODY
+ },
+ {
+ am_set_tcw_fake,
+ &db_set_trace_control_word_fake_1,
+ 1,
+ DBIF_TRACE_BODY
+ }
+};
+
+/*
+** Exported
+*/
+Eterm db_am_eot; /* Atom '$end_of_table' */
+
+/*
+** Forward decl's
+*/
+
+
+/*
+** ... forwards for compiled matches
+*/
+/* Utility code */
+static DMCGuardBif *dmc_lookup_bif(Eterm t, int arity);
+#ifdef DMC_DEBUG
+static Eterm dmc_lookup_bif_reversed(void *f);
+#endif
+static int cmp_uint(void *a, void *b);
+static int cmp_guard_bif(void *a, void *b);
+static int match_compact(ErlHeapFragment *expr, DMCErrInfo *err_info);
+static Uint my_size_object(Eterm t);
+static Eterm my_copy_struct(Eterm t, Eterm **hp, ErlOffHeap* off_heap);
+
+/* Guard compilation */
+static void do_emit_constant(DMCContext *context, DMC_STACK_TYPE(Uint) *text,
+ Eterm t);
+static DMCRet dmc_list(DMCContext *context,
+ DMCHeap *heap,
+ DMC_STACK_TYPE(Uint) *text,
+ Eterm t,
+ int *constant);
+static DMCRet dmc_tuple(DMCContext *context,
+ DMCHeap *heap,
+ DMC_STACK_TYPE(Uint) *text,
+ Eterm t,
+ int *constant);
+static DMCRet dmc_variable(DMCContext *context,
+ DMCHeap *heap,
+ DMC_STACK_TYPE(Uint) *text,
+ Eterm t,
+ int *constant);
+static DMCRet dmc_fun(DMCContext *context,
+ DMCHeap *heap,
+ DMC_STACK_TYPE(Uint) *text,
+ Eterm t,
+ int *constant);
+static DMCRet dmc_expr(DMCContext *context,
+ DMCHeap *heap,
+ DMC_STACK_TYPE(Uint) *text,
+ Eterm t,
+ int *constant);
+static DMCRet compile_guard_expr(DMCContext *context,
+ DMCHeap *heap,
+ DMC_STACK_TYPE(Uint) *text,
+ Eterm t);
+/* match expression subroutine */
+static DMCRet dmc_one_term(DMCContext *context,
+ DMCHeap *heap,
+ DMC_STACK_TYPE(Eterm) *stack,
+ DMC_STACK_TYPE(Uint) *text,
+ Eterm c);
+
+
+#ifdef DMC_DEBUG
+static int test_disassemble_next = 0;
+static void db_match_dis(Binary *prog);
+#define TRACE erts_fprintf(stderr,"Trace: %s:%d\n",__FILE__,__LINE__)
+#define FENCE_PATTERN_SIZE 1
+#define FENCE_PATTERN 0xDEADBEEFUL
+#else
+#define TRACE /* Nothing */
+#define FENCE_PATTERN_SIZE 0
+#endif
+static void add_dmc_err(DMCErrInfo *err_info,
+ char *str,
+ int variable,
+ Eterm term,
+ DMCErrorSeverity severity);
+
+static Eterm dpm_array_to_list(Process *psp, Eterm *arr, int arity);
+
+static Eterm match_spec_test(Process *p, Eterm against, Eterm spec, int trace);
+
+static Eterm seq_trace_fake(Process *p, Eterm arg1);
+
+
+/*
+** Interface routines.
+*/
+
+/*
+** Pseudo BIF:s to be callable from the PAM VM.
+*/
+
+BIF_RETTYPE db_get_trace_control_word_0(Process *p)
+{
+ Uint32 tcw = (Uint32) erts_smp_atomic_read(&trace_control_word);
+ BIF_RET(erts_make_integer((Uint) tcw, p));
+}
+
+BIF_RETTYPE db_set_trace_control_word_1(Process *p, Eterm new)
+{
+ Uint val;
+ Uint32 old_tcw;
+ if (!term_to_Uint(new, &val))
+ BIF_ERROR(p, BADARG);
+ if (val != ((Uint32)val))
+ BIF_ERROR(p, BADARG);
+
+ old_tcw = (Uint32) erts_smp_atomic_xchg(&trace_control_word, (long) val);
+ BIF_RET(erts_make_integer((Uint) old_tcw, p));
+}
+
+static Eterm db_set_trace_control_word_fake_1(Process *p, Eterm new)
+{
+ Uint val;
+ if (!term_to_Uint(new, &val))
+ BIF_ERROR(p, BADARG);
+ if (val != ((Uint32)val))
+ BIF_ERROR(p, BADARG);
+ BIF_RET(db_get_trace_control_word_0(p));
+}
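+
+/*
+** Note added for clarity (not in the original source): these BIFs back
+** the get_tcw/set_tcw entries registered in guard_tab above, so a trace
+** match spec can read and update the 32-bit trace control word, e.g.
+** with the (illustrative) guard {'=:=', {get_tcw}, 17}.
+*/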
+
+/*
+** The API used by the tracer (declared in global.h):
+*/
+
+/*
+** Matchexpr is a list of tuples containing match-code, i e:
+**
+** Matchexpr = [{Pattern, Guards, Body}, ...]
+** Pattern = [ PatternExpr , ...]
+** PatternExpr = Constant | PatternTuple | PatternList | Variable
+** Constant = Any erlang term
+** PatternTuple = { PatternExpr ... }
+** PatternList = [ PatternExpr ]
+** Variable = '$' ++ <number>
+** Guards = [Guard ...]
+** Guard = {GuardFunc, GuardExpr, ...}
+** GuardExpr = BoundVariable | Guard | GuardList | GuardTuple | ConstExpr
+** BoundVariable = Variable (existing in Pattern)
+** GuardList = [ GuardExpr , ... ]
+** GuardTuple = {{ GuardExpr, ... }}
+** ConstExpr = {const, Constant}
+** GuardFunc = is_list | .... | element | ...
+** Body = [ BodyExpr, ... ]
+** BodyExpr = GuardExpr | { BodyFunc, GuardExpr, ... }
+** BodyFunc = return_trace | seq_trace | trace | ...
+** - or something like that...
+*/
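+
+/*
+** A concrete example of the grammar above (added for illustration):
+**
+**   [{['$1', '$2'],                      Pattern: two-argument call
+**     [{'>', '$1', 3}],                  Guard: $1 > 3
+**     [{return_trace}]}]                 Body: enable return tracing
+**
+** matches calls F(X, Y) with X > 3 and turns on return tracing for them.
+*/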
+
+
+Eterm erts_match_set_get_source(Binary *mpsp)
+{
+ MatchProg *prog = Binary2MatchProg(mpsp);
+ return prog->saved_program;
+}
+
+/* This one is for the tracing */
+Binary *erts_match_set_compile(Process *p, Eterm matchexpr) {
+ Binary *bin;
+ Uint sz;
+ Eterm *hp;
+
+ bin = db_match_set_compile(p, matchexpr, DCOMP_TRACE);
+ if (bin != NULL) {
+ MatchProg *prog = Binary2MatchProg(bin);
+ sz = size_object(matchexpr);
+ prog->saved_program_buf = new_message_buffer(sz);
+ hp = prog->saved_program_buf->mem;
+ prog->saved_program =
+ copy_struct(matchexpr, sz, &hp,
+ &(prog->saved_program_buf->off_heap));
+ }
+ return bin;
+}
+
+Binary *db_match_set_compile(Process *p, Eterm matchexpr,
+ Uint flags)
+{
+ Eterm l;
+ Eterm t;
+ Eterm l2;
+ Eterm *tp;
+ Eterm *hp;
+ int n = 0;
+ int num_heads;
+ int i;
+ Binary *mps = NULL;
+ int compiled = 0;
+ Eterm *matches,*guards, *bodies;
+ Eterm *buff;
+ Eterm sbuff[15];
+
+ if (!is_list(matchexpr))
+ return NULL;
+ num_heads = 0;
+ for (l = matchexpr; is_list(l); l = CDR(list_val(l)))
+ ++num_heads;
+
+    if (l != NIL) /* not a proper list... */
+ return NULL;
+
+ if (num_heads > 5) {
+ buff = erts_alloc(ERTS_ALC_T_DB_TMP,
+ sizeof(Eterm) * num_heads * 3);
+ } else {
+ buff = sbuff;
+ }
+
+ matches = buff;
+ guards = buff + num_heads;
+ bodies = buff + (num_heads * 2);
+
+ i = 0;
+ for (l = matchexpr; is_list(l); l = CDR(list_val(l))) {
+ t = CAR(list_val(l));
+ if (!is_tuple(t) || arityval((tp = tuple_val(t))[0]) != 3) {
+ goto error;
+ }
+ if (!(flags & DCOMP_TRACE) || (!is_list(tp[1]) &&
+ !is_nil(tp[1]))) {
+ t = tp[1];
+ } else {
+	    /* When tracing, the parameter is a list that we convert
+	       to a tuple, which is then matched against an array
+	       (strange, but it gives the semantics of matching
+	       against a parameter list) */
+ n = 0;
+ for (l2 = tp[1]; is_list(l2); l2 = CDR(list_val(l2))) {
+ ++n;
+ }
+ if (l2 != NIL) {
+ goto error;
+ }
+ hp = HAlloc(p, n + 1);
+ t = make_tuple(hp);
+ *hp++ = make_arityval((Uint) n);
+ l2 = tp[1];
+ while (n--) {
+ *hp++ = CAR(list_val(l2));
+ l2 = CDR(list_val(l2));
+ }
+ }
+ matches[i] = t;
+ guards[i] = tp[2];
+ bodies[i] = tp[3];
+ ++i;
+ }
+ if ((mps = db_match_compile(matches, guards, bodies,
+ num_heads,
+ flags,
+ NULL)) == NULL) {
+ goto error;
+ }
+ compiled = 1;
+ if (buff != sbuff) {
+ erts_free(ERTS_ALC_T_DB_TMP, buff);
+ }
+ return mps;
+
+error:
+ if (compiled) {
+ erts_bin_free(mps);
+ }
+ if (buff != sbuff) {
+ erts_free(ERTS_ALC_T_DB_TMP, buff);
+ }
+ return NULL;
+}
+
+/* This is used when tracing */
+Eterm erts_match_set_lint(Process *p, Eterm matchexpr) {
+ return db_match_set_lint(p, matchexpr, DCOMP_TRACE);
+}
+
+Eterm db_match_set_lint(Process *p, Eterm matchexpr, Uint flags)
+{
+ Eterm l;
+ Eterm t;
+ Eterm l2;
+ Eterm *tp;
+ Eterm *hp;
+ DMCErrInfo *err_info = db_new_dmc_err_info();
+ Eterm ret;
+ int n = 0;
+ int num_heads;
+ Binary *mp;
+ Eterm *matches,*guards, *bodies;
+ Eterm sbuff[15];
+ Eterm *buff = sbuff;
+ int i;
+
+ if (!is_list(matchexpr)) {
+ add_dmc_err(err_info, "Match programs are not in a list.",
+ -1, 0UL, dmcError);
+ goto done;
+ }
+ num_heads = 0;
+ for (l = matchexpr; is_list(l); l = CDR(list_val(l)))
+ ++num_heads;
+
+    if (l != NIL) { /* not a proper list... */
+ add_dmc_err(err_info, "Match programs are not in a proper "
+ "list.",
+ -1, 0UL, dmcError);
+ goto done;
+ }
+
+ if (num_heads > 5) {
+ buff = erts_alloc(ERTS_ALC_T_DB_TMP,
+ sizeof(Eterm) * num_heads * 3);
+ }
+
+ matches = buff;
+ guards = buff + num_heads;
+ bodies = buff + (num_heads * 2);
+
+ i = 0;
+ for (l = matchexpr; is_list(l); l = CDR(list_val(l))) {
+ t = CAR(list_val(l));
+ if (!is_tuple(t) || arityval((tp = tuple_val(t))[0]) != 3) {
+ add_dmc_err(err_info,
+ "Match program part is not a tuple of "
+ "arity 3.",
+ -1, 0UL, dmcError);
+ goto done;
+ }
+ if (!(flags & DCOMP_TRACE) || (!is_list(tp[1]) &&
+ !is_nil(tp[1]))) {
+ t = tp[1];
+ } else {
+ n = 0;
+ for (l2 = tp[1]; is_list(l2); l2 = CDR(list_val(l2))) {
+ ++n;
+ }
+ if (l2 != NIL) {
+ add_dmc_err(err_info,
+ "Match expression part %T is not a "
+ "proper list.",
+ -1, tp[1], dmcError);
+
+ goto done;
+ }
+ hp = HAlloc(p, n + 1);
+ t = make_tuple(hp);
+ *hp++ = make_arityval((Uint) n);
+ l2 = tp[1];
+ while (n--) {
+ *hp++ = CAR(list_val(l2));
+ l2 = CDR(list_val(l2));
+ }
+ }
+ matches[i] = t;
+ guards[i] = tp[2];
+ bodies[i] = tp[3];
+ ++i;
+ }
+ mp = db_match_compile(matches, guards, bodies, num_heads,
+ flags, err_info);
+ if (mp != NULL) {
+ erts_bin_free(mp);
+ }
+done:
+ ret = db_format_dmc_err_info(p, err_info);
+ db_free_dmc_err_info(err_info);
+ if (buff != sbuff) {
+ erts_free(ERTS_ALC_T_DB_TMP, buff);
+ }
+ return ret;
+}
+
+Eterm erts_match_set_run(Process *p, Binary *mpsp,
+ Eterm *args, int num_args,
+ Uint32 *return_flags)
+{
+ Eterm ret;
+
+ ret = db_prog_match(p, mpsp,
+ (Eterm) args,
+ num_args, return_flags);
+#if defined(HARDDEBUG)
+ if (is_non_value(ret)) {
+ erts_fprintf(stderr, "Failed\n");
+ } else {
+ erts_fprintf(stderr, "Returning : %T\n", ret);
+ }
+#endif
+    /* Returns
+     *   THE_NON_VALUE if no match
+     *   am_false      if {message,false} has been called,
+     *   am_true       if {message,_} has not been called or
+     *                 if {message,true} has been called,
+     *   Msg           if {message,Msg} has been called.
+     */
+    return ret;
+}
+
+/*
+** API Used by other erl_db modules.
+*/
+
+void db_initialize_util(void){
+ qsort(guard_tab,
+ sizeof(guard_tab) / sizeof(DMCGuardBif),
+ sizeof(DMCGuardBif),
+ (int (*)(const void *, const void *)) &cmp_guard_bif);
+ match_pseudo_process_init();
+ erts_smp_atomic_init(&trace_control_word, 0);
+}
+
+
+
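+/*
+** Return the key (the element at position 'keypos') of tuple 'obj',
+** or THE_NON_VALUE if 'obj' is not a tuple of sufficient arity;
+** e.g. db_getkey(1, {foo,bar}) yields 'foo'.
+*/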
+Eterm db_getkey(int keypos, Eterm obj)
+{
+ if (is_tuple(obj)) {
+ Eterm *tptr = tuple_val(obj);
+ if (arityval(*tptr) >= keypos)
+ return *(tptr + keypos);
+ }
+ return THE_NON_VALUE;
+}
+
+/*
+** Matching compiled (executed by "Pam" :-)
+*/
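+/*
+** An illustrative sketch (not taken verbatim from any compiled program):
+** the pattern {'$1', foo} compiles to roughly this instruction stream:
+**
+**   matchTuple 2      the term must be a 2-tuple
+**   matchBind  1      bind '$1' to the first element
+**   matchEq    foo    the second element must equal 'foo'
+**   ...guard and body instructions...
+**   matchHalt
+*/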
+
+/*
+** The actual compiling of the match expression and the guards
+*/
+Binary *db_match_compile(Eterm *matchexpr,
+ Eterm *guards,
+ Eterm *body,
+ int num_progs,
+ Uint flags,
+ DMCErrInfo *err_info)
+{
+ DMCHeap heap;
+ DMC_STACK_TYPE(Eterm) stack;
+ DMC_STACK_TYPE(Uint) text;
+ DMCContext context;
+ MatchProg *ret = NULL;
+ Eterm t;
+ Uint i;
+ Uint num_iters;
+ int structure_checked;
+ DMCRet res;
+ int current_try_label;
+ Uint max_eheap_need;
+ Binary *bp = NULL;
+ unsigned clause_start;
+
+ DMC_INIT_STACK(stack);
+ DMC_INIT_STACK(text);
+
+ context.stack_need = context.stack_used = 0;
+ context.save = context.copy = NULL;
+ context.num_match = num_progs;
+ context.matchexpr = matchexpr;
+ context.guardexpr = guards;
+ context.bodyexpr = body;
+ context.eheap_need = 0;
+ context.err_info = err_info;
+ context.cflags = flags;
+
+ heap.size = DMC_DEFAULT_SIZE;
+ heap.data = heap.def;
+
+ /*
+ ** Compile the match expression
+ */
+restart:
+ heap.used = 0;
+ max_eheap_need = 0;
+ for (context.current_match = 0;
+ context.current_match < num_progs;
+ ++context.current_match) { /* This loop is long,
+ too long */
+ memset(heap.data, 0, heap.size * sizeof(*heap.data));
+ t = context.matchexpr[context.current_match];
+ context.stack_used = 0;
+ context.eheap_need = 0;
+ structure_checked = 0;
+ if (context.current_match < num_progs - 1) {
+ DMC_PUSH(text,matchTryMeElse);
+ current_try_label = DMC_STACK_NUM(text);
+ DMC_PUSH(text,0);
+ } else {
+ current_try_label = -1;
+ }
+ clause_start = DMC_STACK_NUM(text); /* the "special" test needs it */
+ DMC_PUSH(stack,NIL);
+ for (;;) {
+ switch (t & _TAG_PRIMARY_MASK) {
+ case TAG_PRIMARY_BOXED:
+ if (!BOXED_IS_TUPLE(t)) {
+ goto simple_term;
+ }
+ num_iters = arityval(*tuple_val(t));
+ if (!structure_checked) { /* i.e. we did not
+ pop it */
+ DMC_PUSH(text,matchTuple);
+ DMC_PUSH(text,num_iters);
+ }
+ structure_checked = 0;
+ for (i = 1; i <= num_iters; ++i) {
+ if ((res = dmc_one_term(&context,
+ &heap,
+ &stack,
+ &text,
+ tuple_val(t)[i]))
+ != retOk) {
+ if (res == retRestart) {
+ goto restart; /* restart the
+ surrounding
+ loop */
+ } else goto error;
+ }
+ }
+ break;
+ case TAG_PRIMARY_LIST:
+ if (!structure_checked) {
+ DMC_PUSH(text, matchList);
+ }
+ structure_checked = 0; /* Whatever it is, we did
+ not pop it */
+ if ((res = dmc_one_term(&context, &heap, &stack,
+ &text, CAR(list_val(t))))
+ != retOk) {
+ if (res == retRestart) {
+ goto restart;
+ } else goto error;
+ }
+ t = CDR(list_val(t));
+ continue;
+	    default: /* NIL, non-proper tail ends, or single
+			terms as match expressions */
+ simple_term:
+ structure_checked = 0;
+ if ((res = dmc_one_term(&context, &heap, &stack,
+ &text, t))
+ != retOk) {
+ if (res == retRestart) {
+ goto restart;
+ } else goto error;
+ }
+ break;
+ }
+
+	    /* The *program's* stack just *grows* while we are
+	       traversing one composite data structure, so we can
+	       check the stack usage here */
+
+ if (context.stack_used > context.stack_need)
+ context.stack_need = context.stack_used;
+
+ /* We are at the end of one composite data structure,
+ pop sub structures and emit a matchPop instruction
+ (or break) */
+ if ((t = DMC_POP(stack)) == NIL) {
+ break;
+ } else {
+ DMC_PUSH(text, matchPop);
+ structure_checked = 1; /*
+ * Checked with matchPushT
+ * or matchPushL
+ */
+ --(context.stack_used);
+ }
+ }
+
+	/*
+	** There is one single top variable in the match expression
+	** iff the text is two Uints and the single instruction
+	** is 'matchBind', or if it is only a skip.
+	*/
+ context.special =
+ (DMC_STACK_NUM(text) == 2 + clause_start &&
+ DMC_PEEK(text,clause_start) == matchBind) ||
+ (DMC_STACK_NUM(text) == 1 + clause_start &&
+ DMC_PEEK(text, clause_start) == matchSkip);
+
+ if (flags & DCOMP_TRACE) {
+ if (context.special) {
+ if (DMC_PEEK(text, clause_start) == matchBind) {
+ DMC_POKE(text, clause_start, matchArrayBind);
+ }
+ } else {
+ ASSERT(DMC_STACK_NUM(text) >= 1);
+ if (DMC_PEEK(text, clause_start) != matchTuple) {
+ /* If it isn't "special" and the argument is
+ not a tuple, the expression is not valid
+		       when matching an array */
+ if (context.err_info) {
+ add_dmc_err(context.err_info,
+ "Match head is invalid in "
+ "this context.",
+ -1, 0UL,
+ dmcError);
+ }
+ goto error;
+ }
+ DMC_POKE(text, clause_start, matchArray);
+ }
+ }
+
+
+ /*
+ ** ... and the guards
+ */
+ context.is_guard = 1;
+ if (compile_guard_expr
+ (&context,
+ &heap,
+ &text,
+ context.guardexpr[context.current_match]) != retOk)
+ goto error;
+ context.is_guard = 0;
+ if ((context.cflags & DCOMP_TABLE) &&
+ !is_list(context.bodyexpr[context.current_match])) {
+ if (context.err_info) {
+ add_dmc_err(context.err_info,
+ "Body clause does not return "
+ "anything.", -1, 0UL,
+ dmcError);
+ }
+ goto error;
+ }
+ if (compile_guard_expr
+ (&context,
+ &heap,
+ &text,
+ context.bodyexpr[context.current_match]) != retOk)
+ goto error;
+
+ /*
+ * The compilation does not bail out when error information
+ * is requested, so we need to detect that here...
+ */
+ if (context.err_info != NULL &&
+ (context.err_info)->error_added) {
+ goto error;
+ }
+
+
+	/* If the match program gets here, the match is
+	   successful */
+ DMC_PUSH(text,matchHalt);
+ /* Fill in try-me-else label if there is one. */
+ if (current_try_label >= 0) {
+ DMC_POKE(text, current_try_label, DMC_STACK_NUM(text));
+ }
+ /* So, how much eheap did this part of the match program need? */
+ if (context.eheap_need > max_eheap_need) {
+ max_eheap_need = context.eheap_need;
+ }
+ } /* for (context.current_match = 0 ...) */
+
+
+ /*
+ ** Done compiling
+ ** Allocate enough space for the program,
+ ** heap size is in 'heap_used', stack size is in 'stack_need'
+ ** and text size is simply DMC_STACK_NUM(text).
+ ** The "program memory" is allocated like this:
+ ** text ----> +-------------+
+ ** | |
+ ** ..........
+ ** +-------------+
+ **
+ ** The heap-eheap-stack block of a MatchProg is nowadays allocated
+ ** when the match program is run (see db_prog_match()).
+ **
+ ** heap ----> +-------------+
+ ** ..........
+ ** eheap ---> + +
+ ** ..........
+ ** stack ---> + +
+ ** ..........
+ ** +-------------+
+    ** The stack is expected to grow towards *higher* addresses.
+    ** A special case is when the match expression is a single binding
+    ** (i.e. '$1'); then the field single_variable is set to 1.
+ */
+ bp = erts_create_magic_binary(((sizeof(MatchProg) - sizeof(Uint)) +
+ (DMC_STACK_NUM(text) * sizeof(Uint))),
+ erts_db_match_prog_destructor);
+ ret = Binary2MatchProg(bp);
+ ret->saved_program_buf = NULL;
+ ret->saved_program = NIL;
+ ret->term_save = context.save;
+ ret->num_bindings = heap.used;
+ ret->single_variable = context.special;
+ sys_memcpy(ret->text, DMC_STACK_DATA(text),
+ DMC_STACK_NUM(text) * sizeof(Uint));
+ ret->heap_size = ((heap.used * sizeof(Eterm)) +
+ (max_eheap_need * sizeof(Eterm)) +
+ (context.stack_need * sizeof(Eterm *)) +
+ (3 * (FENCE_PATTERN_SIZE * sizeof(Eterm *))));
+ ret->eheap_offset = heap.used + FENCE_PATTERN_SIZE;
+ ret->stack_offset = ret->eheap_offset + max_eheap_need + FENCE_PATTERN_SIZE;
+#ifdef DMC_DEBUG
+ ret->prog_end = ret->text + DMC_STACK_NUM(text);
+#endif
+
+ /*
+     * Fall through to cleanup code, but context.save should not be freed
+ */
+ context.save = NULL;
+error: /* Here is where we land when compilation failed. */
+ while (context.save != NULL) {
+ ErlHeapFragment *ll = context.save->next;
+ free_message_buffer(context.save);
+ context.save = ll;
+ }
+ DMC_FREE(stack);
+ DMC_FREE(text);
+ if (context.copy != NULL)
+ free_message_buffer(context.copy);
+ if (heap.data != heap.def)
+ erts_free(ERTS_ALC_T_DB_MS_CMPL_HEAP, (void *) heap.data);
+ return bp;
+}
+
+/*
+** Free a match program (in a binary)
+*/
+void erts_db_match_prog_destructor(Binary *bprog)
+{
+ MatchProg *prog;
+ ErlHeapFragment *tmp, *ll;
+ if (bprog == NULL)
+ return;
+ prog = Binary2MatchProg(bprog);
+ tmp = prog->term_save;
+ while (tmp != NULL) {
+ ll = tmp->next;
+ free_message_buffer(tmp);
+ tmp = ll;
+ }
+ if (prog->saved_program_buf != NULL)
+ free_message_buffer(prog->saved_program_buf);
+}
+
+void
+erts_match_prog_foreach_offheap(Binary *bprog,
+ void (*func)(ErlOffHeap *, void *),
+ void *arg)
+{
+ MatchProg *prog;
+ ErlHeapFragment *tmp;
+ if (bprog == NULL)
+ return;
+ prog = Binary2MatchProg(bprog);
+ tmp = prog->term_save;
+ while (tmp) {
+ (*func)(&(tmp->off_heap), arg);
+ tmp = tmp->next;
+ }
+ if (prog->saved_program_buf)
+ (*func)(&(prog->saved_program_buf->off_heap), arg);
+}
+
+/*
+** This is not the most efficient way to do it, but it's a rare
+** and not especially nice case when this is used.
+*/
+static Eterm dpm_array_to_list(Process *psp, Eterm *arr, int arity)
+{
+ Eterm *hp = HAlloc(psp, arity * 2);
+ Eterm ret = NIL;
+ while (--arity >= 0) {
+ ret = CONS(hp, arr[arity], ret);
+ hp += 2;
+ }
+ return ret;
+}
+/*
+** Execution of the match program, this is Pam.
+** May return THE_NON_VALUE, which is a bailout.
+** The parameter 'arity' is only used if 'term' is actually an array,
+** i.e. 'DCOMP_TRACE' was specified
+*/
+Eterm db_prog_match(Process *c_p, Binary *bprog, Eterm term,
+ int arity,
+ Uint32 *return_flags)
+{
+ MatchProg *prog = Binary2MatchProg(bprog);
+ Eterm *ep;
+ Eterm *tp;
+ Eterm t;
+ Eterm **sp;
+ Eterm *esp;
+ Eterm *hp;
+ Uint *pc = prog->text;
+ Eterm *ehp;
+ Eterm ret;
+ Uint n = 0; /* To avoid warning. */
+ int i;
+ unsigned do_catch;
+ ErtsMatchPseudoProcess *mpsp;
+ Process *psp;
+ Process *tmpp;
+ Process *current_scheduled;
+ ErtsSchedulerData *esdp;
+ Eterm (*bif)(Process*, ...);
+ int fail_label;
+ int atomic_trace;
+#ifdef DMC_DEBUG
+ unsigned long *heap_fence;
+ unsigned long *eheap_fence;
+ unsigned long *stack_fence;
+ Uint save_op;
+#endif /* DMC_DEBUG */
+
+ mpsp = get_match_pseudo_process(c_p, prog->heap_size);
+ psp = &mpsp->process;
+
+ /* We need to lure the scheduler into believing in the pseudo process,
+ because of floating point exceptions. Do *after* mpsp is set!!! */
+
+ esdp = ERTS_GET_SCHEDULER_DATA_FROM_PROC(c_p);
+ ASSERT(esdp != NULL);
+ current_scheduled = esdp->current_process;
+ esdp->current_process = psp;
+ /* SMP: psp->scheduler_data is set by get_match_pseudo_process */
+
+ atomic_trace = 0;
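+/* BEGIN_ATOMIC_TRACE releases the main lock on 'p' and blocks all other
+** schedulers (at most once); END_ATOMIC_TRACE undoes both. This makes
+** the trace-flag updates below appear atomic to the rest of the system. */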
+#define BEGIN_ATOMIC_TRACE(p) \
+ do { \
+ if (! atomic_trace) { \
+ erts_smp_proc_unlock((p), ERTS_PROC_LOCK_MAIN); \
+ erts_smp_block_system(0); \
+ atomic_trace = !0; \
+ } \
+ } while (0)
+#define END_ATOMIC_TRACE(p) \
+ do { \
+ if (atomic_trace) { \
+ erts_smp_release_system(); \
+ erts_smp_proc_lock((p), ERTS_PROC_LOCK_MAIN); \
+ atomic_trace = 0; \
+ } \
+ } while (0)
+
+#ifdef DMC_DEBUG
+ save_op = 0;
+ heap_fence = (unsigned long *) mpsp->heap + prog->eheap_offset - 1;
+ eheap_fence = (unsigned long *) mpsp->heap + prog->stack_offset - 1;
+ stack_fence = (unsigned long *) mpsp->heap + prog->heap_size - 1;
+ *heap_fence = FENCE_PATTERN;
+ *eheap_fence = FENCE_PATTERN;
+ *stack_fence = FENCE_PATTERN;
+#endif /* DMC_DEBUG */
+
+#ifdef HARDDEBUG
+#define FAIL() {erts_printf("Fail line %d\n",__LINE__); goto fail;}
+#else
+#define FAIL() goto fail
+#endif
+#define FAIL_TERM am_EXIT /* The term to set as return when bif fails and
+ do_catch != 0 */
+
+ *return_flags = 0U;
+
+restart:
+ ep = &term;
+ esp = mpsp->heap + prog->stack_offset;
+ sp = (Eterm **) esp;
+ hp = mpsp->heap;
+ ehp = mpsp->heap + prog->eheap_offset;
+ ret = am_true;
+ do_catch = 0;
+ fail_label = -1;
+
+ for (;;) {
+#ifdef DMC_DEBUG
+ if (*heap_fence != FENCE_PATTERN) {
+ erl_exit(1, "Heap fence overwritten in db_prog_match after op "
+ "0x%08x, overwritten with 0x%08x.", save_op, *heap_fence);
+ }
+ if (*eheap_fence != FENCE_PATTERN) {
+ erl_exit(1, "Eheap fence overwritten in db_prog_match after op "
+ "0x%08x, overwritten with 0x%08x.", save_op,
+ *eheap_fence);
+ }
+ if (*stack_fence != FENCE_PATTERN) {
+ erl_exit(1, "Stack fence overwritten in db_prog_match after op "
+ "0x%08x, overwritten with 0x%08x.", save_op,
+ *stack_fence);
+ }
+ save_op = *pc;
+#endif
+ switch (*pc++) {
+ case matchTryMeElse:
+ fail_label = *pc++;
+ break;
+ case matchArray: /* only when DCOMP_TRACE, is always first
+ instruction. */
+ n = *pc++;
+ if ((int) n != arity)
+ FAIL();
+ ep = (Eterm *) *ep;
+ break;
+ case matchArrayBind: /* When the array size is unknown. */
+ n = *pc++;
+ hp[n] = dpm_array_to_list(psp, (Eterm *) term, arity);
+ break;
+ case matchTuple: /* *ep is a tuple of arity n */
+ if (!is_tuple(*ep))
+ FAIL();
+ ep = tuple_val(*ep);
+ n = *pc++;
+ if (arityval(*ep) != n)
+ FAIL();
+ ++ep;
+ break;
+ case matchPushT: /* *ep is a tuple of arity n,
+ push ptr to first element */
+ if (!is_tuple(*ep))
+ FAIL();
+ tp = tuple_val(*ep);
+ n = *pc++;
+ if (arityval(*tp) != n)
+ FAIL();
+ *sp++ = tp + 1;
+ ++ep;
+ break;
+ case matchList:
+ if (!is_list(*ep))
+ FAIL();
+ ep = list_val(*ep);
+ break;
+ case matchPushL:
+ if (!is_list(*ep))
+ FAIL();
+ *sp++ = list_val(*ep);
+ ++ep;
+ break;
+ case matchPop:
+ ep = *(--sp);
+ break;
+ case matchBind:
+ n = *pc++;
+ hp[n] = *ep++;
+ break;
+ case matchCmp:
+ n = *pc++;
+ if (!eq(hp[n],*ep))
+ FAIL();
+ ++ep;
+ break;
+ case matchEqBin:
+ t = (Eterm) *pc++;
+ if (!eq(*ep,t))
+ FAIL();
+ ++ep;
+ break;
+ case matchEqFloat:
+ if (!is_float(*ep))
+ FAIL();
+ if (memcmp(float_val(*ep) + 1, pc, sizeof(double)))
+ FAIL();
+ pc += 2;
+ ++ep;
+ break;
+ case matchEqRef:
+ if (!is_ref(*ep))
+ FAIL();
+ if (!eq(*ep, make_internal_ref(pc)))
+ FAIL();
+ i = thing_arityval(*pc);
+ pc += i+1;
+ ++ep;
+ break;
+ case matchEqBig:
+ if (!is_big(*ep))
+ FAIL();
+ tp = big_val(*ep);
+ if (*tp != *pc)
+ FAIL();
+ i = BIG_ARITY(pc);
+ while(i--)
+ if (*++tp != *++pc)
+ FAIL();
+ ++pc;
+ ++ep;
+ break;
+ case matchEq:
+ t = (Eterm) *pc++;
+ if (t != *ep++)
+ FAIL();
+ break;
+ case matchSkip:
+ ++ep;
+ break;
+	    /*
+	     * Here come the guard instructions
+	     */
+ case matchPushC: /* Push constant */
+ *esp++ = *pc++;
+ break;
+ case matchConsA:
+ ehp[1] = *--esp;
+ ehp[0] = esp[-1];
+ esp[-1] = make_list(ehp);
+ ehp += 2;
+ break;
+ case matchConsB:
+ ehp[0] = *--esp;
+ ehp[1] = esp[-1];
+ esp[-1] = make_list(ehp);
+ ehp += 2;
+ break;
+ case matchMkTuple:
+ n = *pc++;
+ t = make_tuple(ehp);
+ *ehp++ = make_arityval(n);
+ while (n--) {
+ *ehp++ = *--esp;
+ }
+ *esp++ = t;
+ break;
+ case matchCall0:
+ bif = (Eterm (*)(Process*, ...)) *pc++;
+ t = (*bif)(psp);
+ if (is_non_value(t)) {
+ if (do_catch)
+ t = FAIL_TERM;
+ else
+ FAIL();
+ }
+ *esp++ = t;
+ break;
+ case matchCall1:
+ bif = (Eterm (*)(Process*, ...)) *pc++;
+ t = (*bif)(psp, esp[-1]);
+ if (is_non_value(t)) {
+ if (do_catch)
+ t = FAIL_TERM;
+ else
+ FAIL();
+ }
+ esp[-1] = t;
+ break;
+ case matchCall2:
+ bif = (Eterm (*)(Process*, ...)) *pc++;
+ t = (*bif)(psp, esp[-1], esp[-2]);
+ if (is_non_value(t)) {
+ if (do_catch)
+ t = FAIL_TERM;
+ else
+ FAIL();
+ }
+ --esp;
+ esp[-1] = t;
+ break;
+ case matchCall3:
+ bif = (Eterm (*)(Process*, ...)) *pc++;
+ t = (*bif)(psp, esp[-1], esp[-2], esp[-3]);
+ if (is_non_value(t)) {
+ if (do_catch)
+ t = FAIL_TERM;
+ else
+ FAIL();
+ }
+ esp -= 2;
+ esp[-1] = t;
+ break;
+ case matchPushV:
+ *esp++ = hp[*pc++];
+ break;
+ case matchPushExpr:
+ *esp++ = term;
+ break;
+ case matchPushArrayAsList:
+ n = arity; /* Only happens when 'term' is an array */
+ tp = (Eterm *) term;
+ *esp++ = make_list(ehp);
+ while (n--) {
+ *ehp++ = *tp++;
+ *ehp = make_list(ehp + 1);
+ ehp++; /* As pointed out by Mikael Pettersson the expression
+ (*ehp++ = make_list(ehp + 1)) that I previously
+ had written here has undefined behaviour. */
+ }
+ ehp[-1] = NIL;
+ break;
+ case matchPushArrayAsListU:
+ /* This instruction is NOT efficient. */
+ *esp++ = dpm_array_to_list(psp, (Eterm *) term, arity);
+ break;
+ case matchTrue:
+ if (*--esp != am_true)
+ FAIL();
+ break;
+ case matchOr:
+ n = *pc++;
+ t = am_false;
+ while (n--) {
+ if (*--esp == am_true) {
+ t = am_true;
+ } else if (*esp != am_false) {
+ esp -= n;
+ if (do_catch) {
+ t = FAIL_TERM;
+ break;
+ } else {
+ FAIL();
+ }
+ }
+ }
+ *esp++ = t;
+ break;
+ case matchAnd:
+ n = *pc++;
+ t = am_true;
+ while (n--) {
+ if (*--esp == am_false) {
+ t = am_false;
+ } else if (*esp != am_true) {
+ esp -= n;
+ if (do_catch) {
+ t = FAIL_TERM;
+ break;
+ } else {
+ FAIL();
+ }
+ }
+ }
+ *esp++ = t;
+ break;
+ case matchOrElse:
+ n = *pc++;
+ if (*--esp == am_true) {
+ ++esp;
+ pc += n;
+ } else if (*esp != am_false) {
+ if (do_catch) {
+ *esp++ = FAIL_TERM;
+ pc += n;
+ } else {
+ FAIL();
+ }
+ }
+ break;
+ case matchAndAlso:
+ n = *pc++;
+ if (*--esp == am_false) {
+ esp++;
+ pc += n;
+ } else if (*esp != am_true) {
+ if (do_catch) {
+ *esp++ = FAIL_TERM;
+ pc += n;
+ } else {
+ FAIL();
+ }
+ }
+ break;
+ case matchJump:
+ n = *pc++;
+ pc += n;
+ break;
+ case matchSelf:
+ *esp++ = c_p->id;
+ break;
+ case matchWaste:
+ --esp;
+ break;
+ case matchReturn:
+ ret = *--esp;
+ break;
+ case matchProcessDump: {
+ erts_dsprintf_buf_t *dsbufp = erts_create_tmp_dsbuf(0);
+ print_process_info(ERTS_PRINT_DSBUF, (void *) dsbufp, c_p);
+ *esp++ = new_binary(psp, (byte *)dsbufp->str, (int)dsbufp->str_len);
+ erts_destroy_tmp_dsbuf(dsbufp);
+ break;
+ }
+ case matchDisplay: /* Debugging, not for production! */
+ erts_printf("%T\n", esp[-1]);
+ esp[-1] = am_true;
+ break;
+ case matchSetReturnTrace:
+ *return_flags |= MATCH_SET_RETURN_TRACE;
+ *esp++ = am_true;
+ break;
+ case matchSetExceptionTrace:
+ *return_flags |= MATCH_SET_EXCEPTION_TRACE;
+ *esp++ = am_true;
+ break;
+ case matchIsSeqTrace:
+ if (SEQ_TRACE_TOKEN(c_p) != NIL)
+ *esp++ = am_true;
+ else
+ *esp++ = am_false;
+ break;
+ case matchSetSeqToken:
+ t = erts_seq_trace(c_p, esp[-1], esp[-2], 0);
+ if (is_non_value(t)) {
+ esp[-2] = FAIL_TERM;
+ } else {
+ esp[-2] = t;
+ }
+ --esp;
+ break;
+ case matchSetSeqTokenFake:
+ t = seq_trace_fake(c_p, esp[-1]);
+ if (is_non_value(t)) {
+ esp[-2] = FAIL_TERM;
+ } else {
+ esp[-2] = t;
+ }
+ --esp;
+ break;
+ case matchGetSeqToken:
+ if (SEQ_TRACE_TOKEN(c_p) == NIL)
+ *esp++ = NIL;
+ else {
+ *esp++ = make_tuple(ehp);
+ ehp[0] = make_arityval(5);
+ ehp[1] = SEQ_TRACE_TOKEN_FLAGS(c_p);
+ ehp[2] = SEQ_TRACE_TOKEN_LABEL(c_p);
+ ehp[3] = SEQ_TRACE_TOKEN_SERIAL(c_p);
+ ehp[4] = SEQ_TRACE_TOKEN_SENDER(c_p);
+ ehp[5] = SEQ_TRACE_TOKEN_LASTCNT(c_p);
+ ASSERT(SEQ_TRACE_TOKEN_ARITY(c_p) == 5);
+ ASSERT(is_immed(ehp[1]));
+ ASSERT(is_immed(ehp[2]));
+ ASSERT(is_immed(ehp[3]));
+ ASSERT(is_immed(ehp[5]));
+ if(!is_immed(ehp[4])) {
+ Eterm *sender = &ehp[4];
+ ehp += 6;
+ *sender = copy_struct(*sender,
+ size_object(*sender),
+ &ehp,
+ &MSO(psp));
+ }
+ else
+ ehp += 6;
+
+ }
+ break;
+ case matchEnableTrace:
+ if ( (n = erts_trace_flag2bit(esp[-1]))) {
+ BEGIN_ATOMIC_TRACE(c_p);
+ set_tracee_flags(c_p, c_p->tracer_proc, 0, n);
+ esp[-1] = am_true;
+ } else {
+ esp[-1] = FAIL_TERM;
+ }
+ break;
+ case matchEnableTrace2:
+ n = erts_trace_flag2bit((--esp)[-1]);
+ esp[-1] = FAIL_TERM;
+ if (n) {
+ BEGIN_ATOMIC_TRACE(c_p);
+ if ( (tmpp = get_proc(c_p, 0, esp[0], 0))) {
+ /* Always take over the tracer of the current process */
+ set_tracee_flags(tmpp, c_p->tracer_proc, 0, n);
+ esp[-1] = am_true;
+ }
+ }
+ break;
+ case matchDisableTrace:
+ if ( (n = erts_trace_flag2bit(esp[-1]))) {
+ BEGIN_ATOMIC_TRACE(c_p);
+ set_tracee_flags(c_p, c_p->tracer_proc, n, 0);
+ esp[-1] = am_true;
+ } else {
+ esp[-1] = FAIL_TERM;
+ }
+ break;
+ case matchDisableTrace2:
+ n = erts_trace_flag2bit((--esp)[-1]);
+ esp[-1] = FAIL_TERM;
+ if (n) {
+ BEGIN_ATOMIC_TRACE(c_p);
+ if ( (tmpp = get_proc(c_p, 0, esp[0], 0))) {
+ /* Always take over the tracer of the current process */
+ set_tracee_flags(tmpp, c_p->tracer_proc, n, 0);
+ esp[-1] = am_true;
+ }
+ }
+ break;
+ case matchCaller:
+ if (!(c_p->cp) || !(hp = find_function_from_pc(c_p->cp))) {
+ *esp++ = am_undefined;
+ } else {
+ *esp++ = make_tuple(ehp);
+ ehp[0] = make_arityval(3);
+ ehp[1] = hp[0];
+ ehp[2] = hp[1];
+ ehp[3] = make_small(hp[2]);
+ ehp += 4;
+ }
+ break;
+ case matchSilent:
+ --esp;
+ if (*esp == am_true) {
+ erts_smp_proc_lock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
+ c_p->trace_flags |= F_TRACE_SILENT;
+ erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
+ }
+ else if (*esp == am_false) {
+ erts_smp_proc_lock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
+ c_p->trace_flags &= ~F_TRACE_SILENT;
+ erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
+ }
+ break;
+ case matchTrace2:
+ {
+ /* disable enable */
+ Uint d_flags = 0, e_flags = 0; /* process trace flags */
+ Eterm tracer = c_p->tracer_proc;
+		/* XXX Atomicity note: Not fully atomic. The default tracer
+		 * is sampled from the current process but applied to
+		 * tracee and tracer later, after releasing the main
+		 * locks on the current process, so c_p->tracer_proc
+		 * may actually have changed when tracee and tracer
+		 * get updated. I do not think anybody will notice;
+		 * it is just the default value that is not fully atomic,
+		 * and the real argument settable from the match spec
+		 * {trace,[],[{{tracer,Tracer}}]} is much, much older.
+		 */
+ int cputs = 0;
+
+ if (! erts_trace_flags(esp[-1], &d_flags, &tracer, &cputs) ||
+ ! erts_trace_flags(esp[-2], &e_flags, &tracer, &cputs) ||
+ cputs ) {
+ (--esp)[-1] = FAIL_TERM;
+ break;
+ }
+ erts_smp_proc_lock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
+ (--esp)[-1] = set_match_trace(c_p, FAIL_TERM, tracer,
+ d_flags, e_flags);
+ erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
+ }
+ break;
+ case matchTrace3:
+ {
+ /* disable enable */
+ Uint d_flags = 0, e_flags = 0; /* process trace flags */
+ Eterm tracer = c_p->tracer_proc;
+ /* XXX Atomicity note. Not fully atomic. See above.
+ * Above it could possibly be solved, but not here.
+ */
+ int cputs = 0;
+ Eterm tracee = (--esp)[0];
+
+ if (! erts_trace_flags(esp[-1], &d_flags, &tracer, &cputs) ||
+ ! erts_trace_flags(esp[-2], &e_flags, &tracer, &cputs) ||
+ cputs ||
+ ! (tmpp = get_proc(c_p, ERTS_PROC_LOCK_MAIN,
+ tracee, ERTS_PROC_LOCKS_ALL))) {
+ (--esp)[-1] = FAIL_TERM;
+ break;
+ }
+ if (tmpp == c_p) {
+ (--esp)[-1] = set_match_trace(c_p, FAIL_TERM, tracer,
+ d_flags, e_flags);
+ erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
+ } else {
+ erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN);
+ (--esp)[-1] = set_match_trace(tmpp, FAIL_TERM, tracer,
+ d_flags, e_flags);
+ erts_smp_proc_unlock(tmpp, ERTS_PROC_LOCKS_ALL);
+ erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MAIN);
+ }
+ }
+ break;
+ case matchCatch:
+ do_catch = 1;
+ break;
+ case matchHalt:
+ goto success;
+ default:
+ erl_exit(1, "Internal error: unexpected opcode in match program.");
+ }
+ }
+fail:
+ *return_flags = 0U;
+    if (fail_label >= 0) { /* We failed during a "TryMeElse";
+			      let's restart with the next match
+			      program */
+ pc = (prog->text) + fail_label;
+ cleanup_match_pseudo_process(mpsp, 1);
+ goto restart;
+ }
+ ret = THE_NON_VALUE;
+success:
+
+#ifdef DMC_DEBUG
+ if (*heap_fence != FENCE_PATTERN) {
+ erl_exit(1, "Heap fence overwritten in db_prog_match after op "
+ "0x%08x, overwritten with 0x%08x.", save_op, *heap_fence);
+ }
+ if (*eheap_fence != FENCE_PATTERN) {
+ erl_exit(1, "Eheap fence overwritten in db_prog_match after op "
+ "0x%08x, overwritten with 0x%08x.", save_op,
+ *eheap_fence);
+ }
+ if (*stack_fence != FENCE_PATTERN) {
+ erl_exit(1, "Stack fence overwritten in db_prog_match after op "
+ "0x%08x, overwritten with 0x%08x.", save_op,
+ *stack_fence);
+ }
+#endif
+
+ esdp->current_process = current_scheduled;
+
+ END_ATOMIC_TRACE(c_p);
+ return ret;
+#undef FAIL
+#undef FAIL_TERM
+#undef BEGIN_ATOMIC_TRACE
+#undef END_ATOMIC_TRACE
+}
+
+
+/*
+ * Convert a match program to a "magic" binary to return up to Erlang
+ */
+Eterm db_make_mp_binary(Process *p, Binary *mp, Eterm **hpp) {
+ return erts_mk_magic_binary_term(hpp, &MSO(p), mp);
+}
+
+DMCErrInfo *db_new_dmc_err_info(void)
+{
+ DMCErrInfo *ret = erts_alloc(ERTS_ALC_T_DB_DMC_ERR_INFO,
+ sizeof(DMCErrInfo));
+ ret->var_trans = NULL;
+ ret->num_trans = 0;
+ ret->error_added = 0;
+ ret->first = NULL;
+ return ret;
+}
+
+Eterm db_format_dmc_err_info(Process *p, DMCErrInfo *ei)
+{
+ int sl;
+ int vnum;
+ DMCError *tmp;
+ Eterm *shp;
+ Eterm ret = NIL;
+ Eterm tlist, tpl, sev;
+ char buff[DMC_ERR_STR_LEN + 20 /* for the number */];
+
+ for (tmp = ei->first; tmp != NULL; tmp = tmp->next) {
+ if (tmp->variable >= 0 &&
+ tmp->variable < ei->num_trans &&
+ ei->var_trans != NULL) {
+ vnum = (int) ei->var_trans[tmp->variable];
+ } else {
+ vnum = tmp->variable;
+ }
+ if (vnum >= 0)
+ sprintf(buff,tmp->error_string, vnum);
+ else
+ strcpy(buff,tmp->error_string);
+ sl = strlen(buff);
+ shp = HAlloc(p, sl * 2 + 5);
+ sev = (tmp->severity == dmcWarning) ?
+ am_atom_put("warning",7) :
+ am_error;
+ tlist = buf_to_intlist(&shp, buff, sl, NIL);
+ tpl = TUPLE2(shp, sev, tlist);
+ shp += 3;
+ ret = CONS(shp, tpl, ret);
+ shp += 2;
+ }
+ return ret;
+}
+
+void db_free_dmc_err_info(DMCErrInfo *ei){
+ while (ei->first != NULL) {
+ DMCError *ll = ei->first->next;
+ erts_free(ERTS_ALC_T_DB_DMC_ERROR, ei->first);
+ ei->first = ll;
+ }
+ if (ei->var_trans)
+ erts_free(ERTS_ALC_T_DB_TRANS_TAB, ei->var_trans);
+ erts_free(ERTS_ALC_T_DB_DMC_ERR_INFO, ei);
+}
+
+/* Calculate integer addition: counter+incr.
+** Store bignum in *hpp and increase *hpp accordingly.
+** *hpp is assumed to be large enough to hold the result.
+*/
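+/* Example: with counter = make_small(MAX_SMALL) and incr = make_small(1),
+** the sum no longer fits in a small, so a two-word bignum is written at
+** *hpp and *hpp is advanced by 2 (see the assertion below).
+*/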
+Eterm db_add_counter(Eterm** hpp, Eterm counter, Eterm incr)
+{
+ Eterm big_tmp[2];
+ Eterm res;
+ Sint ires;
+ Eterm arg1;
+ Eterm arg2;
+
+ if (is_both_small(counter,incr)) {
+ ires = signed_val(counter) + signed_val(incr);
+ if (IS_SSMALL(ires)) {
+ return make_small(ires);
+ } else {
+ res = small_to_big(ires, *hpp);
+ ASSERT(BIG_NEED_SIZE(big_size(res))==2);
+ *hpp += 2;
+ return res;
+ }
+ }
+ else {
+ switch(NUMBER_CODE(counter, incr)) {
+ case SMALL_BIG:
+ arg1 = small_to_big(signed_val(counter), big_tmp);
+ arg2 = incr;
+ break;
+ case BIG_SMALL:
+ arg1 = counter;
+ arg2 = small_to_big(signed_val(incr), big_tmp);
+ break;
+ case BIG_BIG:
+ arg1 = incr;
+ arg2 = counter;
+ break;
+ default:
+ return THE_NON_VALUE;
+ }
+ res = big_plus(arg1, arg2, *hpp);
+ if (is_big(res)) {
+ *hpp += BIG_NEED_SIZE(big_size(res));
+ }
+ return res;
+ }
+}
+
+/*
+** Update one element:
+** handle: Initialized by db_lookup_dbterm()
+** position: The tuple position of the elements to be updated.
+** newval: The new value of the element.
+** Cannot fail.
+*/
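+/* Example: overwriting one float with another reuses the old boxed words
+** in place (same size, "self contained" terms); replacing it with a list
+** instead sets mustResize, and the finalize step makes a flat copy.
+*/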
+void db_do_update_element(DbUpdateHandle* handle,
+ Sint position,
+ Eterm newval)
+{
+ Eterm oldval = handle->dbterm->tpl[position];
+ Eterm* newp;
+ Eterm* oldp;
+ Uint newval_sz;
+ Uint oldval_sz;
+
+ if (is_both_immed(newval,oldval)) {
+ handle->dbterm->tpl[position] = newval;
+ return;
+ }
+ else if (!handle->mustResize && is_boxed(newval)) {
+ newp = boxed_val(newval);
+ switch (*newp & _TAG_HEADER_MASK) {
+ case _TAG_HEADER_POS_BIG:
+ case _TAG_HEADER_NEG_BIG:
+ case _TAG_HEADER_FLOAT:
+ case _TAG_HEADER_HEAP_BIN:
+ newval_sz = header_arity(*newp) + 1;
+ if (is_boxed(oldval)) {
+ oldp = boxed_val(oldval);
+ switch (*oldp & _TAG_HEADER_MASK) {
+ case _TAG_HEADER_POS_BIG:
+ case _TAG_HEADER_NEG_BIG:
+ case _TAG_HEADER_FLOAT:
+ case _TAG_HEADER_HEAP_BIN:
+ oldval_sz = header_arity(*oldp) + 1;
+ if (oldval_sz == newval_sz) {
+ /* "self contained" terms of same size, do memcpy */
+ sys_memcpy(oldp, newp, newval_sz*sizeof(Eterm));
+ return;
+ }
+ goto both_size_set;
+ }
+ }
+ goto new_size_set;
+ }
+ }
+    /* Not possible to do a simple memcpy, or the dbterm is already
+       non-contiguous; we need to realloc... */
+
+ newval_sz = is_immed(newval) ? 0 : size_object(newval);
+new_size_set:
+
+ oldval_sz = is_immed(oldval) ? 0 : size_object(oldval);
+both_size_set:
+
+ handle->new_size = handle->new_size - oldval_sz + newval_sz;
+
+ /* write new value in old dbterm, finalize will make a flat copy */
+ handle->dbterm->tpl[position] = newval;
+ handle->mustResize = 1;
+}
+
+
+/*
+** Copy the object into a possibly new DbTerm. 'offset' is the offset of
+** the DbTerm from the start of the allocated structure. The possibly
+** reallocated and copied structure is returned. Make sure that
+** (((char *) old) - offset) is a pointer to an ERTS_ALC_T_DB_TERM
+** allocated data area.
+*/
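+/*
+** Illustrative layout of the data area handled here:
+**
+**   structp -> +----------------------------+
+**              | table-specific data        |  ('offset' bytes)
+**         p -> +----------------------------+
+**              | DbTerm                     |
+**              | term words (p->size - 1)   |
+**              +----------------------------+
+*/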
+void* db_get_term(DbTableCommon *tb, DbTerm* old, Uint offset, Eterm obj)
+{
+ int size = size_object(obj);
+ void *structp = ((char*) old) - offset;
+ DbTerm* p;
+ Eterm copy;
+ Eterm *top;
+
+ if (old != 0) {
+ erts_cleanup_offheap(&old->off_heap);
+ if (size == old->size) {
+ p = old;
+ } else {
+ Uint new_sz = offset + sizeof(DbTerm) + sizeof(Eterm)*(size-1);
+ Uint old_sz = offset + sizeof(DbTerm) + sizeof(Eterm)*(old->size-1);
+
+ if (erts_ets_realloc_always_moves) {
+ void *nstructp = erts_db_alloc(ERTS_ALC_T_DB_TERM,
+ (DbTable *) tb,
+ new_sz);
+ memcpy(nstructp,structp,offset);
+ erts_db_free(ERTS_ALC_T_DB_TERM,
+ (DbTable *) tb,
+ structp,
+ old_sz);
+ structp = nstructp;
+ } else {
+ structp = erts_db_realloc(ERTS_ALC_T_DB_TERM,
+ (DbTable *) tb,
+ structp,
+ old_sz,
+ new_sz);
+ }
+ p = (DbTerm*) ((void *)(((char *) structp) + offset));
+ }
+ }
+ else {
+ structp = erts_db_alloc(ERTS_ALC_T_DB_TERM,
+ (DbTable *) tb,
+ (offset
+ + sizeof(DbTerm)
+ + sizeof(Eterm)*(size-1)));
+ p = (DbTerm*) ((void *)(((char *) structp) + offset));
+ }
+ p->size = size;
+ p->off_heap.mso = NULL;
+ p->off_heap.externals = NULL;
+#ifndef HYBRID /* FIND ME! */
+ p->off_heap.funs = NULL;
+#endif
+ p->off_heap.overhead = 0;
+
+ top = DBTERM_BUF(p);
+ copy = copy_struct(obj, size, &top, &p->off_heap);
+ DBTERM_SET_TPL(p,tuple_val(copy));
+
+ return structp;
+}
+
+
+void db_free_term_data(DbTerm* p)
+{
+ erts_cleanup_offheap(&p->off_heap);
+}
+
+
+/*
+** Check if an object represents a "match" variable,
+** i.e. an atom '$N' where N is an integer.
+*/
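+/* Examples: db_is_variable('$0') == 0 and db_is_variable('$12') == 12,
+** while '$', '$01', 'foo' and any non-atom all yield -1.
+*/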
+
+int db_is_variable(Eterm obj)
+{
+ byte *b;
+ int n;
+ int N;
+
+ if (is_not_atom(obj))
+ return -1;
+ b = atom_tab(atom_val(obj))->name;
+ if ((n = atom_tab(atom_val(obj))->len) < 2)
+ return -1;
+ if (*b++ != '$')
+ return -1;
+ n--;
+ /* Handle first digit */
+ if (*b == '0')
+ return (n == 1) ? 0 : -1;
+ if (*b >= '1' && *b <= '9')
+ N = *b++ - '0';
+ else
+ return -1;
+ n--;
+ while(n--) {
+ if (*b >= '0' && *b <= '9') {
+ N = N*10 + (*b - '0');
+ b++;
+ }
+ else
+ return -1;
+ }
+ return N;
+}
+
+
+/* check if obj is (or contains) a variable */
+/* return 1 if obj contains a variable or underscore */
+/* return 0 if obj is fully ground */
+
+int db_has_variable(Eterm obj)
+{
+ switch(obj & _TAG_PRIMARY_MASK) {
+ case TAG_PRIMARY_LIST: {
+ while (is_list(obj)) {
+ if (db_has_variable(CAR(list_val(obj))))
+ return 1;
+ obj = CDR(list_val(obj));
+ }
+	return(db_has_variable(obj)); /* Non-well-formed list or [] */
+ }
+ case TAG_PRIMARY_BOXED:
+ if (!BOXED_IS_TUPLE(obj)) {
+ return 0;
+ } else {
+ Eterm *tuple = tuple_val(obj);
+ int arity = arityval(*tuple++);
+ while(arity--) {
+ if (db_has_variable(*tuple))
+ return 1;
+ tuple++;
+ }
+ return(0);
+ }
+ case TAG_PRIMARY_IMMED1:
+ if (obj == am_Underscore || db_is_variable(obj) >= 0)
+ return 1;
+ }
+ return 0;
+}
+
+int erts_db_is_compiled_ms(Eterm term)
+{
+ return (is_binary(term)
+ && (thing_subtag(*binary_val(term)) == REFC_BINARY_SUBTAG)
+ && IsMatchProgBinary((((ProcBin *) binary_val(term))->val)));
+}
+
+/*
+** Local (static) utilities.
+*/
+
+/*
+***************************************************************************
+** Compiled matches
+***************************************************************************
+*/
+/*
+** Utility to add an error
+*/
+
+static void add_dmc_err(DMCErrInfo *err_info,
+ char *str,
+ int variable,
+ Eterm term,
+ DMCErrorSeverity severity)
+{
+    /* Linked in reverse order, to ease the formatting */
+ DMCError *e = erts_alloc(ERTS_ALC_T_DB_DMC_ERROR, sizeof(DMCError));
+ if (term != 0UL) {
+ erts_snprintf(e->error_string, DMC_ERR_STR_LEN, str, term);
+ } else {
+ strncpy(e->error_string, str, DMC_ERR_STR_LEN);
+ e->error_string[DMC_ERR_STR_LEN] ='\0';
+ }
+ e->variable = variable;
+ e->severity = severity;
+ e->next = err_info->first;
+#ifdef HARDDEBUG
+ erts_fprintf(stderr,"add_dmc_err: %s\n",e->error_string);
+#endif
+ err_info->first = e;
+ if (severity >= dmcError)
+ err_info->error_added = 1;
+}
+
+/*
+** Handle one term in the match expression (not the guard)
+*/
+static DMCRet dmc_one_term(DMCContext *context,
+ DMCHeap *heap,
+ DMC_STACK_TYPE(Eterm) *stack,
+ DMC_STACK_TYPE(Uint) *text,
+ Eterm c)
+{
+ Sint n;
+ Eterm *hp;
+ ErlHeapFragment *tmp_mb;
+ Uint sz, sz2, sz3;
+ Uint i, j;
+
+
+ switch (c & _TAG_PRIMARY_MASK) {
+ case TAG_PRIMARY_IMMED1:
+ if ((n = db_is_variable(c)) >= 0) { /* variable */
+ if (n >= heap->size) {
+		/*
+		** Ouch, the variable number is too big for the default
+		** heap; compact the variable numbering and restart.
+		*/
+ Eterm *save_hp;
+ ASSERT(heap->data == heap->def);
+ sz = sz2 = sz3 = 0;
+ for (j = 0; j < context->num_match; ++j) {
+ sz += size_object(context->matchexpr[j]);
+ sz2 += size_object(context->guardexpr[j]);
+ sz3 += size_object(context->bodyexpr[j]);
+ }
+ context->copy =
+ new_message_buffer(sz + sz2 + sz3 +
+ context->num_match);
+ save_hp = hp = context->copy->mem;
+ hp += context->num_match;
+ for (j = 0; j < context->num_match; ++j) {
+ context->matchexpr[j] =
+ copy_struct(context->matchexpr[j],
+ size_object(context->matchexpr[j]), &hp,
+ &(context->copy->off_heap));
+ context->guardexpr[j] =
+ copy_struct(context->guardexpr[j],
+ size_object(context->guardexpr[j]), &hp,
+ &(context->copy->off_heap));
+ context->bodyexpr[j] =
+ copy_struct(context->bodyexpr[j],
+ size_object(context->bodyexpr[j]), &hp,
+ &(context->copy->off_heap));
+ }
+ for (j = 0; j < context->num_match; ++j) {
+		/* the actual expressions can be
+		   atoms in themselves, place them first */
+ *save_hp++ = context->matchexpr[j];
+ }
+ heap->size = match_compact(context->copy,
+ context->err_info);
+ for (j = 0; j < context->num_match; ++j) {
+ /* restore the match terms, as they
+ may be atoms that changed */
+ context->matchexpr[j] = context->copy->mem[j];
+ }
+ heap->data = erts_alloc(ERTS_ALC_T_DB_MS_CMPL_HEAP,
+ heap->size*sizeof(unsigned));
+ sys_memset(heap->data, 0,
+ heap->size * sizeof(unsigned));
+ DMC_CLEAR(*stack);
+ /*DMC_PUSH(*stack,NIL);*/
+ DMC_CLEAR(*text);
+ return retRestart;
+ }
+ if (heap->data[n]) { /* already bound ? */
+ DMC_PUSH(*text,matchCmp);
+ DMC_PUSH(*text,n);
+ } else { /* Not bound, bind! */
+ if (n >= heap->used)
+ heap->used = n + 1;
+ DMC_PUSH(*text,matchBind);
+ DMC_PUSH(*text,n);
+ heap->data[n] = 1;
+ }
+ } else if (c == am_Underscore) {
+ DMC_PUSH(*text, matchSkip);
+ } else { /* Any immediate value */
+ DMC_PUSH(*text, matchEq);
+ DMC_PUSH(*text, (Uint) c);
+ }
+ break;
+ case TAG_PRIMARY_LIST:
+ DMC_PUSH(*text, matchPushL);
+ ++(context->stack_used);
+ DMC_PUSH(*stack, c);
+ break;
+ case TAG_PRIMARY_BOXED: {
+ Eterm hdr = *boxed_val(c);
+ switch ((hdr & _TAG_HEADER_MASK) >> _TAG_PRIMARY_SIZE) {
+ case (_TAG_HEADER_ARITYVAL >> _TAG_PRIMARY_SIZE):
+ n = arityval(*tuple_val(c));
+ DMC_PUSH(*text, matchPushT);
+ ++(context->stack_used);
+ DMC_PUSH(*text, n);
+ DMC_PUSH(*stack, c);
+ break;
+ case (_TAG_HEADER_REF >> _TAG_PRIMARY_SIZE):
+ n = thing_arityval(*internal_ref_val(c));
+ DMC_PUSH(*text, matchEqRef);
+ DMC_PUSH(*text, *internal_ref_val(c));
+ for (i = 1; i <= n; ++i) {
+ DMC_PUSH(*text, (Uint) internal_ref_val(c)[i]);
+ }
+ break;
+ case (_TAG_HEADER_POS_BIG >> _TAG_PRIMARY_SIZE):
+ case (_TAG_HEADER_NEG_BIG >> _TAG_PRIMARY_SIZE):
+ n = thing_arityval(*big_val(c));
+ DMC_PUSH(*text, matchEqBig);
+ DMC_PUSH(*text, *big_val(c));
+ for (i = 1; i <= n; ++i) {
+ DMC_PUSH(*text, (Uint) big_val(c)[i]);
+ }
+ break;
+ case (_TAG_HEADER_FLOAT >> _TAG_PRIMARY_SIZE):
+ DMC_PUSH(*text,matchEqFloat);
+ DMC_PUSH(*text, (Uint) float_val(c)[1]);
+ /* XXX: this reads and pushes random junk on ARCH_64 */
+ DMC_PUSH(*text, (Uint) float_val(c)[2]);
+ break;
+ default: /* BINARY, FUN, VECTOR, or EXTERNAL */
+ /*
+ ** Make a private copy...
+ */
+ n = size_object(c);
+ tmp_mb = new_message_buffer(n);
+ hp = tmp_mb->mem;
+ DMC_PUSH(*text, matchEqBin);
+ DMC_PUSH(*text, copy_struct(c, n, &hp, &(tmp_mb->off_heap)));
+ tmp_mb->next = context->save;
+ context->save = tmp_mb;
+ break;
+ }
+ break;
+ }
+ default:
+ erl_exit(1, "db_match_compile: "
+ "Bad object on heap: 0x%08lx\n",
+ (unsigned long) c);
+ }
+ return retOk;
+}
+
+/*
+** Match guard compilation
+*/
+
+static void do_emit_constant(DMCContext *context, DMC_STACK_TYPE(Uint) *text,
+ Eterm t)
+{
+ int sz;
+ ErlHeapFragment *emb;
+ Eterm *hp;
+ Eterm tmp;
+
+ if (IS_CONST(t)) {
+ tmp = t;
+ } else {
+ sz = my_size_object(t);
+ emb = new_message_buffer(sz);
+ hp = emb->mem;
+ tmp = my_copy_struct(t,&hp,&(emb->off_heap));
+ emb->next = context->save;
+ context->save = emb;
+ }
+ DMC_PUSH(*text,matchPushC);
+ DMC_PUSH(*text,(Uint) tmp);
+ if (++context->stack_used > context->stack_need)
+ context->stack_need = context->stack_used;
+}
+
+#define RETURN_ERROR_X(String, X, Y, ContextP, ConstantF) \
+do { \
+if ((ContextP)->err_info != NULL) { \
+ (ConstantF) = 0; \
+ add_dmc_err((ContextP)->err_info, String, X, Y, dmcError); \
+ return retOk; \
+} else \
+ return retFail; \
+} while(0)
+
+#define RETURN_ERROR(String, ContextP, ConstantF) \
+ RETURN_ERROR_X(String, -1, 0UL, ContextP, ConstantF)
+
+#define RETURN_VAR_ERROR(String, N, ContextP, ConstantF) \
+ RETURN_ERROR_X(String, N, 0UL, ContextP, ConstantF)
+
+#define RETURN_TERM_ERROR(String, T, ContextP, ConstantF) \
+ RETURN_ERROR_X(String, -1, T, ContextP, ConstantF)
+
+#define WARNING(String, ContextP) \
+add_dmc_err((ContextP)->err_info, String, -1, 0UL, dmcWarning)
+
+#define VAR_WARNING(String, N, ContextP) \
+add_dmc_err((ContextP)->err_info, String, N, 0UL, dmcWarning)
+
+#define TERM_WARNING(String, T, ContextP) \
+add_dmc_err((ContextP)->err_info, String, -1, T, dmcWarning)
+
+static DMCRet dmc_list(DMCContext *context,
+ DMCHeap *heap,
+ DMC_STACK_TYPE(Uint) *text,
+ Eterm t,
+ int *constant)
+{
+ int c1;
+ int c2;
+ int ret;
+
+ if ((ret = dmc_expr(context, heap, text, CAR(list_val(t)), &c1)) != retOk)
+ return ret;
+
+ if ((ret = dmc_expr(context, heap, text, CDR(list_val(t)), &c2)) != retOk)
+ return ret;
+
+ if (c1 && c2) {
+ *constant = 1;
+ return retOk;
+ }
+ *constant = 0;
+ if (!c1) {
+ /* The CAR is not a constant, so if the CDR is, we just push it,
+ otherwise it is already pushed. */
+ if (c2)
+ do_emit_constant(context, text, CDR(list_val(t)));
+ DMC_PUSH(*text, matchConsA);
+ } else { /* !c2 && c1 */
+ do_emit_constant(context, text, CAR(list_val(t)));
+ DMC_PUSH(*text, matchConsB);
+ }
+ --context->stack_used; /* Two objects on stack becomes one */
+ context->eheap_need += 2;
+ return retOk;
+}
+
+static DMCRet dmc_tuple(DMCContext *context,
+ DMCHeap *heap,
+ DMC_STACK_TYPE(Uint) *text,
+ Eterm t,
+ int *constant)
+{
+ DMC_STACK_TYPE(Uint) instr_save;
+ int all_constant = 1;
+ int textpos = DMC_STACK_NUM(*text);
+ Eterm *p = tuple_val(t);
+ Uint nelems = arityval(*p);
+ Uint i;
+ int c;
+ DMCRet ret;
+
+    /*
+    ** We remember where we started to lay out code,
+    ** assume everything is constant, and back up and restart if not.
+    ** The tuple should be laid out with the last element first,
+    ** so that we can memcpy the tuple to the eheap.
+    */
+ for (i = nelems; i > 0; --i) {
+ if ((ret = dmc_expr(context, heap, text, p[i], &c)) != retOk)
+ return ret;
+ if (!c && all_constant) {
+ all_constant = 0;
+ if (i < nelems) {
+ Uint j;
+
+ /*
+ * Oops, we need to relayout the constants.
+ * Save the already laid out instructions.
+ */
+ DMC_INIT_STACK(instr_save);
+ while (DMC_STACK_NUM(*text) > textpos)
+ DMC_PUSH(instr_save, DMC_POP(*text));
+ for (j = nelems; j > i; --j)
+ do_emit_constant(context, text, p[j]);
+ while(!DMC_EMPTY(instr_save))
+ DMC_PUSH(*text, DMC_POP(instr_save));
+ DMC_FREE(instr_save);
+ }
+ } else if (c && !all_constant) {
+ /* push a constant */
+ do_emit_constant(context, text, p[i]);
+ }
+ }
+
+ if (all_constant) {
+ *constant = 1;
+ return retOk;
+ }
+ DMC_PUSH(*text, matchMkTuple);
+ DMC_PUSH(*text, nelems);
+ context->stack_used -= (nelems - 1);
+ context->eheap_need += (nelems + 1);
+ *constant = 0;
+ return retOk;
+}
+
+static DMCRet dmc_whole_expression(DMCContext *context,
+ DMCHeap *heap,
+ DMC_STACK_TYPE(Uint) *text,
+ Eterm t,
+ int *constant)
+{
+ if (context->cflags & DCOMP_TRACE) {
+ /* Hmmm, convert array to list... */
+ if (context->special) {
+ DMC_PUSH(*text, matchPushArrayAsListU);
+ } else {
+ ASSERT(is_tuple(context->matchexpr
+ [context->current_match]));
+ context->eheap_need +=
+ arityval(*(tuple_val(context->matchexpr
+ [context->current_match]))) * 2;
+ DMC_PUSH(*text, matchPushArrayAsList);
+ }
+ } else {
+ DMC_PUSH(*text, matchPushExpr);
+ }
+ ++context->stack_used;
+ if (context->stack_used > context->stack_need)
+ context->stack_need = context->stack_used;
+ *constant = 0;
+ return retOk;
+}
+
+static DMCRet dmc_variable(DMCContext *context,
+ DMCHeap *heap,
+ DMC_STACK_TYPE(Uint) *text,
+ Eterm t,
+ int *constant)
+{
+    Sint n = db_is_variable(t);
+    ASSERT(n >= 0);
+    if ((Uint) n >= heap->used)
+ RETURN_VAR_ERROR("Variable $%d is unbound.", n, context, *constant);
+ if (heap->data[n] == 0U)
+ RETURN_VAR_ERROR("Variable $%d is unbound.", n, context, *constant);
+ DMC_PUSH(*text, matchPushV);
+ DMC_PUSH(*text, n);
+ ++context->stack_used;
+ if (context->stack_used > context->stack_need)
+ context->stack_need = context->stack_used;
+ *constant = 0;
+ return retOk;
+}
+
+static DMCRet dmc_all_bindings(DMCContext *context,
+ DMCHeap *heap,
+ DMC_STACK_TYPE(Uint) *text,
+ Eterm t,
+ int *constant)
+{
+ int i;
+ int heap_used = 0;
+
+ DMC_PUSH(*text, matchPushC);
+ DMC_PUSH(*text, NIL);
+ for (i = heap->used - 1; i >= 0; --i) {
+ if (heap->data[i]) {
+ DMC_PUSH(*text, matchPushV);
+ DMC_PUSH(*text, i);
+ DMC_PUSH(*text, matchConsB);
+ heap_used += 2;
+ }
+ }
+ ++context->stack_used;
+ if ((context->stack_used + 1) > context->stack_need)
+ context->stack_need = (context->stack_used + 1);
+ context->eheap_need += heap_used;
+ *constant = 0;
+ return retOk;
+}
+
+static DMCRet dmc_const(DMCContext *context,
+ DMCHeap *heap,
+ DMC_STACK_TYPE(Uint) *text,
+ Eterm t,
+ int *constant)
+{
+ Eterm *p = tuple_val(t);
+ Uint a = arityval(*p);
+
+    if (a != 2) {
+	RETURN_TERM_ERROR("Special form 'const' called with wrong "
+			  "number of arguments in %T.", t, context, *constant);
+ }
+ *constant = 1;
+ return retOk;
+}
+
+static DMCRet dmc_and(DMCContext *context,
+ DMCHeap *heap,
+ DMC_STACK_TYPE(Uint) *text,
+ Eterm t,
+ int *constant)
+{
+ Eterm *p = tuple_val(t);
+ Uint a = arityval(*p);
+ DMCRet ret;
+ int i;
+ int c;
+
+ if (a < 2) {
+ RETURN_TERM_ERROR("Special form 'and' called without arguments "
+ "in %T.", t, context, *constant);
+ }
+ *constant = 0;
+ for (i = a; i > 1; --i) {
+ if ((ret = dmc_expr(context, heap, text, p[i], &c)) != retOk)
+ return ret;
+ if (c)
+ do_emit_constant(context, text, p[i]);
+ }
+ DMC_PUSH(*text, matchAnd);
+ DMC_PUSH(*text, (Uint) a - 1);
+ context->stack_used -= (a - 2);
+ return retOk;
+}
+
+static DMCRet dmc_or(DMCContext *context,
+ DMCHeap *heap,
+ DMC_STACK_TYPE(Uint) *text,
+ Eterm t,
+ int *constant)
+{
+ Eterm *p = tuple_val(t);
+ Uint a = arityval(*p);
+ DMCRet ret;
+ int i;
+ int c;
+
+ if (a < 2) {
+ RETURN_TERM_ERROR("Special form 'or' called without arguments "
+ "in %T.", t, context, *constant);
+ }
+ *constant = 0;
+ for (i = a; i > 1; --i) {
+ if ((ret = dmc_expr(context, heap, text, p[i], &c)) != retOk)
+ return ret;
+ if (c)
+ do_emit_constant(context, text, p[i]);
+ }
+ DMC_PUSH(*text, matchOr);
+ DMC_PUSH(*text, (Uint) a - 1);
+ context->stack_used -= (a - 2);
+ return retOk;
+}
+
+
+static DMCRet dmc_andalso(DMCContext *context,
+ DMCHeap *heap,
+ DMC_STACK_TYPE(Uint) *text,
+ Eterm t,
+ int *constant)
+{
+ Eterm *p = tuple_val(t);
+ Uint a = arityval(*p);
+ DMCRet ret;
+ int i;
+ int c;
+ Uint lbl;
+ Uint lbl_next;
+ Uint lbl_val;
+
+ if (a < 2) {
+ RETURN_TERM_ERROR("Special form 'andalso' called without"
+ " arguments "
+ "in %T.", t, context, *constant);
+ }
+ *constant = 0;
+ lbl = 0;
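+    /* 'lbl' threads a linked list of not-yet-patched branch operands
+       through 'text': each instruction pushed below stores the previous
+       link and is back-patched with its forward offset once the target
+       is known. */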
+ for (i = 2; i <= a; ++i) {
+ if ((ret = dmc_expr(context, heap, text, p[i], &c)) != retOk)
+ return ret;
+ if (c)
+ do_emit_constant(context, text, p[i]);
+ if (i == a) {
+ DMC_PUSH(*text, matchJump);
+ } else {
+ DMC_PUSH(*text, matchAndAlso);
+ }
+ DMC_PUSH(*text, lbl);
+ lbl = DMC_STACK_NUM(*text)-1;
+ --(context->stack_used);
+ }
+ DMC_PUSH(*text, matchPushC);
+ DMC_PUSH(*text, am_true);
+ lbl_val = DMC_STACK_NUM(*text);
+ while (lbl) {
+ lbl_next = DMC_PEEK(*text, lbl);
+ DMC_POKE(*text, lbl, lbl_val-lbl-1);
+ lbl = lbl_next;
+ }
+ if (++context->stack_used > context->stack_need)
+ context->stack_need = context->stack_used;
+ return retOk;
+}
+
+static DMCRet dmc_orelse(DMCContext *context,
+ DMCHeap *heap,
+ DMC_STACK_TYPE(Uint) *text,
+ Eterm t,
+ int *constant)
+{
+ Eterm *p = tuple_val(t);
+ Uint a = arityval(*p);
+ DMCRet ret;
+ int i;
+ int c;
+ Uint lbl;
+ Uint lbl_next;
+ Uint lbl_val;
+
+ if (a < 2) {
+ RETURN_TERM_ERROR("Special form 'orelse' called without arguments "
+ "in %T.", t, context, *constant);
+ }
+ *constant = 0;
+ lbl = 0;
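+    /* Same back-patching scheme as in dmc_andalso above. */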
+ for (i = 2; i <= a; ++i) {
+ if ((ret = dmc_expr(context, heap, text, p[i], &c)) != retOk)
+ return ret;
+ if (c)
+ do_emit_constant(context, text, p[i]);
+ if (i == a) {
+ DMC_PUSH(*text, matchJump);
+ } else {
+ DMC_PUSH(*text, matchOrElse);
+ }
+ DMC_PUSH(*text, lbl);
+ lbl = DMC_STACK_NUM(*text)-1;
+ --(context->stack_used);
+ }
+ DMC_PUSH(*text, matchPushC);
+ DMC_PUSH(*text, am_false);
+ lbl_val = DMC_STACK_NUM(*text);
+ while (lbl) {
+ lbl_next = DMC_PEEK(*text, lbl);
+ DMC_POKE(*text, lbl, lbl_val-lbl-1);
+ lbl = lbl_next;
+ }
+ if (++context->stack_used > context->stack_need)
+ context->stack_need = context->stack_used;
+ return retOk;
+}
+
+static DMCRet dmc_message(DMCContext *context,
+ DMCHeap *heap,
+ DMC_STACK_TYPE(Uint) *text,
+ Eterm t,
+ int *constant)
+{
+ Eterm *p = tuple_val(t);
+ Uint a = arityval(*p);
+ DMCRet ret;
+ int c;
+
+
+ if (!(context->cflags & DCOMP_TRACE)) {
+ RETURN_ERROR("Special form 'message' used in wrong dialect.",
+ context,
+ *constant);
+ }
+ if (context->is_guard) {
+ RETURN_ERROR("Special form 'message' called in guard context.",
+ context,
+ *constant);
+ }
+
+ if (a != 2) {
+ RETURN_TERM_ERROR("Special form 'message' called with wrong "
+ "number of arguments in %T.", t, context,
+ *constant);
+ }
+ *constant = 0;
+ if ((ret = dmc_expr(context, heap, text, p[2], &c)) != retOk) {
+ return ret;
+ }
+ if (c) {
+ do_emit_constant(context, text, p[2]);
+ }
+ DMC_PUSH(*text, matchReturn);
+ DMC_PUSH(*text, matchPushC);
+ DMC_PUSH(*text, am_true);
+ /* Push as much as we remove, stack_need is untouched */
+ return retOk;
+}
+
+static DMCRet dmc_self(DMCContext *context,
+ DMCHeap *heap,
+ DMC_STACK_TYPE(Uint) *text,
+ Eterm t,
+ int *constant)
+{
+ Eterm *p = tuple_val(t);
+ Uint a = arityval(*p);
+
+ if (a != 1) {
+ RETURN_TERM_ERROR("Special form 'self' called with arguments "
+ "in %T.", t, context, *constant);
+ }
+ *constant = 0;
+ DMC_PUSH(*text, matchSelf);
+ if (++context->stack_used > context->stack_need)
+ context->stack_need = context->stack_used;
+ return retOk;
+}
+
+static DMCRet dmc_return_trace(DMCContext *context,
+ DMCHeap *heap,
+ DMC_STACK_TYPE(Uint) *text,
+ Eterm t,
+ int *constant)
+{
+ Eterm *p = tuple_val(t);
+ Uint a = arityval(*p);
+
+ if (!(context->cflags & DCOMP_TRACE)) {
+ RETURN_ERROR("Special form 'return_trace' used in wrong dialect.",
+ context,
+ *constant);
+ }
+ if (context->is_guard) {
+ RETURN_ERROR("Special form 'return_trace' called in "
+ "guard context.", context, *constant);
+ }
+
+ if (a != 1) {
+ RETURN_TERM_ERROR("Special form 'return_trace' called with "
+ "arguments in %T.", t, context, *constant);
+ }
+ *constant = 0;
+ DMC_PUSH(*text, matchSetReturnTrace); /* Pushes 'true' on the stack */
+ if (++context->stack_used > context->stack_need)
+ context->stack_need = context->stack_used;
+ return retOk;
+}
+
+static DMCRet dmc_exception_trace(DMCContext *context,
+ DMCHeap *heap,
+ DMC_STACK_TYPE(Uint) *text,
+ Eterm t,
+ int *constant)
+{
+ Eterm *p = tuple_val(t);
+ Uint a = arityval(*p);
+
+ if (!(context->cflags & DCOMP_TRACE)) {
+ RETURN_ERROR("Special form 'exception_trace' used in wrong dialect.",
+ context,
+ *constant);
+ }
+ if (context->is_guard) {
+ RETURN_ERROR("Special form 'exception_trace' called in "
+ "guard context.", context, *constant);
+ }
+
+ if (a != 1) {
+ RETURN_TERM_ERROR("Special form 'exception_trace' called with "
+ "arguments in %T.", t, context, *constant);
+ }
+ *constant = 0;
+ DMC_PUSH(*text, matchSetExceptionTrace); /* Pushes 'true' on the stack */
+ if (++context->stack_used > context->stack_need)
+ context->stack_need = context->stack_used;
+ return retOk;
+}
+
+
+
+static DMCRet dmc_is_seq_trace(DMCContext *context,
+ DMCHeap *heap,
+ DMC_STACK_TYPE(Uint) *text,
+ Eterm t,
+ int *constant)
+{
+ Eterm *p = tuple_val(t);
+ Uint a = arityval(*p);
+
+ if (!(context->cflags & DCOMP_TRACE)) {
+ RETURN_ERROR("Special form 'is_seq_trace' used in wrong dialect.",
+ context,
+ *constant);
+ }
+ if (a != 1) {
+ RETURN_TERM_ERROR("Special form 'is_seq_trace' called with "
+ "arguments in %T.", t, context, *constant);
+ }
+ *constant = 0;
+ DMC_PUSH(*text, matchIsSeqTrace);
+ /* Pushes 'true' or 'false' on the stack */
+ if (++context->stack_used > context->stack_need)
+ context->stack_need = context->stack_used;
+ return retOk;
+}
+
+static DMCRet dmc_set_seq_token(DMCContext *context,
+ DMCHeap *heap,
+ DMC_STACK_TYPE(Uint) *text,
+ Eterm t,
+ int *constant)
+{
+ Eterm *p = tuple_val(t);
+ Uint a = arityval(*p);
+ DMCRet ret;
+ int c;
+
+
+ if (!(context->cflags & DCOMP_TRACE)) {
+ RETURN_ERROR("Special form 'set_seq_token' used in wrong dialect.",
+ context,
+ *constant);
+ }
+ if (context->is_guard) {
+ RETURN_ERROR("Special form 'set_seq_token' called in "
+ "guard context.", context, *constant);
+ }
+
+ if (a != 3) {
+ RETURN_TERM_ERROR("Special form 'set_seq_token' called with wrong "
+ "number of arguments in %T.", t, context,
+ *constant);
+ }
+ *constant = 0;
+ if ((ret = dmc_expr(context, heap, text, p[3], &c)) != retOk) {
+ return ret;
+ }
+ if (c) {
+ do_emit_constant(context, text, p[3]);
+ }
+ if ((ret = dmc_expr(context, heap, text, p[2], &c)) != retOk) {
+ return ret;
+ }
+ if (c) {
+ do_emit_constant(context, text, p[2]);
+ }
+ if (context->cflags & DCOMP_FAKE_DESTRUCTIVE) {
+ DMC_PUSH(*text, matchSetSeqTokenFake);
+ } else {
+ DMC_PUSH(*text, matchSetSeqToken);
+ }
+ --context->stack_used; /* Remove two and add one */
+ return retOk;
+}
+
+static DMCRet dmc_get_seq_token(DMCContext *context,
+ DMCHeap *heap,
+ DMC_STACK_TYPE(Uint) *text,
+ Eterm t,
+ int *constant)
+{
+ Eterm *p = tuple_val(t);
+ Uint a = arityval(*p);
+
+ if (!(context->cflags & DCOMP_TRACE)) {
+ RETURN_ERROR("Special form 'get_seq_token' used in wrong dialect.",
+ context,
+ *constant);
+ }
+ if (context->is_guard) {
+ RETURN_ERROR("Special form 'get_seq_token' called in "
+ "guard context.", context, *constant);
+ }
+ if (a != 1) {
+ RETURN_TERM_ERROR("Special form 'get_seq_token' called with "
+ "arguments in %T.", t, context,
+ *constant);
+ }
+
+ *constant = 0;
+ DMC_PUSH(*text, matchGetSeqToken);
+ context->eheap_need += (6 /* A 5-tuple is built */
+ + EXTERNAL_THING_HEAD_SIZE + 2 /* Sender can
+ be an external
+ pid */);
+ if (++context->stack_used > context->stack_need)
+ context->stack_need = context->stack_used;
+ return retOk;
+}
+
+
+
+static DMCRet dmc_display(DMCContext *context,
+ DMCHeap *heap,
+ DMC_STACK_TYPE(Uint) *text,
+ Eterm t,
+ int *constant)
+{
+ Eterm *p = tuple_val(t);
+ Uint a = arityval(*p);
+ DMCRet ret;
+ int c;
+
+
+ if (!(context->cflags & DCOMP_TRACE)) {
+ RETURN_ERROR("Special form 'display' used in wrong dialect.",
+ context,
+ *constant);
+ }
+ if (context->is_guard) {
+ RETURN_ERROR("Special form 'display' called in guard context.",
+ context,
+ *constant);
+ }
+
+ if (a != 2) {
+ RETURN_TERM_ERROR("Special form 'display' called with wrong "
+ "number of arguments in %T.", t, context,
+ *constant);
+ }
+ *constant = 0;
+ if ((ret = dmc_expr(context, heap, text, p[2], &c)) != retOk) {
+ return ret;
+ }
+ if (c) {
+ do_emit_constant(context, text, p[2]);
+ }
+ DMC_PUSH(*text, matchDisplay);
+ /* Push as much as we remove, stack_need is untouched */
+ return retOk;
+}
+
+static DMCRet dmc_process_dump(DMCContext *context,
+ DMCHeap *heap,
+ DMC_STACK_TYPE(Uint) *text,
+ Eterm t,
+ int *constant)
+{
+ Eterm *p = tuple_val(t);
+ Uint a = arityval(*p);
+
+ if (!(context->cflags & DCOMP_TRACE)) {
+ RETURN_ERROR("Special form 'process_dump' used in wrong dialect.",
+ context,
+ *constant);
+ }
+ if (context->is_guard) {
+ RETURN_ERROR("Special form 'process_dump' called in "
+ "guard context.", context, *constant);
+ }
+
+ if (a != 1) {
+ RETURN_TERM_ERROR("Special form 'process_dump' called with "
+ "arguments in %T.", t, context, *constant);
+ }
+ *constant = 0;
+ DMC_PUSH(*text, matchProcessDump); /* Creates binary */
+ if (++context->stack_used > context->stack_need)
+ context->stack_need = context->stack_used;
+ return retOk;
+}
+
+static DMCRet dmc_enable_trace(DMCContext *context,
+ DMCHeap *heap,
+ DMC_STACK_TYPE(Uint) *text,
+ Eterm t,
+ int *constant)
+{
+ Eterm *p = tuple_val(t);
+ Uint a = arityval(*p);
+ DMCRet ret;
+ int c;
+
+
+ if (!(context->cflags & DCOMP_TRACE)) {
+ RETURN_ERROR("Special form 'enable_trace' used in wrong dialect.",
+ context,
+ *constant);
+ }
+ if (context->is_guard) {
+ RETURN_ERROR("Special form 'enable_trace' called in guard context.",
+ context,
+ *constant);
+ }
+
+ switch (a) {
+ case 2:
+ *constant = 0;
+ if ((ret = dmc_expr(context, heap, text, p[2], &c)) != retOk) {
+ return ret;
+ }
+ if (c) {
+ do_emit_constant(context, text, p[2]);
+ }
+ DMC_PUSH(*text, matchEnableTrace);
+ /* Push as much as we remove, stack_need is untouched */
+ break;
+ case 3:
+ *constant = 0;
+ if ((ret = dmc_expr(context, heap, text, p[3], &c)) != retOk) {
+ return ret;
+ }
+ if (c) {
+ do_emit_constant(context, text, p[3]);
+ }
+ if ((ret = dmc_expr(context, heap, text, p[2], &c)) != retOk) {
+ return ret;
+ }
+ if (c) {
+ do_emit_constant(context, text, p[2]);
+ }
+ DMC_PUSH(*text, matchEnableTrace2);
+ --context->stack_used; /* Remove two and add one */
+ break;
+ default:
+ RETURN_TERM_ERROR("Special form 'enable_trace' called with wrong "
+ "number of arguments in %T.", t, context,
+ *constant);
+ }
+ return retOk;
+}
+
+static DMCRet dmc_disable_trace(DMCContext *context,
+ DMCHeap *heap,
+ DMC_STACK_TYPE(Uint) *text,
+ Eterm t,
+ int *constant)
+{
+ Eterm *p = tuple_val(t);
+ Uint a = arityval(*p);
+ DMCRet ret;
+ int c;
+
+
+ if (!(context->cflags & DCOMP_TRACE)) {
+ RETURN_ERROR("Special form 'disable_trace' used in wrong dialect.",
+ context,
+ *constant);
+ }
+ if (context->is_guard) {
+ RETURN_ERROR("Special form 'disable_trace' called in guard context.",
+ context,
+ *constant);
+ }
+
+ switch (a) {
+ case 2:
+ *constant = 0;
+ if ((ret = dmc_expr(context, heap, text, p[2], &c)) != retOk) {
+ return ret;
+ }
+ if (c) {
+ do_emit_constant(context, text, p[2]);
+ }
+ DMC_PUSH(*text, matchDisableTrace);
+ /* Push as much as we remove, stack_need is untouched */
+ break;
+ case 3:
+ *constant = 0;
+ if ((ret = dmc_expr(context, heap, text, p[3], &c)) != retOk) {
+ return ret;
+ }
+ if (c) {
+ do_emit_constant(context, text, p[3]);
+ }
+ if ((ret = dmc_expr(context, heap, text, p[2], &c)) != retOk) {
+ return ret;
+ }
+ if (c) {
+ do_emit_constant(context, text, p[2]);
+ }
+ DMC_PUSH(*text, matchDisableTrace2);
+ --context->stack_used; /* Remove two and add one */
+ break;
+ default:
+ RETURN_TERM_ERROR("Special form 'disable_trace' called with wrong "
+ "number of arguments in %T.", t, context,
+ *constant);
+ }
+ return retOk;
+}
+
+static DMCRet dmc_trace(DMCContext *context,
+ DMCHeap *heap,
+ DMC_STACK_TYPE(Uint) *text,
+ Eterm t,
+ int *constant)
+{
+ Eterm *p = tuple_val(t);
+ Uint a = arityval(*p);
+ DMCRet ret;
+ int c;
+
+
+ if (!(context->cflags & DCOMP_TRACE)) {
+ RETURN_ERROR("Special form 'trace' used in wrong dialect.",
+ context,
+ *constant);
+ }
+ if (context->is_guard) {
+ RETURN_ERROR("Special form 'trace' called in guard context.",
+ context,
+ *constant);
+ }
+
+ switch (a) {
+ case 3:
+ *constant = 0;
+ if ((ret = dmc_expr(context, heap, text, p[3], &c)) != retOk) {
+ return ret;
+ }
+ if (c) {
+ do_emit_constant(context, text, p[3]);
+ }
+ if ((ret = dmc_expr(context, heap, text, p[2], &c)) != retOk) {
+ return ret;
+ }
+ if (c) {
+ do_emit_constant(context, text, p[2]);
+ }
+ DMC_PUSH(*text, matchTrace2);
+ --context->stack_used; /* Remove two and add one */
+ break;
+ case 4:
+ *constant = 0;
+ if ((ret = dmc_expr(context, heap, text, p[4], &c)) != retOk) {
+ return ret;
+ }
+ if (c) {
+ do_emit_constant(context, text, p[4]);
+ }
+ if ((ret = dmc_expr(context, heap, text, p[3], &c)) != retOk) {
+ return ret;
+ }
+ if (c) {
+ do_emit_constant(context, text, p[3]);
+ }
+ if ((ret = dmc_expr(context, heap, text, p[2], &c)) != retOk) {
+ return ret;
+ }
+ if (c) {
+ do_emit_constant(context, text, p[2]);
+ }
+ DMC_PUSH(*text, matchTrace3);
+ context->stack_used -= 2; /* Remove three and add one */
+ break;
+ default:
+ RETURN_TERM_ERROR("Special form 'trace' called with wrong "
+ "number of arguments in %T.", t, context,
+ *constant);
+ }
+ return retOk;
+}
+
+
+
+static DMCRet dmc_caller(DMCContext *context,
+ DMCHeap *heap,
+ DMC_STACK_TYPE(Uint) *text,
+ Eterm t,
+ int *constant)
+{
+ Eterm *p = tuple_val(t);
+ Uint a = arityval(*p);
+
+ if (!(context->cflags & DCOMP_TRACE)) {
+ RETURN_ERROR("Special form 'caller' used in wrong dialect.",
+ context,
+ *constant);
+ }
+ if (context->is_guard) {
+ RETURN_ERROR("Special form 'caller' called in "
+ "guard context.", context, *constant);
+ }
+
+ if (a != 1) {
+ RETURN_TERM_ERROR("Special form 'caller' called with "
+ "arguments in %T.", t, context, *constant);
+ }
+ *constant = 0;
+ DMC_PUSH(*text, matchCaller); /* Creates binary */
+ context->eheap_need += 4; /* A 3-tuple is built */
+ if (++context->stack_used > context->stack_need)
+ context->stack_need = context->stack_used;
+ return retOk;
+}
+
+
+
+static DMCRet dmc_silent(DMCContext *context,
+ DMCHeap *heap,
+ DMC_STACK_TYPE(Uint) *text,
+ Eterm t,
+ int *constant)
+{
+ Eterm *p = tuple_val(t);
+ Uint a = arityval(*p);
+ DMCRet ret;
+ int c;
+
+ if (!(context->cflags & DCOMP_TRACE)) {
+ RETURN_ERROR("Special form 'silent' used in wrong dialect.",
+ context,
+ *constant);
+ }
+ if (context->is_guard) {
+ RETURN_ERROR("Special form 'silent' called in "
+ "guard context.", context, *constant);
+ }
+
+ if (a != 2) {
+ RETURN_TERM_ERROR("Special form 'silent' called with wrong "
+ "number of arguments in %T.", t, context,
+ *constant);
+ }
+ *constant = 0;
+ if ((ret = dmc_expr(context, heap, text, p[2], &c)) != retOk) {
+ return ret;
+ }
+ if (c) {
+ do_emit_constant(context, text, p[2]);
+ }
+ DMC_PUSH(*text, matchSilent);
+ DMC_PUSH(*text, matchPushC);
+ DMC_PUSH(*text, am_true);
+ /* Push as much as we remove, stack_need is untouched */
+ return retOk;
+}
+
+
+
+static DMCRet dmc_fun(DMCContext *context,
+ DMCHeap *heap,
+ DMC_STACK_TYPE(Uint) *text,
+ Eterm t,
+ int *constant)
+{
+ Eterm *p = tuple_val(t);
+ Uint a = arityval(*p);
+ int c;
+ int i;
+ DMCRet ret;
+ DMCGuardBif *b;
+
+ /* Special forms. */
+ switch (p[1]) {
+ case am_const:
+ return dmc_const(context, heap, text, t, constant);
+ case am_and:
+ return dmc_and(context, heap, text, t, constant);
+ case am_or:
+ return dmc_or(context, heap, text, t, constant);
+ case am_andalso:
+ case am_andthen:
+ return dmc_andalso(context, heap, text, t, constant);
+ case am_orelse:
+ return dmc_orelse(context, heap, text, t, constant);
+ case am_self:
+ return dmc_self(context, heap, text, t, constant);
+ case am_message:
+ return dmc_message(context, heap, text, t, constant);
+ case am_is_seq_trace:
+ return dmc_is_seq_trace(context, heap, text, t, constant);
+ case am_set_seq_token:
+ return dmc_set_seq_token(context, heap, text, t, constant);
+ case am_get_seq_token:
+ return dmc_get_seq_token(context, heap, text, t, constant);
+ case am_return_trace:
+ return dmc_return_trace(context, heap, text, t, constant);
+ case am_exception_trace:
+ return dmc_exception_trace(context, heap, text, t, constant);
+ case am_display:
+ return dmc_display(context, heap, text, t, constant);
+ case am_process_dump:
+ return dmc_process_dump(context, heap, text, t, constant);
+ case am_enable_trace:
+ return dmc_enable_trace(context, heap, text, t, constant);
+ case am_disable_trace:
+ return dmc_disable_trace(context, heap, text, t, constant);
+ case am_trace:
+ return dmc_trace(context, heap, text, t, constant);
+ case am_caller:
+ return dmc_caller(context, heap, text, t, constant);
+ case am_silent:
+ return dmc_silent(context, heap, text, t, constant);
+ case am_set_tcw:
+ if (context->cflags & DCOMP_FAKE_DESTRUCTIVE) {
+ b = dmc_lookup_bif(am_set_tcw_fake, ((int) a) - 1);
+ } else {
+ b = dmc_lookup_bif(p[1], ((int) a) - 1);
+ }
+ break;
+ default:
+ b = dmc_lookup_bif(p[1], ((int) a) - 1);
+ }
+
+
+ if (b == NULL) {
+ if (context->err_info != NULL) {
+ /* Ugly, should define a better RETURN_TERM_ERROR interface... */
+ char buff[100];
+ sprintf(buff, "Function %%T/%d does_not_exist.", (int)a - 1);
+ RETURN_TERM_ERROR(buff, p[1], context, *constant);
+ } else {
+ return retFail;
+ }
+ }
+ ASSERT(b->arity == ((int) a) - 1);
+ if (! (b->flags &
+ (1 <<
+ ((context->cflags & DCOMP_DIALECT_MASK) +
+ (context->is_guard ? DBIF_GUARD : DBIF_BODY))))) {
+ /* Body clause used in wrong context. */
+ if (context->err_info != NULL) {
+ /* Ugly, should define a better RETURN_TERM_ERROR interface... */
+ char buff[100];
+ sprintf(buff,
+ "Function %%T/%d cannot be called in this context.",
+ (int)a - 1);
+ RETURN_TERM_ERROR(buff, p[1], context, *constant);
+ } else {
+ return retFail;
+ }
+ }
+
+ *constant = 0;
+
+ for (i = a; i > 1; --i) {
+ if ((ret = dmc_expr(context, heap, text, p[i], &c)) != retOk)
+ return ret;
+ if (c)
+ do_emit_constant(context, text, p[i]);
+ }
+ switch (b->arity) {
+ case 0:
+ DMC_PUSH(*text, matchCall0);
+ break;
+ case 1:
+ DMC_PUSH(*text, matchCall1);
+ break;
+ case 2:
+ DMC_PUSH(*text, matchCall2);
+ break;
+ case 3:
+ DMC_PUSH(*text, matchCall3);
+ break;
+ default:
+ erl_exit(1,"ets:match() internal error, "
+ "guard with more than 3 arguments.");
+ }
+ DMC_PUSH(*text, (Uint) b->biff);
+ context->stack_used -= (((int) a) - 2);
+ if (context->stack_used > context->stack_need)
+ context->stack_need = context->stack_used;
+ return retOk;
+}
+
+static DMCRet dmc_expr(DMCContext *context,
+ DMCHeap *heap,
+ DMC_STACK_TYPE(Uint) *text,
+ Eterm t,
+ int *constant)
+{
+ DMCRet ret;
+ Eterm tmp;
+ Eterm *p;
+
+
+ switch (t & _TAG_PRIMARY_MASK) {
+ case TAG_PRIMARY_LIST:
+ if ((ret = dmc_list(context, heap, text, t, constant)) != retOk)
+ return ret;
+ break;
+ case TAG_PRIMARY_BOXED:
+ if (!BOXED_IS_TUPLE(t)) {
+ goto simple_term;
+ }
+ p = tuple_val(t);
+#ifdef HARDDEBUG
+ erts_fprintf(stderr,"%d %d %d %d\n",arityval(*p),is_tuple(tmp = p[1]),
+ is_atom(p[1]),db_is_variable(p[1]));
+#endif
+ if (arityval(*p) == 1 && is_tuple(tmp = p[1])) {
+ if ((ret = dmc_tuple(context, heap, text, tmp, constant)) != retOk)
+ return ret;
+ } else if (arityval(*p) >= 1 && is_atom(p[1]) &&
+ !(db_is_variable(p[1]) >= 0)) {
+ if ((ret = dmc_fun(context, heap, text, t, constant)) != retOk)
+ return ret;
+ } else
+ RETURN_TERM_ERROR("%T is neither a function call, nor a tuple "
+ "(tuples are written {{ ... }}).", t,
+ context, *constant);
+ break;
+ case TAG_PRIMARY_IMMED1:
+ if (db_is_variable(t) >= 0) {
+ if ((ret = dmc_variable(context, heap, text, t, constant))
+ != retOk)
+ return ret;
+ break;
+ } else if (t == am_DollarUnderscore) {
+ if ((ret = dmc_whole_expression(context, heap, text, t, constant))
+ != retOk)
+ return ret;
+ break;
+ } else if (t == am_DollarDollar) {
+ if ((ret = dmc_all_bindings(context, heap, text, t, constant))
+ != retOk)
+ return ret;
+ break;
+ }
+ /* Fall through */
+ default:
+ simple_term:
+ *constant = 1;
+ }
+ return retOk;
+}
+
+
+static DMCRet compile_guard_expr(DMCContext *context,
+ DMCHeap *heap,
+ DMC_STACK_TYPE(Uint) *text,
+ Eterm l)
+{
+ DMCRet ret;
+ int constant;
+ Eterm t;
+
+ if (l != NIL) {
+ if (!is_list(l))
+ RETURN_ERROR("Match expression is not a list.",
+ context, constant);
+ if (!(context->is_guard)) {
+ DMC_PUSH(*text, matchCatch);
+ }
+ while (is_list(l)) {
+ constant = 0;
+ t = CAR(list_val(l));
+ if ((ret = dmc_expr(context, heap, text, t, &constant)) !=
+ retOk)
+ return ret;
+ if (constant) {
+ do_emit_constant(context, text, t);
+ }
+ l = CDR(list_val(l));
+ if (context->is_guard) {
+ DMC_PUSH(*text,matchTrue);
+ } else {
+ DMC_PUSH(*text,matchWaste);
+ }
+ --context->stack_used;
+ }
+ if (l != NIL)
+ RETURN_ERROR("Match expression is not a proper list.",
+ context, constant);
+ if (!(context->is_guard) && (context->cflags & DCOMP_TABLE)) {
+ ASSERT(matchWaste == DMC_TOP(*text));
+ (void) DMC_POP(*text);
+ DMC_PUSH(*text, matchReturn); /* Same impact on stack as
+ matchWaste */
+ }
+ }
+ return retOk;
+}
+
+
+
+
+/*
+** Match compilation utility code
+*/
+
+/*
+** Handling of bif's in match guard expressions
+*/
+
+static DMCGuardBif *dmc_lookup_bif(Eterm t, int arity)
+{
+ /*
+ ** Place for optimization, bsearch is slower than inlining it...
+ */
+ DMCGuardBif node = {0,NULL,0};
+ node.name = t;
+ node.arity = arity;
+ return bsearch(&node,
+ guard_tab,
+ sizeof(guard_tab) / sizeof(DMCGuardBif),
+ sizeof(DMCGuardBif),
+ (int (*)(const void *, const void *)) &cmp_guard_bif);
+}
+
+#ifdef DMC_DEBUG
+static Eterm dmc_lookup_bif_reversed(void *f)
+{
+ int i;
+ for (i = 0; i < (sizeof(guard_tab) / sizeof(DMCGuardBif)); ++i)
+ if (f == guard_tab[i].biff)
+ return guard_tab[i].name;
+ return am_undefined;
+}
+#endif
+
+/* For sorting. */
+static int cmp_uint(void *a, void *b)
+{
+ if (*((unsigned *)a) < *((unsigned *)b))
+ return -1;
+ else
+ return (*((unsigned *)a) > *((unsigned *)b));
+}
+
+static int cmp_guard_bif(void *a, void *b)
+{
+ int ret;
+ if (( ret = ((int) atom_val(((DMCGuardBif *) a)->name)) -
+ ((int) atom_val(((DMCGuardBif *) b)->name)) ) == 0) {
+ ret = ((DMCGuardBif *) a)->arity - ((DMCGuardBif *) b)->arity;
+ }
+ return ret;
+}
+
+/*
+** Compact the variables in a match expression i e make {$1, $100, $1000}
+** become {$0,$1,$2}.
+*/
+static int match_compact(ErlHeapFragment *expr, DMCErrInfo *err_info)
+{
+ int i, j, a, n, x;
+ DMC_STACK_TYPE(unsigned) heap;
+ Eterm *p;
+ char buff[25] = "$"; /* large enough for 64 bit to */
+ int ret;
+
+ DMC_INIT_STACK(heap);
+
+ p = expr->mem;
+ i = expr->size;
+ while (i--) {
+ if (is_thing(*p)) {
+ a = thing_arityval(*p);
+ ASSERT(a <= i);
+ i -= a;
+ p += a;
+ } else if (is_atom(*p) && (n = db_is_variable(*p)) >= 0) {
+ x = DMC_STACK_NUM(heap);
+ for (j = 0; j < x && DMC_PEEK(heap,j) != n; ++j)
+ ;
+
+ if (j == x)
+ DMC_PUSH(heap,n);
+ }
+ ++p;
+ }
+ qsort(DMC_STACK_DATA(heap), DMC_STACK_NUM(heap), sizeof(unsigned),
+ (int (*)(const void *, const void *)) &cmp_uint);
+
+ if (err_info != NULL) { /* lint needs a translation table */
+ err_info->var_trans = erts_alloc(ERTS_ALC_T_DB_TRANS_TAB,
+ sizeof(unsigned)*DMC_STACK_NUM(heap));
+ sys_memcpy(err_info->var_trans, DMC_STACK_DATA(heap),
+ DMC_STACK_NUM(heap) * sizeof(unsigned));
+ err_info->num_trans = DMC_STACK_NUM(heap);
+ }
+
+ p = expr->mem;
+ i = expr->size;
+ while (i--) {
+ if (is_thing(*p)) {
+ a = thing_arityval(*p);
+ i -= a;
+ p += a;
+ } else if (is_atom(*p) && (n = db_is_variable(*p)) >= 0) {
+ x = DMC_STACK_NUM(heap);
+#ifdef HARDDEBUG
+ erts_fprintf(stderr, "%T", *p); /* the variable atom being renamed */
+#endif
+ for (j = 0; j < x && DMC_PEEK(heap,j) != n; ++j)
+ ;
+ ASSERT(j < x);
+ sprintf(buff+1,"%u", (unsigned) j);
+ /* Yes, writing directly into terms, they ARE off heap */
+ *p = am_atom_put(buff, strlen(buff));
+ }
+ ++p;
+ }
+ ret = DMC_STACK_NUM(heap);
+ DMC_FREE(heap);
+ return ret;
+}
+
+/*
+** Simple size object that takes care of function calls and constant tuples
+*/
+static Uint my_size_object(Eterm t)
+{
+ Uint sum = 0;
+ Eterm tmp;
+ Eterm *p;
+ switch (t & _TAG_PRIMARY_MASK) {
+ case TAG_PRIMARY_LIST:
+ sum += 2 + my_size_object(CAR(list_val(t))) +
+ my_size_object(CDR(list_val(t)));
+ break;
+ case TAG_PRIMARY_BOXED:
+ if ((((*boxed_val(t)) &
+ _TAG_HEADER_MASK) >> _TAG_PRIMARY_SIZE) !=
+ (_TAG_HEADER_ARITYVAL >> _TAG_PRIMARY_SIZE)) {
+ goto simple_term;
+ }
+
+ if (arityval(*tuple_val(t)) == 1 && is_tuple(tmp = tuple_val(t)[1])) {
+ Uint i,n;
+ p = tuple_val(tmp);
+ n = arityval(p[0]);
+ sum += 1 + n;
+ for (i = 1; i <= n; ++i)
+ sum += my_size_object(p[i]);
+ } else if (arityval(*tuple_val(t)) == 2 &&
+ is_atom(tmp = tuple_val(t)[1]) &&
+ tmp == am_const) {
+ sum += size_object(tuple_val(t)[2]);
+ } else {
+ erl_exit(1,"Internal error, sizing unrecognized object in "
+ "(d)ets:match compilation.");
+ }
+ break;
+ default:
+ simple_term:
+ sum += size_object(t);
+ break;
+ }
+ return sum;
+}
+
+static Eterm my_copy_struct(Eterm t, Eterm **hp, ErlOffHeap* off_heap)
+{
+ Eterm ret = NIL, a, b;
+ Eterm *p;
+ Uint sz;
+ switch (t & _TAG_PRIMARY_MASK) {
+ case TAG_PRIMARY_LIST:
+ a = my_copy_struct(CAR(list_val(t)), hp, off_heap);
+ b = my_copy_struct(CDR(list_val(t)), hp, off_heap);
+ ret = CONS(*hp, a, b);
+ *hp += 2;
+ break;
+ case TAG_PRIMARY_BOXED:
+ if (BOXED_IS_TUPLE(t)) {
+ if (arityval(*tuple_val(t)) == 1 &&
+ is_tuple(a = tuple_val(t)[1])) {
+ Uint i,n;
+ Eterm *savep = *hp;
+ ret = make_tuple(savep);
+ p = tuple_val(a);
+ n = arityval(p[0]);
+ *hp += n + 1;
+ *savep++ = make_arityval(n);
+ for(i = 1; i <= n; ++i)
+ *savep++ = my_copy_struct(p[i], hp, off_heap);
+ } else if (arityval(*tuple_val(t)) == 2 &&
+ is_atom(a = tuple_val(t)[1]) &&
+ a == am_const) {
+ /* A {const, XXX} expression */
+ b = tuple_val(t)[2];
+ sz = size_object(b);
+ ret = copy_struct(b,sz,hp,off_heap);
+ } else {
+ erl_exit(1, "Trying to constant-copy non constant expression "
+ "0x%08x in (d)ets:match compilation.", (unsigned long) t);
+ }
+ } else {
+ sz = size_object(t);
+ ret = copy_struct(t,sz,hp,off_heap);
+ }
+ break;
+ default:
+ ret = t;
+ }
+ return ret;
+}
+
+/*
+** Compiled match bif interface
+*/
+/*
+** erlang:match_spec_test(MatchAgainst, MatchSpec, Type) ->
+** {ok, Return, Flags, Errors} | {error, Errors}
+** MatchAgainst -> if Type == trace: list() else tuple()
+** MatchSpec -> MatchSpec with body corresponding to Type
+** Type -> trace | table (only trace implemented in R5C)
+** Return -> if Type == trace TraceReturn else {BodyReturn, VariableBindings}
+** TraceReturn -> {true | false | term()}
+** BodyReturn -> term()
+** VariableBindings -> [term(), ...]
+** Errors -> [OneError, ...]
+** OneError -> {error, string()} | {warning, string()}
+** Flags -> [Flag, ...]
+** Flag -> return_trace (currently the only flag)
+*/
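+/*
+** Illustrative usage (a sketch, not code from this file): testing a trace
+** match spec from the Erlang shell might look like
+**
+**   erlang:match_spec_test([foo], [{['$1'],[],[{message,'$1'}]}], trace).
+**
+** which, for this assumed spec, would return a term of the shape
+** {ok, Return, Flags, Errors} as documented above.
+*/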
+BIF_RETTYPE match_spec_test_3(BIF_ALIST_3)
+{
+ Eterm res;
+#ifdef DMC_DEBUG
+ if (BIF_ARG_3 == am_atom_put("dis",3)) {
+ test_disassemble_next = 1;
+ BIF_RET(am_true);
+ } else
+#endif
+ if (BIF_ARG_3 == am_trace) {
+ res = match_spec_test(BIF_P, BIF_ARG_1, BIF_ARG_2, 1);
+ if (is_value(res)) {
+ BIF_RET(res);
+ }
+ } else if (BIF_ARG_3 == am_table) {
+ res = match_spec_test(BIF_P, BIF_ARG_1, BIF_ARG_2, 0);
+ if (is_value(res)) {
+ BIF_RET(res);
+ }
+ }
+ BIF_ERROR(BIF_P, BADARG);
+}
+
+static Eterm match_spec_test(Process *p, Eterm against, Eterm spec, int trace)
+{
+ Eterm lint_res;
+ Binary *mps;
+ Eterm res;
+ Eterm ret;
+ Eterm flg;
+ Eterm *hp;
+ Eterm *arr;
+ int n;
+ Eterm l;
+ Uint32 ret_flags;
+ Uint sz;
+ Eterm *save_cp;
+
+ if (trace && !(is_list(against) || against == NIL)) {
+ return THE_NON_VALUE;
+ }
+ if (trace) {
+ lint_res = db_match_set_lint(p, spec, DCOMP_TRACE | DCOMP_FAKE_DESTRUCTIVE);
+ mps = db_match_set_compile(p, spec, DCOMP_TRACE | DCOMP_FAKE_DESTRUCTIVE);
+ } else {
+ lint_res = db_match_set_lint(p, spec, DCOMP_TABLE | DCOMP_FAKE_DESTRUCTIVE);
+ mps = db_match_set_compile(p, spec, DCOMP_TABLE | DCOMP_FAKE_DESTRUCTIVE);
+ }
+
+ if (mps == NULL) {
+ hp = HAlloc(p,3);
+ ret = TUPLE2(hp, am_error, lint_res);
+ } else {
+#ifdef DMC_DEBUG
+ if (test_disassemble_next) {
+ test_disassemble_next = 0;
+ db_match_dis(mps);
+ }
+#endif /* DMC_DEBUG */
+ l = against;
+ n = 0;
+ while (is_list(l)) {
+ ++n;
+ l = CDR(list_val(l));
+ }
+ if (trace) {
+ if (n)
+ arr = erts_alloc(ERTS_ALC_T_DB_TMP, sizeof(Eterm) * n);
+ else
+ arr = NULL;
+ l = against;
+ n = 0;
+ while (is_list(l)) {
+ arr[n] = CAR(list_val(l));
+ ++n;
+ l = CDR(list_val(l));
+ }
+ } else {
+ n = 0;
+ arr = (Eterm *) against;
+ }
+
+ /* We are in the context of a BIF,
+ {caller} should return 'undefined' */
+ save_cp = p->cp;
+ p->cp = NULL;
+ res = erts_match_set_run(p, mps, arr, n, &ret_flags);
+ p->cp = save_cp;
+ if (is_non_value(res)) {
+ res = am_false;
+ }
+ sz = size_object(res);
+ if (ret_flags & MATCH_SET_EXCEPTION_TRACE) sz += 2;
+ if (ret_flags & MATCH_SET_RETURN_TRACE) sz += 2;
+ hp = HAlloc(p, 5 + sz);
+ res = copy_struct(res, sz, &hp, &MSO(p));
+ flg = NIL;
+ if (ret_flags & MATCH_SET_EXCEPTION_TRACE) {
+ flg = CONS(hp, am_exception_trace, flg);
+ hp += 2;
+ }
+ if (ret_flags & MATCH_SET_RETURN_TRACE) {
+ flg = CONS(hp, am_return_trace, flg);
+ hp += 2;
+ }
+ if (trace && arr != NULL) {
+ erts_free(ERTS_ALC_T_DB_TMP, arr);
+ }
+ erts_bin_free(mps);
+ ret = TUPLE4(hp, am_atom_put("ok",2), res, flg, lint_res);
+ }
+ return ret;
+}
+
+static Eterm seq_trace_fake(Process *p, Eterm arg1)
+{
+ Eterm result = seq_trace_info_1(p,arg1);
+ if (is_tuple(result) && *tuple_val(result) == make_arityval(2)) {
+ return (tuple_val(result))[2];
+ }
+ return result;
+}
+
+#ifdef DMC_DEBUG
+/*
+** Disassemble match program
+*/
+static void db_match_dis(Binary *bp)
+{
+ MatchProg *prog = Binary2MatchProg(bp);
+ Uint *t = prog->text;
+ Uint n;
+ Eterm p;
+ int first;
+ ErlHeapFragment *tmp;
+
+ while (t < prog->prog_end) {
+ switch (*t) {
+ case matchTryMeElse:
+ ++t;
+ n = *t;
+ ++t;
+ erts_printf("TryMeElse\t%bpu\n", n);
+ break;
+ case matchArray:
+ ++t;
+ n = *t;
+ ++t;
+ erts_printf("Array\t%bpu\n", n);
+ break;
+ case matchArrayBind:
+ ++t;
+ n = *t;
+ ++t;
+ erts_printf("ArrayBind\t%bpu\n", n);
+ break;
+ case matchTuple:
+ ++t;
+ n = *t;
+ ++t;
+ erts_printf("Tuple\t%bpu\n", n);
+ break;
+ case matchPushT:
+ ++t;
+ n = *t;
+ ++t;
+ erts_printf("PushT\t%bpu\n", n);
+ break;
+ case matchPushL:
+ ++t;
+ erts_printf("PushL\n");
+ break;
+ case matchPop:
+ ++t;
+ erts_printf("Pop\n");
+ break;
+ case matchBind:
+ ++t;
+ n = *t;
+ ++t;
+ erts_printf("Bind\t%bpu\n", n);
+ break;
+ case matchCmp:
+ ++t;
+ n = *t;
+ ++t;
+ erts_printf("Cmp\t%bpu\n", n);
+ break;
+ case matchEqBin:
+ ++t;
+ p = (Eterm) *t;
+ ++t;
+ erts_printf("EqBin\t%p (%T)\n", t, p);
+ break;
+ case matchEqRef:
+ ++t;
+ n = thing_arityval(*t);
+ ++t;
+ erts_printf("EqRef\t(%d) {", (int) n);
+ first = 1;
+ while (n--) {
+ if (first)
+ first = 0;
+ else
+ erts_printf(", ");
+#ifdef ARCH_64
+ erts_printf("0x%016bpx", *t);
+#else
+ erts_printf("0x%08bpx", *t);
+#endif
+ ++t;
+ }
+ erts_printf("}\n");
+ break;
+ case matchEqBig:
+ ++t;
+ n = thing_arityval(*t);
+ ++t;
+ erts_printf("EqBig\t(%d) {", (int) n);
+ first = 1;
+ while (n--) {
+ if (first)
+ first = 0;
+ else
+ erts_printf(", ");
+#ifdef ARCH_64
+ erts_printf("0x%016bpx", *t);
+#else
+ erts_printf("0x%08bpx", *t);
+#endif
+ ++t;
+ }
+ erts_printf("}\n");
+ break;
+ case matchEqFloat:
+ ++t;
+ {
+ double num;
+ memcpy(&num, t, sizeof(num)); /* don't overrun 'num' on 64 bit */
+ t += 2;
+ erts_printf("EqFloat\t%f\n", num);
+ }
+ break;
+ case matchEq:
+ ++t;
+ p = (Eterm) *t;
+ ++t;
+ erts_printf("Eq \t%T\n", p);
+ break;
+ case matchList:
+ ++t;
+ erts_printf("List\n");
+ break;
+ case matchHalt:
+ ++t;
+ erts_printf("Halt\n");
+ break;
+ case matchSkip:
+ ++t;
+ erts_printf("Skip\n");
+ break;
+ case matchPushC:
+ ++t;
+ p = (Eterm) *t;
+ ++t;
+ erts_printf("PushC\t%T\n", p);
+ break;
+ case matchConsA:
+ ++t;
+ erts_printf("ConsA\n");
+ break;
+ case matchConsB:
+ ++t;
+ erts_printf("ConsB\n");
+ break;
+ case matchMkTuple:
+ ++t;
+ n = *t;
+ ++t;
+ erts_printf("MkTuple\t%bpu\n", n);
+ break;
+ case matchOr:
+ ++t;
+ n = *t;
+ ++t;
+ erts_printf("Or\t%bpu\n", n);
+ break;
+ case matchAnd:
+ ++t;
+ n = *t;
+ ++t;
+ erts_printf("And\t%bpu\n", n);
+ break;
+ case matchOrElse:
+ ++t;
+ n = *t;
+ ++t;
+ erts_printf("OrElse\t%bpu\n", n);
+ break;
+ case matchAndAlso:
+ ++t;
+ n = *t;
+ ++t;
+ erts_printf("AndAlso\t%bpu\n", n);
+ break;
+ case matchCall0:
+ ++t;
+ p = dmc_lookup_bif_reversed((void *) *t);
+ ++t;
+ erts_printf("Call0\t%T\n", p);
+ break;
+ case matchCall1:
+ ++t;
+ p = dmc_lookup_bif_reversed((void *) *t);
+ ++t;
+ erts_printf("Call1\t%T\n", p);
+ break;
+ case matchCall2:
+ ++t;
+ p = dmc_lookup_bif_reversed((void *) *t);
+ ++t;
+ erts_printf("Call2\t%T\n", p);
+ break;
+ case matchCall3:
+ ++t;
+ p = dmc_lookup_bif_reversed((void *) *t);
+ ++t;
+ erts_printf("Call3\t%T\n", p);
+ break;
+ case matchPushV:
+ ++t;
+ n = (Uint) *t;
+ ++t;
+ erts_printf("PushV\t%bpu\n", n);
+ break;
+ case matchTrue:
+ ++t;
+ erts_printf("True\n");
+ break;
+ case matchPushExpr:
+ ++t;
+ erts_printf("PushExpr\n");
+ break;
+ case matchPushArrayAsList:
+ ++t;
+ erts_printf("PushArrayAsList\n");
+ break;
+ case matchPushArrayAsListU:
+ ++t;
+ erts_printf("PushArrayAsListU\n");
+ break;
+ case matchSelf:
+ ++t;
+ erts_printf("Self\n");
+ break;
+ case matchWaste:
+ ++t;
+ erts_printf("Waste\n");
+ break;
+ case matchReturn:
+ ++t;
+ erts_printf("Return\n");
+ break;
+ case matchProcessDump:
+ ++t;
+ erts_printf("ProcessDump\n");
+ break;
+ case matchDisplay:
+ ++t;
+ erts_printf("Display\n");
+ break;
+ case matchIsSeqTrace:
+ ++t;
+ erts_printf("IsSeqTrace\n");
+ break;
+ case matchSetSeqToken:
+ ++t;
+ erts_printf("SetSeqToken\n");
+ break;
+ case matchSetSeqTokenFake:
+ ++t;
+ erts_printf("SetSeqTokenFake\n");
+ break;
+ case matchGetSeqToken:
+ ++t;
+ erts_printf("GetSeqToken\n");
+ break;
+ case matchSetReturnTrace:
+ ++t;
+ erts_printf("SetReturnTrace\n");
+ break;
+ case matchSetExceptionTrace:
+ ++t;
+ erts_printf("SetReturnTrace\n");
+ break;
+ case matchCatch:
+ ++t;
+ erts_printf("Catch\n");
+ break;
+ case matchEnableTrace:
+ ++t;
+ erts_printf("EnableTrace\n");
+ break;
+ case matchDisableTrace:
+ ++t;
+ erts_printf("DisableTrace\n");
+ break;
+ case matchEnableTrace2:
+ ++t;
+ erts_printf("EnableTrace2\n");
+ break;
+ case matchDisableTrace2:
+ ++t;
+ erts_printf("DisableTrace2\n");
+ break;
+ case matchTrace2:
+ ++t;
+ erts_printf("Trace2\n");
+ break;
+ case matchTrace3:
+ ++t;
+ erts_printf("Trace3\n");
+ break;
+ case matchCaller:
+ ++t;
+ erts_printf("Caller\n");
+ break;
+ default:
+ erts_printf("??? (0x%08x)\n", *t);
+ ++t;
+ break;
+ }
+ }
+ erts_printf("\n\nterm_save: {");
+ first = 1;
+ for (tmp = prog->term_save; tmp; tmp = tmp->next) {
+ if (first)
+ first = 0;
+ else
+ erts_printf(", ");
+ erts_printf("0x%08x", (unsigned long) tmp);
+ }
+ erts_printf("}\n");
+ erts_printf("num_bindings: %d\n", prog->num_bindings);
+ erts_printf("heap_size: %bpu\n", prog->heap_size);
+ erts_printf("eheap_offset: %bpu\n", prog->eheap_offset);
+ erts_printf("stack_offset: %bpu\n", prog->stack_offset);
+ erts_printf("text: 0x%08x\n", (unsigned long) prog->text);
+ erts_printf("stack_size: %d (words)\n", prog->heap_size-prog->stack_offset);
+
+}
+
+#endif /* DMC_DEBUG */
+
+
diff --git a/erts/emulator/beam/erl_db_util.h b/erts/emulator/beam/erl_db_util.h
new file mode 100644
index 0000000000..4fc7b4f52e
--- /dev/null
+++ b/erts/emulator/beam/erl_db_util.h
@@ -0,0 +1,405 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 1998-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+#ifndef _DB_UTIL_H
+#define _DB_UTIL_H
+
+#include "global.h"
+#include "erl_message.h"
+
+/*#define HARDDEBUG 1*/
+
+#ifdef DEBUG
+/*
+** DMC_DEBUG does NOT need DEBUG, but DEBUG needs DMC_DEBUG
+*/
+#define DMC_DEBUG 1
+#endif
+
+/*
+ * These values can be returned from the functions performing the
+ * BIF operation for different types of tables. When the
+ * actual operations have been performed, the BIF function
+ * checks for negative returns and issues BIF_ERRORS based
+ * upon these values.
+ */
+#define DB_ERROR_NONE 0 /* No error */
+#define DB_ERROR_BADITEM -1 /* The item was malformed, i.e. not a
+ tuple or too small */
+#define DB_ERROR_BADTABLE -2 /* The table is inconsistent */
+#define DB_ERROR_SYSRES -3 /* Out of system resources */
+#define DB_ERROR_BADKEY -4 /* Returned if a key that should
+ exist does not. */
+#define DB_ERROR_BADPARAM -5 /* Returned if a specified slot does
+ not exist (hash table only) or
+ the state parameter in db_match_object
+ is broken.*/
+#define DB_ERROR_UNSPEC -10 /* Unspecified error */
+
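+/* A sketch of the convention described above (illustrative only; assumes
+** a DbTable* tb and its method table, see DbTableMethod below):
+**
+**   int cret = tb->common.meth->db_put(tb, obj, 0);
+**   if (cret != DB_ERROR_NONE)
+**       BIF_ERROR(BIF_P, BADARG);   -- negative cret maps to a BIF error
+*/
+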
+
+/*
+ * A datatype for a database entry stored out of a process heap
+ */
+typedef struct db_term {
+ ErlOffHeap off_heap; /* Off heap data for term. */
+ Uint size; /* Size of term in "words" */
+ Eterm tpl[1]; /* Untagged "constant pointer" to top tuple */
+ /* (assumed to be first in buffer) */
+} DbTerm;
+
+/* "Assign" a value to DbTerm.tpl */
+#define DBTERM_SET_TPL(dbtermPtr,tplPtr) ASSERT((tplPtr)==(dbtermPtr->tpl))
+/* Get start of term buffer */
+#define DBTERM_BUF(dbtermPtr) ((dbtermPtr)->tpl)
+
+union db_table;
+typedef union db_table DbTable;
+
+/* Info about a database entry while it's being updated
+ * (by update_counter or update_element)
+ */
+typedef struct {
+ DbTable* tb;
+ DbTerm* dbterm;
+ void** bp; /* {Hash|Tree}DbTerm** */
+ Uint new_size;
+ int mustResize;
+ void* lck;
+} DbUpdateHandle;
+
+
+typedef struct db_table_method
+{
+ int (*db_create)(Process *p, DbTable* tb);
+ int (*db_first)(Process* p,
+ DbTable* tb, /* [in out] */
+ Eterm* ret /* [out] */);
+ int (*db_next)(Process* p,
+ DbTable* tb, /* [in out] */
+ Eterm key, /* [in] */
+ Eterm* ret /* [out] */);
+ int (*db_last)(Process* p,
+ DbTable* tb, /* [in out] */
+ Eterm* ret /* [out] */);
+ int (*db_prev)(Process* p,
+ DbTable* tb, /* [in out] */
+ Eterm key,
+ Eterm* ret);
+ int (*db_put)(DbTable* tb, /* [in out] */
+ Eterm obj,
+ int key_clash_fail); /* DB_ERROR_BADKEY if key exists */
+ int (*db_get)(Process* p,
+ DbTable* tb, /* [in out] */
+ Eterm key,
+ Eterm* ret);
+ int (*db_get_element)(Process* p,
+ DbTable* tb, /* [in out] */
+ Eterm key,
+ int index,
+ Eterm* ret);
+ int (*db_member)(DbTable* tb, /* [in out] */
+ Eterm key,
+ Eterm* ret);
+ int (*db_erase)(DbTable* tb, /* [in out] */
+ Eterm key,
+ Eterm* ret);
+ int (*db_erase_object)(DbTable* tb, /* [in out] */
+ Eterm obj,
+ Eterm* ret);
+ int (*db_slot)(Process* p,
+ DbTable* tb, /* [in out] */
+ Eterm slot,
+ Eterm* ret);
+ int (*db_select_chunk)(Process* p,
+ DbTable* tb, /* [in out] */
+ Eterm pattern,
+ Sint chunk_size,
+ int reverse,
+ Eterm* ret);
+ int (*db_select)(Process* p,
+ DbTable* tb, /* [in out] */
+ Eterm pattern,
+ int reverse,
+ Eterm* ret);
+ int (*db_select_delete)(Process* p,
+ DbTable* tb, /* [in out] */
+ Eterm pattern,
+ Eterm* ret);
+ int (*db_select_continue)(Process* p,
+ DbTable* tb, /* [in out] */
+ Eterm continuation,
+ Eterm* ret);
+ int (*db_select_delete_continue)(Process* p,
+ DbTable* tb, /* [in out] */
+ Eterm continuation,
+ Eterm* ret);
+ int (*db_select_count)(Process* p,
+ DbTable* tb, /* [in out] */
+ Eterm pattern,
+ Eterm* ret);
+ int (*db_select_count_continue)(Process* p,
+ DbTable* tb, /* [in out] */
+ Eterm continuation,
+ Eterm* ret);
+
+ int (*db_delete_all_objects)(Process* p,
+ DbTable* db /* [in out] */ );
+
+ int (*db_free_table)(DbTable* db /* [in out] */ );
+ int (*db_free_table_continue)(DbTable* db); /* [in out] */
+
+ void (*db_print)(int to,
+ void* to_arg,
+ int show,
+ DbTable* tb /* [in out] */ );
+
+ void (*db_foreach_offheap)(DbTable* db, /* [in out] */
+ void (*func)(ErlOffHeap *, void *),
+ void *arg);
+ void (*db_check_table)(DbTable* tb);
+
+ /* Lookup a dbterm for updating. Return false if not found.
+ */
+ int (*db_lookup_dbterm)(DbTable*, Eterm key,
+ DbUpdateHandle* handle); /* [out] */
+
+ /* Must be called for each db_lookup_dbterm that returned true,
+ ** even if dbterm was not updated.
+ */
+ void (*db_finalize_dbterm)(DbUpdateHandle* handle);
+
+} DbTableMethod;
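+
+/* An illustrative pairing of the last two callbacks above (a sketch;
+** assumes a DbTable* tb, a key, and the helpers declared further down):
+**
+**   DbUpdateHandle handle;
+**   if (tb->common.meth->db_lookup_dbterm(tb, key, &handle)) {
+**       db_do_update_element(&handle, position, newval);
+**       tb->common.meth->db_finalize_dbterm(&handle);   -- always paired
+**   }
+*/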
+
+/*
+ * This structure contains data for all different types of database
+ * tables. Note that these fields must match the same fields
+ * in the table-type specific structures.
+ * The reason it is placed here and not in db.h is that some table
+ * operations may be the same on different types of tables.
+ */
+
+typedef struct db_fixation {
+ Eterm pid;
+ Uint counter;
+ struct db_fixation *next;
+} DbFixation;
+
+
+typedef struct db_table_common {
+ erts_refc_t ref;
+ erts_refc_t fixref; /* fixation counter */
+#ifdef ERTS_SMP
+ erts_smp_rwmtx_t rwlock; /* rw lock on table */
+ erts_smp_mtx_t fixlock; /* Protects fixations,megasec,sec,microsec */
+ int is_thread_safe; /* No fine locking inside table needed */
+ Uint32 type; /* table type, *read only* after creation */
+#endif
+ Eterm owner; /* Pid of the creator */
+ Eterm heir; /* Pid of the heir */
+ Eterm heir_data; /* To send in ETS-TRANSFER (is_immed or (DbTerm*)) */
+ SysTimeval heir_started; /* To further identify the heir */
+ Eterm the_name; /* an atom */
+ Eterm id; /* atom | integer */
+ DbTableMethod* meth; /* table methods */
+ erts_smp_atomic_t nitems; /* Total number of items in table */
+ erts_smp_atomic_t memory_size;/* Total memory size. NOTE: in bytes! */
+ Uint megasec,sec,microsec; /* Last fixation time */
+ DbFixation* fixations; /* List of processes who have done safe_fixtable,
+ "local" fixations not included. */
+ /* All 32-bit fields */
+ Uint32 status; /* bit masks defined below */
+ int slot; /* slot index in meta_main_tab */
+ int keypos; /* defaults to 1 */
+} DbTableCommon;
+
+/* These are status bit patterns */
+#define DB_NORMAL (1 << 0)
+#define DB_PRIVATE (1 << 1)
+#define DB_PROTECTED (1 << 2)
+#define DB_PUBLIC (1 << 3)
+#define DB_BAG (1 << 4)
+#define DB_SET (1 << 5)
+/*#define DB_LHASH (1 << 6)*/
+#define DB_FINE_LOCKED (1 << 7) /* fine grained locking enabled */
+#define DB_DUPLICATE_BAG (1 << 8)
+#define DB_ORDERED_SET (1 << 9)
+#define DB_DELETE (1 << 10) /* table is being deleted */
+
+#define ERTS_ETS_TABLE_TYPES (DB_BAG|DB_SET|DB_DUPLICATE_BAG|DB_ORDERED_SET|DB_FINE_LOCKED)
+
+#define IS_HASH_TABLE(Status) (!!((Status) & \
+ (DB_BAG | DB_SET | DB_DUPLICATE_BAG)))
+#define IS_TREE_TABLE(Status) (!!((Status) & \
+ DB_ORDERED_SET))
+#define NFIXED(T) (erts_refc_read(&(T)->common.fixref,0))
+#define IS_FIXED(T) (NFIXED(T) != 0)
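+
+/* For example (illustrative): a public ordered_set table has both
+ DB_PUBLIC and DB_ORDERED_SET set in common.status, so IS_TREE_TABLE()
+ is true and IS_HASH_TABLE() is false for it. */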
+
+Eterm erts_ets_copy_object(Eterm, Process*);
+
+/* Optimised version of copy_object: immediates are assigned directly,
+ other terms are copied */
+#define COPY_OBJECT(obj, p, objp) \
+ if (IS_CONST(obj)) { *(objp) = (obj); } \
+ else { *objp = erts_ets_copy_object(obj, p); }
+
+#define DB_READ (DB_PROTECTED|DB_PUBLIC)
+#define DB_WRITE DB_PUBLIC
+#define DB_INFO (DB_PROTECTED|DB_PUBLIC|DB_PRIVATE)
+
+/* tb is an DbTableCommon and obj is an Eterm (tagged) */
+#define TERM_GETKEY(tb, obj) db_getkey((tb)->common.keypos, (obj))
+
+#define ONLY_WRITER(P,T) (((T)->common.status & (DB_PRIVATE|DB_PROTECTED)) \
+ && (T)->common.owner == (P)->id)
+
+#define ONLY_READER(P,T) (((T)->common.status & DB_PRIVATE) && \
+(T)->common.owner == (P)->id)
+
+/* Function prototypes */
+Eterm db_get_trace_control_word_0(Process *p);
+Eterm db_set_trace_control_word_1(Process *p, Eterm val);
+
+void db_initialize_util(void);
+Eterm db_getkey(int keypos, Eterm obj);
+void db_free_term_data(DbTerm* p);
+void* db_get_term(DbTableCommon *tb, DbTerm* old, Uint offset, Eterm obj);
+int db_has_variable(Eterm obj);
+int db_is_variable(Eterm obj);
+void db_do_update_element(DbUpdateHandle* handle,
+ Sint position,
+ Eterm newval);
+void db_finalize_update_element(DbUpdateHandle* handle);
+Eterm db_add_counter(Eterm** hpp, Eterm counter, Eterm incr);
+Eterm db_match_set_lint(Process *p, Eterm matchexpr, Uint flags);
+Binary *db_match_set_compile(Process *p, Eterm matchexpr,
+ Uint flags);
+void erts_db_match_prog_destructor(Binary *);
+
+typedef struct match_prog {
+ ErlHeapFragment *term_save; /* Only if needed, a list of message
+ buffers for off heap copies
+ (i.e. binaries)*/
+ int single_variable; /* ets:match needs to know this. */
+ int num_bindings; /* Size of heap */
+ /* The following two are only filled in when match specs
+ are used for tracing */
+ struct erl_heap_fragment *saved_program_buf;
+ Eterm saved_program;
+ Uint heap_size; /* size of: heap + eheap + stack */
+ Uint eheap_offset;
+ Uint stack_offset;
+#ifdef DMC_DEBUG
+ Uint* prog_end; /* End of program */
+#endif
+ Uint text[1]; /* Beginning of program */
+} MatchProg;
+
+/*
+ * The heap-eheap-stack block of a MatchProg is nowadays allocated
+ * when the match program is run.
+ * - heap: variable bindings
+ * - eheap: erlang heap storage
+ * - eheap: a "large enough" stack
+ */
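+
+/*
+ * A rough sketch of that block, assuming the offsets in MatchProg above
+ * (illustrative only):
+ *
+ *   base                base+eheap_offset      base+stack_offset
+ *   | variable bindings | erlang heap storage  | stack              |
+ *   |<--------------------- heap_size words ---------------------->|
+ */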
+
+#define DMC_ERR_STR_LEN 100
+
+typedef enum { dmcWarning, dmcError} DMCErrorSeverity;
+
+typedef struct dmc_error {
+ char error_string[DMC_ERR_STR_LEN + 1]; /* printf format string
+ with %d for the variable
+ number (if applicable) */
+ int variable; /* -1 if no variable is referenced
+ in error string */
+ struct dmc_error *next;
+ DMCErrorSeverity severity; /* Error or warning */
+} DMCError;
+
+typedef struct dmc_err_info {
+ unsigned int *var_trans; /* Translations of variable names,
+ initialized to NULL
+ and freed with sys_free if != NULL
+ after compilation */
+ int num_trans;
+ int error_added; /* indicates if the error list contains
+ any fatal errors (dmcError severity) */
+ DMCError *first; /* List of errors */
+} DMCErrInfo;
+
+/*
+** Compilation flags
+**
+** The dialect is in the 3 least significant bits and the values are spaced
+** at least 2 (decimal) apart, that's why ((Uint) 2) isn't used. This is to be
+** able to add DBIF_GUARD or DBIF_BODY to it for use in the match_spec bif
+** table. The rest of the word is used like ordinary flags, one bit for each
+** flag. Note that DCOMP_TABLE and DCOMP_TRACE are mutually exclusive.
+*/
+#define DCOMP_TABLE ((Uint) 1) /* Ets and dets. The body returns a value,
+ * and the parameter to the execution is a tuple. */
+#define DCOMP_TRACE ((Uint) 4) /* Trace. More functions are allowed, and the
+ * parameter to the execution will be an array. */
+#define DCOMP_DIALECT_MASK ((Uint) 0x7) /* To mask out the bits marking
+ dialect */
+#define DCOMP_FAKE_DESTRUCTIVE ((Uint) 8) /* When this is active, no setting of
+ trace control words or seq_trace tokens will be done. */
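+
+/* An illustrative sketch of how these values combine (this mirrors the
+** check in dmc_fun() in erl_db_util.c): a guard bif is permitted if its
+** flags word has the bit for the current dialect plus calling context set,
+** i.e.
+**
+**   bif->flags & (1 << ((cflags & DCOMP_DIALECT_MASK)
+**                       + (is_guard ? DBIF_GUARD : DBIF_BODY)))
+**
+** with DBIF_GUARD/DBIF_BODY as used in erl_db_util.c.
+*/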
+
+
+Binary *db_match_compile(Eterm *matchexpr, Eterm *guards,
+ Eterm *body, int num_matches,
+ Uint flags,
+ DMCErrInfo *err_info);
+/* Returns newly allocated MatchProg binary with refc == 0*/
+Eterm db_prog_match(Process *p, Binary *prog, Eterm term, int arity,
+ Uint32 *return_flags /* Zeroed on enter */);
+/* Returns DB_ERROR_NONE if it matches, 1 if it does not match, and some
+ db error on error. */
+DMCErrInfo *db_new_dmc_err_info(void);
+/* Returns allocated error info, where errors are collected for lint. */
+Eterm db_format_dmc_err_info(Process *p, DMCErrInfo *ei);
+/* Formats an error info structure into a list of tuples. */
+void db_free_dmc_err_info(DMCErrInfo *ei);
+/* Completely frees an error info structure, including all recorded
+ errors */
+Eterm db_make_mp_binary(Process *p, Binary *mp, Eterm **hpp);
+/* Converts a match program to an Erlang "magic" binary to be returned to
+ user space; increments the reference counter. */
+int erts_db_is_compiled_ms(Eterm term);
+
+/*
+** Convenience when compiling into Binary structures
+*/
+#define IsMatchProgBinary(BP) \
+ (((BP)->flags & BIN_FLAG_MAGIC) \
+ && ERTS_MAGIC_BIN_DESTRUCTOR((BP)) == erts_db_match_prog_destructor)
+
+#define Binary2MatchProg(BP) \
+ (ASSERT_EXPR(IsMatchProgBinary((BP))), \
+ ((MatchProg *) ERTS_MAGIC_BIN_DATA((BP))))
+/*
+** Debugging
+*/
+#ifdef HARDDEBUG
+void db_check_tables(void); /* in db.c */
+#define CHECK_TABLES() db_check_tables()
+#else
+#define CHECK_TABLES()
+#endif
+
+#endif /* _DB_UTIL_H */
diff --git a/erts/emulator/beam/erl_debug.c b/erts/emulator/beam/erl_debug.c
new file mode 100644
index 0000000000..34ce87bc5d
--- /dev/null
+++ b/erts/emulator/beam/erl_debug.c
@@ -0,0 +1,899 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 1998-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+
+#include "sys.h"
+#include "erl_vm.h"
+#include "global.h"
+#include "erl_process.h"
+#include "erl_nmgc.h"
+#include "big.h"
+#include "bif.h"
+#include "beam_catches.h"
+#include "erl_debug.h"
+
+#define WITHIN(ptr, x, y) ((x) <= (ptr) && (ptr) < (y))
+
+#if defined(HYBRID)
+#if defined(INCREMENTAL)
+/* Hybrid + Incremental */
+#define IN_HEAP(p, ptr) \
+ (WITHIN((ptr), p->heap, p->hend) || \
+ (OLD_HEAP(p) && WITHIN((ptr), OLD_HEAP(p), OLD_HEND(p))) || \
+ WITHIN((ptr), global_heap, global_hend) || \
+ (inc_fromspc && WITHIN((ptr), inc_fromspc, inc_fromend)) || \
+ WITHIN((ptr), global_old_heap, global_old_hend))
+
+#define IN_MA(ptr) \
+ (WITHIN((ptr), global_heap, global_hend) || \
+ (inc_fromspc && WITHIN((ptr), inc_fromspc, inc_fromend)) || \
+ WITHIN((ptr), global_old_heap, global_old_hend))
+#else
+/* Hybrid */
+#define IN_HEAP(p, ptr) \
+ (WITHIN((ptr), p->heap, p->hend) || \
+ (OLD_HEAP(p) && WITHIN((ptr), OLD_HEAP(p), OLD_HEND(p))) || \
+ WITHIN((ptr), global_heap, global_hend) || \
+ (global_old_heap && WITHIN((ptr),global_old_heap,global_old_hend)))
+#endif
+#else
+/* Private */
+#define IN_HEAP(p, ptr) \
+ (WITHIN((ptr), p->heap, p->hend) || \
+ (OLD_HEAP(p) && WITHIN((ptr), OLD_HEAP(p), OLD_HEND(p))))
+#endif
+
+
+#ifdef __GNUC__
+/*
+ * Does not work in Microsoft C. Since this is debugging code that will
+ * hardly be used on Windows, get rid of it unless we have the GNU compiler.
+ */
+#define PTR_SIZE (2*(int)sizeof(long))
+
+static const char dashes[PTR_SIZE+3] = {
+ [0 ... PTR_SIZE+1] = '-'
+};
+#endif
+
+#if defined(DEBUG) && defined(__GNUC__)
+
+/*
+ * This file defines functions for use within a debugger like gdb
+ * and the declarations below are just there to make gcc quiet.
+ */
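+
+/*
+ * Typical (illustrative) gdb usage, assuming a Process *p and an Eterm x
+ * are in scope at a breakpoint:
+ *
+ *   (gdb) call ptd(p, x)        -- prints the term x owned by p
+ *   (gdb) call pps(p, p->stop)  -- prints p's stack down to p->stop
+ */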
+
+void pps(Process*, Eterm*);
+void ptd(Process*, Eterm);
+void paranoid_display(int, void*, Process*, Eterm);
+static int dcount;
+
+static int pdisplay1(int to, void *to_arg, Process* p, Eterm obj);
+
+void ptd(Process* p, Eterm x)
+{
+ pdisplay1(ERTS_PRINT_STDERR, NULL, p, x);
+ erts_putc(ERTS_PRINT_STDERR, NULL, '\n');
+}
+
+/*
+ * Paranoid version of display which doesn't crash as easily if there
+ * are errors in the data structures.
+ */
+
+void
+paranoid_display(int to, void *to_arg, Process* p, Eterm obj)
+{
+ dcount = 100000;
+ pdisplay1(to, to_arg, p, obj);
+}
+
+static int
+pdisplay1(int to, void *to_arg, Process* p, Eterm obj)
+{
+ int i, k;
+ Eterm* nobj;
+
+ if (dcount-- <= 0)
+ return(1);
+
+ if (is_CP(obj)) {
+ erts_print(to, to_arg, "<cp/header:%0*lX",PTR_SIZE,obj);
+ return 0;
+ }
+
+ switch (tag_val_def(obj)) {
+ case NIL_DEF:
+ erts_print(to, to_arg, "[]");
+ break;
+ case ATOM_DEF:
+ erts_print(to, to_arg, "%T", obj);
+ break;
+ case SMALL_DEF:
+ erts_print(to, to_arg, "%ld", signed_val(obj));
+ break;
+
+ case BIG_DEF:
+ nobj = big_val(obj);
+ if (!IN_HEAP(p, nobj)) {
+ erts_print(to, to_arg, "#<bad big %X>#", obj);
+ return 1;
+ }
+
+ i = BIG_SIZE(nobj);
+ if (BIG_SIGN(nobj))
+ erts_print(to, to_arg, "-#integer(%d) = {", i);
+ else
+ erts_print(to, to_arg, "#integer(%d) = {", i);
+ erts_print(to, to_arg, "%d", BIG_DIGIT(nobj, 0));
+ for (k = 1; k < i; k++)
+ erts_print(to, to_arg, ",%d", BIG_DIGIT(nobj, k));
+ erts_putc(to, to_arg, '}');
+ break;
+ case REF_DEF:
+ case EXTERNAL_REF_DEF: {
+ Uint32 *ref_num;
+ erts_print(to, to_arg, "#Ref<%lu", ref_channel_no(obj));
+ ref_num = ref_numbers(obj);
+ for (i = ref_no_of_numbers(obj)-1; i >= 0; i--)
+ erts_print(to, to_arg, ",%lu", ref_num[i]);
+ erts_print(to, to_arg, ">");
+ break;
+ }
+ case PID_DEF:
+ case EXTERNAL_PID_DEF:
+ erts_print(to, to_arg, "<%lu.%lu.%lu>",
+ pid_channel_no(obj),
+ pid_number(obj),
+ pid_serial(obj));
+ break;
+ case PORT_DEF:
+ case EXTERNAL_PORT_DEF:
+ erts_print(to, to_arg, "#Port<%lu.%lu>",
+ port_channel_no(obj),
+ port_number(obj));
+ break;
+ case LIST_DEF:
+ erts_putc(to, to_arg, '[');
+ nobj = list_val(obj);
+ while (1) {
+ if (!IN_HEAP(p, nobj)) {
+ erts_print(to, to_arg, "#<bad list %X>", obj);
+ return 1;
+ }
+ if (pdisplay1(to, to_arg, p, *nobj++) != 0)
+ return(1);
+ if (is_not_list(*nobj))
+ break;
+ erts_putc(to, to_arg, ',');
+ nobj = list_val(*nobj);
+ }
+ if (is_not_nil(*nobj)) {
+ erts_putc(to, to_arg, '|');
+ if (pdisplay1(to, to_arg, p, *nobj) != 0)
+ return(1);
+ }
+ erts_putc(to, to_arg, ']');
+ break;
+ case TUPLE_DEF:
+ nobj = tuple_val(obj); /* pointer to arity */
+ i = arityval(*nobj); /* arity */
+ erts_putc(to, to_arg, '{');
+ while (i--) {
+ if (pdisplay1(to, to_arg, p, *++nobj) != 0) return(1);
+ if (i >= 1) erts_putc(to, to_arg, ',');
+ }
+ erts_putc(to, to_arg, '}');
+ break;
+ case FLOAT_DEF: {
+ FloatDef ff;
+ GET_DOUBLE(obj, ff);
+#ifdef _OSE_
+ erts_print(to, to_arg, "%e", ff.fd);
+#else
+ erts_print(to, to_arg, "%.20e", ff.fd);
+#endif
+ }
+ break;
+ case BINARY_DEF:
+ erts_print(to, to_arg, "#Bin");
+ break;
+ default:
+ erts_print(to, to_arg, "unknown object %x", obj);
+ }
+ return(0);
+}
+
+void
+pps(Process* p, Eterm* stop)
+{
+ int to = ERTS_PRINT_STDOUT;
+ void *to_arg = NULL;
+ Eterm* sp = STACK_START(p) - 1;
+
+ if (stop <= STACK_END(p)) {
+ stop = STACK_END(p) + 1;
+ }
+
+ while(sp >= stop) {
+ erts_print(to, to_arg, "%0*lx: ", PTR_SIZE, (Eterm) sp);
+ if (is_catch(*sp)) {
+ erts_print(to, to_arg, "catch %d", (Uint)catch_pc(*sp));
+ } else {
+ paranoid_display(to, to_arg, p, *sp);
+ }
+ erts_putc(to, to_arg, '\n');
+ sp--;
+ }
+}
+
+#endif /* DEBUG && __GNUC__ */
+
+static int verify_eterm(Process *p,Eterm element);
+static int verify_eterm(Process *p,Eterm element)
+{
+ Eterm *ptr;
+ ErlHeapFragment* mbuf;
+
+ switch (primary_tag(element)) {
+ case TAG_PRIMARY_LIST: ptr = list_val(element); break;
+ case TAG_PRIMARY_BOXED: ptr = boxed_val(element); break;
+ default: /* Immediate or header/cp */ return 1;
+ }
+
+ if (p) {
+ if (IN_HEAP(p, ptr))
+ return 1;
+
+ for (mbuf = p->mbuf; mbuf; mbuf = mbuf->next) {
+ if (WITHIN(ptr, &mbuf->mem[0], &mbuf->mem[0] + mbuf->size)) {
+ return 1;
+ }
+ }
+ }
+#ifdef INCREMENTAL
+ else {
+ if (IN_MA(ptr))
+ return 1;
+ }
+#endif
+
+ return 0;
+}
+
+void erts_check_stack(Process *p)
+{
+ Eterm *elemp;
+ Eterm *stack_start = p->heap + p->heap_sz;
+ Eterm *stack_end = p->htop;
+
+ if (p->stop > stack_start)
+ erl_exit(1,
+ "<%lu.%lu.%lu>: Stack underflow\n",
+ internal_pid_channel_no(p->id),
+ internal_pid_number(p->id),
+ internal_pid_serial(p->id));
+
+ if (p->stop < stack_end)
+ erl_exit(1,
+ "<%lu.%lu.%lu>: Stack overflow\n",
+ internal_pid_channel_no(p->id),
+ internal_pid_number(p->id),
+ internal_pid_serial(p->id));
+
+ for (elemp = p->stop; elemp < stack_start; elemp++) {
+ int in_mbuf = 0;
+ Eterm *ptr;
+ ErlHeapFragment* mbuf;
+ switch (primary_tag(*elemp)) {
+ case TAG_PRIMARY_LIST: ptr = list_val(*elemp); break;
+ case TAG_PRIMARY_BOXED: ptr = boxed_val(*elemp); break;
+ default: /* Immediate or cp */ continue;
+ }
+ if (IN_HEAP(p, ptr))
+ continue;
+ for (mbuf = p->mbuf; mbuf; mbuf = mbuf->next)
+ if (WITHIN(ptr, &mbuf->mem[0], &mbuf->mem[0] + mbuf->size)) {
+ in_mbuf = 1;
+ break;
+ }
+ if (in_mbuf)
+ continue;
+
+ erl_exit(1,
+ "<%lu.%lu.%lu>: Wild stack pointer\n",
+ internal_pid_channel_no(p->id),
+ internal_pid_number(p->id),
+ internal_pid_serial(p->id));
+ }
+
+}
+
+#if defined(CHECK_FOR_HOLES)
+static void check_memory(Eterm *start, Eterm *end);
+
+void erts_check_for_holes(Process* p)
+{
+ ErlHeapFragment* hf;
+ Eterm* start;
+
+ start = p->last_htop ? p->last_htop : HEAP_START(p);
+ check_memory(start, HEAP_TOP(p));
+ p->last_htop = HEAP_TOP(p);
+
+ for (hf = MBUF(p); hf != 0; hf = hf->next) {
+ if (hf == p->last_mbuf) {
+ break;
+ }
+ check_memory(hf->mem, hf->mem+hf->size);
+ }
+ p->last_mbuf = MBUF(p);
+}
+
+static void check_memory(Eterm *start, Eterm *end)
+{
+ Eterm *pos = start;
+
+ while (pos < end) {
+ Eterm hval = *pos++;
+
+ if (hval == ERTS_HOLE_MARKER) {
+ erts_fprintf(stderr,"%s, line %d: ERTS_HOLE_MARKER found at 0x%0*lx\n",
+ __FILE__, __LINE__,PTR_SIZE,(unsigned long)(pos-1));
+ print_untagged_memory(start,end); /* DEBUGSTUFF */
+ abort();
+ } else if (is_thing(hval)) {
+ pos += (thing_arityval(hval));
+ }
+ }
+}
+#endif
+
+#ifdef __GNUC__
+
+/*
+ * erts_check_heap and erts_check_memory will run through the heap
+ * silently if everything is ok. If there are strange (untagged) data
+ * in the heap or wild pointers, the system will be halted with an
+ * error message.
+ */
+void erts_check_heap(Process *p)
+{
+ ErlHeapFragment* bp = MBUF(p);
+
+ erts_check_memory(p,HEAP_START(p),HEAP_TOP(p));
+ if (OLD_HEAP(p) != NULL) {
+ erts_check_memory(p,OLD_HEAP(p),OLD_HTOP(p));
+ }
+
+ while (bp) {
+ erts_check_memory(p,bp->mem,bp->mem + bp->size);
+ bp = bp->next;
+ }
+}
+
+void erts_check_memory(Process *p, Eterm *start, Eterm *end)
+{
+ Eterm *pos = start;
+
+ while (pos < end) {
+ Eterm hval = *pos++;
+
+#ifdef DEBUG
+ if (hval == DEBUG_BAD_WORD) {
+ print_untagged_memory(start, end);
+ erl_exit(1, "Uninitialized HAlloc'ed memory found @ 0x%0*lx!\n",
+ PTR_SIZE,(unsigned long)(pos - 1));
+ }
+#endif
+
+ if (is_thing(hval)) {
+ pos += thing_arityval(hval);
+ continue;
+ }
+
+ if (verify_eterm(p,hval))
+ continue;
+
+ erl_exit(1, "Wild pointer found @ 0x%0*lx!\n",
+ PTR_SIZE,(unsigned long)(pos - 1));
+ }
+}
+
+void verify_process(Process *p)
+{
+#define VERIFY_AREA(name,ptr,sz) { \
+ int n = (sz); \
+ while (n--) if(!verify_eterm(p,*(ptr+n))) \
+ erl_exit(1,"Wild pointer found in " name " of %T!\n",p->id); }
+
+#define VERIFY_ETERM(name,eterm) { \
+ if(!verify_eterm(p,eterm)) \
+ erl_exit(1,"Wild pointer found in " name " of %T!\n",p->id); }
+
+
+ ErlMessage* mp = p->msg.first;
+
+ VERBOSE(DEBUG_MEMORY,("Verify process: %T...\n",p->id));
+
+ while (mp != NULL) {
+ VERIFY_ETERM("message term",ERL_MESSAGE_TERM(mp));
+ VERIFY_ETERM("message token",ERL_MESSAGE_TOKEN(mp));
+ mp = mp->next;
+ }
+
+ erts_check_stack(p);
+ erts_check_heap(p);
+
+ if (p->dictionary)
+ VERIFY_AREA("dictionary",p->dictionary->data, p->dictionary->used);
+ VERIFY_ETERM("seq trace token",p->seq_trace_token);
+ VERIFY_ETERM("group leader",p->group_leader);
+ VERIFY_ETERM("fvalue",p->fvalue);
+ VERIFY_ETERM("ftrace",p->ftrace);
+
+#ifdef HYBRID
+ VERIFY_AREA("rrma",p->rrma,p->nrr);
+#endif
+
+ VERBOSE(DEBUG_MEMORY,("...done\n"));
+
+#undef VERIFY_AREA
+#undef VERIFY_ETERM
+}
+
+void verify_everything(void)
+{
+#ifdef HYBRID
+ Uint i;
+ Uint n = erts_num_active_procs;
+
+#ifdef INCREMENTAL_FREE_SIZES_NEEDS_TO_BE_TAGGED_AS_HEADERS_WITH_ARITY
+ INC_Page *page = inc_used_mem;
+#endif
+
+ for (i = 0; i < n; i++) {
+ verify_process(erts_active_procs[i]);
+ }
+
+ erts_check_memory(NULL,global_heap,global_htop);
+
+#ifdef INCREMENTAL_FREE_SIZES_NEEDS_TO_BE_TAGGED_AS_HEADERS_WITH_ARITY
+ while (page)
+ {
+ Eterm *end = page + INC_PAGE_SIZE;
+ Eterm *pos = page->start;
+
+ while( pos < end) {
+ Eterm val = *pos++;
+ if(is_header(val))
+ pos += thing_arityval(val);
+ else
+ verify_eterm(NULL,val);
+ }
+ page = page->next;
+ }
+#endif
+#endif /* HYBRID */
+}
+
+/*
+ * print_untagged_memory will print the contents of given memory area.
+ */
+void print_untagged_memory(Eterm *pos, Eterm *end)
+{
+ int i = 0;
+ erts_printf("| %*s | Range: 0x%0*lx - 0x%0*lx%*s|\n",
+ PTR_SIZE, "",
+ PTR_SIZE,(unsigned long)pos,
+ PTR_SIZE,(unsigned long)(end - 1),2 * PTR_SIZE - 2,"");
+ erts_printf("| %-*s | %-*s |\n",PTR_SIZE+2,"Address",
+ 4*PTR_SIZE+11,"Contents");
+ erts_printf("|-%s-|-%s-%s-%s-%s-|\n",dashes,dashes,dashes,dashes,dashes);
+ while( pos < end ) {
+ if (i == 0)
+ erts_printf("| 0x%0*lx | ", PTR_SIZE, (unsigned long)pos);
+ erts_printf("0x%0*lx ",PTR_SIZE,(unsigned long)*pos);
+ pos++; i++;
+ if (i == 4) {
+ erts_printf("|\n");
+ i = 0;
+ }
+ }
+ while (i && i < 4) {
+ erts_printf("%*s",PTR_SIZE+3,"");
+ i++;
+ }
+ if (i != 0)
+ erts_printf("|\n");
+ erts_printf("+-%s-+-%s-%s-%s-%s-+\n",dashes,dashes,dashes,dashes,dashes);
+}
+
+/*
+ * print_tagged_memory will print contents of given memory area and
+ * display it as if it was tagged Erlang terms (which it hopefully
+ * is). This function knows about forwarding pointers to be able to
+ * print a heap during garbage collection. erts_printf("%T",val)
+ * does not know about forwarding pointers though, so it will still
+ * crash if they are encountered...
+ */
+void print_tagged_memory(Eterm *pos, Eterm *end)
+{
+ erts_printf("+-%s-+-%s-+\n",dashes,dashes);
+ erts_printf("| 0x%0*lx - 0x%0*lx |\n",
+ PTR_SIZE,(unsigned long)pos,
+ PTR_SIZE,(unsigned long)(end - 1));
+ erts_printf("| %-*s | %-*s |\n",PTR_SIZE,"Address",PTR_SIZE,"Contents");
+ erts_printf("|-%s-|-%s-|\n",dashes,dashes);
+ while( pos < end ) {
+ Eterm val = pos[0];
+ erts_printf("| 0x%0*lx | 0x%0*lx | ",
+ PTR_SIZE,(unsigned long)pos, PTR_SIZE,(unsigned long)val);
+ ++pos;
+ if( is_arity_value(val) ) {
+ erts_printf("Arity(%lu)", arityval(val));
+ } else if( is_thing(val) ) {
+ unsigned int ari = thing_arityval(val);
+ erts_printf("Thing Arity(%u) Tag(%lu)", ari, thing_subtag(val));
+ while( ari ) {
+ erts_printf("\n| 0x%0*lx | 0x%0*lx | THING",
+ PTR_SIZE, (unsigned long)pos,
+ PTR_SIZE, (unsigned long)*pos);
+ ++pos;
+ --ari;
+ }
+ } else {
+ switch (primary_tag(val)) {
+ case TAG_PRIMARY_BOXED:
+ if (!is_header(*boxed_val(val))) {
+ erts_printf("Moved -> 0x%0*lx\n",PTR_SIZE,
+ (unsigned long)*boxed_val(val));
+ continue;
+ }
+ break;
+
+ case TAG_PRIMARY_LIST:
+ if (is_non_value(*list_val(val))) {
+ erts_printf("Moved -> 0x%0*lx\n",PTR_SIZE,
+ (unsigned long)*(list_val(val) + 1));
+ continue;
+ }
+ break;
+ }
+ erts_printf("%.30T", val);
+ }
+ erts_printf("\n");
+ }
+ erts_printf("+-%s-+-%s-+\n",dashes,dashes);
+}
+
+#ifdef HYBRID
+void print_ma_info(void)
+{
+ erts_printf("Message Area (start - top - end): "
+ "0x%0*lx - 0x%0*lx - 0x%0*lx\n",
+ PTR_SIZE, (unsigned long)global_heap,
+ PTR_SIZE, (unsigned long)global_htop,
+ PTR_SIZE, (unsigned long)global_hend);
+#ifndef INCREMENTAL
+ erts_printf(" High water: 0x%0*lx "
+ "Old gen: 0x%0*lx - 0x%0*lx - 0x%0*lx\n",
+ PTR_SIZE, (unsigned long)global_high_water,
+ PTR_SIZE, (unsigned long)global_old_heap,
+ PTR_SIZE, (unsigned long)global_old_htop,
+ PTR_SIZE, (unsigned long)global_old_hend);
+#endif
+}
+
+void print_message_area(void)
+{
+ Eterm *pos = global_heap;
+ Eterm *end = global_htop;
+
+ erts_printf("From: 0x%0*lx to 0x%0*lx\n",
+ PTR_SIZE,(unsigned long)pos,PTR_SIZE,(unsigned long)end);
+ erts_printf("(Old generation: 0x%0*lx to 0x%0*lx\n",
+ PTR_SIZE, (unsigned long)global_old_heap,
+ PTR_SIZE, (unsigned long)global_old_hend);
+ erts_printf("| %-*s | %-*s |\n",PTR_SIZE,"Address",PTR_SIZE,"Contents");
+ erts_printf("|-%s-|-%s-|\n",dashes,dashes);
+ while( pos < end ) {
+ Eterm val = pos[0];
+ erts_printf("| 0x%0*lx | 0x%0*lx | ",
+ PTR_SIZE,(unsigned long)pos,PTR_SIZE,(unsigned long)val);
+ ++pos;
+ if( is_arity_value(val) ) {
+ erts_printf("Arity(%lu)", arityval(val));
+ } else if( is_thing(val) ) {
+ unsigned int ari = thing_arityval(val);
+ erts_printf("Thing Arity(%u) Tag(%lu)", ari, thing_subtag(val));
+ while( ari ) {
+ erts_printf("\n| 0x%0*lx | 0x%0*lx | THING",
+ PTR_SIZE, (unsigned long)pos,
+ PTR_SIZE, (unsigned long)*pos);
+ ++pos;
+ --ari;
+ }
+ } else
+ erts_printf("%.30T", val);
+ erts_printf("\n");
+ }
+ erts_printf("+-%s-+-%s-+\n",dashes,dashes);
+}
+
+void check_message_area(void)
+{
+ Eterm *pos = global_heap;
+ Eterm *end = global_htop;
+
+ while( pos < end ) {
+ Eterm val = *pos++;
+ if(is_header(val))
+ pos += thing_arityval(val);
+ else if(!is_immed(val))
+ if ((ptr_val(val) < global_heap || ptr_val(val) >= global_htop) &&
+ (ptr_val(val) < global_old_heap ||
+ ptr_val(val) >= global_old_hend))
+ {
+ erts_printf("check_message_area: Stray pointer found\n");
+ print_message_area();
+ erts_printf("Crashing to make it look real...\n");
+ pos = 0;
+ }
+ }
+}
+#endif /* HYBRID */
+
+static void print_process_memory(Process *p);
+static void print_process_memory(Process *p)
+{
+ ErlHeapFragment* bp = MBUF(p);
+
+ erts_printf("==============================\n");
+ erts_printf("|| Memory info for %T ||\n",p->id);
+ erts_printf("==============================\n");
+
+ erts_printf("-- %-*s ---%s-%s-%s-%s--\n",
+ PTR_SIZE, "PCB", dashes, dashes, dashes, dashes);
+
+ if (p->msg.first != NULL) {
+ ErlMessage* mp;
+ erts_printf(" Message Queue:\n");
+ mp = p->msg.first;
+ while (mp != NULL) {
+ erts_printf("| 0x%0*lx | 0x%0*lx |\n",PTR_SIZE,
+ ERL_MESSAGE_TERM(mp),PTR_SIZE,ERL_MESSAGE_TOKEN(mp));
+ mp = mp->next;
+ }
+ }
+
+ if (p->dictionary != NULL) {
+ int n = p->dictionary->used;
+ Eterm *ptr = p->dictionary->data;
+ erts_printf(" Dictionary: ");
+ while (n--) erts_printf("0x%0*lx ",PTR_SIZE,(unsigned long)ptr++);
+ erts_printf("\n");
+ }
+
+ if (p->arity > 0) {
+ int n = p->arity;
+ Eterm *ptr = p->arg_reg;
+ erts_printf(" Argument Registers: ");
+ while (n--) erts_printf("0x%0*lx ",PTR_SIZE,(unsigned long)*ptr++);
+ erts_printf("\n");
+ }
+
+ erts_printf(" Trace Token: 0x%0*lx\n",PTR_SIZE,p->seq_trace_token);
+ erts_printf(" Group Leader: 0x%0*lx\n",PTR_SIZE,p->group_leader);
+ erts_printf(" Fvalue: 0x%0*lx\n",PTR_SIZE,p->fvalue);
+ erts_printf(" Ftrace: 0x%0*lx\n",PTR_SIZE,p->ftrace);
+
+#ifdef HYBRID
+ if (p->nrr > 0) {
+ int i;
+ erts_printf(" Remembered Roots:\n");
+ for (i = 0; i < p->nrr; i++)
+ if (p->rrsrc[i] != NULL)
+ erts_printf("0x%0*lx -> 0x%0*lx\n",
+ PTR_SIZE, (unsigned long)p->rrsrc[i],
+ PTR_SIZE, (unsigned long)p->rrma[i]);
+ erts_printf("\n");
+ }
+#endif
+
+ erts_printf("+- %-*s -+ 0x%0*lx 0x%0*lx %s-%s-+\n",
+ PTR_SIZE, "Stack",
+ PTR_SIZE, (unsigned long)STACK_TOP(p),
+ PTR_SIZE, (unsigned long)STACK_START(p),
+ dashes, dashes);
+ print_untagged_memory(STACK_TOP(p),STACK_START(p));
+
+ erts_printf("+- %-*s -+ 0x%0*lx 0x%0*lx 0x%0*lx 0x%0*lx +\n",
+ PTR_SIZE, "Heap",
+ PTR_SIZE, (unsigned long)HEAP_START(p),
+ PTR_SIZE, (unsigned long)HIGH_WATER(p),
+ PTR_SIZE, (unsigned long)HEAP_TOP(p),
+ PTR_SIZE, (unsigned long)HEAP_END(p));
+ print_untagged_memory(HEAP_START(p),HEAP_TOP(p));
+
+ if (OLD_HEAP(p)) {
+ erts_printf("+- %-*s -+ 0x%0*lx 0x%0*lx 0x%0*lx %s-+\n",
+ PTR_SIZE, "Old Heap",
+ PTR_SIZE, (unsigned long)OLD_HEAP(p),
+ PTR_SIZE, (unsigned long)OLD_HTOP(p),
+ PTR_SIZE, (unsigned long)OLD_HEND(p),
+ dashes);
+ print_untagged_memory(OLD_HEAP(p),OLD_HTOP(p));
+ }
+
+ if (bp)
+ erts_printf("+- %-*s -+-%s-%s-%s-%s-+\n",
+ PTR_SIZE, "heap fragments",
+ dashes, dashes, dashes, dashes);
+ while (bp) {
+ print_untagged_memory(bp->mem,bp->mem + bp->size);
+ bp = bp->next;
+ }
+}
+
+
+void print_memory(Process *p)
+{
+ if (p != NULL) {
+ print_process_memory(p);
+ }
+#ifdef HYBRID
+ else {
+ Uint i;
+ Uint n = erts_num_active_procs;
+
+ for (i = 0; i < n; i++) {
+ Process *p = erts_active_procs[i];
+ print_process_memory(p);
+ }
+
+ erts_printf("==================\n");
+ erts_printf("|| Message area ||\n");
+ erts_printf("==================\n");
+ erts_printf("+-%s-+-%s-%s-%s-%s-+\n",
+ dashes,dashes,dashes,dashes,dashes);
+ erts_printf("| %-*s | 0x%0*lx - 0x%0*lx - 0x%0*lx%*s|\n",
+ PTR_SIZE, "Young",
+ PTR_SIZE, (unsigned long)global_heap,
+ PTR_SIZE, (unsigned long)global_htop,
+ PTR_SIZE, (unsigned long)global_hend,
+ PTR_SIZE, "");
+ erts_printf("+-%s-+-%s-%s-%s-%s-+\n",
+ dashes,dashes,dashes,dashes,dashes);
+
+ print_untagged_memory(global_heap,global_htop);
+
+
+ erts_printf("+-%s-+-%s-%s-%s-%s-+\n",
+ dashes,dashes,dashes,dashes,dashes);
+ erts_printf("| %-*s | 0x%0*lx - 0x%0*lx %*s |\n",
+ PTR_SIZE, "Old",
+ PTR_SIZE, (unsigned long)global_old_heap,
+ PTR_SIZE, (unsigned long)global_old_hend,
+ 2 * PTR_SIZE, "");
+ erts_printf("+-%s-+-%s-%s-%s-%s-+\n",
+ dashes,dashes,dashes,dashes,dashes);
+
+#ifdef INCREMENTAL
+ {
+ INC_Page *page = inc_used_mem;
+        /* By going through the free list first, we could mark the
+           areas that are not allocated and print only those that
+           are live.
+ char markarea[INC_PAGESIZE];
+ */
+
+ while (page) {
+ Eterm *ptr = (Eterm*)page->start;
+ Eterm *end = (Eterm*)page->start + INC_PAGESIZE;
+
+ erts_printf("| %*s | This: 0x%0*lx Next: 0x%0*lx %*s|\n",
+ PTR_SIZE, "",
+ PTR_SIZE, (unsigned long)page,
+ PTR_SIZE, (unsigned long)page->next,
+ 2 * PTR_SIZE - 8, "");
+ print_untagged_memory(ptr,end);
+ page = page->next;
+ }
+ }
+
+ {
+ INC_MemBlock *this = inc_free_list;
+
+ erts_printf("-- %-*s --%s-%s-%s-%s-\n",PTR_SIZE+2,"Free list",
+ dashes,dashes,dashes,dashes);
+ while (this) {
+ erts_printf("Block @ 0x%0*lx sz: %8d prev: 0x%0*lx next: 0x%0*lx\n",
+ PTR_SIZE, (unsigned long)this,this->size,
+ PTR_SIZE, (unsigned long)this->prev,
+ PTR_SIZE, (unsigned long)this->next);
+ this = this->next;
+ }
+ erts_printf("--%s---%s-%s-%s-%s--\n",
+ dashes,dashes,dashes,dashes,dashes);
+ }
+
+ if (inc_fromspc != NULL) {
+ erts_printf("-- fromspace - 0x%0*lx 0x%0*lx "
+ "------------------------------\n",
+ PTR_SIZE, (unsigned long)inc_fromspc,
+ PTR_SIZE, (unsigned long)inc_fromend);
+ print_untagged_memory(inc_fromspc,inc_fromend);
+ }
+#endif /* INCREMENTAL */
+ }
+#endif /* HYBRID */
+}
+
+void print_memory_info(Process *p)
+{
+ if (p != NULL) {
+ erts_printf("======================================\n");
+ erts_printf("|| Memory info for %-12T ||\n",p->id);
+ erts_printf("======================================\n");
+ erts_printf("+- local heap ----%s-%s-%s-%s-+\n",
+ dashes,dashes,dashes,dashes);
+ erts_printf("| Young | 0x%0*lx - (0x%0*lx) - 0x%0*lx - 0x%0*lx |\n",
+ PTR_SIZE, (unsigned long)HEAP_START(p),
+ PTR_SIZE, (unsigned long)HIGH_WATER(p),
+ PTR_SIZE, (unsigned long)HEAP_TOP(p),
+ PTR_SIZE, (unsigned long)HEAP_END(p));
+ if (OLD_HEAP(p) != NULL)
+ erts_printf("| Old | 0x%0*lx - 0x%0*lx - 0x%0*lx %*s |\n",
+ PTR_SIZE, (unsigned long)OLD_HEAP(p),
+ PTR_SIZE, (unsigned long)OLD_HTOP(p),
+ PTR_SIZE, (unsigned long)OLD_HEND(p),
+ PTR_SIZE, "");
+ } else {
+ erts_printf("=================\n");
+ erts_printf("|| Memory info ||\n");
+ erts_printf("=================\n");
+ }
+#ifdef HYBRID
+ erts_printf("|- message area --%s-%s-%s-%s-|\n",
+ dashes,dashes,dashes,dashes);
+ erts_printf("| Young | 0x%0*lx - 0x%0*lx - 0x%0*lx %*s |\n",
+ PTR_SIZE, (unsigned long)global_heap,
+ PTR_SIZE, (unsigned long)global_htop,
+ PTR_SIZE, (unsigned long)global_hend,
+ PTR_SIZE, "");
+ erts_printf("| Old | 0x%0*lx - 0x%0*lx %*s |\n",
+ PTR_SIZE, (unsigned long)global_old_heap,
+ PTR_SIZE, (unsigned long)global_old_hend,
+ 2 * PTR_SIZE, "");
+#endif
+#ifdef INCREMENTAL
+ if (inc_fromspc != NULL)
+ erts_printf("| Frmsp | 0x%0*lx - 0x%0*lx %*s |\n",
+ PTR_SIZE, (unsigned long)inc_fromspc,
+ PTR_SIZE, (unsigned long)inc_fromend,
+ 2 * PTR_SIZE, "");
+#endif
+ erts_printf("+-----------------%s-%s-%s-%s-+\n",dashes,dashes,dashes,dashes);
+}
+#endif
+
diff --git a/erts/emulator/beam/erl_debug.h b/erts/emulator/beam/erl_debug.h
new file mode 100644
index 0000000000..74f4a00b63
--- /dev/null
+++ b/erts/emulator/beam/erl_debug.h
@@ -0,0 +1,102 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2004-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+#ifndef _ERL_DEBUG_H_
+#define _ERL_DEBUG_H_
+
+#ifdef DEBUG
+
+#ifdef HIPE
+#include "hipe_debug.h"
+#endif
+
+/* Heap areas will be filled with this value when they are deallocated
+ * after a garbage collection. This value used to be 0xff, but that is
+ * an immediate and might not crash the system if it is encountered.
+ * The value is now 0x01, the cons of death.
+ */
+#define DEBUG_BAD_BYTE 0x01
+#define DEBUG_BAD_WORD 0x01010101
+
+/*
+ * VERBOSE. Use the -v option to enable the different categories.
+ */
+#define VERBOSE(flag, format) (flag & verbose ? erts_printf format : 0)
+
+#define DEBUG_DEFAULT 0x0000 /* No flags are set per default */
+#define DEBUG_SYSTEM 0x0001 /* Misc system info at startup and end */
+#define DEBUG_PRIVATE_GC 0x0002 /* GC of private heaps */
+#define DEBUG_HYBRID_GC 0x0004 /* GC of the message area */
+#define DEBUG_ALLOCATION 0x0008 /* HAlloc. To find holes in the heap */
+#define DEBUG_MESSAGES 0x0010 /* Message passing */
+#define DEBUG_THREADS 0x0020 /* Thread-related stuff */
+#define DEBUG_PROCESSES 0x0040 /* Process creation and removal */
+#define DEBUG_MEMORY 0x0080 /* Display results of memory checks */
+
+extern Uint32 verbose;
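+
+/* Illustrative use of VERBOSE (a sketch, not part of the original
+ * header; "receiver" is hypothetical): note that the format argument
+ * carries its own parentheses, since the macro pastes it directly
+ * after erts_printf.
+ *
+ *     VERBOSE(DEBUG_MESSAGES, ("queuing message for %T\n", receiver));
+ */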
+
+void upp(byte*, int);
+void pat(Eterm);
+void pinfo(void);
+void pp(Process*);
+void ppi(Eterm);
+void pba(Process*, int);
+void td(Eterm);
+void ps(Process*, Eterm*);
+
+#undef ERTS_OFFHEAP_DEBUG
+#define ERTS_OFFHEAP_DEBUG
+
+#else /* Non-debug mode */
+
+#define VERBOSE(flag,format)
+
+#endif /* DEBUG */
+
+#ifdef ERTS_OFFHEAP_DEBUG
+#define ERTS_CHK_OFFHEAP(P) erts_check_off_heap((P))
+#define ERTS_CHK_OFFHEAP2(P, HT) erts_check_off_heap2((P), (HT))
+void erts_check_off_heap(Process *);
+void erts_check_off_heap2(Process *, Eterm *);
+#else
+#define ERTS_CHK_OFFHEAP(P)
+#define ERTS_CHK_OFFHEAP2(P, HT)
+#endif
+
+/*
+ * These functions can be handy when developing, and perhaps useful
+ * even outside debugging.
+ */
+extern void erts_check_off_heap(Process *p);
+extern void erts_check_stack(Process *p);
+extern void erts_check_heap(Process *p);
+extern void erts_check_memory(Process *p, Eterm *start, Eterm *end);
+extern void verify_process(Process *p);
+extern void verify_everything(void);
+extern void print_tagged_memory(Eterm *start, Eterm *end);
+extern void print_untagged_memory(Eterm *start, Eterm *end);
+extern void print_memory(Process *p);
+extern void print_memory_info(Process *p);
+
+#ifdef HYBRID
+extern void print_ma_info(void);
+extern void print_message_area(void);
+extern void check_message_area(void);
+#endif
+
+#endif /* _ERL_DEBUG_H_ */
diff --git a/erts/emulator/beam/erl_driver.h b/erts/emulator/beam/erl_driver.h
new file mode 100644
index 0000000000..cdb584b282
--- /dev/null
+++ b/erts/emulator/beam/erl_driver.h
@@ -0,0 +1,626 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 1999-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Include file for erlang driver writers.
+ */
+
+#ifndef __ERL_DRIVER_H__
+#define __ERL_DRIVER_H__
+
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+
+#ifdef SIZEOF_CHAR
+# define SIZEOF_CHAR_SAVED__ SIZEOF_CHAR
+# undef SIZEOF_CHAR
+#endif
+#ifdef SIZEOF_SHORT
+# define SIZEOF_SHORT_SAVED__ SIZEOF_SHORT
+# undef SIZEOF_SHORT
+#endif
+#ifdef SIZEOF_INT
+# define SIZEOF_INT_SAVED__ SIZEOF_INT
+# undef SIZEOF_INT
+#endif
+#ifdef SIZEOF_LONG
+# define SIZEOF_LONG_SAVED__ SIZEOF_LONG
+# undef SIZEOF_LONG
+#endif
+#ifdef SIZEOF_LONG_LONG
+# define SIZEOF_LONG_LONG_SAVED__ SIZEOF_LONG_LONG
+# undef SIZEOF_LONG_LONG
+#endif
+#include "erl_int_sizes_config.h"
+#if defined(SIZEOF_CHAR_SAVED__) && SIZEOF_CHAR_SAVED__ != SIZEOF_CHAR
+# error SIZEOF_CHAR mismatch
+#endif
+#if defined(SIZEOF_SHORT_SAVED__) && SIZEOF_SHORT_SAVED__ != SIZEOF_SHORT
+# error SIZEOF_SHORT mismatch
+#endif
+#if defined(SIZEOF_INT_SAVED__) && SIZEOF_INT_SAVED__ != SIZEOF_INT
+# error SIZEOF_INT mismatch
+#endif
+#if defined(SIZEOF_LONG_SAVED__) && SIZEOF_LONG_SAVED__ != SIZEOF_LONG
+# error SIZEOF_LONG mismatch
+#endif
+#if defined(SIZEOF_LONG_LONG_SAVED__) && SIZEOF_LONG_LONG_SAVED__ != SIZEOF_LONG_LONG
+# error SIZEOF_LONG_LONG mismatch
+#endif
+
+#include <stdlib.h>
+
+#if defined(VXWORKS)
+# include <ioLib.h>
+typedef struct iovec SysIOVec;
+#elif defined(__WIN32__) || defined(_WIN32) || defined(_WIN32_)
+#ifndef STATIC_ERLANG_DRIVER
+ /* Windows dynamic drivers, everything is different... */
+#define ERL_DRIVER_TYPES_ONLY
+#define WIN32_DYNAMIC_ERL_DRIVER
+#endif
+/*
+ * This structure can be cast to a WSABUF structure.
+ */
+typedef struct _SysIOVec {
+ unsigned long iov_len;
+ char* iov_base;
+} SysIOVec;
+#else /* Unix */
+# ifdef HAVE_SYS_UIO_H
+# include <sys/types.h>
+# include <sys/uio.h>
+typedef struct iovec SysIOVec;
+# else
+typedef struct {
+ char* iov_base;
+ size_t iov_len;
+} SysIOVec;
+# endif
+#endif
+
+#ifndef EXTERN
+# ifdef __cplusplus
+# define EXTERN extern "C"
+# else
+# define EXTERN extern
+# endif
+#endif
+
+/* Values for mode arg to driver_select() */
+#define ERL_DRV_READ (1 << 0)
+#define ERL_DRV_WRITE (1 << 1)
+#define ERL_DRV_USE (1 << 2)
+#define ERL_DRV_USE_NO_CALLBACK (ERL_DRV_USE | (1 << 3))
+
+/* Old deprecated */
+#define DO_READ ERL_DRV_READ
+#define DO_WRITE ERL_DRV_WRITE
+
+#define ERL_DRV_EXTENDED_MARKER (0xfeeeeeed)
+#define ERL_DRV_EXTENDED_MAJOR_VERSION 1
+#define ERL_DRV_EXTENDED_MINOR_VERSION 4
+
+/*
+ * The emulator will refuse to load a driver with different major
+ * version than the one used by the emulator.
+ */
+
+
+/* Values for set_port_control_flags() */
+
+#define PORT_CONTROL_FLAG_BINARY (1 << 0)
+#define PORT_CONTROL_FLAG_HEAVY (1 << 1)
+
+/* Values for get_port_flags() */
+
+#define PORT_FLAG_BINARY (1 << 0)
+#define PORT_FLAG_LINE (1 << 1)
+
+
+#define ERL_DRV_FLAG_USE_PORT_LOCKING (1 << 0)
+#define ERL_DRV_FLAG_SOFT_BUSY (1 << 1)
+
+/*
+ * A binary as seen in a driver. Note that a binary should never be
+ * altered by the driver when it has been sent to Erlang.
+ */
+
+typedef struct erl_drv_binary {
+ long orig_size; /* total length of binary */
+ char orig_bytes[1]; /* the data (char instead of byte!) */
+} ErlDrvBinary;
+
+
+/*
+ * Note: These types are deliberately incomplete, to make type errors easier to catch.
+ */
+
+typedef struct _erl_drv_data* ErlDrvData; /* Data to be used by the driver itself. */
+#ifndef ERL_SYS_DRV
+typedef struct _erl_drv_event* ErlDrvEvent; /* An event to be selected on. */
+typedef struct _erl_drv_port* ErlDrvPort; /* A port descriptor. */
+#endif
+typedef struct _erl_drv_port* ErlDrvThreadData; /* Thread data. */
+
+#if !defined(__WIN32__) && !defined(_WIN32) && !defined(_WIN32_) && !defined(USE_SELECT)
+struct erl_drv_event_data {
+ short events;
+ short revents;
+};
+#endif
+typedef struct erl_drv_event_data *ErlDrvEventData; /* Event data */
+
+/*
+ * Used in monitors...
+ */
+typedef unsigned long ErlDrvTermData;
+typedef unsigned long ErlDrvUInt;
+typedef signed long ErlDrvSInt;
+
+#if defined(__WIN32__)
+typedef unsigned __int64 ErlDrvUInt64;
+typedef __int64 ErlDrvSInt64;
+#elif SIZEOF_LONG == 8
+typedef unsigned long ErlDrvUInt64;
+typedef long ErlDrvSInt64;
+#elif SIZEOF_LONG_LONG == 8
+typedef unsigned long long ErlDrvUInt64;
+typedef long long ErlDrvSInt64;
+#else
+#error No 64-bit integer type
+#endif
+
+/*
+ * A driver monitor
+ */
+typedef struct {
+ unsigned char data[sizeof(void *)*4];
+} ErlDrvMonitor;
+
+
+/*
+ * System info
+ */
+
+typedef struct {
+ int driver_major_version;
+ int driver_minor_version;
+ char *erts_version;
+ char *otp_release;
+ int thread_support;
+ int smp_support;
+ int async_threads;
+ int scheduler_threads;
+} ErlDrvSysInfo;
+
+typedef struct {
+ unsigned long megasecs;
+ unsigned long secs;
+ unsigned long microsecs;
+} ErlDrvNowData;
+
+/*
+ * Error codes that can be returned from a driver.
+ */
+
+/*
+ * Exception code from open_port/2 will be {'EXIT',{einval,Where}}.
+ */
+#define ERL_DRV_ERROR_GENERAL ((ErlDrvData) -1)
+
+/*
+ * Exception code from open_port/2 will be {'EXIT',{Errno,Where}},
+ * where Errno is a textual representation of the errno variable
+ * (e.g. eacces if errno is EACCES).
+ */
+#define ERL_DRV_ERROR_ERRNO ((ErlDrvData) -2)
+
+/*
+ * Exception code from open_port/2 will be {'EXIT',{badarg,Where}}.
+ */
+#define ERL_DRV_ERROR_BADARG ((ErlDrvData) -3)
+
+typedef struct erl_io_vec {
+ int vsize; /* length of vectors */
+ int size; /* total size in bytes */
+ SysIOVec* iov;
+ ErlDrvBinary** binv;
+} ErlIOVec;
+
+/*
+ * erl driver thread types
+ */
+
+typedef struct ErlDrvTid_ *ErlDrvTid;
+typedef struct ErlDrvMutex_ ErlDrvMutex;
+typedef struct ErlDrvCond_ ErlDrvCond;
+typedef struct ErlDrvRWLock_ ErlDrvRWLock;
+typedef int ErlDrvTSDKey;
+
+typedef struct {
+ int suggested_stack_size;
+} ErlDrvThreadOpts;
+
+/*
+ * Port data lock (PDL) handle.
+ */
+typedef struct erl_drv_port_data_lock * ErlDrvPDL;
+
+/*
+ * This structure defines a driver.
+ */
+
+typedef struct erl_drv_entry {
+ int (*init)(void); /* called at system start up for statically
+ linked drivers, and after loading for
+ dynamically loaded drivers */
+
+#ifndef ERL_SYS_DRV
+ ErlDrvData (*start)(ErlDrvPort port, char *command);
+ /* called when open_port/2 is invoked.
+ return value -1 means failure. */
+#else
+ ErlDrvData (*start)(ErlDrvPort port, char *command, SysDriverOpts* opts);
+ /* special options, only for system driver */
+#endif
+ void (*stop)(ErlDrvData drv_data);
+ /* called when port is closed, and when the
+ emulator is halted. */
+ void (*output)(ErlDrvData drv_data, char *buf, int len);
+ /* called when we have output from erlang to
+ the port */
+ void (*ready_input)(ErlDrvData drv_data, ErlDrvEvent event);
+ /* called when we have input from one of
+					the driver's handles */
+ void (*ready_output)(ErlDrvData drv_data, ErlDrvEvent event);
+ /* called when output is possible to one of
+ the driver's handles */
+ char *driver_name; /* name supplied as command
+ in open_port XXX ? */
+ void (*finish)(void); /* called before unloading the driver -
+ DYNAMIC DRIVERS ONLY */
+ void *handle; /* Reserved -- Used by emulator internally */
+ int (*control)(ErlDrvData drv_data, unsigned int command, char *buf,
+ int len, char **rbuf, int rlen);
+ /* "ioctl" for drivers - invoked by
+				   port_control/3 */
+ void (*timeout)(ErlDrvData drv_data); /* Handling of timeout in driver */
+ void (*outputv)(ErlDrvData drv_data, ErlIOVec *ev);
+ /* called when we have output from erlang
+ to the port */
+ void (*ready_async)(ErlDrvData drv_data, ErlDrvThreadData thread_data);
+ void (*flush)(ErlDrvData drv_data);
+ /* called when the port is about to be
+ closed, and there is data in the
+ driver queue that needs to be flushed
+ before 'stop' can be called */
+ int (*call)(ErlDrvData drv_data, unsigned int command, char *buf,
+ int len, char **rbuf, int rlen, unsigned int *flags);
+				   /* Works mostly like 'control', a synchronous
+ call into the driver. */
+ void (*event)(ErlDrvData drv_data, ErlDrvEvent event,
+ ErlDrvEventData event_data);
+ /* Called when an event selected by
+ driver_event() has occurred */
+ int extended_marker; /* ERL_DRV_EXTENDED_MARKER */
+ int major_version; /* ERL_DRV_EXTENDED_MAJOR_VERSION */
+ int minor_version; /* ERL_DRV_EXTENDED_MINOR_VERSION */
+ int driver_flags; /* ERL_DRV_FLAGs */
+ void *handle2; /* Reserved -- Used by emulator internally */
+ void (*process_exit)(ErlDrvData drv_data, ErlDrvMonitor *monitor);
+ /* Called when a process monitor fires */
+ void (*stop_select)(ErlDrvEvent event, void* reserved);
+ /* Called on behalf of driver_select when
+ it is safe to release 'event'. A typical
+ unix driver would call close(event) */
+    /* When adding entries here, don't forget to pad in obsolete/driver.h */
+} ErlDrvEntry;
+
+/*
+ * This macro is used to name a dynamic driver's init function in
+ * a way that doesn't lead to conflicts. This is crucial when using
+ * operating systems that have one namespace for all symbols
+ * (e.g. VxWorks). Example: if you have a dynamic driver C source
+ * file named echo_drv.c, you use the macro like this:
+ *
+ * DRIVER_INIT(echo_drv)
+ * {
+ * ....
+ * }
+ *
+ * This function will be called by the Erlang I/O system when the driver
+ * is loaded. It must initialize an ErlDrvEntry structure and return a
+ * pointer to it.
+ */
+
+/* For windows dynamic drivers */
+#ifndef ERL_DRIVER_TYPES_ONLY
+
+#if defined(VXWORKS)
+# define DRIVER_INIT(DRIVER_NAME) ErlDrvEntry* DRIVER_NAME ## _init(void)
+#elif defined(__WIN32__)
+# define DRIVER_INIT(DRIVER_NAME) __declspec(dllexport) ErlDrvEntry* driver_init(void)
+#else
+# define DRIVER_INIT(DRIVER_NAME) ErlDrvEntry* driver_init(void)
+#endif
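+
+/* A minimal sketch of a dynamic driver init function (illustrative
+ * only; the echo_drv name and the chosen fields are assumptions, not
+ * part of this header; the remaining entry fields stay zero/NULL):
+ *
+ *     static ErlDrvEntry echo_drv_entry;
+ *
+ *     DRIVER_INIT(echo_drv)
+ *     {
+ *         echo_drv_entry.driver_name = "echo_drv";
+ *         echo_drv_entry.extended_marker = ERL_DRV_EXTENDED_MARKER;
+ *         echo_drv_entry.major_version = ERL_DRV_EXTENDED_MAJOR_VERSION;
+ *         echo_drv_entry.minor_version = ERL_DRV_EXTENDED_MINOR_VERSION;
+ *         return &echo_drv_entry;
+ *     }
+ */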
+
+/*
+ * These are the functions available for driver writers.
+ */
+EXTERN int driver_select(ErlDrvPort port, ErlDrvEvent event, int mode, int on);
+EXTERN int driver_event(ErlDrvPort port, ErlDrvEvent event,
+ ErlDrvEventData event_data);
+EXTERN int driver_output(ErlDrvPort port, char *buf, int len);
+EXTERN int driver_output2(ErlDrvPort port, char *hbuf, int hlen,
+ char *buf, int len);
+EXTERN int driver_output_binary(ErlDrvPort port, char *hbuf, int hlen,
+ ErlDrvBinary* bin, int offset, int len);
+EXTERN int driver_outputv(ErlDrvPort port, char* hbuf, int hlen, ErlIOVec *ev,
+ int skip);
+EXTERN int driver_vec_to_buf(ErlIOVec *ev, char *buf, int len);
+EXTERN int driver_set_timer(ErlDrvPort port, unsigned long time);
+EXTERN int driver_cancel_timer(ErlDrvPort port);
+EXTERN int driver_read_timer(ErlDrvPort port, unsigned long *time_left);
+
+/*
+ * Get plain-text error message from within a driver
+ */
+EXTERN char* erl_errno_id(int error);
+
+/*
+ * The following functions are used to initiate a close of a port
+ * from a driver.
+ */
+EXTERN int driver_failure_eof(ErlDrvPort port);
+EXTERN int driver_failure_atom(ErlDrvPort port, char *string);
+EXTERN int driver_failure_posix(ErlDrvPort port, int error);
+EXTERN int driver_failure(ErlDrvPort port, int error);
+EXTERN int driver_exit (ErlDrvPort port, int err);
+
+
+/*
+ * Port Data Lock
+ */
+
+EXTERN ErlDrvPDL driver_pdl_create(ErlDrvPort);
+EXTERN void driver_pdl_lock(ErlDrvPDL);
+EXTERN void driver_pdl_unlock(ErlDrvPDL);
+EXTERN long driver_pdl_get_refc(ErlDrvPDL);
+EXTERN long driver_pdl_inc_refc(ErlDrvPDL);
+EXTERN long driver_pdl_dec_refc(ErlDrvPDL);
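+
+/* Illustrative locking pattern (an assumption, not from this header):
+ * create the PDL once, typically in start(), and bracket port-queue
+ * access from non-emulator threads with the lock:
+ *
+ *     ErlDrvPDL pdl = driver_pdl_create(port);
+ *     ...
+ *     driver_pdl_lock(pdl);
+ *     driver_enq(port, buf, len);
+ *     driver_pdl_unlock(pdl);
+ */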
+
+/*
+ * Process monitors
+ */
+EXTERN int
+driver_monitor_process(ErlDrvPort port, ErlDrvTermData process,
+ ErlDrvMonitor *monitor);
+EXTERN int
+driver_demonitor_process(ErlDrvPort port, const ErlDrvMonitor *monitor);
+EXTERN ErlDrvTermData
+driver_get_monitored_process(ErlDrvPort port, const ErlDrvMonitor *monitor);
+EXTERN int driver_compare_monitors(const ErlDrvMonitor *monitor1,
+ const ErlDrvMonitor *monitor2);
+
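+/* An illustrative round trip (a sketch; error handling elided): monitor
+ * the calling process so the process_exit() entry is called if it dies
+ * while the port still holds resources on its behalf:
+ *
+ *     ErlDrvMonitor mon;
+ *     if (driver_monitor_process(port, driver_caller(port), &mon) == 0) {
+ *         ... later: driver_demonitor_process(port, &mon); ...
+ *     }
+ */
+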
+/*
+ * Port attributes
+ */
+EXTERN void set_busy_port(ErlDrvPort port, int on);
+EXTERN void set_port_control_flags(ErlDrvPort port, int flags);
+
+EXTERN int get_port_flags(ErlDrvPort port);
+
+
+/* Binary interface */
+
+/*
+ * NOTE: DO NOT overwrite a binary with new data once it has been
+ * delivered; since the binary is a shared object, it MUST be written
+ * only once.
+ */
+
+EXTERN ErlDrvBinary* driver_alloc_binary(int size);
+EXTERN ErlDrvBinary* driver_realloc_binary(ErlDrvBinary *bin, int size);
+EXTERN void driver_free_binary(ErlDrvBinary *bin);
+
+/* Reference counting on driver binaries */
+EXTERN long driver_binary_get_refc(ErlDrvBinary *dbp);
+EXTERN long driver_binary_inc_refc(ErlDrvBinary *dbp);
+EXTERN long driver_binary_dec_refc(ErlDrvBinary *dbp);
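+
+/* Illustrative pattern (an assumption, not from this header): fill a
+ * freshly allocated binary exactly once, hand it to the emulator, and
+ * drop the driver's own reference:
+ *
+ *     ErlDrvBinary *bin = driver_alloc_binary(len);
+ *     memcpy(bin->orig_bytes, src, len);
+ *     driver_output_binary(port, NULL, 0, bin, 0, len);
+ *     driver_free_binary(bin);
+ */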
+
+/* Allocation interface */
+EXTERN void *driver_alloc(size_t size);
+EXTERN void *driver_realloc(void *ptr, size_t size);
+EXTERN void driver_free(void *ptr);
+
+/* Queue interface */
+EXTERN int driver_enq(ErlDrvPort port, char* buf, int len);
+EXTERN int driver_pushq(ErlDrvPort port, char* buf, int len);
+EXTERN int driver_deq(ErlDrvPort port, int size);
+EXTERN int driver_sizeq(ErlDrvPort port);
+EXTERN int driver_enq_bin(ErlDrvPort port, ErlDrvBinary *bin, int offset,
+ int len);
+EXTERN int driver_pushq_bin(ErlDrvPort port, ErlDrvBinary *bin, int offset,
+ int len);
+
+EXTERN int driver_peekqv(ErlDrvPort port, ErlIOVec *ev);
+EXTERN SysIOVec* driver_peekq(ErlDrvPort port, int *vlen);
+EXTERN int driver_enqv(ErlDrvPort port, ErlIOVec *ev, int skip);
+EXTERN int driver_pushqv(ErlDrvPort port, ErlIOVec *ev, int skip);
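+
+/* Illustrative drain loop for ready_output() (a sketch; fd and the
+ * writev() call are assumptions for a Unix driver):
+ *
+ *     int vlen;
+ *     SysIOVec *iov = driver_peekq(port, &vlen);
+ *     if (iov) {
+ *         ssize_t n = writev(fd, (struct iovec *) iov, vlen);
+ *         if (n > 0)
+ *             driver_deq(port, n);
+ *     }
+ */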
+
+/*
+ * Add and remove driver entries.
+ */
+EXTERN void add_driver_entry(ErlDrvEntry *de);
+EXTERN int remove_driver_entry(ErlDrvEntry *de);
+
+/*
+ * System info
+ */
+EXTERN void driver_system_info(ErlDrvSysInfo *sip, size_t si_size);
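+
+/* Illustrative call (a sketch): pass the size of the caller's struct
+ * so the emulator knows how much of it may be filled in:
+ *
+ *     ErlDrvSysInfo si;
+ *     driver_system_info(&si, sizeof(ErlDrvSysInfo));
+ *     if (si.smp_support)
+ *         ...
+ */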
+
+/*
+ * erl driver thread functions.
+ */
+
+EXTERN ErlDrvMutex *erl_drv_mutex_create(char *name);
+EXTERN void erl_drv_mutex_destroy(ErlDrvMutex *mtx);
+EXTERN int erl_drv_mutex_trylock(ErlDrvMutex *mtx);
+EXTERN void erl_drv_mutex_lock(ErlDrvMutex *mtx);
+EXTERN void erl_drv_mutex_unlock(ErlDrvMutex *mtx);
+EXTERN ErlDrvCond *erl_drv_cond_create(char *name);
+EXTERN void erl_drv_cond_destroy(ErlDrvCond *cnd);
+EXTERN void erl_drv_cond_signal(ErlDrvCond *cnd);
+EXTERN void erl_drv_cond_broadcast(ErlDrvCond *cnd);
+EXTERN void erl_drv_cond_wait(ErlDrvCond *cnd, ErlDrvMutex *mtx);
+EXTERN ErlDrvRWLock *erl_drv_rwlock_create(char *name);
+EXTERN void erl_drv_rwlock_destroy(ErlDrvRWLock *rwlck);
+EXTERN int erl_drv_rwlock_tryrlock(ErlDrvRWLock *rwlck);
+EXTERN void erl_drv_rwlock_rlock(ErlDrvRWLock *rwlck);
+EXTERN void erl_drv_rwlock_runlock(ErlDrvRWLock *rwlck);
+EXTERN int erl_drv_rwlock_tryrwlock(ErlDrvRWLock *rwlck);
+EXTERN void erl_drv_rwlock_rwlock(ErlDrvRWLock *rwlck);
+EXTERN void erl_drv_rwlock_rwunlock(ErlDrvRWLock *rwlck);
+EXTERN int erl_drv_tsd_key_create(char *name, ErlDrvTSDKey *key);
+EXTERN void erl_drv_tsd_key_destroy(ErlDrvTSDKey key);
+EXTERN void erl_drv_tsd_set(ErlDrvTSDKey key, void *data);
+EXTERN void *erl_drv_tsd_get(ErlDrvTSDKey key);
+EXTERN ErlDrvThreadOpts *erl_drv_thread_opts_create(char *name);
+EXTERN void erl_drv_thread_opts_destroy(ErlDrvThreadOpts *opts);
+EXTERN int erl_drv_thread_create(char *name,
+ ErlDrvTid *tid,
+ void * (*func)(void *),
+ void *args,
+ ErlDrvThreadOpts *opts);
+EXTERN ErlDrvTid erl_drv_thread_self(void);
+EXTERN int erl_drv_equal_tids(ErlDrvTid tid1, ErlDrvTid tid2);
+EXTERN void erl_drv_thread_exit(void *resp);
+EXTERN int erl_drv_thread_join(ErlDrvTid, void **respp);
+
+/*
+ * Misc.
+ */
+EXTERN int null_func(void);
+
+#endif /* !ERL_DRIVER_TYPES_ONLY */
+
+/* Constants for return flags from the 'port_call' callback */
+#define DRIVER_CALL_KEEP_BUFFER 0x1
+
+/* ErlDrvTerm is the type to use for casts when building
+ * terms that should be sent to the connected process,
+ * for instance a tuple of the form {tcp, Port, [Tag|Binary]}
+ *
+ * ErlDrvTerm spec[] = {
+ * ERL_DRV_ATOM, driver_mk_atom("tcp"),
+ * ERL_DRV_PORT, driver_mk_port(drv->ix),
+ * ERL_DRV_INT, REPLY_TAG,
+ * ERL_DRV_BINARY, (ErlDrvTerm)bin, 50, 0,
+ * ERL_DRV_LIST, 2,
+ * ERL_DRV_TUPLE, 3,
+ * }
+ *
+ */
+
+#define TERM_DATA(x) ((ErlDrvTermData) (x))
+
+/* Possible types to send from driver Argument type */
+#define ERL_DRV_NIL ((ErlDrvTermData) 1) /* None */
+#define ERL_DRV_ATOM ((ErlDrvTermData) 2) /* driver_mk_atom(string) */
+#define ERL_DRV_INT ((ErlDrvTermData) 3) /* ErlDrvSInt */
+#define ERL_DRV_PORT ((ErlDrvTermData) 4) /* driver_mk_port(ix) */
+#define ERL_DRV_BINARY ((ErlDrvTermData) 5) /* ErlDrvBinary*,
+ * ErlDrvUInt size,
+ * ErlDrvUInt offs */
+#define ERL_DRV_STRING ((ErlDrvTermData) 6) /* char*, ErlDrvUInt */
+#define ERL_DRV_TUPLE ((ErlDrvTermData) 7) /* ErlDrvUInt */
+#define ERL_DRV_LIST ((ErlDrvTermData) 8) /* ErlDrvUInt */
+#define ERL_DRV_STRING_CONS ((ErlDrvTermData) 9) /* char*, ErlDrvUInt */
+#define ERL_DRV_PID ((ErlDrvTermData) 10) /* driver_connected,... */
+
+#define ERL_DRV_FLOAT ((ErlDrvTermData) 11) /* double * */
+#define ERL_DRV_EXT2TERM ((ErlDrvTermData) 12) /* char *, ErlDrvUInt */
+#define ERL_DRV_UINT ((ErlDrvTermData) 13) /* ErlDrvUInt */
+#define ERL_DRV_BUF2BINARY ((ErlDrvTermData) 14) /* char *, ErlDrvUInt */
+#define ERL_DRV_INT64 ((ErlDrvTermData) 15) /* ErlDrvSInt64 * */
+#define ERL_DRV_UINT64 ((ErlDrvTermData) 16) /* ErlDrvUInt64 * */
+
+#ifndef ERL_DRIVER_TYPES_ONLY
+
+/* make terms for driver_output_term and driver_send_term */
+EXTERN ErlDrvTermData driver_mk_atom(char*);
+EXTERN ErlDrvTermData driver_mk_port(ErlDrvPort);
+EXTERN ErlDrvTermData driver_connected(ErlDrvPort);
+EXTERN ErlDrvTermData driver_caller(ErlDrvPort);
+extern const ErlDrvTermData driver_term_nil;
+EXTERN ErlDrvTermData driver_mk_term_nil(void);
+EXTERN ErlDrvPort driver_create_port(ErlDrvPort creator_port,
+ ErlDrvTermData connected, /* pid */
+ char* name, /* driver name */
+ ErlDrvData drv_data);
+
+
+/* output term data to the port owner */
+EXTERN int driver_output_term(ErlDrvPort ix, ErlDrvTermData* data, int len);
+/* output term data to a specific process */
+EXTERN int driver_send_term(ErlDrvPort ix, ErlDrvTermData to,
+ ErlDrvTermData* data, int len);
+
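+/* An illustrative use of the spec format described above (bin, its
+ * length 50, and the integer tag 100 are assumptions): this sends
+ * {tcp, Port, [Tag|Binary]} with Tag = 100 to the port owner:
+ *
+ *     ErlDrvTermData spec[] = {
+ *         ERL_DRV_ATOM, driver_mk_atom("tcp"),
+ *         ERL_DRV_PORT, driver_mk_port(port),
+ *         ERL_DRV_INT, (ErlDrvTermData) 100,
+ *         ERL_DRV_BINARY, (ErlDrvTermData) bin, 50, 0,
+ *         ERL_DRV_LIST, 2,
+ *         ERL_DRV_TUPLE, 3,
+ *     };
+ *     driver_output_term(port, spec, sizeof(spec) / sizeof(spec[0]));
+ */
+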
+/* Async IO functions */
+EXTERN long driver_async(ErlDrvPort ix,
+ unsigned int* key,
+ void (*async_invoke)(void*),
+ void* async_data,
+ void (*async_free)(void*));
+
+
+EXTERN int driver_async_cancel(unsigned int key);
+
+/* Locks the driver in the machine "forever"; there is
+   no unlock function. Note that this is almost never useful, as an open
+   port towards the driver locks it until the port is closed, so unexpected
+   unloading "never" happens. */
+EXTERN int driver_lock_driver(ErlDrvPort ix);
+
+/* Get the current 'now' timestamp (analogous to erlang:now()) */
+EXTERN int driver_get_now(ErlDrvNowData *now);
+
+
+/* These were removed from the ANSI version, now they're back. */
+
+EXTERN void *driver_dl_open(char *);
+EXTERN void *driver_dl_sym(void *, char *);
+EXTERN int driver_dl_close(void *);
+EXTERN char *driver_dl_error(void);
+
+/* environment */
+EXTERN int erl_drv_putenv(char *key, char *value);
+EXTERN int erl_drv_getenv(char *key, char *value, size_t *value_size);
+
+#endif /* !ERL_DRIVER_TYPES_ONLY */
+
+#ifdef WIN32_DYNAMIC_ERL_DRIVER
+# include "erl_win_dyn_driver.h"
+#endif
+
+#endif
+
+
+
+
diff --git a/erts/emulator/beam/erl_drv_thread.c b/erts/emulator/beam/erl_drv_thread.c
new file mode 100644
index 0000000000..50d8c25c46
--- /dev/null
+++ b/erts/emulator/beam/erl_drv_thread.c
@@ -0,0 +1,706 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2007-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+
+#include "global.h"
+#include <string.h>
+
+#define ERL_DRV_THR_OPTS_SIZE(LAST_FIELD) \
+ (((size_t) &((ErlDrvThreadOpts *) 0)->LAST_FIELD) \
+ + sizeof(((ErlDrvThreadOpts *) 0)->LAST_FIELD))
+
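+/* (The macro above computes the size of ErlDrvThreadOpts up to and
+ * including LAST_FIELD, i.e. the field's offset plus its own size,
+ * which lets option structs of different sizes be handled by size.) */
+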
+static void
+fatal_error(int err, char *func)
+{
+ char *estr = strerror(err);
+ if (!estr) {
+ if (err == ENOTSUP)
+ estr = "Not supported";
+ else
+ estr = "Unknown error";
+ }
+ erl_exit(ERTS_ABORT_EXIT, "Fatal error in %s: %s [%d]\n", func, estr, err);
+}
+
+#define ERL_DRV_TSD_KEYS_INC 10
+#define ERL_DRV_TSD_EXTRA 10
+#define ERL_DRV_INVALID_TSD_KEY INT_MAX
+
+#ifdef USE_THREADS
+
+struct ErlDrvMutex_ {
+ ethr_mutex mtx;
+ char *name;
+};
+
+struct ErlDrvCond_ {
+ ethr_cond cnd;
+ char *name;
+};
+
+struct ErlDrvRWLock_ {
+ ethr_rwmutex rwmtx;
+ char *name;
+};
+
+struct ErlDrvTid_ {
+ ethr_tid tid;
+ void* (*func)(void*);
+ void* arg;
+ int drv_thr;
+ Uint tsd_len;
+ void **tsd;
+ char *name;
+};
+
+static ethr_tsd_key tid_key;
+
+static ethr_thr_opts def_ethr_opts = ETHR_THR_OPTS_DEFAULT_INITER;
+
+#else /* USE_THREADS */
+static Uint tsd_len;
+static void **tsd;
+#endif
+
+static ErlDrvTSDKey next_tsd_key;
+static ErlDrvTSDKey max_used_tsd_key;
+static ErlDrvTSDKey used_tsd_keys_len;
+static char **used_tsd_keys;
+static erts_mtx_t tsd_mtx;
+static char *no_name;
+
+#ifdef USE_THREADS
+
+static void
+thread_exit_handler(void)
+{
+ struct ErlDrvTid_ *dtid = ethr_tsd_get(tid_key);
+ if (dtid) {
+ if (dtid->tsd)
+ erts_free(ERTS_ALC_T_DRV_TSD, dtid->tsd);
+ if (!dtid->drv_thr)
+ erts_free(ERTS_ALC_T_DRV_TID, dtid);
+ }
+}
+
+static void *
+erl_drv_thread_wrapper(void *vdtid)
+{
+ int res;
+ struct ErlDrvTid_ *dtid = (struct ErlDrvTid_ *) vdtid;
+ res = ethr_tsd_set(tid_key, vdtid);
+ if (res != 0)
+ fatal_error(res, "erl_drv_thread_wrapper()");
+ return (*dtid->func)(dtid->arg);
+}
+
+#endif
+
+void erl_drv_thr_init(void)
+{
+ int i;
+#ifdef USE_THREADS
+ int res = ethr_tsd_key_create(&tid_key);
+ if (res == 0)
+ res = ethr_install_exit_handler(thread_exit_handler);
+ if (res != 0)
+ fatal_error(res, "erl_drv_thr_init()");
+#else
+ tsd_len = 0;
+ tsd = NULL;
+#endif
+
+ no_name = "unknown";
+ next_tsd_key = 0;
+ max_used_tsd_key = -1;
+ used_tsd_keys_len = ERL_DRV_TSD_KEYS_INC;
+ used_tsd_keys = erts_alloc(ERTS_ALC_T_DRV_TSD,
+ sizeof(char *)*ERL_DRV_TSD_KEYS_INC);
+ for (i = 0; i < ERL_DRV_TSD_KEYS_INC; i++)
+ used_tsd_keys[i] = NULL;
+ erts_mtx_init(&tsd_mtx, "drv_tsd");
+}
+
+/*
+ * These functions implement the driver thread interface in erl_driver.h.
+ * NOTE: Only use this interface from drivers. From within the emulator use
+ * either the erl_threads.h, the erl_smp.h or the ethread.h interface.
+ */
+
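+/* Illustrative driver-side usage (a sketch; worker() and arg are
+ * hypothetical):
+ *
+ *     ErlDrvTid tid;
+ *     if (erl_drv_thread_create("worker", &tid, worker, arg, NULL) == 0) {
+ *         ...
+ *         erl_drv_thread_join(tid, NULL);
+ *     }
+ */
+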
+ErlDrvMutex *
+erl_drv_mutex_create(char *name)
+{
+#ifdef USE_THREADS
+ ErlDrvMutex *dmtx = erts_alloc_fnf(ERTS_ALC_T_DRV_MTX,
+ (sizeof(ErlDrvMutex)
+ + (name ? sys_strlen(name) + 1 : 0)));
+ if (dmtx) {
+ if (ethr_mutex_init(&dmtx->mtx) != 0) {
+ erts_free(ERTS_ALC_T_DRV_MTX, (void *) dmtx);
+ dmtx = NULL;
+ }
+ else if (!name)
+ dmtx->name = no_name;
+ else {
+ dmtx->name = ((char *) dmtx) + sizeof(ErlDrvMutex);
+ sys_strcpy(dmtx->name, name);
+ }
+ }
+ return dmtx;
+#else
+ return (ErlDrvMutex *) NULL;
+#endif
+}
+
+void
+erl_drv_mutex_destroy(ErlDrvMutex *dmtx)
+{
+#ifdef USE_THREADS
+ int res = dmtx ? ethr_mutex_destroy(&dmtx->mtx) : EINVAL;
+ if (res != 0)
+ fatal_error(res, "erl_drv_mutex_destroy()");
+ erts_free(ERTS_ALC_T_DRV_MTX, (void *) dmtx);
+#endif
+}
+
+int
+erl_drv_mutex_trylock(ErlDrvMutex *dmtx)
+{
+#ifdef USE_THREADS
+ int res = dmtx ? ethr_mutex_trylock(&dmtx->mtx) : EINVAL;
+ if (res != 0 && res != EBUSY)
+ fatal_error(res, "erl_drv_mutex_trylock()");
+ return res;
+#else
+ return 0;
+#endif
+}
+
+void
+erl_drv_mutex_lock(ErlDrvMutex *dmtx)
+{
+#ifdef USE_THREADS
+ int res = dmtx ? ethr_mutex_lock(&dmtx->mtx) : EINVAL;
+ if (res != 0)
+ fatal_error(res, "erl_drv_mutex_lock()");
+#endif
+}
+
+void
+erl_drv_mutex_unlock(ErlDrvMutex *dmtx)
+{
+#ifdef USE_THREADS
+ int res = dmtx ? ethr_mutex_unlock(&dmtx->mtx) : EINVAL;
+ if (res != 0)
+ fatal_error(res, "erl_drv_mutex_unlock()");
+#endif
+}
+
+ErlDrvCond *
+erl_drv_cond_create(char *name)
+{
+#ifdef USE_THREADS
+ ErlDrvCond *dcnd = erts_alloc_fnf(ERTS_ALC_T_DRV_CND,
+ (sizeof(ErlDrvCond)
+ + (name ? sys_strlen(name) + 1 : 0)));
+ if (dcnd) {
+ if (ethr_cond_init(&dcnd->cnd) != 0) {
+ erts_free(ERTS_ALC_T_DRV_CND, (void *) dcnd);
+ dcnd = NULL;
+ }
+ else if (!name)
+ dcnd->name = no_name;
+ else {
+ dcnd->name = ((char *) dcnd) + sizeof(ErlDrvCond);
+ sys_strcpy(dcnd->name, name);
+ }
+ }
+ return dcnd;
+#else
+ return (ErlDrvCond *) NULL;
+#endif
+}
+
+void
+erl_drv_cond_destroy(ErlDrvCond *dcnd)
+{
+#ifdef USE_THREADS
+ int res = dcnd ? ethr_cond_destroy(&dcnd->cnd) : EINVAL;
+ if (res != 0)
+ fatal_error(res, "erl_drv_cond_destroy()");
+ erts_free(ERTS_ALC_T_DRV_CND, (void *) dcnd);
+#endif
+}
+
+
+void
+erl_drv_cond_signal(ErlDrvCond *dcnd)
+{
+#ifdef USE_THREADS
+ int res = dcnd ? ethr_cond_signal(&dcnd->cnd) : EINVAL;
+ if (res != 0)
+ fatal_error(res, "erl_drv_cond_signal()");
+#endif
+}
+
+void
+erl_drv_cond_broadcast(ErlDrvCond *dcnd)
+{
+#ifdef USE_THREADS
+ int res = dcnd ? ethr_cond_broadcast(&dcnd->cnd) : EINVAL;
+ if (res != 0)
+ fatal_error(res, "erl_drv_cond_broadcast()");
+#endif
+}
+
+
+void
+erl_drv_cond_wait(ErlDrvCond *dcnd, ErlDrvMutex *dmtx)
+{
+#ifdef USE_THREADS
+ int res;
+ if (!dcnd || !dmtx) {
+ res = EINVAL;
+ error:
+ fatal_error(res, "erl_drv_cond_wait()");
+ }
+ while (1) {
+ res = ethr_cond_wait(&dcnd->cnd, &dmtx->mtx);
+ if (res == 0)
+ break;
+ if (res != EINTR)
+ goto error;
+ }
+#endif
+}
+
+ErlDrvRWLock *
+erl_drv_rwlock_create(char *name)
+{
+#ifdef USE_THREADS
+ ErlDrvRWLock *drwlck = erts_alloc_fnf(ERTS_ALC_T_DRV_RWLCK,
+ (sizeof(ErlDrvRWLock)
+ + (name ? sys_strlen(name) + 1 : 0)));
+ if (drwlck) {
+ if (ethr_rwmutex_init(&drwlck->rwmtx) != 0) {
+ erts_free(ERTS_ALC_T_DRV_RWLCK, (void *) drwlck);
+ drwlck = NULL;
+ }
+ else if (!name)
+ drwlck->name = no_name;
+ else {
+ drwlck->name = ((char *) drwlck) + sizeof(ErlDrvRWLock);
+ sys_strcpy(drwlck->name, name);
+ }
+ }
+ return drwlck;
+#else
+ return (ErlDrvRWLock *) NULL;
+#endif
+}
+
+void
+erl_drv_rwlock_destroy(ErlDrvRWLock *drwlck)
+{
+#ifdef USE_THREADS
+ int res = drwlck ? ethr_rwmutex_destroy(&drwlck->rwmtx) : EINVAL;
+ if (res != 0)
+ fatal_error(res, "erl_drv_rwlock_destroy()");
+ erts_free(ERTS_ALC_T_DRV_RWLCK, (void *) drwlck);
+#endif
+}
+
+int
+erl_drv_rwlock_tryrlock(ErlDrvRWLock *drwlck)
+{
+#ifdef USE_THREADS
+ int res = drwlck ? ethr_rwmutex_tryrlock(&drwlck->rwmtx) : EINVAL;
+ if (res != 0 && res != EBUSY)
+ fatal_error(res, "erl_drv_rwlock_tryrlock()");
+ return res;
+#else
+ return 0;
+#endif
+}
+
+void
+erl_drv_rwlock_rlock(ErlDrvRWLock *drwlck)
+{
+#ifdef USE_THREADS
+ int res = drwlck ? ethr_rwmutex_rlock(&drwlck->rwmtx) : EINVAL;
+ if (res != 0)
+ fatal_error(res, "erl_drv_rwlock_rlock()");
+#endif
+}
+
+void
+erl_drv_rwlock_runlock(ErlDrvRWLock *drwlck)
+{
+#ifdef USE_THREADS
+ int res = drwlck ? ethr_rwmutex_runlock(&drwlck->rwmtx) : EINVAL;
+ if (res != 0)
+ fatal_error(res, "erl_drv_rwlock_runlock()");
+#endif
+}
+
+int
+erl_drv_rwlock_tryrwlock(ErlDrvRWLock *drwlck)
+{
+#ifdef USE_THREADS
+ int res = drwlck ? ethr_rwmutex_tryrwlock(&drwlck->rwmtx) : EINVAL;
+ if (res != 0 && res != EBUSY)
+ fatal_error(res, "erl_drv_rwlock_tryrwlock()");
+ return res;
+#else
+ return 0;
+#endif
+}
+
+void
+erl_drv_rwlock_rwlock(ErlDrvRWLock *drwlck)
+{
+#ifdef USE_THREADS
+ int res = drwlck ? ethr_rwmutex_rwlock(&drwlck->rwmtx) : EINVAL;
+ if (res != 0)
+ fatal_error(res, "erl_drv_rwlock_rwlock()");
+#endif
+}
+
+void
+erl_drv_rwlock_rwunlock(ErlDrvRWLock *drwlck)
+{
+#ifdef USE_THREADS
+ int res = drwlck ? ethr_rwmutex_rwunlock(&drwlck->rwmtx) : EINVAL;
+ if (res != 0)
+ fatal_error(res, "erl_drv_rwlock_rwunlock()");
+#endif
+}
+
+int
+erl_drv_tsd_key_create(char *name, ErlDrvTSDKey *key)
+{
+ char *name_copy;
+ Uint old_used_tsd_keys_len;
+ ErlDrvTSDKey res;
+
+ if (!key)
+ fatal_error(EINVAL, "erl_drv_tsd_key_create()");
+
+ if (!name)
+ name_copy = no_name;
+ else {
+ name_copy = erts_alloc_fnf(ERTS_ALC_T_DRV_TSD,
+ sizeof(char)*(strlen(name) + 1));
+ if (!name_copy) {
+ *key = -1;
+ return ENOMEM;
+ }
+ sys_strcpy(name_copy, name);
+ }
+
+ erts_mtx_lock(&tsd_mtx);
+
+ *key = next_tsd_key;
+
+ if (next_tsd_key < 0)
+ res = ENOMEM;
+ else {
+ res = 0;
+
+ ASSERT(!used_tsd_keys[next_tsd_key]);
+ used_tsd_keys[next_tsd_key] = name_copy;
+
+ if (max_used_tsd_key < next_tsd_key)
+ max_used_tsd_key = next_tsd_key;
+
+ if (max_used_tsd_key + 1 >= used_tsd_keys_len) {
+ int i;
+ old_used_tsd_keys_len = used_tsd_keys_len;
+ if (used_tsd_keys_len + ERL_DRV_TSD_KEYS_INC >= INT_MAX)
+ next_tsd_key = -1;
+ else {
+ char **new_used_tsd_keys;
+ used_tsd_keys_len += ERL_DRV_TSD_KEYS_INC;
+ new_used_tsd_keys = erts_realloc_fnf(ERTS_ALC_T_DRV_TSD,
+ used_tsd_keys,
+ (sizeof(char *)
+ * used_tsd_keys_len));
+ if (!new_used_tsd_keys)
+ next_tsd_key = -1;
+ else {
+ used_tsd_keys = new_used_tsd_keys;
+ for (i = old_used_tsd_keys_len; i < used_tsd_keys_len; i++)
+ used_tsd_keys[i] = NULL;
+ }
+ }
+ }
+
+ if (next_tsd_key >= 0) {
+ do {
+ next_tsd_key++;
+ } while (used_tsd_keys[next_tsd_key]);
+ }
+ ASSERT(next_tsd_key < used_tsd_keys_len);
+ }
+
+ erts_mtx_unlock(&tsd_mtx);
+
+ return res;
+}
+
+void
+erl_drv_tsd_key_destroy(ErlDrvTSDKey key)
+{
+ erts_mtx_lock(&tsd_mtx);
+
+ if (key < 0 || max_used_tsd_key < key || !used_tsd_keys[key])
+ fatal_error(EINVAL, "erl_drv_tsd_key_destroy()");
+
+ if (used_tsd_keys[key] != no_name)
+ erts_free(ERTS_ALC_T_DRV_TSD, used_tsd_keys[key]);
+
+ used_tsd_keys[key] = NULL;
+ if (next_tsd_key < 0 || key < next_tsd_key)
+ next_tsd_key = key;
+
+ erts_mtx_unlock(&tsd_mtx);
+}
+
+
+#ifdef USE_THREADS
+#define ERL_DRV_TSD__ (dtid->tsd)
+#define ERL_DRV_TSD_LEN__ (dtid->tsd_len)
+#else
+#define ERL_DRV_TSD__ (tsd)
+#define ERL_DRV_TSD_LEN__ (tsd_len)
+#endif
+
+void
+erl_drv_tsd_set(ErlDrvTSDKey key, void *data)
+{
+#ifdef USE_THREADS
+ struct ErlDrvTid_ *dtid = (struct ErlDrvTid_ *) erl_drv_thread_self();
+#endif
+
+ if (key < 0 || max_used_tsd_key < key || !used_tsd_keys[key])
+ fatal_error(EINVAL, "erl_drv_tsd_set()");
+
+ if (!ERL_DRV_TSD__) {
+ ErlDrvTSDKey i;
+ ERL_DRV_TSD_LEN__ = key + ERL_DRV_TSD_EXTRA;
+ ERL_DRV_TSD__ = erts_alloc(ERTS_ALC_T_DRV_TSD,
+ sizeof(void *)*ERL_DRV_TSD_LEN__);
+ for (i = 0; i < ERL_DRV_TSD_LEN__; i++)
+ ERL_DRV_TSD__[i] = NULL;
+ }
+ else if (ERL_DRV_TSD_LEN__ <= key) {
+ ErlDrvTSDKey i = ERL_DRV_TSD_LEN__;
+ ERL_DRV_TSD_LEN__ = key + ERL_DRV_TSD_EXTRA;
+ ERL_DRV_TSD__ = erts_realloc(ERTS_ALC_T_DRV_TSD,
+ ERL_DRV_TSD__,
+ sizeof(void *)*ERL_DRV_TSD_LEN__);
+ for (; i < ERL_DRV_TSD_LEN__; i++)
+ ERL_DRV_TSD__[i] = NULL;
+ }
+ ERL_DRV_TSD__[key] = data;
+}
+
+void *
+erl_drv_tsd_get(ErlDrvTSDKey key)
+{
+#ifdef USE_THREADS
+ struct ErlDrvTid_ *dtid = ethr_tsd_get(tid_key);
+#endif
+ if (key < 0 || max_used_tsd_key < key || !used_tsd_keys[key])
+ fatal_error(EINVAL, "erl_drv_tsd_get()");
+#ifdef USE_THREADS
+ if (!dtid)
+ return NULL;
+#endif
+    if (ERL_DRV_TSD_LEN__ <= key)
+ return NULL;
+ return ERL_DRV_TSD__[key];
+}
+
+#undef ERL_DRV_TSD_LEN__
+#undef ERL_DRV_TSD__
+
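+/* Illustrative TSD usage from a driver thread (a sketch; state is
+ * hypothetical):
+ *
+ *     ErlDrvTSDKey key;
+ *     if (erl_drv_tsd_key_create("my_drv_state", &key) == 0) {
+ *         erl_drv_tsd_set(key, state);
+ *         ... erl_drv_tsd_get(key) ...
+ *         erl_drv_tsd_key_destroy(key);
+ *     }
+ */
+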
+ErlDrvThreadOpts *
+erl_drv_thread_opts_create(char *name)
+{
+ ErlDrvThreadOpts *opts = erts_alloc_fnf(ERTS_ALC_T_DRV_THR_OPTS,
+ sizeof(ErlDrvThreadOpts));
+ if (!opts)
+ return NULL;
+ opts->suggested_stack_size = -1;
+ return opts;
+}
+
+void
+erl_drv_thread_opts_destroy(ErlDrvThreadOpts *opts)
+{
+ if (!opts)
+ fatal_error(EINVAL, "erl_drv_thread_opts_destroy()");
+ erts_free(ERTS_ALC_T_DRV_THR_OPTS, opts);
+}
+
+int
+erl_drv_thread_create(char *name,
+ ErlDrvTid *tid,
+ void* (*func)(void*),
+ void* arg,
+ ErlDrvThreadOpts *opts)
+{
+#ifdef USE_THREADS
+ int res;
+ struct ErlDrvTid_ *dtid;
+ ethr_thr_opts ethr_opts;
+ ethr_thr_opts *use_opts;
+
+ if (!opts)
+ use_opts = NULL;
+ else {
+ sys_memcpy((void *) &ethr_opts,
+ (void *) &def_ethr_opts,
+ sizeof(ethr_thr_opts));
+ ethr_opts.suggested_stack_size = opts->suggested_stack_size;
+ use_opts = &ethr_opts;
+ }
+
+ dtid = erts_alloc_fnf(ERTS_ALC_T_DRV_TID,
+ (sizeof(struct ErlDrvTid_)
+ + (name ? sys_strlen(name) + 1 : 0)));
+ if (!dtid)
+ return ENOMEM;
+
+ dtid->drv_thr = 1;
+ dtid->func = func;
+ dtid->arg = arg;
+ dtid->tsd = NULL;
+ dtid->tsd_len = 0;
+ if (!name)
+ dtid->name = no_name;
+ else {
+ dtid->name = ((char *) dtid) + sizeof(struct ErlDrvTid_);
+ sys_strcpy(dtid->name, name);
+ }
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ res = erts_lcnt_thr_create(&dtid->tid, erl_drv_thread_wrapper, dtid, use_opts);
+#else
+ res = ethr_thr_create(&dtid->tid, erl_drv_thread_wrapper, dtid, use_opts);
+#endif
+
+ if (res != 0) {
+ erts_free(ERTS_ALC_T_DRV_TID, dtid);
+ return res;
+ }
+
+ *tid = (ErlDrvTid) dtid;
+ return 0;
+#else
+ return ENOTSUP;
+#endif
+}
+
+ErlDrvTid
+erl_drv_thread_self(void)
+{
+#ifdef USE_THREADS
+ struct ErlDrvTid_ *dtid = ethr_tsd_get(tid_key);
+ if (!dtid) {
+ int res;
+ /* This is a thread not spawned by this interface. thread_exit_handler()
+ will clean it up when it terminates. */
+ dtid = erts_alloc(ERTS_ALC_T_DRV_TID, sizeof(struct ErlDrvTid_));
+ dtid->drv_thr = 0; /* Not a driver thread */
+ dtid->tid = ethr_self();
+ dtid->func = NULL;
+ dtid->arg = NULL;
+ dtid->tsd = NULL;
+ dtid->tsd_len = 0;
+ dtid->name = no_name;
+ res = ethr_tsd_set(tid_key, (void *) dtid);
+ if (res != 0)
+ fatal_error(res, "erl_drv_thread_self()");
+ }
+ return (ErlDrvTid) dtid;
+#else
+ return (ErlDrvTid) NULL;
+#endif
+}
+
+int
+erl_drv_equal_tids(ErlDrvTid tid1, ErlDrvTid tid2)
+{
+#ifdef USE_THREADS
+ int res;
+ struct ErlDrvTid_ *dtid1 = (struct ErlDrvTid_ *) tid1;
+ struct ErlDrvTid_ *dtid2 = (struct ErlDrvTid_ *) tid2;
+ if (!dtid1 || !dtid2)
+ fatal_error(EINVAL, "erl_drv_equal_tids()");
+
+ res = dtid1 == dtid2;
+
+ ASSERT(res
+ ? ethr_equal_tids(dtid1->tid, dtid2->tid)
+ : !ethr_equal_tids(dtid1->tid, dtid2->tid));
+
+ return res;
+#else
+ return 1;
+#endif
+}
+
+void
+erl_drv_thread_exit(void *res)
+{
+#ifdef USE_THREADS
+ struct ErlDrvTid_ *dtid = ethr_tsd_get(tid_key);
+ if (dtid && dtid->drv_thr) {
+ ethr_thr_exit(res);
+ fatal_error(0, "erl_drv_thread_exit()");
+ }
+#endif
+ fatal_error(EACCES, "erl_drv_thread_exit()");
+}
+
+int
+erl_drv_thread_join(ErlDrvTid tid, void **respp)
+{
+#ifdef USE_THREADS
+ int res;
+ struct ErlDrvTid_ *dtid = (struct ErlDrvTid_ *) tid;
+
+ ASSERT(dtid);
+
+ if (!dtid->drv_thr)
+ return EINVAL;
+
+ res = ethr_thr_join(dtid->tid, respp);
+ if (res == 0)
+ erts_free(ERTS_ALC_T_DRV_TID, dtid);
+ return res;
+#else
+ return ENOTSUP;
+#endif
+}
+
diff --git a/erts/emulator/beam/erl_fun.c b/erts/emulator/beam/erl_fun.c
new file mode 100644
index 0000000000..79e844b315
--- /dev/null
+++ b/erts/emulator/beam/erl_fun.c
@@ -0,0 +1,315 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2000-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+
+#include "sys.h"
+#include "erl_vm.h"
+#include "global.h"
+#include "erl_fun.h"
+#include "hash.h"
+
+static Hash erts_fun_table;
+
+#include "erl_smp.h"
+
+static erts_smp_rwmtx_t erts_fun_table_lock;
+
+#define erts_fun_read_lock() erts_smp_rwmtx_rlock(&erts_fun_table_lock)
+#define erts_fun_read_unlock() erts_smp_rwmtx_runlock(&erts_fun_table_lock)
+#define erts_fun_write_lock() erts_smp_rwmtx_rwlock(&erts_fun_table_lock)
+#define erts_fun_write_unlock() erts_smp_rwmtx_rwunlock(&erts_fun_table_lock)
+#define erts_fun_init_lock() erts_smp_rwmtx_init(&erts_fun_table_lock, \
+ "fun_tab")
+
+static HashValue fun_hash(ErlFunEntry* obj);
+static int fun_cmp(ErlFunEntry* obj1, ErlFunEntry* obj2);
+static ErlFunEntry* fun_alloc(ErlFunEntry* template);
+static void fun_free(ErlFunEntry* obj);
+
+/*
+ * The address field of every fun that has no loaded code will point
+ * to unloaded_fun[]. The -1 in unloaded_fun[0] will be interpreted
+ * as an illegal arity when attempting to call a fun.
+ */
+static Eterm unloaded_fun_code[3] = {NIL, -1, 0};
+static Eterm* unloaded_fun = unloaded_fun_code + 2;
+
+void
+erts_init_fun_table(void)
+{
+ HashFunctions f;
+
+ erts_fun_init_lock();
+ f.hash = (H_FUN) fun_hash;
+ f.cmp = (HCMP_FUN) fun_cmp;
+ f.alloc = (HALLOC_FUN) fun_alloc;
+ f.free = (HFREE_FUN) fun_free;
+
+ hash_init(ERTS_ALC_T_FUN_TABLE, &erts_fun_table, "fun_table", 16, f);
+}
+
+void
+erts_fun_info(int to, void *to_arg)
+{
+ int lock = !ERTS_IS_CRASH_DUMPING;
+ if (lock)
+ erts_fun_read_lock();
+ hash_info(to, to_arg, &erts_fun_table);
+ if (lock)
+ erts_fun_read_unlock();
+}
+
+int erts_fun_table_sz(void)
+{
+ int sz;
+ int lock = !ERTS_IS_CRASH_DUMPING;
+ if (lock)
+ erts_fun_read_lock();
+ sz = hash_table_sz(&erts_fun_table);
+ if (lock)
+ erts_fun_read_unlock();
+ return sz;
+}
+
+ErlFunEntry*
+erts_put_fun_entry(Eterm mod, int uniq, int index)
+{
+ ErlFunEntry template;
+ ErlFunEntry* fe;
+ long refc;
+ ASSERT(is_atom(mod));
+ template.old_uniq = uniq;
+ template.old_index = index;
+ template.module = mod;
+ erts_fun_write_lock();
+ fe = (ErlFunEntry *) hash_put(&erts_fun_table, (void*) &template);
+ sys_memset(fe->uniq, 0, sizeof(fe->uniq));
+ fe->index = 0;
+ refc = erts_refc_inctest(&fe->refc, 0);
+ if (refc < 2) /* New or pending delete */
+ erts_refc_inc(&fe->refc, 1);
+ erts_fun_write_unlock();
+ return fe;
+}
+
+ErlFunEntry*
+erts_put_fun_entry2(Eterm mod, int old_uniq, int old_index,
+ byte* uniq, int index, int arity)
+{
+ ErlFunEntry template;
+ ErlFunEntry* fe;
+ long refc;
+
+ ASSERT(is_atom(mod));
+ template.old_uniq = old_uniq;
+ template.old_index = old_index;
+ template.module = mod;
+ erts_fun_write_lock();
+ fe = (ErlFunEntry *) hash_put(&erts_fun_table, (void*) &template);
+ sys_memcpy(fe->uniq, uniq, sizeof(fe->uniq));
+ fe->index = index;
+ fe->arity = arity;
+ refc = erts_refc_inctest(&fe->refc, 0);
+ if (refc < 2) /* New or pending delete */
+ erts_refc_inc(&fe->refc, 1);
+ erts_fun_write_unlock();
+ return fe;
+}
+
+struct my_key {
+ Eterm mod;
+ byte* uniq;
+ int index;
+ ErlFunEntry* fe;
+};
+
+ErlFunEntry*
+erts_get_fun_entry(Eterm mod, int uniq, int index)
+{
+ ErlFunEntry template;
+ ErlFunEntry *ret;
+
+ ASSERT(is_atom(mod));
+ template.old_uniq = uniq;
+ template.old_index = index;
+ template.module = mod;
+ erts_fun_read_lock();
+ ret = (ErlFunEntry *) hash_get(&erts_fun_table, (void*) &template);
+ if (ret) {
+ long refc = erts_refc_inctest(&ret->refc, 1);
+ if (refc < 2) /* Pending delete */
+ erts_refc_inc(&ret->refc, 1);
+ }
+ erts_fun_read_unlock();
+ return ret;
+}
+
+static void
+erts_erase_fun_entry_unlocked(ErlFunEntry* fe)
+{
+ hash_erase(&erts_fun_table, (void *) fe);
+}
+
+void
+erts_erase_fun_entry(ErlFunEntry* fe)
+{
+ erts_fun_write_lock();
+#ifdef ERTS_SMP
+ /*
+ * We have to check refc again since someone might have looked up
+ * the fun entry and incremented refc after last check.
+ */
+ if (erts_refc_dectest(&fe->refc, -1) <= 0)
+#endif
+ {
+ if (fe->address != unloaded_fun)
+ erl_exit(1,
+ "Internal error: "
+ "Invalid reference count found on #Fun<%T.%d.%d>: "
+ " About to erase fun still referred by code.\n",
+ fe->module, fe->old_index, fe->old_uniq);
+ erts_erase_fun_entry_unlocked(fe);
+ }
+ erts_fun_write_unlock();
+}
+
+#ifndef HYBRID /* FIND ME! */
+void
+erts_cleanup_funs(ErlFunThing* funp)
+{
+ while (funp) {
+ ErlFunEntry* fe = funp->fe;
+ if (erts_refc_dectest(&fe->refc, 0) == 0) {
+ erts_erase_fun_entry(fe);
+ }
+ funp = funp->next;
+ }
+}
+#endif
+
+void
+erts_cleanup_funs_on_purge(Eterm* start, Eterm* end)
+{
+ int limit;
+ HashBucket** bucket;
+ ErlFunEntry* to_delete = NULL;
+ int i;
+
+ erts_fun_write_lock();
+ limit = erts_fun_table.size;
+ bucket = erts_fun_table.bucket;
+ for (i = 0; i < limit; i++) {
+ HashBucket* b = bucket[i];
+
+ while (b) {
+ ErlFunEntry* fe = (ErlFunEntry *) b;
+ Eterm* addr = fe->address;
+
+ if (start <= addr && addr < end) {
+ fe->address = unloaded_fun;
+ if (erts_refc_dectest(&fe->refc, 0) == 0) {
+ fe->address = (void *) to_delete;
+ to_delete = fe;
+ }
+ }
+ b = b->next;
+ }
+ }
+
+ while (to_delete != NULL) {
+ ErlFunEntry* next = (ErlFunEntry *) to_delete->address;
+ erts_erase_fun_entry_unlocked(to_delete);
+ to_delete = next;
+ }
+ erts_fun_write_unlock();
+}
+
+void
+erts_dump_fun_entries(int to, void *to_arg)
+{
+ int limit;
+ HashBucket** bucket;
+ int i;
+ int lock = !ERTS_IS_CRASH_DUMPING;
+
+
+ if (lock)
+ erts_fun_read_lock();
+ limit = erts_fun_table.size;
+ bucket = erts_fun_table.bucket;
+ for (i = 0; i < limit; i++) {
+ HashBucket* b = bucket[i];
+
+ while (b) {
+ ErlFunEntry* fe = (ErlFunEntry *) b;
+ erts_print(to, to_arg, "=fun\n");
+ erts_print(to, to_arg, "Module: %T\n", fe->module);
+ erts_print(to, to_arg, "Uniq: %d\n", fe->old_uniq);
+ erts_print(to, to_arg, "Index: %d\n",fe->old_index);
+ erts_print(to, to_arg, "Address: %p\n", fe->address);
+#ifdef HIPE
+ erts_print(to, to_arg, "Native_address: %p\n", fe->native_address);
+#endif
+ erts_print(to, to_arg, "Refc: %d\n", erts_refc_read(&fe->refc, 1));
+ b = b->next;
+ }
+ }
+ if (lock)
+ erts_fun_read_unlock();
+}
+
+static HashValue
+fun_hash(ErlFunEntry* obj)
+{
+ return (HashValue) (obj->old_uniq ^ obj->old_index ^ atom_val(obj->module));
+}
+
+static int
+fun_cmp(ErlFunEntry* obj1, ErlFunEntry* obj2)
+{
+ return !(obj1->module == obj2->module &&
+ obj1->old_uniq == obj2->old_uniq &&
+ obj1->old_index == obj2->old_index);
+}
+
+static ErlFunEntry*
+fun_alloc(ErlFunEntry* template)
+{
+ ErlFunEntry* obj = (ErlFunEntry *) erts_alloc(ERTS_ALC_T_FUN_ENTRY,
+ sizeof(ErlFunEntry));
+
+ obj->old_uniq = template->old_uniq;
+ obj->old_index = template->old_index;
+ obj->module = template->module;
+ erts_refc_init(&obj->refc, -1);
+ obj->address = unloaded_fun;
+#ifdef HIPE
+ obj->native_address = NULL;
+#endif
+ return obj;
+}
+
+static void
+fun_free(ErlFunEntry* obj)
+{
+ erts_free(ERTS_ALC_T_FUN_ENTRY, (void *) obj);
+}
diff --git a/erts/emulator/beam/erl_fun.h b/erts/emulator/beam/erl_fun.h
new file mode 100644
index 0000000000..fb5e75649b
--- /dev/null
+++ b/erts/emulator/beam/erl_fun.h
@@ -0,0 +1,92 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2000-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+#ifndef __ERLFUNTABLE_H__
+#define __ERLFUNTABLE_H__
+
+#include "erl_smp.h"
+
+/*
+ * Fun entry.
+ */
+
+typedef struct erl_fun_entry {
+ HashBucket bucket; /* MUST BE LOCATED AT TOP OF STRUCT!!! */
+
+ byte uniq[16]; /* MD5 for module. */
+ int index; /* New style index. */
+ int old_uniq; /* Unique number (old_style) */
+ int old_index; /* Old style index */
+ Eterm* address; /* Pointer to code for fun */
+
+#ifdef HIPE
+ Eterm* native_address; /* Native entry code for fun. */
+#endif
+
+ Uint arity; /* The arity of the fun. */
+ Eterm module; /* Tagged atom for module. */
+ erts_refc_t refc; /* Reference count: One for code + one for each
+ fun object in each process. */
+} ErlFunEntry;
+
+/*
+ * This structure represents a 'fun' (lambda). It is stored on
+ * process heaps. It has variable size depending on the size
+ * of the environment.
+ */
+
+typedef struct erl_fun_thing {
+ Eterm thing_word; /* Subtag FUN_SUBTAG. */
+#ifndef HYBRID /* FIND ME! */
+ struct erl_fun_thing* next; /* Next fun in mso list. */
+#endif
+ ErlFunEntry* fe; /* Pointer to fun entry. */
+#ifdef HIPE
+ Eterm* native_address; /* Native code for the fun. */
+#endif
+ Uint arity; /* The arity of the fun. */
+ Uint num_free; /* Number of free variables (in env). */
+ /* -- The following may be compound Erlang terms ---------------------- */
+ Eterm creator; /* Pid of creator process (contains node). */
+ Eterm env[1]; /* Environment (free variables). */
+} ErlFunThing;
+
+/* ERL_FUN_SIZE does _not_ include space for the environment */
+#define ERL_FUN_SIZE ((sizeof(ErlFunThing)/sizeof(Eterm))-1)
+
+void erts_init_fun_table(void);
+void erts_fun_info(int, void *);
+int erts_fun_table_sz(void);
+
+ErlFunEntry* erts_put_fun_entry(Eterm mod, int uniq, int index);
+ErlFunEntry* erts_get_fun_entry(Eterm mod, int uniq, int index);
+
+ErlFunEntry* erts_put_fun_entry2(Eterm mod, int old_uniq, int old_index,
+ byte* uniq, int index, int arity);
+ErlFunEntry* erts_get_fun_entry2(Eterm mod, int old_uniq, int old_index,
+ byte* uniq, int index, int arity);
+
+void erts_erase_fun_entry(ErlFunEntry* fe);
+#ifndef HYBRID /* FIND ME! */
+void erts_cleanup_funs(ErlFunThing* funp);
+#endif
+void erts_cleanup_funs_on_purge(Eterm* start, Eterm* end);
+void erts_dump_fun_entries(int, void *);
+
+#endif
diff --git a/erts/emulator/beam/erl_gc.c b/erts/emulator/beam/erl_gc.c
new file mode 100644
index 0000000000..6945317e65
--- /dev/null
+++ b/erts/emulator/beam/erl_gc.c
@@ -0,0 +1,2690 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2002-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+
+#include "sys.h"
+#include "erl_vm.h"
+#include "global.h"
+#include "erl_process.h"
+#include "erl_db.h"
+#include "beam_catches.h"
+#include "erl_binary.h"
+#include "erl_bits.h"
+#include "error.h"
+#include "big.h"
+#include "erl_gc.h"
+#if HIPE
+#include "hipe_stack.h"
+#endif
+
+#define ERTS_INACT_WR_PB_LEAVE_MUCH_LIMIT 1
+#define ERTS_INACT_WR_PB_LEAVE_MUCH_PERCENTAGE 20
+#define ERTS_INACT_WR_PB_LEAVE_LIMIT 10
+#define ERTS_INACT_WR_PB_LEAVE_PERCENTAGE 10
+
+/*
+ * Returns number of elements in an array.
+ */
+#define ALENGTH(a) (sizeof(a)/sizeof(a[0]))
+
+static erts_smp_spinlock_t info_lck;
+static Uint garbage_cols; /* no of garbage collections */
+static Uint reclaimed; /* no of words reclaimed in GCs */
+
+# define STACK_SZ_ON_HEAP(p) ((p)->hend - (p)->stop)
+# define OverRunCheck(P) \
+ if ((P)->stop < (P)->htop) { \
+	erts_fprintf(stderr, "hend=%p\n", (P)->hend); \
+	erts_fprintf(stderr, "stop=%p\n", (P)->stop); \
+	erts_fprintf(stderr, "htop=%p\n", (P)->htop); \
+	erts_fprintf(stderr, "heap=%p\n", (P)->heap); \
+ erl_exit(ERTS_ABORT_EXIT, "%s, line %d: %T: Overrun stack and heap\n", \
+ __FILE__,__LINE__,(P)->id); \
+ }
+
+#ifdef DEBUG
+#define ErtsGcQuickSanityCheck(P) \
+do { \
+ ASSERT((P)->heap < (P)->hend); \
+ ASSERT((P)->heap_sz == (P)->hend - (P)->heap); \
+ ASSERT((P)->heap <= (P)->htop && (P)->htop <= (P)->hend); \
+ ASSERT((P)->heap <= (P)->stop && (P)->stop <= (P)->hend); \
+ ASSERT((P)->heap <= (P)->high_water && (P)->high_water <= (P)->hend);\
+ OverRunCheck((P)); \
+} while (0)
+#else
+#define ErtsGcQuickSanityCheck(P) \
+do { \
+ OverRunCheck((P)); \
+} while (0)
+#endif
+/*
+ * This structure describes the rootset for the GC.
+ */
+typedef struct roots {
+ Eterm* v; /* Pointers to vectors with terms to GC
+ * (e.g. the stack).
+ */
+ Uint sz; /* Size of each vector. */
+} Roots;
+
+typedef struct {
+ Roots def[32]; /* Default storage. */
+ Roots* roots; /* Pointer to root set array. */
+ Uint size; /* Storage size. */
+ int num_roots; /* Number of root arrays. */
+} Rootset;
+
+static Uint setup_rootset(Process*, Eterm*, int, Rootset*);
+static void cleanup_rootset(Rootset *rootset);
+static Uint combined_message_size(Process* p);
+static void remove_message_buffers(Process* p);
+static int major_collection(Process* p, int need, Eterm* objv, int nobj, Uint *recl);
+static int minor_collection(Process* p, int need, Eterm* objv, int nobj, Uint *recl);
+static void do_minor(Process *p, int new_sz, Eterm* objv, int nobj);
+static Eterm* sweep_rootset(Rootset *rootset, Eterm* htop, char* src, Uint src_size);
+static Eterm* sweep_one_area(Eterm* n_hp, Eterm* n_htop, char* src, Uint src_size);
+static Eterm* sweep_one_heap(Eterm* heap_ptr, Eterm* heap_end, Eterm* htop,
+ char* src, Uint src_size);
+static Eterm* collect_heap_frags(Process* p, Eterm* heap,
+ Eterm* htop, Eterm* objv, int nobj);
+static Uint adjust_after_fullsweep(Process *p, int size_before,
+ int need, Eterm *objv, int nobj);
+static void shrink_new_heap(Process *p, Uint new_sz, Eterm *objv, int nobj);
+static void grow_new_heap(Process *p, Uint new_sz, Eterm* objv, int nobj);
+static void sweep_proc_bins(Process *p, int fullsweep);
+static void sweep_proc_funs(Process *p, int fullsweep);
+static void sweep_proc_externals(Process *p, int fullsweep);
+static void offset_heap(Eterm* hp, Uint sz, Sint offs, char* area, Uint area_size);
+static void offset_heap_ptr(Eterm* hp, Uint sz, Sint offs, char* area, Uint area_size);
+static void offset_rootset(Process *p, Sint offs, char* area, Uint area_size,
+ Eterm* objv, int nobj);
+static void offset_off_heap(Process* p, Sint offs, char* area, Uint area_size);
+static void offset_mqueue(Process *p, Sint offs, char* area, Uint area_size);
+
+#ifdef HARDDEBUG
+static void disallow_heap_frag_ref_in_heap(Process* p);
+static void disallow_heap_frag_ref_in_old_heap(Process* p);
+static void disallow_heap_frag_ref(Process* p, Eterm* n_htop, Eterm* objv, int nobj);
+#endif
+
+#ifdef ARCH_64
+# define MAX_HEAP_SIZES 154
+#else
+# define MAX_HEAP_SIZES 55
+#endif
+
+static Sint heap_sizes[MAX_HEAP_SIZES]; /* Suitable heap sizes. */
+static int num_heap_sizes; /* Number of heap sizes. */
+
+Uint erts_test_long_gc_sleep; /* Only used for testing... */
+
+/*
+ * Initialize GC global data.
+ */
+void
+erts_init_gc(void)
+{
+ int i = 0;
+
+ erts_smp_spinlock_init(&info_lck, "gc_info");
+ garbage_cols = 0;
+ reclaimed = 0;
+ erts_test_long_gc_sleep = 0;
+
+ /*
+ * Heap sizes start growing in a Fibonacci sequence.
+ *
+ * Fibonacci growth is not really acceptable for very large heaps; for
+ * example, fib(35) == 14 meg whereas fib(36) == 24 meg, and we really
+ * don't want that kind of growth when the heaps are that big.
+ */
+
+ heap_sizes[0] = 34;
+ heap_sizes[1] = 55;
+ for (i = 2; i < 23; i++) {
+ heap_sizes[i] = heap_sizes[i-1] + heap_sizes[i-2];
+ }
+
+ /* At 1.3 mega words heap, we start to slow down. */
+ for (i = 23; i < ALENGTH(heap_sizes); i++) {
+ heap_sizes[i] = 5*(heap_sizes[i-1]/4);
+ if (heap_sizes[i] < 0) {
+ /* Size turned negative. Discard this last size. */
+ i--;
+ break;
+ }
+ }
+ num_heap_sizes = i;
+}
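+
+/*
+ * For illustration (the values follow from the loops above): the table
+ * begins 34, 55, 89, 144, 233, 377, 610, 987, 1597, ... and reaches
+ * heap_sizes[22] == 1346269, the "1.3 mega words" mentioned above.
+ * From there on each entry is 25% larger than its predecessor, e.g.
+ * heap_sizes[23] == 5*(1346269/4) == 1682835.
+ */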
+
+/*
+ * Find the next heap size equal to or greater than the given size (if offset == 0).
+ *
+ * If offset is 1, the next higher heap size is returned (always greater than size).
+ */
+Uint
+erts_next_heap_size(Uint size, Uint offset)
+{
+ if (size < heap_sizes[0]) {
+ return heap_sizes[0];
+ } else {
+ Sint* low = heap_sizes;
+ Sint* high = heap_sizes + num_heap_sizes;
+ Sint* mid;
+
+ while (low < high) {
+ mid = low + (high-low) / 2;
+ if (size < mid[0]) {
+ high = mid;
+ } else if (size == mid[0]) {
+ ASSERT(mid+offset-heap_sizes < num_heap_sizes);
+ return mid[offset];
+ } else if (size < mid[1]) {
+ ASSERT(mid[0] < size && size <= mid[1]);
+ ASSERT(mid+offset-heap_sizes < num_heap_sizes);
+ return mid[offset+1];
+ } else {
+ low = mid + 1;
+ }
+ }
+ erl_exit(1, "no next heap size found: %d, offset %d\n", size, offset);
+ }
+ return 0;
+}
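+
+/*
+ * Example, using the size table built in erts_init_gc()
+ * (..., 610, 987, 1597, ...):
+ *
+ *   erts_next_heap_size(1000, 0)  =>  1597   (smallest size >= 1000)
+ *   erts_next_heap_size(987, 0)   =>  987    (exact match)
+ *   erts_next_heap_size(987, 1)   =>  1597   (offset 1: one size up)
+ */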
+/*
+ * Return the next heap size to use. Make sure we never return
+ * a smaller heap size than the minimum heap size for the process.
+ * (Use of the erlang:hibernate/3 BIF could have shrunk the
+ * heap below the minimum heap size.)
+ */
+static Uint
+next_heap_size(Process* p, Uint size, Uint offset)
+{
+ size = erts_next_heap_size(size, offset);
+ return size < p->min_heap_size ? p->min_heap_size : size;
+}
+
+Eterm
+erts_heap_sizes(Process* p)
+{
+ int i;
+ int n = 0;
+ int big = 0;
+ Eterm res = NIL;
+ Eterm* hp;
+ Eterm* bigp;
+
+ for (i = num_heap_sizes-1; i >= 0; i--) {
+ n += 2;
+ if (!MY_IS_SSMALL(heap_sizes[i])) {
+ big += BIG_UINT_HEAP_SIZE;
+ }
+ }
+
+ /*
+ * We store all big numbers first on the heap, followed
+ * by all the cons cells.
+ */
+ bigp = HAlloc(p, n+big);
+ hp = bigp+big;
+ for (i = num_heap_sizes-1; i >= 0; i--) {
+ Eterm num;
+ Sint sz = heap_sizes[i];
+
+ if (MY_IS_SSMALL(sz)) {
+ num = make_small(sz);
+ } else {
+ num = uint_to_big(sz, bigp);
+ bigp += BIG_UINT_HEAP_SIZE;
+ }
+ res = CONS(hp, num, res);
+ hp += 2;
+ }
+ return res;
+}
+
+void
+erts_gc_info(ErtsGCInfo *gcip)
+{
+ if (gcip) {
+ erts_smp_spin_lock(&info_lck);
+ gcip->garbage_collections = garbage_cols;
+ gcip->reclaimed = reclaimed;
+ erts_smp_spin_unlock(&info_lck);
+ }
+}
+
+void
+erts_offset_heap(Eterm* hp, Uint sz, Sint offs, Eterm* low, Eterm* high)
+{
+ offset_heap(hp, sz, offs, (char*) low, ((char *)high)-((char *)low));
+}
+
+void
+erts_offset_heap_ptr(Eterm* hp, Uint sz, Sint offs,
+ Eterm* low, Eterm* high)
+{
+ offset_heap_ptr(hp, sz, offs, (char *) low, ((char *)high)-((char *)low));
+}
+
+#define ptr_within(ptr, low, high) ((ptr) < (high) && (ptr) >= (low))
+
+void
+erts_offset_off_heap(ErlOffHeap *ohp, Sint offs, Eterm* low, Eterm* high)
+{
+ if (ohp->mso && ptr_within((Eterm *)ohp->mso, low, high)) {
+ Eterm** uptr = (Eterm**) (void *) &ohp->mso;
+ *uptr += offs;
+ }
+
+#ifndef HYBRID /* FIND ME! */
+ if (ohp->funs && ptr_within((Eterm *)ohp->funs, low, high)) {
+ Eterm** uptr = (Eterm**) (void *) &ohp->funs;
+ *uptr += offs;
+ }
+#endif
+
+ if (ohp->externals && ptr_within((Eterm *)ohp->externals, low, high)) {
+ Eterm** uptr = (Eterm**) (void *) &ohp->externals;
+ *uptr += offs;
+ }
+}
+#undef ptr_within
+
+Eterm
+erts_gc_after_bif_call(Process* p, Eterm result, Eterm* regs, Uint arity)
+{
+ int cost;
+
+ if (is_non_value(result)) {
+ if (p->freason == TRAP) {
+ cost = erts_garbage_collect(p, 0, p->def_arg_reg, p->arity);
+ } else {
+ cost = erts_garbage_collect(p, 0, regs, arity);
+ }
+ } else {
+ Eterm val[1];
+
+ val[0] = result;
+ cost = erts_garbage_collect(p, 0, val, 1);
+ result = val[0];
+ }
+ BUMP_REDS(p, cost);
+ return result;
+}
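+
+/*
+ * Note that the result term is passed as a one-element root array
+ * above, so that it survives the collection and the (possibly moved)
+ * copy is what gets returned. A caller-side sketch, where
+ * my_term_building_bif(), reg and live are illustrative names only:
+ *
+ *   Eterm res = my_term_building_bif(p);     // heap may now be full
+ *   res = erts_gc_after_bif_call(p, res, reg, live);
+ *   // res is once again a valid term on the collected heap
+ */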
+
+/*
+ * Garbage collect a process.
+ *
+ * p: Pointer to the process structure.
+ * need: Number of Eterm words needed on the heap.
+ * objv: Array of terms to add to the rootset, i.e. terms to preserve.
+ * nobj: Number of objects in objv.
+ */
+int
+erts_garbage_collect(Process* p, int need, Eterm* objv, int nobj)
+{
+ Uint reclaimed_now = 0;
+ int done = 0;
+ Uint ms1, s1, us1;
+
+ if (IS_TRACED_FL(p, F_TRACE_GC)) {
+ trace_gc(p, am_gc_start);
+ }
+
+ erts_smp_proc_lock(p, ERTS_PROC_LOCK_STATUS);
+ p->gcstatus = p->status;
+ p->status = P_GARBING;
+ if (erts_system_monitor_long_gc != 0) {
+ get_now(&ms1, &s1, &us1);
+ }
+ erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
+
+ erts_smp_locked_activity_begin(ERTS_ACTIVITY_GC);
+
+ ERTS_CHK_OFFHEAP(p);
+
+ ErtsGcQuickSanityCheck(p);
+ if (GEN_GCS(p) >= MAX_GEN_GCS(p)) {
+ FLAGS(p) |= F_NEED_FULLSWEEP;
+ }
+
+ /*
+ * Test which type of GC to do.
+ */
+ while (!done) {
+ if ((FLAGS(p) & F_NEED_FULLSWEEP) != 0) {
+ done = major_collection(p, need, objv, nobj, &reclaimed_now);
+ } else {
+ done = minor_collection(p, need, objv, nobj, &reclaimed_now);
+ }
+ }
+
+ /*
+ * Finish.
+ */
+
+ ERTS_CHK_OFFHEAP(p);
+
+ ErtsGcQuickSanityCheck(p);
+
+ erts_smp_proc_lock(p, ERTS_PROC_LOCK_STATUS);
+ p->status = p->gcstatus;
+ erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
+ if (IS_TRACED_FL(p, F_TRACE_GC)) {
+ trace_gc(p, am_gc_end);
+ }
+
+ erts_smp_locked_activity_end(ERTS_ACTIVITY_GC);
+
+ if (erts_system_monitor_long_gc != 0) {
+ Uint ms2, s2, us2;
+ Sint t;
+ if (erts_test_long_gc_sleep)
+ while (0 != erts_milli_sleep(erts_test_long_gc_sleep));
+ get_now(&ms2, &s2, &us2);
+ t = ms2 - ms1;
+ t = t*1000000 + s2 - s1;
+ t = t*1000 + ((Sint) (us2 - us1))/1000;
+ if (t > 0 && (Uint)t > erts_system_monitor_long_gc) {
+ monitor_long_gc(p, t);
+ }
+ }
+ if (erts_system_monitor_large_heap != 0) {
+ Uint size = HEAP_SIZE(p);
+ size += OLD_HEAP(p) ? OLD_HEND(p) - OLD_HEAP(p) : 0;
+ if (size >= erts_system_monitor_large_heap)
+ monitor_large_heap(p);
+ }
+
+ erts_smp_spin_lock(&info_lck);
+ garbage_cols++;
+ reclaimed += reclaimed_now;
+ erts_smp_spin_unlock(&info_lck);
+
+ FLAGS(p) &= ~F_FORCE_GC;
+
+#ifdef CHECK_FOR_HOLES
+ /*
+ * We intentionally do not rescan the areas copied by the GC.
+ * We trust the GC not to leave any holes.
+ */
+ p->last_htop = p->htop;
+ p->last_mbuf = 0;
+#endif
+
+#ifdef DEBUG
+ /*
+ * The scanning for pointers from the old_heap into the new_heap or
+ * heap fragments turned out to be costly, so we remember how far we
+ * have scanned this time and will start scanning there next time.
+ * (We will not detect wild writes into the old heap, or modifications
+ * of the old heap in-between garbage collections.)
+ */
+ p->last_old_htop = p->old_htop;
+#endif
+
+ return ((int) (HEAP_TOP(p) - HEAP_START(p)) / 10);
+}
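+
+/*
+ * The value returned above is used as a reduction cost: one reduction
+ * per ten live words on the new heap. A process with, say, 5000 live
+ * words after collection is thus charged 500 reductions (see the
+ * BUMP_REDS() call in erts_gc_after_bif_call()).
+ */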
+
+/*
+ * Place all living data on a new heap; deallocate any old heap.
+ * Meant to be used by hibernate/3.
+ */
+void
+erts_garbage_collect_hibernate(Process* p)
+{
+ Uint heap_size;
+ Eterm* heap;
+ Eterm* htop;
+ Rootset rootset;
+ int n;
+ char* src;
+ Uint src_size;
+ Uint actual_size;
+ char* area;
+ Uint area_size;
+ Sint offs;
+
+ /*
+ * Preliminaries.
+ */
+ erts_smp_proc_lock(p, ERTS_PROC_LOCK_STATUS);
+ p->gcstatus = p->status;
+ p->status = P_GARBING;
+ erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
+ erts_smp_locked_activity_begin(ERTS_ACTIVITY_GC);
+ ErtsGcQuickSanityCheck(p);
+ ASSERT(p->mbuf_sz == 0);
+ ASSERT(p->mbuf == 0);
+ ASSERT(p->stop == p->hend); /* Stack must be empty. */
+
+ /*
+ * Do it.
+ */
+
+
+ heap_size = p->heap_sz + (p->old_htop - p->old_heap);
+ heap = (Eterm*) ERTS_HEAP_ALLOC(ERTS_ALC_T_TMP_HEAP,
+ sizeof(Eterm)*heap_size);
+ htop = heap;
+
+ n = setup_rootset(p, p->arg_reg, p->arity, &rootset);
+
+ src = (char *) p->heap;
+ src_size = (char *) p->htop - src;
+ htop = sweep_rootset(&rootset, htop, src, src_size);
+ htop = sweep_one_area(heap, htop, src, src_size);
+
+ if (p->old_heap) {
+ src = (char *) p->old_heap;
+ src_size = (char *) p->old_htop - src;
+ htop = sweep_rootset(&rootset, htop, src, src_size);
+ htop = sweep_one_area(heap, htop, src, src_size);
+ }
+
+ cleanup_rootset(&rootset);
+
+ if (MSO(p).mso) {
+ sweep_proc_bins(p, 1);
+ }
+ if (MSO(p).funs) {
+ sweep_proc_funs(p, 1);
+ }
+ if (MSO(p).externals) {
+ sweep_proc_externals(p, 1);
+ }
+
+ /*
+ * Update all pointers.
+ */
+ ERTS_HEAP_FREE(ERTS_ALC_T_HEAP,
+ (void*)HEAP_START(p),
+ HEAP_SIZE(p) * sizeof(Eterm));
+ if (p->old_heap) {
+ ERTS_HEAP_FREE(ERTS_ALC_T_OLD_HEAP,
+ (void*)p->old_heap,
+ (p->old_hend - p->old_heap) * sizeof(Eterm));
+ p->old_heap = p->old_htop = p->old_hend = 0;
+ }
+
+ p->heap = heap;
+ p->high_water = htop;
+ p->htop = htop;
+ p->hend = p->heap + heap_size;
+ p->stop = p->hend;
+ p->heap_sz = heap_size;
+
+ heap_size = actual_size = p->htop - p->heap;
+ if (heap_size == 0) {
+ heap_size = 1; /* We want a heap... */
+ }
+
+ FLAGS(p) &= ~F_FORCE_GC;
+
+ /*
+ * Move the heap to its final destination.
+ *
+     * IMPORTANT: We garbage collect to a temporary heap and then
+     * copy the result to a newly allocated heap of exact size.
+ * This is intentional and important! Garbage collecting as usual
+ * and then shrinking the heap by reallocating it caused serious
+ * fragmentation problems when large amounts of processes were
+ * hibernated.
+ */
+
+ ASSERT(p->hend - p->stop == 0); /* Empty stack */
+ ASSERT(actual_size < p->heap_sz);
+
+ heap = ERTS_HEAP_ALLOC(ERTS_ALC_T_HEAP, sizeof(Eterm)*heap_size);
+ sys_memcpy((void *) heap, (void *) p->heap, actual_size*sizeof(Eterm));
+ ERTS_HEAP_FREE(ERTS_ALC_T_TMP_HEAP, p->heap, p->heap_sz*sizeof(Eterm));
+
+ p->stop = p->hend = heap + heap_size;
+
+ offs = heap - p->heap;
+ area = (char *) p->heap;
+ area_size = ((char *) p->htop) - area;
+ offset_heap(heap, actual_size, offs, area, area_size);
+ p->high_water = heap + (p->high_water - p->heap);
+ offset_rootset(p, offs, area, area_size, p->arg_reg, p->arity);
+ p->htop = heap + actual_size;
+ p->heap = heap;
+ p->heap_sz = heap_size;
+
+
+#ifdef CHECK_FOR_HOLES
+ p->last_htop = p->htop;
+ p->last_mbuf = 0;
+#endif
+#ifdef DEBUG
+ p->last_old_htop = NULL;
+#endif
+
+ /*
+ * Finishing.
+ */
+
+ ErtsGcQuickSanityCheck(p);
+
+ erts_smp_proc_lock(p, ERTS_PROC_LOCK_STATUS);
+ p->status = p->gcstatus;
+ erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
+ erts_smp_locked_activity_end(ERTS_ACTIVITY_GC);
+}
+
+
+void
+erts_garbage_collect_literals(Process* p, Eterm* literals, Uint lit_size)
+{
+ Uint byte_lit_size = sizeof(Eterm)*lit_size;
+ Uint old_heap_size;
+ Eterm* temp_lit;
+ Sint offs;
+ Rootset rootset; /* Rootset for GC (stack, dictionary, etc). */
+ Roots* roots;
+ char* area;
+ Uint area_size;
+ Eterm* old_htop;
+ int n;
+
+ /*
+ * Set GC state.
+ */
+ erts_smp_proc_lock(p, ERTS_PROC_LOCK_STATUS);
+ p->gcstatus = p->status;
+ p->status = P_GARBING;
+ erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
+ erts_smp_locked_activity_begin(ERTS_ACTIVITY_GC);
+
+ /*
+ * We assume that the caller has already done a major collection
+ * (which has discarded the old heap), so that we don't have to cope
+     * with pointers to literals on the old heap. We will now allocate
+ * an old heap to contain the literals.
+ */
+
+ ASSERT(p->old_heap == 0); /* Must NOT have an old heap yet. */
+ old_heap_size = erts_next_heap_size(lit_size, 0);
+ p->old_heap = p->old_htop = (Eterm*) ERTS_HEAP_ALLOC(ERTS_ALC_T_OLD_HEAP,
+ sizeof(Eterm)*old_heap_size);
+ p->old_hend = p->old_heap + old_heap_size;
+
+ /*
+ * We soon want to garbage collect the literals. But since a GC is
+ * destructive (MOVED markers are written), we must copy the literals
+ * to a temporary area and change all references to literals.
+ */
+ temp_lit = (Eterm *) erts_alloc(ERTS_ALC_T_TMP, byte_lit_size);
+ sys_memcpy(temp_lit, literals, byte_lit_size);
+ offs = temp_lit - literals;
+ offset_heap(temp_lit, lit_size, offs, (char *) literals, byte_lit_size);
+ offset_heap(p->heap, p->htop - p->heap, offs, (char *) literals, byte_lit_size);
+ offset_rootset(p, offs, (char *) literals, byte_lit_size, p->arg_reg, p->arity);
+
+ /*
+ * Now the literals are placed in memory that is safe to write into,
+ * so now we GC the literals into the old heap. First we go through the
+ * rootset.
+ */
+
+ area = (char *) temp_lit;
+ area_size = byte_lit_size;
+ n = setup_rootset(p, p->arg_reg, p->arity, &rootset);
+ roots = rootset.roots;
+ old_htop = p->old_htop;
+ while (n--) {
+ Eterm* g_ptr = roots->v;
+ Uint g_sz = roots->sz;
+ Eterm* ptr;
+ Eterm val;
+
+ roots++;
+
+ while (g_sz--) {
+ Eterm gval = *g_ptr;
+
+ switch (primary_tag(gval)) {
+ case TAG_PRIMARY_BOXED:
+ ptr = boxed_val(gval);
+ val = *ptr;
+ if (IS_MOVED(val)) {
+ ASSERT(is_boxed(val));
+ *g_ptr++ = val;
+ } else if (in_area(ptr, area, area_size)) {
+ MOVE_BOXED(ptr,val,old_htop,g_ptr++);
+ } else {
+ g_ptr++;
+ }
+ break;
+ case TAG_PRIMARY_LIST:
+ ptr = list_val(gval);
+ val = *ptr;
+ if (is_non_value(val)) { /* Moved */
+ *g_ptr++ = ptr[1];
+ } else if (in_area(ptr, area, area_size)) {
+ MOVE_CONS(ptr,val,old_htop,g_ptr++);
+ } else {
+ g_ptr++;
+ }
+ break;
+ default:
+ g_ptr++;
+ break;
+ }
+ }
+ }
+ ASSERT(p->old_htop <= old_htop && old_htop <= p->old_hend);
+ cleanup_rootset(&rootset);
+
+ /*
+ * Now all references in the rootset to the literals have been updated.
+     * Next, we go through all heaps and update all other references.
+ */
+
+ old_htop = sweep_one_heap(p->heap, p->htop, old_htop, area, area_size);
+ old_htop = sweep_one_area(p->old_heap, old_htop, area, area_size);
+ ASSERT(p->old_htop <= old_htop && old_htop <= p->old_hend);
+ p->old_htop = old_htop;
+
+ /*
+ * We no longer need this temporary area.
+ */
+ erts_free(ERTS_ALC_T_TMP, (void *) temp_lit);
+
+ /*
+ * Restore status.
+ */
+ erts_smp_proc_lock(p, ERTS_PROC_LOCK_STATUS);
+ p->status = p->gcstatus;
+ erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
+ erts_smp_locked_activity_end(ERTS_ACTIVITY_GC);
+}
+
+static int
+minor_collection(Process* p, int need, Eterm* objv, int nobj, Uint *recl)
+{
+ Uint mature = HIGH_WATER(p) - HEAP_START(p);
+
+ /*
+ * Allocate an old heap if we don't have one and if we'll need one.
+ */
+
+ if (OLD_HEAP(p) == NULL && mature != 0) {
+ Eterm* n_old;
+
+ /* Note: We choose a larger heap size than strictly needed,
+ * which seems to reduce the number of fullsweeps.
+ * This improved Estone by more than 1200 estones on my computer
+ * (Ultra Sparc 10).
+ */
+ size_t new_sz = erts_next_heap_size(HEAP_TOP(p) - HEAP_START(p), 1);
+
+ /* Create new, empty old_heap */
+ n_old = (Eterm *) ERTS_HEAP_ALLOC(ERTS_ALC_T_OLD_HEAP,
+ sizeof(Eterm)*new_sz);
+
+ OLD_HEND(p) = n_old + new_sz;
+ OLD_HEAP(p) = OLD_HTOP(p) = n_old;
+ }
+
+ /*
+ * Do a minor collection if there is an old heap and if it
+ * is large enough.
+ */
+
+ if (OLD_HEAP(p) && mature <= OLD_HEND(p) - OLD_HTOP(p)) {
+ ErlMessage *msgp;
+ Uint size_after;
+ Uint need_after;
+ Uint stack_size = STACK_SZ_ON_HEAP(p);
+ Uint fragments = MBUF_SIZE(p) + combined_message_size(p);
+ Uint size_before = fragments + (HEAP_TOP(p) - HEAP_START(p));
+ Uint new_sz = next_heap_size(p, HEAP_SIZE(p) + fragments, 0);
+
+ do_minor(p, new_sz, objv, nobj);
+
+ /*
+	 * Copy newly received messages onto the end of the new heap.
+ */
+ ErtsGcQuickSanityCheck(p);
+ for (msgp = p->msg.first; msgp; msgp = msgp->next) {
+ if (msgp->data.attached) {
+ erts_move_msg_attached_data_to_heap(&p->htop, &p->off_heap, msgp);
+ ErtsGcQuickSanityCheck(p);
+ }
+ }
+ ErtsGcQuickSanityCheck(p);
+
+ GEN_GCS(p)++;
+ size_after = HEAP_TOP(p) - HEAP_START(p);
+ need_after = size_after + need + stack_size;
+ *recl += (size_before - size_after);
+
+ /*
+ * Excessively large heaps should be shrunk, but
+	 * don't even bother on reasonably small heaps.
+ *
+	 * The reason for this is that after tenuring, we often
+	 * use only a really small portion of the new heap; therefore,
+	 * unless the heap size is substantial, we don't want to shrink.
+ */
+
+ if ((HEAP_SIZE(p) > 3000) && (4 * need_after < HEAP_SIZE(p)) &&
+ ((HEAP_SIZE(p) > 8000) ||
+ (HEAP_SIZE(p) > (OLD_HEND(p) - OLD_HEAP(p))))) {
+ Uint wanted = 3 * need_after;
+ Uint old_heap_sz = OLD_HEND(p) - OLD_HEAP(p);
+
+ /*
+ * Additional test to make sure we don't make the heap too small
+ * compared to the size of the older generation heap.
+ */
+ if (wanted*9 < old_heap_sz) {
+ Uint new_wanted = old_heap_sz / 8;
+ if (new_wanted > wanted) {
+ wanted = new_wanted;
+ }
+ }
+
+ if (wanted < MIN_HEAP_SIZE(p)) {
+ wanted = MIN_HEAP_SIZE(p);
+ } else {
+ wanted = next_heap_size(p, wanted, 0);
+ }
+ if (wanted < HEAP_SIZE(p)) {
+ shrink_new_heap(p, wanted, objv, nobj);
+ }
+ ASSERT(HEAP_SIZE(p) == next_heap_size(p, HEAP_SIZE(p), 0));
+ return 1; /* We are done. */
+ }
+
+ if (HEAP_SIZE(p) >= need_after) {
+ /*
+ * The heap size turned out to be just right. We are done.
+ */
+ ASSERT(HEAP_SIZE(p) == next_heap_size(p, HEAP_SIZE(p), 0));
+ return 1;
+ }
+ }
+
+ /*
+ * Still not enough room after minor collection. Must force a major collection.
+ */
+ FLAGS(p) |= F_NEED_FULLSWEEP;
+ return 0;
+}
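+
+/*
+ * A worked example of the shrink heuristic above: with
+ * HEAP_SIZE(p) == 10946 and need_after == 1000, the heap is larger
+ * than 3000 words, 4*1000 < 10946 and 10946 > 8000, so we shrink.
+ * wanted becomes 3*1000 == 3000 (raised to old_heap_sz/8 if the old
+ * heap is more than nine times larger), which next_heap_size() rounds
+ * up to 4181 before shrink_new_heap() is called.
+ */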
+
+/*
+ * HiPE native code stack scanning procedures:
+ * - fullsweep_nstack()
+ * - gensweep_nstack()
+ * - offset_nstack()
+ */
+#if defined(HIPE)
+
+#define GENSWEEP_NSTACK(p,old_htop,n_htop) \
+ do { \
+ Eterm *tmp_old_htop = old_htop; \
+ Eterm *tmp_n_htop = n_htop; \
+ gensweep_nstack((p), &tmp_old_htop, &tmp_n_htop); \
+ old_htop = tmp_old_htop; \
+ n_htop = tmp_n_htop; \
+ } while(0)
+
+/*
+ * offset_nstack() can ignore the descriptor-based traversal the other
+ * nstack procedures use and simply call offset_heap_ptr() instead.
+ * This relies on two facts:
+ * 1. The only live non-Erlang terms on an nstack are return addresses,
+ * and they will be skipped thanks to the low/high range check.
+ * 2. Dead values, even if mistaken for pointers into the low/high area,
+ * can be offset safely since they won't be dereferenced.
+ *
+ * XXX: WARNING: If HiPE starts storing other non-Erlang values on the
+ * nstack, such as floats, then this will have to be changed.
+ */
+#define offset_nstack(p,offs,area,area_size) offset_heap_ptr(hipe_nstack_start((p)),hipe_nstack_used((p)),(offs),(area),(area_size))
+
+#else /* !HIPE */
+
+#define fullsweep_nstack(p,n_htop) (n_htop)
+#define GENSWEEP_NSTACK(p,old_htop,n_htop) do{}while(0)
+#define offset_nstack(p,offs,area,area_size) do{}while(0)
+
+#endif /* HIPE */
+
+static void
+do_minor(Process *p, int new_sz, Eterm* objv, int nobj)
+{
+ Rootset rootset; /* Rootset for GC (stack, dictionary, etc). */
+ Roots* roots;
+ Eterm* n_htop;
+ int n;
+ Eterm* ptr;
+ Eterm val;
+ Eterm gval;
+ char* heap = (char *) HEAP_START(p);
+ Uint heap_size = (char *) HEAP_TOP(p) - heap;
+ Uint mature_size = (char *) HIGH_WATER(p) - heap;
+ Eterm* old_htop = OLD_HTOP(p);
+ Eterm* n_heap;
+
+ n_htop = n_heap = (Eterm*) ERTS_HEAP_ALLOC(ERTS_ALC_T_HEAP,
+ sizeof(Eterm)*new_sz);
+
+ if (MBUF(p) != NULL) {
+ n_htop = collect_heap_frags(p, n_heap, n_htop, objv, nobj);
+ }
+
+ n = setup_rootset(p, objv, nobj, &rootset);
+ roots = rootset.roots;
+
+ GENSWEEP_NSTACK(p, old_htop, n_htop);
+ while (n--) {
+ Eterm* g_ptr = roots->v;
+ Uint g_sz = roots->sz;
+
+ roots++;
+ while (g_sz--) {
+ gval = *g_ptr;
+
+ switch (primary_tag(gval)) {
+
+ case TAG_PRIMARY_BOXED: {
+ ptr = boxed_val(gval);
+ val = *ptr;
+ if (IS_MOVED(val)) {
+ ASSERT(is_boxed(val));
+ *g_ptr++ = val;
+ } else if (in_area(ptr, heap, mature_size)) {
+ MOVE_BOXED(ptr,val,old_htop,g_ptr++);
+ } else if (in_area(ptr, heap, heap_size)) {
+ MOVE_BOXED(ptr,val,n_htop,g_ptr++);
+ } else {
+ g_ptr++;
+ }
+ break;
+ }
+
+ case TAG_PRIMARY_LIST: {
+ ptr = list_val(gval);
+ val = *ptr;
+ if (is_non_value(val)) { /* Moved */
+ *g_ptr++ = ptr[1];
+ } else if (in_area(ptr, heap, mature_size)) {
+ MOVE_CONS(ptr,val,old_htop,g_ptr++);
+ } else if (in_area(ptr, heap, heap_size)) {
+ MOVE_CONS(ptr,val,n_htop,g_ptr++);
+ } else {
+ g_ptr++;
+ }
+ break;
+ }
+
+ default:
+ g_ptr++;
+ break;
+ }
+ }
+ }
+
+ cleanup_rootset(&rootset);
+
+ /*
+ * Now all references in the rootset point to the new heap. However,
+ * most references on the new heap point to the old heap so the next stage
+ * is to scan through the new heap evacuating data from the old heap
+     * until everything has been copied.
+ */
+
+ if (mature_size == 0) {
+ n_htop = sweep_one_area(n_heap, n_htop, heap, heap_size);
+ } else {
+ Eterm* n_hp = n_heap;
+
+ while (n_hp != n_htop) {
+ Eterm* ptr;
+ Eterm val;
+ Eterm gval = *n_hp;
+
+ switch (primary_tag(gval)) {
+ case TAG_PRIMARY_BOXED: {
+ ptr = boxed_val(gval);
+ val = *ptr;
+ if (IS_MOVED(val)) {
+ ASSERT(is_boxed(val));
+ *n_hp++ = val;
+ } else if (in_area(ptr, heap, mature_size)) {
+ MOVE_BOXED(ptr,val,old_htop,n_hp++);
+ } else if (in_area(ptr, heap, heap_size)) {
+ MOVE_BOXED(ptr,val,n_htop,n_hp++);
+ } else {
+ n_hp++;
+ }
+ break;
+ }
+ case TAG_PRIMARY_LIST: {
+ ptr = list_val(gval);
+ val = *ptr;
+ if (is_non_value(val)) {
+ *n_hp++ = ptr[1];
+ } else if (in_area(ptr, heap, mature_size)) {
+ MOVE_CONS(ptr,val,old_htop,n_hp++);
+ } else if (in_area(ptr, heap, heap_size)) {
+ MOVE_CONS(ptr,val,n_htop,n_hp++);
+ } else {
+ n_hp++;
+ }
+ break;
+ }
+ case TAG_PRIMARY_HEADER: {
+ if (!header_is_thing(gval))
+ n_hp++;
+ else {
+ if (header_is_bin_matchstate(gval)) {
+ ErlBinMatchState *ms = (ErlBinMatchState*) n_hp;
+ ErlBinMatchBuffer *mb = &(ms->mb);
+ Eterm* origptr = &(mb->orig);
+ ptr = boxed_val(*origptr);
+ val = *ptr;
+ if (IS_MOVED(val)) {
+ *origptr = val;
+ mb->base = binary_bytes(val);
+ } else if (in_area(ptr, heap, mature_size)) {
+ MOVE_BOXED(ptr,val,old_htop,origptr);
+ mb->base = binary_bytes(mb->orig);
+ } else if (in_area(ptr, heap, heap_size)) {
+ MOVE_BOXED(ptr,val,n_htop,origptr);
+ mb->base = binary_bytes(mb->orig);
+ }
+ }
+ n_hp += (thing_arityval(gval)+1);
+ }
+ break;
+ }
+ default:
+ n_hp++;
+ break;
+ }
+ }
+ }
+
+ /*
+ * And also if we have been tenuring, references on the second generation
+ * may point to the old (soon to be deleted) new_heap.
+ */
+
+ if (OLD_HTOP(p) < old_htop) {
+ old_htop = sweep_one_area(OLD_HTOP(p), old_htop, heap, heap_size);
+ }
+ OLD_HTOP(p) = old_htop;
+ HIGH_WATER(p) = (HEAP_START(p) != HIGH_WATER(p)) ? n_heap : n_htop;
+
+ if (MSO(p).mso) {
+ sweep_proc_bins(p, 0);
+ }
+
+ if (MSO(p).funs) {
+ sweep_proc_funs(p, 0);
+ }
+ if (MSO(p).externals) {
+ sweep_proc_externals(p, 0);
+ }
+
+#ifdef HARDDEBUG
+ /*
+     * Go through the old_heap and try to find references from it into the
+     * old new_heap that has just been evacuated and is about to be freed
+     * (as well as looking for references into heap fragments, of course).
+ */
+ disallow_heap_frag_ref_in_old_heap(p);
+#endif
+
+ /* Copy stack to end of new heap */
+ n = p->hend - p->stop;
+ sys_memcpy(n_heap + new_sz - n, p->stop, n * sizeof(Eterm));
+ p->stop = n_heap + new_sz - n;
+
+ ERTS_HEAP_FREE(ERTS_ALC_T_HEAP,
+ (void*)HEAP_START(p),
+ HEAP_SIZE(p) * sizeof(Eterm));
+ HEAP_START(p) = n_heap;
+ HEAP_TOP(p) = n_htop;
+ HEAP_SIZE(p) = new_sz;
+ HEAP_END(p) = n_heap + new_sz;
+
+#ifdef HARDDEBUG
+ disallow_heap_frag_ref_in_heap(p);
+#endif
+ remove_message_buffers(p);
+}
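+
+/*
+ * The scan loop in do_minor() above is a Cheney-style copying
+ * collection: n_hp acts as the scan pointer and n_htop as the
+ * allocation pointer, and the collection is complete when the two
+ * meet. Schematically (a sketch only, not the actual code):
+ *
+ *   scan = alloc = n_heap;
+ *   alloc = evacuate_roots(alloc);             // copy roots, bump alloc
+ *   while (scan != alloc)
+ *       alloc = evacuate_children(scan++, alloc);
+ */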
+
+/*
+ * Major collection. DISCARD the old heap.
+ */
+
+static int
+major_collection(Process* p, int need, Eterm* objv, int nobj, Uint *recl)
+{
+ Rootset rootset;
+ Roots* roots;
+ int size_before;
+ Eterm* n_heap;
+ Eterm* n_htop;
+ char* src = (char *) HEAP_START(p);
+ Uint src_size = (char *) HEAP_TOP(p) - src;
+ char* oh = (char *) OLD_HEAP(p);
+ Uint oh_size = (char *) OLD_HTOP(p) - oh;
+ int n;
+ Uint new_sz;
+ Uint fragments = MBUF_SIZE(p) + combined_message_size(p);
+ ErlMessage *msgp;
+
+ size_before = fragments + (HEAP_TOP(p) - HEAP_START(p));
+
+ /*
+ * Do a fullsweep GC. First figure out the size of the heap
+ * to receive all live data.
+ */
+
+ new_sz = HEAP_SIZE(p) + fragments + (OLD_HTOP(p) - OLD_HEAP(p));
+ /*
+ * We used to do
+ *
+ * new_sz += STACK_SZ_ON_HEAP(p);
+ *
+ * here for no obvious reason. (The stack size is already counted once
+ * in HEAP_SIZE(p).)
+ */
+ new_sz = next_heap_size(p, new_sz, 0);
+
+ /*
+ * Should we grow although we don't actually need to?
+ */
+
+ if (new_sz == HEAP_SIZE(p) && FLAGS(p) & F_HEAP_GROW) {
+ new_sz = next_heap_size(p, HEAP_SIZE(p), 1);
+ }
+ FLAGS(p) &= ~(F_HEAP_GROW|F_NEED_FULLSWEEP);
+ n_htop = n_heap = (Eterm *) ERTS_HEAP_ALLOC(ERTS_ALC_T_HEAP,
+ sizeof(Eterm)*new_sz);
+
+ /*
+ * Get rid of heap fragments.
+ */
+
+ if (MBUF(p) != NULL) {
+ n_htop = collect_heap_frags(p, n_heap, n_htop, objv, nobj);
+ }
+
+ /*
+ * Copy all top-level terms directly referenced by the rootset to
+ * the new new_heap.
+ */
+
+ n = setup_rootset(p, objv, nobj, &rootset);
+ n_htop = fullsweep_nstack(p, n_htop);
+ roots = rootset.roots;
+ while (n--) {
+ Eterm* g_ptr = roots->v;
+ Eterm g_sz = roots->sz;
+
+ roots++;
+ while (g_sz--) {
+ Eterm* ptr;
+ Eterm val;
+ Eterm gval = *g_ptr;
+
+ switch (primary_tag(gval)) {
+
+ case TAG_PRIMARY_BOXED: {
+ ptr = boxed_val(gval);
+ val = *ptr;
+ if (IS_MOVED(val)) {
+ ASSERT(is_boxed(val));
+ *g_ptr++ = val;
+ } else if (in_area(ptr, src, src_size) || in_area(ptr, oh, oh_size)) {
+ MOVE_BOXED(ptr,val,n_htop,g_ptr++);
+ } else {
+ g_ptr++;
+ }
+ continue;
+ }
+
+ case TAG_PRIMARY_LIST: {
+ ptr = list_val(gval);
+ val = *ptr;
+ if (is_non_value(val)) {
+ *g_ptr++ = ptr[1];
+ } else if (in_area(ptr, src, src_size) || in_area(ptr, oh, oh_size)) {
+ MOVE_CONS(ptr,val,n_htop,g_ptr++);
+ } else {
+ g_ptr++;
+ }
+ continue;
+ }
+
+ default: {
+ g_ptr++;
+ continue;
+ }
+ }
+ }
+ }
+
+ cleanup_rootset(&rootset);
+
+ /*
+ * Now all references on the stack point to the new heap. However,
+ * most references on the new heap point to the old heap so the next stage
+ * is to scan through the new heap evacuating data from the old heap
+ * until all is copied.
+ */
+
+ if (oh_size == 0) {
+ n_htop = sweep_one_area(n_heap, n_htop, src, src_size);
+ } else {
+ Eterm* n_hp = n_heap;
+
+ while (n_hp != n_htop) {
+ Eterm* ptr;
+ Eterm val;
+ Eterm gval = *n_hp;
+
+ switch (primary_tag(gval)) {
+ case TAG_PRIMARY_BOXED: {
+ ptr = boxed_val(gval);
+ val = *ptr;
+ if (IS_MOVED(val)) {
+ ASSERT(is_boxed(val));
+ *n_hp++ = val;
+ } else if (in_area(ptr, src, src_size) || in_area(ptr, oh, oh_size)) {
+ MOVE_BOXED(ptr,val,n_htop,n_hp++);
+ } else {
+ n_hp++;
+ }
+ break;
+ }
+ case TAG_PRIMARY_LIST: {
+ ptr = list_val(gval);
+ val = *ptr;
+ if (is_non_value(val)) {
+ *n_hp++ = ptr[1];
+ } else if (in_area(ptr, src, src_size) || in_area(ptr, oh, oh_size)) {
+ MOVE_CONS(ptr,val,n_htop,n_hp++);
+ } else {
+ n_hp++;
+ }
+ break;
+ }
+ case TAG_PRIMARY_HEADER: {
+ if (!header_is_thing(gval))
+ n_hp++;
+ else {
+ if (header_is_bin_matchstate(gval)) {
+ ErlBinMatchState *ms = (ErlBinMatchState*) n_hp;
+ ErlBinMatchBuffer *mb = &(ms->mb);
+ Eterm* origptr;
+ origptr = &(mb->orig);
+ ptr = boxed_val(*origptr);
+ val = *ptr;
+ if (IS_MOVED(val)) {
+ *origptr = val;
+ mb->base = binary_bytes(*origptr);
+ } else if (in_area(ptr, src, src_size) ||
+ in_area(ptr, oh, oh_size)) {
+ MOVE_BOXED(ptr,val,n_htop,origptr);
+ mb->base = binary_bytes(*origptr);
+ ptr = boxed_val(*origptr);
+ val = *ptr;
+ }
+ }
+ n_hp += (thing_arityval(gval)+1);
+ }
+ break;
+ }
+ default:
+ n_hp++;
+ break;
+ }
+ }
+ }
+
+ if (MSO(p).mso) {
+ sweep_proc_bins(p, 1);
+ }
+ if (MSO(p).funs) {
+ sweep_proc_funs(p, 1);
+ }
+ if (MSO(p).externals) {
+ sweep_proc_externals(p, 1);
+ }
+
+ if (OLD_HEAP(p) != NULL) {
+ ERTS_HEAP_FREE(ERTS_ALC_T_OLD_HEAP,
+ OLD_HEAP(p),
+ (OLD_HEND(p) - OLD_HEAP(p)) * sizeof(Eterm));
+ OLD_HEAP(p) = OLD_HTOP(p) = OLD_HEND(p) = NULL;
+ }
+
+ /* Move the stack to the end of the heap */
+ n = HEAP_END(p) - p->stop;
+ sys_memcpy(n_heap + new_sz - n, p->stop, n * sizeof(Eterm));
+ p->stop = n_heap + new_sz - n;
+
+ ERTS_HEAP_FREE(ERTS_ALC_T_HEAP,
+ (void *) HEAP_START(p),
+ (HEAP_END(p) - HEAP_START(p)) * sizeof(Eterm));
+ HEAP_START(p) = n_heap;
+ HEAP_TOP(p) = n_htop;
+ HEAP_SIZE(p) = new_sz;
+ HEAP_END(p) = n_heap + new_sz;
+ GEN_GCS(p) = 0;
+
+ HIGH_WATER(p) = HEAP_TOP(p);
+
+ ErtsGcQuickSanityCheck(p);
+ /*
+     * Copy newly received messages onto the end of the new heap.
+ */
+ for (msgp = p->msg.first; msgp; msgp = msgp->next) {
+ if (msgp->data.attached) {
+ erts_move_msg_attached_data_to_heap(&p->htop, &p->off_heap, msgp);
+ ErtsGcQuickSanityCheck(p);
+ }
+ }
+
+ *recl += adjust_after_fullsweep(p, size_before, need, objv, nobj);
+
+#ifdef HARDDEBUG
+ disallow_heap_frag_ref_in_heap(p);
+#endif
+ remove_message_buffers(p);
+
+ ErtsGcQuickSanityCheck(p);
+ return 1; /* We are done. */
+}
+
+static Uint
+adjust_after_fullsweep(Process *p, int size_before, int need, Eterm *objv, int nobj)
+{
+ int wanted, sz, size_after, need_after;
+ int stack_size = STACK_SZ_ON_HEAP(p);
+ Uint reclaimed_now;
+
+ size_after = (HEAP_TOP(p) - HEAP_START(p));
+ reclaimed_now = (size_before - size_after);
+
+ /*
+ * Resize the heap if needed.
+ */
+
+ need_after = size_after + need + stack_size;
+ if (HEAP_SIZE(p) < need_after) {
+ /* Too small - grow to match requested need */
+ sz = next_heap_size(p, need_after, 0);
+ grow_new_heap(p, sz, objv, nobj);
+ } else if (3 * HEAP_SIZE(p) < 4 * need_after){
+ /* Need more than 75% of current, postpone to next GC.*/
+ FLAGS(p) |= F_HEAP_GROW;
+ } else if (4 * need_after < HEAP_SIZE(p) && HEAP_SIZE(p) > H_MIN_SIZE){
+ /* We need less than 25% of the current heap, shrink.*/
+ /* XXX - This is how it was done in the old GC:
+ wanted = 4 * need_after;
+ I think this is better as fullsweep is used mainly on
+ small memory systems, but I could be wrong... */
+ wanted = 2 * need_after;
+ if (wanted < p->min_heap_size) {
+ sz = p->min_heap_size;
+ } else {
+ sz = next_heap_size(p, wanted, 0);
+ }
+ if (sz < HEAP_SIZE(p)) {
+ shrink_new_heap(p, sz, objv, nobj);
+ }
+ }
+
+ return reclaimed_now;
+}
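+
+/*
+ * Example of the resize policy above, with HEAP_SIZE(p) == 4181:
+ * need_after == 3500 gives 3*4181 < 4*3500, so the heap is kept but
+ * F_HEAP_GROW is set for the next GC; need_after == 800 gives
+ * 4*800 < 4181, so the heap is shrunk towards 2*800 == 1600 words,
+ * which next_heap_size() rounds up to 2584.
+ */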
+
+/*
+ * Return the size of all message buffers that are NOT linked in the
+ * mbuf list.
+ */
+static Uint
+combined_message_size(Process* p)
+{
+ Uint sz = 0;
+ ErlMessage *msgp;
+
+ for (msgp = p->msg.first; msgp; msgp = msgp->next) {
+ if (msgp->data.attached) {
+ sz += erts_msg_attached_data_size(msgp);
+ }
+ }
+ return sz;
+}
+
+/*
+ * Remove all message buffers.
+ */
+static void
+remove_message_buffers(Process* p)
+{
+ ErlHeapFragment* bp = MBUF(p);
+
+ MBUF(p) = NULL;
+ MBUF_SIZE(p) = 0;
+ while (bp != NULL) {
+ ErlHeapFragment* next_bp = bp->next;
+ free_message_buffer(bp);
+ bp = next_bp;
+ }
+}
+
+/*
+ * Go through one root set array and move everything that is in one of
+ * the heap fragments to our new heap.
+ */
+static Eterm*
+collect_root_array(Process* p, Eterm* n_htop, Eterm* objv, int nobj)
+{
+ ErlHeapFragment* qb;
+ Eterm gval;
+ Eterm* ptr;
+ Eterm val;
+
+ ASSERT(p->htop != NULL);
+ while (nobj--) {
+ gval = *objv;
+
+ switch (primary_tag(gval)) {
+
+ case TAG_PRIMARY_BOXED: {
+ ptr = boxed_val(gval);
+ val = *ptr;
+ if (IS_MOVED(val)) {
+ ASSERT(is_boxed(val));
+ *objv++ = val;
+ } else {
+ for (qb = MBUF(p); qb != NULL; qb = qb->next) {
+ if (in_area(ptr, qb->mem, qb->size*sizeof(Eterm))) {
+ MOVE_BOXED(ptr,val,n_htop,objv);
+ break;
+ }
+ }
+ objv++;
+ }
+ break;
+ }
+
+ case TAG_PRIMARY_LIST: {
+ ptr = list_val(gval);
+ val = *ptr;
+ if (is_non_value(val)) {
+ *objv++ = ptr[1];
+ } else {
+ for (qb = MBUF(p); qb != NULL; qb = qb->next) {
+ if (in_area(ptr, qb->mem, qb->size*sizeof(Eterm))) {
+ MOVE_CONS(ptr,val,n_htop,objv);
+ break;
+ }
+ }
+ objv++;
+ }
+ break;
+ }
+
+ default: {
+ objv++;
+ break;
+ }
+ }
+ }
+ return n_htop;
+}
+
+#ifdef HARDDEBUG
+
+/*
+ * Routines to verify that we don't have pointers into heap fragments
+ * from areas that are not allowed to have them.
+ *
+ * For performance reasons, we use _unchecked_list_val(), _unchecked_boxed_val(),
+ * and so on to avoid a function call.
+ */
+
+static void
+disallow_heap_frag_ref(Process* p, Eterm* n_htop, Eterm* objv, int nobj)
+{
+ ErlHeapFragment* mbuf;
+ ErlHeapFragment* qb;
+ Eterm gval;
+ Eterm* ptr;
+ Eterm val;
+
+ ASSERT(p->htop != NULL);
+ mbuf = MBUF(p);
+
+ while (nobj--) {
+ gval = *objv;
+
+ switch (primary_tag(gval)) {
+
+ case TAG_PRIMARY_BOXED: {
+ ptr = _unchecked_boxed_val(gval);
+ val = *ptr;
+ if (IS_MOVED(val)) {
+ ASSERT(is_boxed(val));
+ objv++;
+ } else {
+ for (qb = mbuf; qb != NULL; qb = qb->next) {
+ if (in_area(ptr, qb->mem, qb->size*sizeof(Eterm))) {
+ abort();
+ }
+ }
+ objv++;
+ }
+ break;
+ }
+
+ case TAG_PRIMARY_LIST: {
+ ptr = _unchecked_list_val(gval);
+ val = *ptr;
+ if (is_non_value(val)) {
+ objv++;
+ } else {
+ for (qb = mbuf; qb != NULL; qb = qb->next) {
+ if (in_area(ptr, qb->mem, qb->size*sizeof(Eterm))) {
+ abort();
+ }
+ }
+ objv++;
+ }
+ break;
+ }
+
+ default: {
+ objv++;
+ break;
+ }
+ }
+ }
+}
+
+static void
+disallow_heap_frag_ref_in_heap(Process* p)
+{
+ Eterm* hp;
+ Eterm* htop;
+ Eterm* heap;
+ Uint heap_size;
+
+ if (p->mbuf == 0) {
+ return;
+ }
+
+ htop = p->htop;
+ heap = p->heap;
+ heap_size = (htop - heap)*sizeof(Eterm);
+
+ hp = heap;
+ while (hp < htop) {
+ ErlHeapFragment* qb;
+ Eterm* ptr;
+ Eterm val;
+
+ val = *hp++;
+ switch (primary_tag(val)) {
+ case TAG_PRIMARY_BOXED:
+ ptr = _unchecked_boxed_val(val);
+ if (!in_area(ptr, heap, heap_size)) {
+ for (qb = MBUF(p); qb != NULL; qb = qb->next) {
+ if (in_area(ptr, qb->mem, qb->size*sizeof(Eterm))) {
+ abort();
+ }
+ }
+ }
+ break;
+ case TAG_PRIMARY_LIST:
+ ptr = _unchecked_list_val(val);
+ if (!in_area(ptr, heap, heap_size)) {
+ for (qb = MBUF(p); qb != NULL; qb = qb->next) {
+ if (in_area(ptr, qb->mem, qb->size*sizeof(Eterm))) {
+ abort();
+ }
+ }
+ }
+ break;
+ case TAG_PRIMARY_HEADER:
+ if (header_is_thing(val)) {
+ hp += _unchecked_thing_arityval(val);
+ }
+ break;
+ }
+ }
+}
+
+static void
+disallow_heap_frag_ref_in_old_heap(Process* p)
+{
+ Eterm* hp;
+ Eterm* htop;
+ Eterm* old_heap;
+ Uint old_heap_size;
+ Eterm* new_heap;
+ Uint new_heap_size;
+
+ htop = p->old_htop;
+ old_heap = p->old_heap;
+ old_heap_size = (htop - old_heap)*sizeof(Eterm);
+ new_heap = p->heap;
+ new_heap_size = (p->htop - new_heap)*sizeof(Eterm);
+
+ ASSERT(!p->last_old_htop
+ || (old_heap <= p->last_old_htop && p->last_old_htop <= htop));
+ hp = p->last_old_htop ? p->last_old_htop : old_heap;
+ while (hp < htop) {
+ ErlHeapFragment* qb;
+ Eterm* ptr;
+ Eterm val;
+
+ val = *hp++;
+ switch (primary_tag(val)) {
+ case TAG_PRIMARY_BOXED:
+ ptr = (Eterm *) val;
+ if (!in_area(ptr, old_heap, old_heap_size)) {
+ if (in_area(ptr, new_heap, new_heap_size)) {
+ abort();
+ }
+ for (qb = MBUF(p); qb != NULL; qb = qb->next) {
+ if (in_area(ptr, qb->mem, qb->size*sizeof(Eterm))) {
+ abort();
+ }
+ }
+ }
+ break;
+ case TAG_PRIMARY_LIST:
+ ptr = (Eterm *) val;
+ if (!in_area(ptr, old_heap, old_heap_size)) {
+ if (in_area(ptr, new_heap, new_heap_size)) {
+ abort();
+ }
+ for (qb = MBUF(p); qb != NULL; qb = qb->next) {
+ if (in_area(ptr, qb->mem, qb->size*sizeof(Eterm))) {
+ abort();
+ }
+ }
+ }
+ break;
+ case TAG_PRIMARY_HEADER:
+ if (header_is_thing(val)) {
+ hp += _unchecked_thing_arityval(val);
+ if (!in_area(hp, old_heap, old_heap_size+1)) {
+ abort();
+ }
+ }
+ break;
+ }
+ }
+}
+#endif
+
+static Eterm*
+sweep_rootset(Rootset* rootset, Eterm* htop, char* src, Uint src_size)
+{
+ Roots* roots = rootset->roots;
+ Uint n = rootset->num_roots;
+ Eterm* ptr;
+ Eterm gval;
+ Eterm val;
+
+ while (n--) {
+ Eterm* g_ptr = roots->v;
+ Uint g_sz = roots->sz;
+
+ roots++;
+ while (g_sz--) {
+ gval = *g_ptr;
+
+ switch (primary_tag(gval)) {
+ case TAG_PRIMARY_BOXED: {
+ ptr = boxed_val(gval);
+ val = *ptr;
+ if (IS_MOVED(val)) {
+ ASSERT(is_boxed(val));
+ *g_ptr++ = val;
+ } else if (in_area(ptr, src, src_size)) {
+ MOVE_BOXED(ptr,val,htop,g_ptr++);
+ } else {
+ g_ptr++;
+ }
+ break;
+ }
+ case TAG_PRIMARY_LIST: {
+ ptr = list_val(gval);
+ val = *ptr;
+ if (is_non_value(val)) { /* Moved */
+ *g_ptr++ = ptr[1];
+ } else if (in_area(ptr, src, src_size)) {
+ MOVE_CONS(ptr,val,htop,g_ptr++);
+ } else {
+ g_ptr++;
+ }
+ break;
+ }
+
+ default:
+ g_ptr++;
+ break;
+ }
+ }
+ }
+ return htop;
+}
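+
+/*
+ * Forwarding conventions relied upon by all the sweep loops: when a
+ * boxed term has been moved, its first word has been overwritten with
+ * the tagged forwarding pointer (detected by IS_MOVED()); when a cons
+ * cell has been moved, its CAR has been set to THE_NON_VALUE (hence
+ * the is_non_value() test) and its CDR holds the forwarding pointer,
+ * which is why the list cases read ptr[1].
+ */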
+
+
+static Eterm*
+sweep_one_area(Eterm* n_hp, Eterm* n_htop, char* src, Uint src_size)
+{
+ while (n_hp != n_htop) {
+ Eterm* ptr;
+ Eterm val;
+ Eterm gval = *n_hp;
+
+ switch (primary_tag(gval)) {
+ case TAG_PRIMARY_BOXED: {
+ ptr = boxed_val(gval);
+ val = *ptr;
+ if (IS_MOVED(val)) {
+ ASSERT(is_boxed(val));
+ *n_hp++ = val;
+ } else if (in_area(ptr, src, src_size)) {
+ MOVE_BOXED(ptr,val,n_htop,n_hp++);
+ } else {
+ n_hp++;
+ }
+ break;
+ }
+ case TAG_PRIMARY_LIST: {
+ ptr = list_val(gval);
+ val = *ptr;
+ if (is_non_value(val)) {
+ *n_hp++ = ptr[1];
+ } else if (in_area(ptr, src, src_size)) {
+ MOVE_CONS(ptr,val,n_htop,n_hp++);
+ } else {
+ n_hp++;
+ }
+ break;
+ }
+ case TAG_PRIMARY_HEADER: {
+ if (!header_is_thing(gval)) {
+ n_hp++;
+ } else {
+ if (header_is_bin_matchstate(gval)) {
+ ErlBinMatchState *ms = (ErlBinMatchState*) n_hp;
+ ErlBinMatchBuffer *mb = &(ms->mb);
+ Eterm* origptr;
+ origptr = &(mb->orig);
+ ptr = boxed_val(*origptr);
+ val = *ptr;
+ if (IS_MOVED(val)) {
+ *origptr = val;
+ mb->base = binary_bytes(*origptr);
+ } else if (in_area(ptr, src, src_size)) {
+ MOVE_BOXED(ptr,val,n_htop,origptr);
+ mb->base = binary_bytes(*origptr);
+ }
+ }
+ n_hp += (thing_arityval(gval)+1);
+ }
+ break;
+ }
+ default:
+ n_hp++;
+ break;
+ }
+ }
+ return n_htop;
+}
+
+static Eterm*
+sweep_one_heap(Eterm* heap_ptr, Eterm* heap_end, Eterm* htop, char* src, Uint src_size)
+{
+ while (heap_ptr < heap_end) {
+ Eterm* ptr;
+ Eterm val;
+ Eterm gval = *heap_ptr;
+
+ switch (primary_tag(gval)) {
+ case TAG_PRIMARY_BOXED: {
+ ptr = boxed_val(gval);
+ val = *ptr;
+ if (IS_MOVED(val)) {
+ ASSERT(is_boxed(val));
+ *heap_ptr++ = val;
+ } else if (in_area(ptr, src, src_size)) {
+ MOVE_BOXED(ptr,val,htop,heap_ptr++);
+ } else {
+ heap_ptr++;
+ }
+ break;
+ }
+ case TAG_PRIMARY_LIST: {
+ ptr = list_val(gval);
+ val = *ptr;
+ if (is_non_value(val)) {
+ *heap_ptr++ = ptr[1];
+ } else if (in_area(ptr, src, src_size)) {
+ MOVE_CONS(ptr,val,htop,heap_ptr++);
+ } else {
+ heap_ptr++;
+ }
+ break;
+ }
+ case TAG_PRIMARY_HEADER: {
+ if (!header_is_thing(gval)) {
+ heap_ptr++;
+ } else {
+ heap_ptr += (thing_arityval(gval)+1);
+ }
+ break;
+ }
+ default:
+ heap_ptr++;
+ break;
+ }
+ }
+ return htop;
+}
+
+/*
+ * Collect heap fragments and check that references point in the correct direction.
+ */
+
+static Eterm*
+collect_heap_frags(Process* p, Eterm* n_hstart, Eterm* n_htop,
+ Eterm* objv, int nobj)
+{
+ ErlHeapFragment* qb;
+ char* frag_begin;
+ Uint frag_size;
+ ErlMessage* mp;
+
+ /*
+     * We don't allow references to heap fragments from the stack, heap,
+ * or process dictionary.
+ */
+#ifdef HARDDEBUG
+ disallow_heap_frag_ref(p, n_htop, p->stop, STACK_START(p) - p->stop);
+ if (p->dictionary != NULL) {
+ disallow_heap_frag_ref(p, n_htop, p->dictionary->data, p->dictionary->used);
+ }
+ disallow_heap_frag_ref_in_heap(p);
+#endif
+
+ /*
+ * Go through the subset of the root set that is allowed to
+ * reference data in heap fragments and move data from heap fragments
+ * to our new heap.
+ */
+
+ if (nobj != 0) {
+ n_htop = collect_root_array(p, n_htop, objv, nobj);
+ }
+ if (is_not_immed(p->fvalue)) {
+ n_htop = collect_root_array(p, n_htop, &p->fvalue, 1);
+ }
+ if (is_not_immed(p->ftrace)) {
+ n_htop = collect_root_array(p, n_htop, &p->ftrace, 1);
+ }
+ if (is_not_immed(p->seq_trace_token)) {
+ n_htop = collect_root_array(p, n_htop, &p->seq_trace_token, 1);
+ }
+ if (is_not_immed(p->group_leader)) {
+ n_htop = collect_root_array(p, n_htop, &p->group_leader, 1);
+ }
+
+ /*
+ * Go through the message queue, move everything that is in one of the
+ * heap fragments to our new heap.
+ */
+
+ for (mp = p->msg.first; mp != NULL; mp = mp->next) {
+ /*
+ * In most cases, mp->data.attached points to a heap fragment which is
+ * self-contained and we will copy it to the heap at the
+ * end of the GC to avoid scanning it.
+ *
+ * In a few cases, however, such as in process_info(Pid, messages)
+ * and trace_delivered/1, a new message points to a term that has
+ * been allocated by HAlloc() and mp->data.attached is NULL. Therefore
+ * we need this loop.
+ */
+ if (mp->data.attached == NULL) {
+ n_htop = collect_root_array(p, n_htop, mp->m, 2);
+ }
+ }
+
+ /*
+ * Now all references in the root set point to the new heap. However,
+ * many references on the new heap point to heap fragments.
+ */
+
+ qb = MBUF(p);
+ while (qb != NULL) {
+ frag_begin = (char *) qb->mem;
+ frag_size = qb->size * sizeof(Eterm);
+ if (frag_size != 0) {
+ n_htop = sweep_one_area(n_hstart, n_htop, frag_begin, frag_size);
+ }
+ qb = qb->next;
+ }
+ return n_htop;
+}
+
+static Uint
+setup_rootset(Process *p, Eterm *objv, int nobj, Rootset *rootset)
+{
+ Uint avail;
+ Roots* roots;
+ ErlMessage* mp;
+ Uint n;
+
+ n = 0;
+ roots = rootset->roots = rootset->def;
+ rootset->size = ALENGTH(rootset->def);
+
+ roots[n].v = p->stop;
+ roots[n].sz = STACK_START(p) - p->stop;
+ ++n;
+
+ if (p->dictionary != NULL) {
+ roots[n].v = p->dictionary->data;
+ roots[n].sz = p->dictionary->used;
+ ++n;
+ }
+ if (nobj > 0) {
+ roots[n].v = objv;
+ roots[n].sz = nobj;
+ ++n;
+ }
+
+ ASSERT((is_nil(p->seq_trace_token) ||
+ is_tuple(p->seq_trace_token) ||
+ is_atom(p->seq_trace_token)));
+ if (is_not_immed(p->seq_trace_token)) {
+ roots[n].v = &p->seq_trace_token;
+ roots[n].sz = 1;
+ n++;
+ }
+
+ ASSERT(is_nil(p->tracer_proc) ||
+ is_internal_pid(p->tracer_proc) ||
+ is_internal_port(p->tracer_proc));
+
+ ASSERT(is_pid(p->group_leader));
+ if (is_not_immed(p->group_leader)) {
+ roots[n].v = &p->group_leader;
+ roots[n].sz = 1;
+ n++;
+ }
+
+ /*
+ * The process may be garbage-collected while it is terminating.
+ * (fvalue contains the EXIT reason and ftrace the saved stack trace.)
+ */
+ if (is_not_immed(p->fvalue)) {
+ roots[n].v = &p->fvalue;
+ roots[n].sz = 1;
+ n++;
+ }
+ if (is_not_immed(p->ftrace)) {
+ roots[n].v = &p->ftrace;
+ roots[n].sz = 1;
+ n++;
+ }
+ ASSERT(n <= rootset->size);
+
+ mp = p->msg.first;
+ avail = rootset->size - n;
+ while (mp != NULL) {
+ if (avail == 0) {
+ Uint new_size = 2*rootset->size;
+ if (roots == rootset->def) {
+ roots = erts_alloc(ERTS_ALC_T_ROOTSET,
+ new_size*sizeof(Roots));
+ sys_memcpy(roots, rootset->def, sizeof(rootset->def));
+ } else {
+ roots = erts_realloc(ERTS_ALC_T_ROOTSET,
+ (void *) roots,
+ new_size*sizeof(Roots));
+ }
+ rootset->size = new_size;
+ avail = new_size - n;
+ }
+ if (mp->data.attached == NULL) {
+ roots[n].v = mp->m;
+ roots[n].sz = 2;
+ n++;
+ avail--;
+ }
+ mp = mp->next;
+ }
+ rootset->roots = roots;
+ rootset->num_roots = n;
+ return n;
+}
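+
+/*
+ * The default root set (rootset->def) holds 32 entries. The stack,
+ * dictionary, objv and the term fields above use only a handful of
+ * them, so the allocation path is reached only when many queued
+ * messages lack attached heap fragments; the array is then doubled,
+ * 32 -> 64 -> 128 entries, with the old contents copied over.
+ */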
+
+static
+void cleanup_rootset(Rootset* rootset)
+{
+ if (rootset->roots != rootset->def) {
+ erts_free(ERTS_ALC_T_ROOTSET, rootset->roots);
+ }
+}
+
+static void
+grow_new_heap(Process *p, Uint new_sz, Eterm* objv, int nobj)
+{
+ Eterm* new_heap;
+ int heap_size = HEAP_TOP(p) - HEAP_START(p);
+ int stack_size = p->hend - p->stop;
+ Sint offs;
+
+ ASSERT(HEAP_SIZE(p) < new_sz);
+ new_heap = (Eterm *) ERTS_HEAP_REALLOC(ERTS_ALC_T_HEAP,
+ (void *) HEAP_START(p),
+ sizeof(Eterm)*(HEAP_SIZE(p)),
+ sizeof(Eterm)*new_sz);
+
+ if ((offs = new_heap - HEAP_START(p)) == 0) { /* No move. */
+ HEAP_END(p) = new_heap + new_sz;
+ sys_memmove(p->hend - stack_size, p->stop, stack_size * sizeof(Eterm));
+ p->stop = p->hend - stack_size;
+ } else {
+ char* area = (char *) HEAP_START(p);
+ Uint area_size = (char *) HEAP_TOP(p) - area;
+ Eterm* prev_stop = p->stop;
+
+ offset_heap(new_heap, heap_size, offs, area, area_size);
+
+ HIGH_WATER(p) = new_heap + (HIGH_WATER(p) - HEAP_START(p));
+
+ HEAP_END(p) = new_heap + new_sz;
+ prev_stop = new_heap + (p->stop - p->heap);
+ p->stop = p->hend - stack_size;
+ sys_memmove(p->stop, prev_stop, stack_size * sizeof(Eterm));
+
+ offset_rootset(p, offs, area, area_size, objv, nobj);
+ HEAP_TOP(p) = new_heap + heap_size;
+ HEAP_START(p) = new_heap;
+ }
+ HEAP_SIZE(p) = new_sz;
+}
+
+static void
+shrink_new_heap(Process *p, Uint new_sz, Eterm *objv, int nobj)
+{
+ Eterm* new_heap;
+ int heap_size = HEAP_TOP(p) - HEAP_START(p);
+ Sint offs;
+
+ int stack_size = p->hend - p->stop;
+
+ ASSERT(new_sz < p->heap_sz);
+ sys_memmove(p->heap + new_sz - stack_size, p->stop, stack_size *
+ sizeof(Eterm));
+ new_heap = (Eterm *) ERTS_HEAP_REALLOC(ERTS_ALC_T_HEAP,
+ (void*)p->heap,
+ sizeof(Eterm)*(HEAP_SIZE(p)),
+ sizeof(Eterm)*new_sz);
+ p->hend = new_heap + new_sz;
+ p->stop = p->hend - stack_size;
+
+ if ((offs = new_heap - HEAP_START(p)) != 0) {
+ char* area = (char *) HEAP_START(p);
+ Uint area_size = (char *) HEAP_TOP(p) - area;
+
+ /*
+ * Normally, we don't expect a shrunk heap to move, but you never
+ * know on some strange embedded systems... Or when using purify.
+ */
+
+ offset_heap(new_heap, heap_size, offs, area, area_size);
+
+ HIGH_WATER(p) = new_heap + (HIGH_WATER(p) - HEAP_START(p));
+ offset_rootset(p, offs, area, area_size, objv, nobj);
+ HEAP_TOP(p) = new_heap + heap_size;
+ HEAP_START(p) = new_heap;
+ }
+ HEAP_SIZE(p) = new_sz;
+}
+
+static Uint
+next_vheap_size(Uint vheap, Uint vheap_sz) {
+ if (vheap < H_MIN_SIZE) {
+ return H_MIN_SIZE;
+ }
+
+ /* grow */
+ if (vheap > vheap_sz) {
+ return erts_next_heap_size(2*vheap, 0);
+ }
+ /* shrink */
+    if (vheap < vheap_sz/2) {
+ return (Uint)vheap_sz*3/4;
+ }
+
+ return vheap_sz;
+}
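+
+/*
+ * Example: with vheap_sz == 10946, a virtual binary heap of 12000
+ * words (> vheap_sz) grows the limit to erts_next_heap_size(24000, 0)
+ * == 28657, while 5000 words (< vheap_sz/2) shrinks it to
+ * 3*10946/4 == 8209. Anything in between leaves vheap_sz unchanged.
+ */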
+
+
+static void
+sweep_proc_externals(Process *p, int fullsweep)
+{
+ ExternalThing** prev;
+ ExternalThing* ptr;
+ char* oh = 0;
+ Uint oh_size = 0;
+
+ if (fullsweep == 0) {
+ oh = (char *) OLD_HEAP(p);
+ oh_size = (char *) OLD_HEND(p) - oh;
+ }
+
+ prev = &MSO(p).externals;
+ ptr = MSO(p).externals;
+
+ while (ptr) {
+ Eterm* ppt = (Eterm *) ptr;
+
+ if (IS_MOVED(*ppt)) { /* Object is alive */
+ ExternalThing* ro = external_thing_ptr(*ppt);
+
+ *prev = ro; /* Patch to moved pos */
+ prev = &ro->next;
+ ptr = ro->next;
+ } else if (in_area(ppt, oh, oh_size)) {
+ /*
+ * Object resides on old heap, and we just did a
+ * generational collection - keep object in list.
+ */
+ prev = &ptr->next;
+ ptr = ptr->next;
+ } else { /* Object has not been moved - deref it */
+ erts_deref_node_entry(ptr->node);
+ *prev = ptr = ptr->next;
+ }
+ }
+ ASSERT(*prev == NULL);
+}
+
+static void
+sweep_proc_funs(Process *p, int fullsweep)
+{
+ ErlFunThing** prev;
+ ErlFunThing* ptr;
+ char* oh = 0;
+ Uint oh_size = 0;
+
+ if (fullsweep == 0) {
+ oh = (char *) OLD_HEAP(p);
+ oh_size = (char *) OLD_HEND(p) - oh;
+ }
+
+ prev = &MSO(p).funs;
+ ptr = MSO(p).funs;
+
+ while (ptr) {
+ Eterm* ppt = (Eterm *) ptr;
+
+ if (IS_MOVED(*ppt)) { /* Object is alive */
+ ErlFunThing* ro = (ErlFunThing *) fun_val(*ppt);
+
+ *prev = ro; /* Patch to moved pos */
+ prev = &ro->next;
+ ptr = ro->next;
+ } else if (in_area(ppt, oh, oh_size)) {
+ /*
+ * Object resides on old heap, and we just did a
+ * generational collection - keep object in list.
+ */
+ prev = &ptr->next;
+ ptr = ptr->next;
+ } else { /* Object has not been moved - deref it */
+ ErlFunEntry* fe = ptr->fe;
+
+ *prev = ptr = ptr->next;
+ if (erts_refc_dectest(&fe->refc, 0) == 0) {
+ erts_erase_fun_entry(fe);
+ }
+ }
+ }
+ ASSERT(*prev == NULL);
+}
+
+struct shrink_cand_data {
+ ProcBin* new_candidates;
+ ProcBin* new_candidates_end;
+ ProcBin* old_candidates;
+ Uint no_of_candidates;
+ Uint no_of_active;
+};
+
+static ERTS_INLINE void
+link_live_proc_bin(struct shrink_cand_data *shrink,
+ ProcBin ***prevppp,
+ ProcBin **pbpp,
+ int new_heap)
+{
+ ProcBin *pbp = *pbpp;
+
+ *pbpp = pbp->next;
+
+ if (pbp->flags & (PB_ACTIVE_WRITER|PB_IS_WRITABLE)) {
+ ASSERT(((pbp->flags & (PB_ACTIVE_WRITER|PB_IS_WRITABLE))
+ == (PB_ACTIVE_WRITER|PB_IS_WRITABLE))
+ || ((pbp->flags & (PB_ACTIVE_WRITER|PB_IS_WRITABLE))
+ == PB_IS_WRITABLE));
+
+
+ if (pbp->flags & PB_ACTIVE_WRITER) {
+ pbp->flags &= ~PB_ACTIVE_WRITER;
+ shrink->no_of_active++;
+ }
+ else { /* inactive */
+ Uint unused = pbp->val->orig_size - pbp->size;
+	    /* Our allocators are 8 byte aligned, i.e., shrinking by
+ less than 8 bytes will have no real effect */
+ if (unused >= 8) { /* A shrink candidate; save in candidate list */
+ if (new_heap) {
+ if (!shrink->new_candidates)
+ shrink->new_candidates_end = pbp;
+ pbp->next = shrink->new_candidates;
+ shrink->new_candidates = pbp;
+ }
+ else {
+ pbp->next = shrink->old_candidates;
+ shrink->old_candidates = pbp;
+ }
+ shrink->no_of_candidates++;
+ return;
+ }
+ }
+ }
+
+ /* Not a shrink candidate; keep in original mso list */
+ **prevppp = pbp;
+ *prevppp = &pbp->next;
+
+}
+
+
+static void
+sweep_proc_bins(Process *p, int fullsweep)
+{
+ struct shrink_cand_data shrink = {0};
+ ProcBin** prev;
+ ProcBin* ptr;
+ Binary* bptr;
+ char* oh = NULL;
+ Uint oh_size = 0;
+ Uint bin_vheap = 0;
+
+ if (fullsweep == 0) {
+ oh = (char *) OLD_HEAP(p);
+ oh_size = (char *) OLD_HEND(p) - oh;
+ }
+
+ BIN_OLD_VHEAP(p) = 0;
+
+ prev = &MSO(p).mso;
+ ptr = MSO(p).mso;
+
+ /*
+ * Note: In R7 we no longer force a fullsweep when we find binaries
+ * on the old heap. The reason is that with the introduction of the
+ * bit syntax we can expect binaries to be used a lot more. Note that
+ * in earlier releases a brand new binary (or any other term) could
+ * be put on the old heap during a gen-gc fullsweep, but this is
+ * no longer the case in R7.
+ */
+ while (ptr) {
+ Eterm* ppt = (Eterm *) ptr;
+
+ if (IS_MOVED(*ppt)) { /* Object is alive */
+ bin_vheap += ptr->size / sizeof(Eterm);
+ ptr = (ProcBin*) binary_val(*ppt);
+ link_live_proc_bin(&shrink,
+ &prev,
+ &ptr,
+ !in_area(ptr, oh, oh_size));
+ } else if (in_area(ppt, oh, oh_size)) {
+ /*
+ * Object resides on old heap, and we just did a
+ * generational collection - keep object in list.
+ */
+ BIN_OLD_VHEAP(p) += ptr->size / sizeof(Eterm); /* for binary gc (words)*/
+ link_live_proc_bin(&shrink, &prev, &ptr, 0);
+ } else { /* Object has not been moved - deref it */
+
+ *prev = ptr->next;
+ bptr = ptr->val;
+ if (erts_refc_dectest(&bptr->refc, 0) == 0)
+ erts_bin_free(bptr);
+ ptr = *prev;
+ }
+ }
+
+ if (BIN_OLD_VHEAP(p) >= BIN_OLD_VHEAP_SZ(p)) {
+ FLAGS(p) |= F_NEED_FULLSWEEP;
+ }
+
+ BIN_VHEAP_SZ(p) = next_vheap_size(bin_vheap, BIN_VHEAP_SZ(p));
+ BIN_OLD_VHEAP_SZ(p) = next_vheap_size(BIN_OLD_VHEAP(p), BIN_OLD_VHEAP_SZ(p));
+ MSO(p).overhead = bin_vheap;
+
+ /*
+ * If we got any shrink candidates, check them out.
+ */
+
+ if (shrink.no_of_candidates) {
+ ProcBin *candlist[] = {shrink.new_candidates, shrink.old_candidates};
+ Uint leave_unused = 0;
+ int i;
+
+ if (shrink.no_of_active == 0) {
+ if (shrink.no_of_candidates <= ERTS_INACT_WR_PB_LEAVE_MUCH_LIMIT)
+ leave_unused = ERTS_INACT_WR_PB_LEAVE_MUCH_PERCENTAGE;
+ else if (shrink.no_of_candidates <= ERTS_INACT_WR_PB_LEAVE_LIMIT)
+ leave_unused = ERTS_INACT_WR_PB_LEAVE_PERCENTAGE;
+ }
+
+ for (i = 0; i < sizeof(candlist)/sizeof(candlist[0]); i++) {
+
+ for (ptr = candlist[i]; ptr; ptr = ptr->next) {
+ Uint new_size = ptr->size;
+
+ if (leave_unused) {
+ new_size += (new_size * 100) / leave_unused;
+		    /* Our allocators are 8 byte aligned, i.e., shrinking by
+ less than 8 bytes will have no real effect */
+ if (new_size + 8 >= ptr->val->orig_size)
+ continue;
+ }
+
+ ptr->val = erts_bin_realloc(ptr->val, new_size);
+ ptr->val->orig_size = new_size;
+ ptr->bytes = (byte *) ptr->val->orig_bytes;
+ }
+ }
+
+
+ /*
+ * We now potentially have the mso list divided into three lists:
+ * - shrink candidates on new heap (inactive writable with unused data)
+ * - shrink candidates on old heap (inactive writable with unused data)
+ * - other binaries (read only + active writable ...)
+ *
+ * Put them back together: new candidates -> other -> old candidates
+ * This order will ensure that the list only refers from new
+ * generation to old and never from old to new *which is important*.
+ */
+ if (shrink.new_candidates) {
+ if (prev == &MSO(p).mso) /* empty other binaries list */
+ prev = &shrink.new_candidates_end->next;
+ else
+ shrink.new_candidates_end->next = MSO(p).mso;
+ MSO(p).mso = shrink.new_candidates;
+ }
+ }
+
+ *prev = shrink.old_candidates;
+}
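+
+/*
+ * Example (hypothetical values): a writable binary built by the bit
+ * syntax with orig_size == 4096 bytes but only size == 100 bytes used,
+ * whose PB_ACTIVE_WRITER flag has not been set again since the
+ * previous GC, ends up on one of the candidate lists above and is
+ * reallocated close to its used size, releasing the unused tail.
+ */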
+
+/*
+ * Offset pointers into the heap (not stack).
+ */
+
+static void
+offset_heap(Eterm* hp, Uint sz, Sint offs, char* area, Uint area_size)
+{
+ while (sz--) {
+ Eterm val = *hp;
+ switch (primary_tag(val)) {
+ case TAG_PRIMARY_LIST:
+ case TAG_PRIMARY_BOXED:
+ if (in_area(ptr_val(val), area, area_size)) {
+ *hp = offset_ptr(val, offs);
+ }
+ hp++;
+ break;
+ case TAG_PRIMARY_HEADER: {
+ Uint tari;
+
+ if (header_is_transparent(val)) {
+ hp++;
+ continue;
+ }
+ tari = thing_arityval(val);
+ switch (thing_subtag(val)) {
+ case REFC_BINARY_SUBTAG:
+ {
+ ProcBin* pb = (ProcBin*) hp;
+ Eterm** uptr = (Eterm **) (void *) &pb->next;
+
+ if (*uptr && in_area((Eterm *)pb->next, area, area_size)) {
+ *uptr += offs; /* Patch the mso chain */
+ }
+ sz -= tari;
+ hp += tari + 1;
+ }
+ break;
+ case BIN_MATCHSTATE_SUBTAG:
+ {
+ ErlBinMatchState *ms = (ErlBinMatchState*) hp;
+ ErlBinMatchBuffer *mb = &(ms->mb);
+ if (in_area(ptr_val(mb->orig), area, area_size)) {
+ mb->orig = offset_ptr(mb->orig, offs);
+ mb->base = binary_bytes(mb->orig);
+ }
+ sz -= tari;
+ hp += tari + 1;
+ }
+ break;
+ case FUN_SUBTAG:
+ {
+ ErlFunThing* funp = (ErlFunThing *) hp;
+ Eterm** uptr = (Eterm **) (void *) &funp->next;
+
+ if (*uptr && in_area((Eterm *)funp->next, area, area_size)) {
+ *uptr += offs;
+ }
+ sz -= tari;
+ hp += tari + 1;
+ }
+ break;
+ case EXTERNAL_PID_SUBTAG:
+ case EXTERNAL_PORT_SUBTAG:
+ case EXTERNAL_REF_SUBTAG:
+ {
+ ExternalThing* etp = (ExternalThing *) hp;
+ Eterm** uptr = (Eterm **) (void *) &etp->next;
+
+ if (*uptr && in_area((Eterm *)etp->next, area, area_size)) {
+ *uptr += offs;
+ }
+ sz -= tari;
+ hp += tari + 1;
+ }
+ break;
+ default:
+ sz -= tari;
+ hp += tari + 1;
+ }
+ break;
+ }
+ default:
+ hp++;
+ continue;
+ }
+ }
+}
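+
+/*
+ * Note that offset_heap() above patches not only term pointers but also
+ * the intrusive 'next' links of the off-heap chains (ProcBin, fun and
+ * external things), since those links may point at objects on the same
+ * moved heap area.
+ */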
+
+/*
+ * Offset pointers to heap from stack.
+ */
+
+static void
+offset_heap_ptr(Eterm* hp, Uint sz, Sint offs, char* area, Uint area_size)
+{
+ while (sz--) {
+ Eterm val = *hp;
+ switch (primary_tag(val)) {
+ case TAG_PRIMARY_LIST:
+ case TAG_PRIMARY_BOXED:
+ if (in_area(ptr_val(val), area, area_size)) {
+ *hp = offset_ptr(val, offs);
+ }
+ hp++;
+ break;
+ default:
+ hp++;
+ break;
+ }
+ }
+}
+
+static void
+offset_off_heap(Process* p, Sint offs, char* area, Uint area_size)
+{
+ if (MSO(p).mso && in_area((Eterm *)MSO(p).mso, area, area_size)) {
+ Eterm** uptr = (Eterm**) (void *) &MSO(p).mso;
+ *uptr += offs;
+ }
+
+ if (MSO(p).funs && in_area((Eterm *)MSO(p).funs, area, area_size)) {
+ Eterm** uptr = (Eterm**) (void *) &MSO(p).funs;
+ *uptr += offs;
+ }
+
+ if (MSO(p).externals && in_area((Eterm *)MSO(p).externals, area, area_size)) {
+ Eterm** uptr = (Eterm**) (void *) &MSO(p).externals;
+ *uptr += offs;
+ }
+}
+
+/*
+ * Offset pointers in message queue.
+ */
+static void
+offset_mqueue(Process *p, Sint offs, char* area, Uint area_size)
+{
+ ErlMessage* mp = p->msg.first;
+
+ while (mp != NULL) {
+ Eterm mesg = ERL_MESSAGE_TERM(mp);
+ if (is_value(mesg)) {
+ switch (primary_tag(mesg)) {
+ case TAG_PRIMARY_LIST:
+ case TAG_PRIMARY_BOXED:
+ if (in_area(ptr_val(mesg), area, area_size)) {
+ ERL_MESSAGE_TERM(mp) = offset_ptr(mesg, offs);
+ }
+ break;
+ }
+ }
+ mesg = ERL_MESSAGE_TOKEN(mp);
+ if (is_boxed(mesg) && in_area(ptr_val(mesg), area, area_size)) {
+ ERL_MESSAGE_TOKEN(mp) = offset_ptr(mesg, offs);
+ }
+ ASSERT((is_nil(ERL_MESSAGE_TOKEN(mp)) ||
+ is_tuple(ERL_MESSAGE_TOKEN(mp)) ||
+ is_atom(ERL_MESSAGE_TOKEN(mp))));
+ mp = mp->next;
+ }
+}
+
+static void ERTS_INLINE
+offset_one_rootset(Process *p, Sint offs, char* area, Uint area_size,
+ Eterm* objv, int nobj)
+{
+ if (p->dictionary) {
+ offset_heap(p->dictionary->data,
+ p->dictionary->used,
+ offs, area, area_size);
+ }
+ offset_heap_ptr(&p->fvalue, 1, offs, area, area_size);
+ offset_heap_ptr(&p->ftrace, 1, offs, area, area_size);
+ offset_heap_ptr(&p->seq_trace_token, 1, offs, area, area_size);
+ offset_heap_ptr(&p->group_leader, 1, offs, area, area_size);
+ offset_mqueue(p, offs, area, area_size);
+ offset_heap_ptr(p->stop, (STACK_START(p) - p->stop), offs, area, area_size);
+ offset_nstack(p, offs, area, area_size);
+ if (nobj > 0) {
+ offset_heap_ptr(objv, nobj, offs, area, area_size);
+ }
+ offset_off_heap(p, offs, area, area_size);
+}
+
+static void
+offset_rootset(Process *p, Sint offs, char* area, Uint area_size,
+ Eterm* objv, int nobj)
+{
+ offset_one_rootset(p, offs, area, area_size, objv, nobj);
+}
+
+#if defined(DEBUG) || defined(ERTS_OFFHEAP_DEBUG)
+
+static int
+within2(Eterm *ptr, Process *p, Eterm *real_htop)
+{
+ ErlHeapFragment* bp = MBUF(p);
+ ErlMessage* mp = p->msg.first;
+ Eterm *htop = real_htop ? real_htop : HEAP_TOP(p);
+
+ if (OLD_HEAP(p) && (OLD_HEAP(p) <= ptr && ptr < OLD_HEND(p))) {
+ return 1;
+ }
+ if (HEAP_START(p) <= ptr && ptr < htop) {
+ return 1;
+ }
+ while (bp != NULL) {
+ if (bp->mem <= ptr && ptr < bp->mem + bp->size) {
+ return 1;
+ }
+ bp = bp->next;
+ }
+ while (mp) {
+ if (mp->data.attached) {
+ ErlHeapFragment *hfp;
+ if (is_value(ERL_MESSAGE_TERM(mp)))
+ hfp = mp->data.heap_frag;
+ else if (is_not_nil(ERL_MESSAGE_TOKEN(mp)))
+ hfp = erts_dist_ext_trailer(mp->data.dist_ext);
+ else
+ hfp = NULL;
+ if (hfp && hfp->mem <= ptr && ptr < hfp->mem + hfp->size)
+ return 1;
+ }
+ mp = mp->next;
+ }
+ return 0;
+}
+
+int
+within(Eterm *ptr, Process *p)
+{
+ return within2(ptr, p, NULL);
+}
+
+#endif
+
+#ifdef ERTS_OFFHEAP_DEBUG
+
+#define ERTS_CHK_OFFHEAP_ASSERT(EXP) \
+do { \
+ if (!(EXP)) \
+ erl_exit(ERTS_ABORT_EXIT, \
+ "%s:%d: Assertion failed: %s\n", \
+ __FILE__, __LINE__, #EXP); \
+} while (0)
+
+#ifdef ERTS_OFFHEAP_DEBUG_CHK_CIRCULAR_EXTERNAL_LIST
+# define ERTS_EXTERNAL_VISITED_BIT ((Eterm) 1 << 31)
+#endif
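+
+/*
+ * The visited bit is set on each external thing while the list is walked
+ * below and cleared again afterwards; finding it already set means the
+ * list is circular, which the assertion then catches.
+ */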
+
+
+void
+erts_check_off_heap2(Process *p, Eterm *htop)
+{
+ Eterm *oheap = (Eterm *) OLD_HEAP(p);
+ Eterm *ohtop = (Eterm *) OLD_HTOP(p);
+ int old;
+ ProcBin *pb;
+ ErlFunThing *eft;
+ ExternalThing *et;
+
+ old = 0;
+ for (pb = MSO(p).mso; pb; pb = pb->next) {
+ Eterm *ptr = (Eterm *) pb;
+ long refc = erts_refc_read(&pb->val->refc, 1);
+ ERTS_CHK_OFFHEAP_ASSERT(refc >= 1);
+ if (old) {
+ ERTS_CHK_OFFHEAP_ASSERT(oheap <= ptr && ptr < ohtop);
+ }
+ else if (oheap <= ptr && ptr < ohtop)
+ old = 1;
+ else {
+ ERTS_CHK_OFFHEAP_ASSERT(within2(ptr, p, htop));
+ }
+ }
+
+ old = 0;
+ for (eft = MSO(p).funs; eft; eft = eft->next) {
+ Eterm *ptr = (Eterm *) eft;
+ long refc = erts_refc_read(&eft->fe->refc, 1);
+ ERTS_CHK_OFFHEAP_ASSERT(refc >= 1);
+ if (old)
+ ERTS_CHK_OFFHEAP_ASSERT(oheap <= ptr && ptr < ohtop);
+ else if (oheap <= ptr && ptr < ohtop)
+ old = 1;
+ else
+ ERTS_CHK_OFFHEAP_ASSERT(within2(ptr, p, htop));
+ }
+
+ old = 0;
+ for (et = MSO(p).externals; et; et = et->next) {
+ Eterm *ptr = (Eterm *) et;
+ long refc = erts_refc_read(&et->node->refc, 1);
+ ERTS_CHK_OFFHEAP_ASSERT(refc >= 1);
+#ifdef ERTS_OFFHEAP_DEBUG_CHK_CIRCULAR_EXTERNAL_LIST
+ ERTS_CHK_OFFHEAP_ASSERT(!(et->header & ERTS_EXTERNAL_VISITED_BIT));
+#endif
+ if (old)
+ ERTS_CHK_OFFHEAP_ASSERT(oheap <= ptr && ptr < ohtop);
+ else if (oheap <= ptr && ptr < ohtop)
+ old = 1;
+ else
+ ERTS_CHK_OFFHEAP_ASSERT(within2(ptr, p, htop));
+#ifdef ERTS_OFFHEAP_DEBUG_CHK_CIRCULAR_EXTERNAL_LIST
+ et->header |= ERTS_EXTERNAL_VISITED_BIT;
+#endif
+ }
+
+#ifdef ERTS_OFFHEAP_DEBUG_CHK_CIRCULAR_EXTERNAL_LIST
+ for (et = MSO(p).externals; et; et = et->next)
+ et->header &= ~ERTS_EXTERNAL_VISITED_BIT;
+#endif
+
+}
+
+void
+erts_check_off_heap(Process *p)
+{
+ erts_check_off_heap2(p, NULL);
+}
+
+#endif
diff --git a/erts/emulator/beam/erl_gc.h b/erts/emulator/beam/erl_gc.h
new file mode 100644
index 0000000000..af55b6363f
--- /dev/null
+++ b/erts/emulator/beam/erl_gc.h
@@ -0,0 +1,72 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2007-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+#ifndef __ERL_GC_H__
+#define __ERL_GC_H__
+
+/* GC declarations shared by beam/erl_gc.c and hipe/hipe_gc.c */
+
+#ifdef DEBUG
+# define HARDDEBUG 1
+#endif
+
+#define IS_MOVED(x) (!is_header((x)))
+
+#define MOVE_CONS(PTR,CAR,HTOP,ORIG) \
+do { \
+ Eterm gval; \
+ \
+ HTOP[0] = CAR; /* copy car */ \
+ HTOP[1] = PTR[1]; /* copy cdr */ \
+ gval = make_list(HTOP); /* new location */ \
+ *ORIG = gval; /* redirect original reference */ \
+ PTR[0] = THE_NON_VALUE; /* store forwarding indicator */ \
+ PTR[1] = gval; /* store forwarding address */ \
+ HTOP += 2; /* update tospace htop */ \
+} while(0)
+
+#define MOVE_BOXED(PTR,HDR,HTOP,ORIG) \
+do { \
+ Eterm gval; \
+ Sint nelts; \
+ \
+ ASSERT(is_header(HDR)); \
+ gval = make_boxed(HTOP); \
+ *ORIG = gval; \
+ *HTOP++ = HDR; \
+ *PTR++ = gval; \
+ nelts = header_arity(HDR); \
+ switch ((HDR) & _HEADER_SUBTAG_MASK) { \
+ case SUB_BINARY_SUBTAG: nelts++; break; \
+ case FUN_SUBTAG: nelts+=((ErlFunThing*)(PTR-1))->num_free+1; break; \
+ } \
+ while (nelts--) \
+ *HTOP++ = *PTR++; \
+} while(0)
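+
+/*
+ * Forwarding sketch (hypothetical addresses, assuming 32-bit words): a
+ * cons cell at fromspace 0x100 moved to tospace 0x200 is left as
+ *
+ *   0x100: THE_NON_VALUE        (marks the cell as moved)
+ *   0x104: make_list(0x200)     (forwarding address)
+ *
+ * while MOVE_BOXED overwrites the header word with make_boxed() of the
+ * new location, so IS_MOVED() on that word becomes true. Later
+ * references into the moved area are redirected to the stored address.
+ */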
+
+#define in_area(ptr,start,nbytes) \
+ ((unsigned long)((char*)(ptr) - (char*)(start)) < (nbytes))
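+
+/*
+ * in_area() relies on unsigned wraparound: a ptr below start yields a
+ * huge difference and fails the '< nbytes' test, so a single compare
+ * checks both bounds. Sketch with hypothetical addresses:
+ *
+ *   in_area((void*)0x1008, (void*)0x1000, 0x100) -> 0x08 < 0x100: true
+ *   in_area((void*)0x0ff8, (void*)0x1000, 0x100) -> wraps:        false
+ */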
+
+extern Uint erts_test_long_gc_sleep;
+
+#if defined(DEBUG) || defined(ERTS_OFFHEAP_DEBUG)
+int within(Eterm *ptr, Process *p);
+#endif
+
+#endif /* __ERL_GC_H__ */
diff --git a/erts/emulator/beam/erl_goodfit_alloc.c b/erts/emulator/beam/erl_goodfit_alloc.c
new file mode 100644
index 0000000000..ea2ba4d55c
--- /dev/null
+++ b/erts/emulator/beam/erl_goodfit_alloc.c
@@ -0,0 +1,662 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2003-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+
+/*
+ * Description: A "good fit" allocator. Segregated free-lists with a
+ * maximum search depth are used in order to find a good
+ * fit fast. Each free-list contains blocks of sizes in a
+ * specific range. First the free-list
+ * covering the desired size is searched if it is not empty.
+ * This search is stopped when the maximum search depth has
+ * been reached. If no free block was found in the free-list
+ * covering the desired size, the next non-empty free-list
+ * covering larger sizes is searched. The maximum search
+ * depth is by default 3. The insert and delete operations
+ * are O(1) and the search operation is O(n) where n is the
+ *              maximum search depth, i.e., by default all operations
+ * are O(1).
+ *
+ * This module is a callback-module for erl_alloc_util.c
+ *
+ * Author: Rickard Green
+ */
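+
+/*
+ * Search sketch (illustration only; the actual flow is get_free_block()
+ * via find_bucket() and search_bucket() below):
+ *
+ *   bkt = first non-empty free-list covering sizes >= wanted size
+ *   scan at most max_blk_search blocks in bkt for a good fit
+ *   if none found, take a block from the next non-empty larger bucket,
+ *   where any block is guaranteed to be big enough
+ */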
+
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+#include "global.h"
+#define GET_ERL_GF_ALLOC_IMPL
+#include "erl_goodfit_alloc.h"
+
+#define MIN_MBC_SZ (16*1024)
+#define MIN_MBC_FIRST_FREE_SZ (4*1024)
+
+#define MAX_SUB_MASK_IX \
+ ((((Uint)1) << (NO_OF_BKT_IX_BITS - SUB_MASK_IX_SHIFT)) - 1)
+#define MAX_SUB_BKT_IX ((((Uint)1) << SUB_MASK_IX_SHIFT) - 1)
+#define MAX_BKT_IX (NO_OF_BKTS - 1)
+
+#define MIN_BLK_SZ UNIT_CEILING(sizeof(GFFreeBlock_t) + sizeof(Uint))
+
+#define IX2SBIX(IX) ((IX) & (~(~((Uint)0) << SUB_MASK_IX_SHIFT)))
+#define IX2SMIX(IX) ((IX) >> SUB_MASK_IX_SHIFT)
+#define MAKE_BKT_IX(SMIX, SBIX) \
+ ((((Uint)(SMIX)) << SUB_MASK_IX_SHIFT) | ((Uint)(SBIX)))
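+
+/*
+ * Worked example, assuming a 32-bit build (SUB_MASK_IX_SHIFT == 5):
+ * bucket index 70 splits into sub mask index IX2SMIX(70) == 70 >> 5 == 2
+ * and sub bucket index IX2SBIX(70) == 70 & 31 == 6, and
+ * MAKE_BKT_IX(2, 6) == (2 << 5) | 6 == 70 reassembles it.
+ */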
+
+#define SET_BKT_MASK_IX(BM, IX) \
+do { \
+ int sub_mask_ix__ = IX2SMIX((IX)); \
+ (BM).main |= (((Uint) 1) << sub_mask_ix__); \
+ (BM).sub[sub_mask_ix__] |= (((Uint)1) << IX2SBIX((IX))); \
+} while (0)
+
+#define UNSET_BKT_MASK_IX(BM, IX) \
+do { \
+ int sub_mask_ix__ = IX2SMIX((IX)); \
+ (BM).sub[sub_mask_ix__] &= ~(((Uint)1) << IX2SBIX((IX))); \
+ if (!(BM).sub[sub_mask_ix__]) \
+ (BM).main &= ~(((Uint)1) << sub_mask_ix__); \
+} while (0)
+
+/* Buckets ... */
+
+#define BKT_INTRVL_A (1*sizeof(Unit_t))
+#define BKT_INTRVL_B (16*sizeof(Unit_t))
+#define BKT_INTRVL_C (96*sizeof(Unit_t))
+
+#define BKT_MIN_SIZE_A MIN_BLK_SZ
+#define BKT_MIN_SIZE_B (BKT_MAX_SIZE_A + 1)
+#define BKT_MIN_SIZE_C (BKT_MAX_SIZE_B + 1)
+#define BKT_MIN_SIZE_D (BKT_MAX_SIZE_C + 1)
+
+#define BKT_MAX_SIZE_A ((NO_OF_BKTS/4)*BKT_INTRVL_A+BKT_MIN_SIZE_A-1)
+#define BKT_MAX_SIZE_B ((NO_OF_BKTS/4)*BKT_INTRVL_B+BKT_MIN_SIZE_B-1)
+#define BKT_MAX_SIZE_C ((NO_OF_BKTS/4)*BKT_INTRVL_C+BKT_MIN_SIZE_C-1)
+
+
+#define BKT_MAX_IX_A ((NO_OF_BKTS*1)/4 - 1)
+#define BKT_MAX_IX_B ((NO_OF_BKTS*2)/4 - 1)
+#define BKT_MAX_IX_C ((NO_OF_BKTS*3)/4 - 1)
+#define BKT_MAX_IX_D ((NO_OF_BKTS*4)/4 - 1)
+
+#define BKT_MIN_IX_A (0)
+#define BKT_MIN_IX_B (BKT_MAX_IX_A + 1)
+#define BKT_MIN_IX_C (BKT_MAX_IX_B + 1)
+#define BKT_MIN_IX_D (BKT_MAX_IX_C + 1)
+
+
+#define BKT_IX_(BAP, SZ) \
+ ((SZ) <= BKT_MAX_SIZE_A \
+ ? (((SZ) - BKT_MIN_SIZE_A)/BKT_INTRVL_A + BKT_MIN_IX_A) \
+ : ((SZ) <= BKT_MAX_SIZE_B \
+ ? (((SZ) - BKT_MIN_SIZE_B)/BKT_INTRVL_B + BKT_MIN_IX_B) \
+ : ((SZ) <= BKT_MAX_SIZE_C \
+ ? (((SZ) - BKT_MIN_SIZE_C)/BKT_INTRVL_C + BKT_MIN_IX_C) \
+ : ((SZ) <= (BAP)->bkt_max_size_d \
+ ? (((SZ) - BKT_MIN_SIZE_D)/(BAP)->bkt_intrvl_d + BKT_MIN_IX_D)\
+ : (NO_OF_BKTS - 1)))))
+
+#define BKT_MIN_SZ_(BAP, IX) \
+ ((IX) <= BKT_MAX_IX_A \
+ ? (((IX) - BKT_MIN_IX_A)*BKT_INTRVL_A + BKT_MIN_SIZE_A) \
+ : ((IX) <= BKT_MAX_IX_B \
+ ? (((IX) - BKT_MIN_IX_B)*BKT_INTRVL_B + BKT_MIN_SIZE_B) \
+ : ((IX) <= BKT_MAX_IX_C \
+ ? (((IX) - BKT_MIN_IX_C)*BKT_INTRVL_C + BKT_MIN_SIZE_C) \
+ : (((IX) - BKT_MIN_IX_D)*(BAP)->bkt_intrvl_d + BKT_MIN_SIZE_D))))
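+
+/*
+ * Reading example for BKT_IX_ (assuming sizeof(Unit_t) == 8, i.e.
+ * BKT_INTRVL_A == 8): the first NO_OF_BKTS/4 buckets each cover an 8
+ * byte size range starting at MIN_BLK_SZ, so a block of size
+ * MIN_BLK_SZ + 20 maps to bucket index 20/8 == 2. Ranges B-D work the
+ * same way with coarser intervals, and BKT_MIN_SZ_ maps an index back
+ * to the smallest size in its bucket.
+ */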
+
+#ifdef DEBUG
+
+static int
+BKT_IX(GFAllctr_t *gfallctr, Uint size)
+{
+ int ix;
+ ASSERT(size >= MIN_BLK_SZ);
+
+ ix = BKT_IX_(gfallctr, size);
+
+ ASSERT(0 <= ix && ix <= BKT_MAX_IX_D);
+
+ return ix;
+}
+
+static Uint
+BKT_MIN_SZ(GFAllctr_t *gfallctr, int ix)
+{
+ Uint size;
+ ASSERT(0 <= ix && ix <= BKT_MAX_IX_D);
+
+ size = BKT_MIN_SZ_(gfallctr, ix);
+
+#ifdef ERTS_ALLOC_UTIL_HARD_DEBUG
+ ASSERT(ix == BKT_IX(gfallctr, size));
+ ASSERT(size == MIN_BLK_SZ || ix - 1 == BKT_IX(gfallctr, size - 1));
+#endif
+
+ return size;
+}
+
+#else
+
+#define BKT_IX BKT_IX_
+#define BKT_MIN_SZ BKT_MIN_SZ_
+
+#endif
+
+
+/* Prototypes of callback functions */
+static Block_t * get_free_block (Allctr_t *, Uint,
+ Block_t *, Uint);
+static void link_free_block (Allctr_t *, Block_t *);
+static void unlink_free_block (Allctr_t *, Block_t *);
+static void update_last_aux_mbc (Allctr_t *, Carrier_t *);
+static Eterm info_options (Allctr_t *, char *, int *,
+ void *, Uint **, Uint *);
+static void init_atoms (void);
+
+#ifdef ERTS_ALLOC_UTIL_HARD_DEBUG
+static void check_block (Allctr_t *, Block_t *, int);
+static void check_mbc (Allctr_t *, Carrier_t *);
+#endif
+
+static int atoms_initialized = 0;
+
+void
+erts_gfalc_init(void)
+{
+ atoms_initialized = 0;
+}
+
+
+Allctr_t *
+erts_gfalc_start(GFAllctr_t *gfallctr,
+ GFAllctrInit_t *gfinit,
+ AllctrInit_t *init)
+{
+ GFAllctr_t nulled_state = {{0}};
+ /* {{0}} is used instead of {0}, in order to avoid (an incorrect) gcc
+ warning. gcc warns if {0} is used as initializer of a struct when
+ the first member is a struct (not if, for example, the third member
+ is a struct). */
+ Allctr_t *allctr = (Allctr_t *) gfallctr;
+
+ sys_memcpy((void *) gfallctr, (void *) &nulled_state, sizeof(GFAllctr_t));
+
+ allctr->mbc_header_size = sizeof(Carrier_t);
+ allctr->min_mbc_size = MIN_MBC_SZ;
+ allctr->min_mbc_first_free_size = MIN_MBC_FIRST_FREE_SZ;
+ allctr->min_block_size = sizeof(GFFreeBlock_t);
+
+
+ allctr->vsn_str = ERTS_ALC_GF_ALLOC_VSN_STR;
+
+ /* Callback functions */
+
+ allctr->get_free_block = get_free_block;
+ allctr->link_free_block = link_free_block;
+ allctr->unlink_free_block = unlink_free_block;
+ allctr->info_options = info_options;
+
+ allctr->get_next_mbc_size = NULL;
+ allctr->creating_mbc = update_last_aux_mbc;
+ allctr->destroying_mbc = update_last_aux_mbc;
+
+ allctr->init_atoms = init_atoms;
+
+#ifdef ERTS_ALLOC_UTIL_HARD_DEBUG
+ allctr->check_block = check_block;
+ allctr->check_mbc = check_mbc;
+#endif
+
+ allctr->atoms_initialized = 0;
+
+ if (init->sbct > BKT_MIN_SIZE_D-1)
+ gfallctr->bkt_intrvl_d =
+ UNIT_CEILING(((3*(init->sbct - BKT_MIN_SIZE_D - 1)
+ /(NO_OF_BKTS/4 - 1)) + 1)
+ / 2);
+ if (gfallctr->bkt_intrvl_d < BKT_INTRVL_C)
+ gfallctr->bkt_intrvl_d = BKT_INTRVL_C;
+ gfallctr->bkt_max_size_d = ((NO_OF_BKTS/4)*gfallctr->bkt_intrvl_d
+ + BKT_MIN_SIZE_D
+ - 1);
+
+ gfallctr->max_blk_search = (Uint) MAX(1, gfinit->mbsd);
+
+ if (!erts_alcu_start(allctr, init))
+ return NULL;
+
+ if (allctr->min_block_size != MIN_BLK_SZ)
+ return NULL;
+
+ return allctr;
+}
+
+static int
+find_bucket(BucketMask_t *bmask, int min_index)
+{
+ int min, mid, max;
+ int sub_mask_ix, sub_bkt_ix;
+ int ix = -1;
+
+#undef GET_MIN_BIT
+#define GET_MIN_BIT(MinBit, BitMask, Min, Max) \
+ min = (Min); \
+ max = (Max); \
+ while(max != min) { \
+ mid = ((max - min) >> 1) + min; \
+ if((BitMask) \
+ & (~(~((Uint) 0) << (mid + 1))) \
+ & (~((Uint) 0) << min)) \
+ max = mid; \
+ else \
+ min = mid + 1; \
+ } \
+ (MinBit) = min
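+
+    /*
+     * GET_MIN_BIT is a binary search for the least significant set bit
+     * of BitMask within [Min, Max]. Sketch: for a BitMask with bits 3
+     * and 5 set and Min == 0, the interval narrows until min == max == 3,
+     * so MinBit becomes 3.
+     */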
+
+
+ ASSERT(bmask->main < (((Uint) 1) << (MAX_SUB_MASK_IX+1)));
+
+ sub_mask_ix = IX2SMIX(min_index);
+
+ if ((bmask->main & (~((Uint) 0) << sub_mask_ix)) == 0)
+ return -1;
+
+ /* There exists a non empty bucket; find it... */
+
+ if (bmask->main & (((Uint) 1) << sub_mask_ix)) {
+ sub_bkt_ix = IX2SBIX(min_index);
+ if ((bmask->sub[sub_mask_ix] & (~((Uint) 0) << sub_bkt_ix)) == 0) {
+ sub_mask_ix++;
+ sub_bkt_ix = 0;
+ if ((bmask->main & (~((Uint) 0)<< sub_mask_ix)) == 0)
+ return -1;
+ }
+ else
+ goto find_sub_bkt_ix;
+ }
+ else {
+ sub_mask_ix++;
+ sub_bkt_ix = 0;
+ }
+
+ ASSERT(sub_mask_ix <= MAX_SUB_MASK_IX);
+ /* Has to be a bit > sub_mask_ix */
+ ASSERT(bmask->main & (~((Uint) 0) << (sub_mask_ix)));
+ GET_MIN_BIT(sub_mask_ix, bmask->main, sub_mask_ix, MAX_SUB_MASK_IX);
+
+ find_sub_bkt_ix:
+ ASSERT(sub_mask_ix <= MAX_SUB_MASK_IX);
+ ASSERT(sub_bkt_ix <= MAX_SUB_BKT_IX);
+
+ if ((bmask->sub[sub_mask_ix] & (((Uint) 1) << sub_bkt_ix)) == 0) {
+	ASSERT(sub_bkt_ix + 1 <= MAX_SUB_BKT_IX);
+ /* Has to be a bit > sub_bkt_ix */
+ ASSERT(bmask->sub[sub_mask_ix] & (~((Uint) 0) << sub_bkt_ix));
+
+ GET_MIN_BIT(sub_bkt_ix,
+ bmask->sub[sub_mask_ix],
+ sub_bkt_ix+1,
+ MAX_SUB_BKT_IX);
+
+ ASSERT(sub_bkt_ix <= MAX_SUB_BKT_IX);
+ }
+
+ ix = MAKE_BKT_IX(sub_mask_ix, sub_bkt_ix);
+
+ ASSERT(0 <= ix && ix < NO_OF_BKTS);
+
+ return ix;
+
+#undef GET_MIN_BIT
+
+}
+
+static Block_t *
+search_bucket(Allctr_t *allctr, int ix, Uint size)
+{
+ int i;
+ Uint min_sz;
+ Uint blk_sz;
+ Uint cand_sz = 0;
+ Uint max_blk_search;
+ GFFreeBlock_t *blk;
+ GFFreeBlock_t *cand = NULL;
+ int blk_on_lambc;
+ int cand_on_lambc = 0;
+ GFAllctr_t *gfallctr = (GFAllctr_t *) allctr;
+
+ ASSERT(0 <= ix && ix <= NO_OF_BKTS - 1);
+
+ if (!gfallctr->buckets[ix])
+ return NULL;
+
+ min_sz = BKT_MIN_SZ(gfallctr, ix);
+ if (min_sz < size)
+ min_sz = size;
+
+ max_blk_search = gfallctr->max_blk_search;
+ for (blk = gfallctr->buckets[ix], i = 0;
+ blk && i < max_blk_search;
+ blk = blk->next, i++) {
+
+ blk_sz = BLK_SZ(blk);
+ blk_on_lambc = (((char *) blk) < gfallctr->last_aux_mbc_end
+ && gfallctr->last_aux_mbc_start <= ((char *) blk));
+
+ if (blk_sz == min_sz && !blk_on_lambc)
+ return (Block_t *) blk;
+
+ if (blk_sz >= min_sz
+ && (!cand
+ || (!blk_on_lambc && (cand_on_lambc || blk_sz < cand_sz))
+ || (blk_on_lambc && cand_on_lambc && blk_sz < cand_sz))) {
+ cand_sz = blk_sz;
+ cand = blk;
+ cand_on_lambc = blk_on_lambc;
+ }
+
+ }
+ return (Block_t *) cand;
+}
+
+static Block_t *
+get_free_block(Allctr_t *allctr, Uint size,
+ Block_t *cand_blk, Uint cand_size)
+{
+ GFAllctr_t *gfallctr = (GFAllctr_t *) allctr;
+ int unsafe_bi, min_bi;
+ Block_t *blk;
+
+ ASSERT(!cand_blk || cand_size >= size);
+
+ unsafe_bi = BKT_IX(gfallctr, size);
+
+ min_bi = find_bucket(&gfallctr->bucket_mask, unsafe_bi);
+ if (min_bi < 0)
+ return NULL;
+
+ if (min_bi == unsafe_bi) {
+ blk = search_bucket(allctr, min_bi, size);
+ if (blk) {
+ if (cand_blk && cand_size <= BLK_SZ(blk))
+ return NULL; /* cand_blk was better */
+ unlink_free_block(allctr, blk);
+ return blk;
+ }
+ if (min_bi < NO_OF_BKTS - 1) {
+ min_bi = find_bucket(&gfallctr->bucket_mask, min_bi + 1);
+ if (min_bi < 0)
+ return NULL;
+ }
+ else
+ return NULL;
+ }
+ else {
+ ASSERT(min_bi > unsafe_bi);
+ }
+
+ /* We are guaranteed to find a block that fits in this bucket */
+ blk = search_bucket(allctr, min_bi, size);
+ ASSERT(blk);
+ if (cand_blk && cand_size <= BLK_SZ(blk))
+ return NULL; /* cand_blk was better */
+ unlink_free_block(allctr, blk);
+ return blk;
+}
+
+
+
+static void
+link_free_block(Allctr_t *allctr, Block_t *block)
+{
+ GFAllctr_t *gfallctr = (GFAllctr_t *) allctr;
+ GFFreeBlock_t *blk = (GFFreeBlock_t *) block;
+ Uint sz = BLK_SZ(blk);
+ int i = BKT_IX(gfallctr, sz);
+
+ ASSERT(sz >= MIN_BLK_SZ);
+
+ SET_BKT_MASK_IX(gfallctr->bucket_mask, i);
+
+ blk->prev = NULL;
+ blk->next = gfallctr->buckets[i];
+ if (blk->next) {
+ ASSERT(!blk->next->prev);
+ blk->next->prev = blk;
+ }
+ gfallctr->buckets[i] = blk;
+}
+
+static void
+unlink_free_block(Allctr_t *allctr, Block_t *block)
+{
+ GFAllctr_t *gfallctr = (GFAllctr_t *) allctr;
+ GFFreeBlock_t *blk = (GFFreeBlock_t *) block;
+ Uint sz = BLK_SZ(blk);
+ int i = BKT_IX(gfallctr, sz);
+
+ if (!blk->prev) {
+ ASSERT(gfallctr->buckets[i] == blk);
+ gfallctr->buckets[i] = blk->next;
+ }
+ else
+ blk->prev->next = blk->next;
+ if (blk->next)
+ blk->next->prev = blk->prev;
+
+ if (!gfallctr->buckets[i])
+ UNSET_BKT_MASK_IX(gfallctr->bucket_mask, i);
+}
+
+static void
+update_last_aux_mbc(Allctr_t *allctr, Carrier_t *mbc)
+{
+ GFAllctr_t *gfallctr = (GFAllctr_t *) allctr;
+
+ if (gfallctr->last_aux_mbc_start != (char *) allctr->mbc_list.last) {
+
+ if (allctr->mbc_list.last
+ && allctr->main_carrier != allctr->mbc_list.last) {
+ gfallctr->last_aux_mbc_start = (char *) allctr->mbc_list.last;
+ gfallctr->last_aux_mbc_end = (((char *) allctr->mbc_list.last)
+ + CARRIER_SZ(allctr->mbc_list.last));
+ }
+ else {
+ gfallctr->last_aux_mbc_start = NULL;
+ gfallctr->last_aux_mbc_end = NULL;
+ }
+
+ }
+}
+
+static struct {
+ Eterm mbsd;
+ Eterm as;
+ Eterm gf;
+#ifdef DEBUG
+ Eterm end_of_atoms;
+#endif
+} am;
+
+static void ERTS_INLINE atom_init(Eterm *atom, char *name)
+{
+ *atom = am_atom_put(name, strlen(name));
+}
+#define AM_INIT(AM) atom_init(&am.AM, #AM)
+
+static void
+init_atoms(void)
+{
+#ifdef DEBUG
+ Eterm *atom;
+#endif
+
+ if (atoms_initialized)
+ return;
+
+#ifdef DEBUG
+ for (atom = (Eterm *) &am; atom <= &am.end_of_atoms; atom++) {
+ *atom = THE_NON_VALUE;
+ }
+#endif
+
+ AM_INIT(mbsd);
+ AM_INIT(as);
+ AM_INIT(gf);
+
+#ifdef DEBUG
+ for (atom = (Eterm *) &am; atom < &am.end_of_atoms; atom++) {
+ ASSERT(*atom != THE_NON_VALUE);
+ }
+#endif
+
+ atoms_initialized = 1;
+}
+
+#define bld_uint erts_bld_uint
+#define bld_cons erts_bld_cons
+#define bld_tuple erts_bld_tuple
+
+static ERTS_INLINE void
+add_2tup(Uint **hpp, Uint *szp, Eterm *lp, Eterm el1, Eterm el2)
+{
+ *lp = bld_cons(hpp, szp, bld_tuple(hpp, szp, 2, el1, el2), *lp);
+}
+
+static Eterm
+info_options(Allctr_t *allctr,
+ char *prefix,
+ int *print_to_p,
+ void *print_to_arg,
+ Uint **hpp,
+ Uint *szp)
+{
+ GFAllctr_t *gfallctr = (GFAllctr_t *) allctr;
+ Eterm res = THE_NON_VALUE;
+
+ if (print_to_p) {
+ erts_print(*print_to_p,
+ print_to_arg,
+ "%smbsd: %lu\n"
+ "%sas: gf\n",
+ prefix, gfallctr->max_blk_search,
+ prefix);
+ }
+
+ if (hpp || szp) {
+
+ if (!atoms_initialized)
+ erl_exit(1, "%s:%d: Internal error: Atoms not initialized",
+		     __FILE__, __LINE__);
+
+ res = NIL;
+ add_2tup(hpp, szp, &res, am.as, am.gf);
+ add_2tup(hpp, szp, &res,
+ am.mbsd,
+ bld_uint(hpp, szp, gfallctr->max_blk_search));
+ }
+
+ return res;
+}
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\
+ * NOTE: erts_gfalc_test() is only supposed to be used for testing. *
+ * *
+ * Keep alloc_SUITE_data/allocator_test.h updated if changes are made *
+ * to erts_gfalc_test() *
+\* */
+
+unsigned long
+erts_gfalc_test(unsigned long op, unsigned long a1, unsigned long a2)
+{
+ switch (op) {
+ case 0x100: return (unsigned long) BKT_IX((GFAllctr_t *) a1, (Uint) a2);
+ case 0x101: return (unsigned long) BKT_MIN_SZ((GFAllctr_t *) a1, (int) a2);
+ case 0x102: return (unsigned long) NO_OF_BKTS;
+ case 0x103: return (unsigned long)
+ find_bucket(&((GFAllctr_t *) a1)->bucket_mask, (int) a2);
+ default: ASSERT(0); return ~((unsigned long) 0);
+ }
+}
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\
+ * Debug functions *
+\* */
+
+#ifdef ERTS_ALLOC_UTIL_HARD_DEBUG
+void
+check_block(Allctr_t *allctr, Block_t * blk, int free_block)
+{
+ GFAllctr_t *gfallctr = (GFAllctr_t *) allctr;
+ int i;
+ int bi;
+ int found;
+ GFFreeBlock_t *fblk;
+
+ if(free_block) {
+ Uint blk_sz = BLK_SZ(blk);
+ bi = BKT_IX(gfallctr, blk_sz);
+
+ ASSERT(gfallctr->bucket_mask.main & (((Uint) 1) << IX2SMIX(bi)));
+ ASSERT(gfallctr->bucket_mask.sub[IX2SMIX(bi)]
+ & (((Uint) 1) << IX2SBIX(bi)));
+
+ found = 0;
+ for (fblk = gfallctr->buckets[bi]; fblk; fblk = fblk->next)
+ if (blk == (Block_t *) fblk)
+ found++;
+ ASSERT(found == 1);
+ }
+ else
+ bi = -1;
+
+ found = 0;
+ for (i = 0; i < NO_OF_BKTS; i++) {
+ if (i == bi)
+ continue; /* Already checked */
+ for (fblk = gfallctr->buckets[i]; fblk; fblk = fblk->next)
+ if (blk == (Block_t *) fblk)
+ found++;
+ }
+
+ ASSERT(found == 0);
+
+}
+
+void
+check_mbc(Allctr_t *allctr, Carrier_t *mbc)
+{
+ GFAllctr_t *gfallctr = (GFAllctr_t *) allctr;
+ int bi;
+
+ for(bi = 0; bi < NO_OF_BKTS; bi++) {
+ if ((gfallctr->bucket_mask.main & (((Uint) 1) << IX2SMIX(bi)))
+ && (gfallctr->bucket_mask.sub[IX2SMIX(bi)]
+ & (((Uint) 1) << IX2SBIX(bi)))) {
+ ASSERT(gfallctr->buckets[bi] != NULL);
+ }
+ else {
+ ASSERT(gfallctr->buckets[bi] == NULL);
+ }
+ }
+}
+
+#endif
diff --git a/erts/emulator/beam/erl_goodfit_alloc.h b/erts/emulator/beam/erl_goodfit_alloc.h
new file mode 100644
index 0000000000..3d1b8c01f6
--- /dev/null
+++ b/erts/emulator/beam/erl_goodfit_alloc.h
@@ -0,0 +1,88 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2003-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+
+#ifndef ERL_GOODFIT_ALLOC__
+#define ERL_GOODFIT_ALLOC__
+
+#include "erl_alloc_util.h"
+
+#define ERTS_ALC_GF_ALLOC_VSN_STR "2.1"
+
+typedef struct GFAllctr_t_ GFAllctr_t;
+
+typedef struct {
+ Uint mbsd;
+} GFAllctrInit_t;
+
+#define ERTS_DEFAULT_GF_ALLCTR_INIT { \
+ 3, /* (amount) mbsd: max (mbc) block search depth */\
+}
+
+void erts_gfalc_init(void);
+Allctr_t *erts_gfalc_start(GFAllctr_t *, GFAllctrInit_t *, AllctrInit_t *);
+
+#endif /* #ifndef ERL_GOODFIT_ALLOC__ */
+
+
+
+#if defined(GET_ERL_GF_ALLOC_IMPL) && !defined(ERL_GF_ALLOC_IMPL__)
+#define ERL_GF_ALLOC_IMPL__
+
+#define GET_ERL_ALLOC_UTIL_IMPL
+#include "erl_alloc_util.h"
+
+#define NO_OF_BKT_IX_BITS (8)
+#ifdef ARCH_64
+# define SUB_MASK_IX_SHIFT (6)
+#else
+# define SUB_MASK_IX_SHIFT (5)
+#endif
+#define NO_OF_BKTS (((Uint) 1) << NO_OF_BKT_IX_BITS)
+#define NO_OF_SUB_MASKS (NO_OF_BKTS/(((Uint) 1) << SUB_MASK_IX_SHIFT))
+
+typedef struct {
+ Uint main;
+ Uint sub[NO_OF_SUB_MASKS];
+} BucketMask_t;
+
+typedef struct GFFreeBlock_t_ GFFreeBlock_t;
+struct GFFreeBlock_t_ {
+ Block_t block_head;
+ GFFreeBlock_t *prev;
+ GFFreeBlock_t *next;
+};
+
+struct GFAllctr_t_ {
+ Allctr_t allctr; /* Has to be first! */
+
+ char * last_aux_mbc_start;
+ char * last_aux_mbc_end;
+ Uint bkt_max_size_d;
+ Uint bkt_intrvl_d;
+ BucketMask_t bucket_mask;
+ GFFreeBlock_t * buckets[NO_OF_BKTS];
+ Uint max_blk_search;
+
+};
+
+unsigned long erts_gfalc_test(unsigned long, unsigned long, unsigned long);
+
+#endif /* #if defined(GET_ERL_GF_ALLOC_IMPL)
+ && !defined(ERL_GF_ALLOC_IMPL__) */
diff --git a/erts/emulator/beam/erl_init.c b/erts/emulator/beam/erl_init.c
new file mode 100644
index 0000000000..8afd349b85
--- /dev/null
+++ b/erts/emulator/beam/erl_init.c
@@ -0,0 +1,1461 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 1997-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+
+#include "sys.h"
+#include <ctype.h>
+#include "erl_vm.h"
+#include "global.h"
+#include "erl_process.h"
+#include "error.h"
+#include "erl_version.h"
+#include "erl_db.h"
+#include "beam_bp.h"
+#include "erl_bits.h"
+#include "erl_binary.h"
+#include "dist.h"
+#include "erl_mseg.h"
+#include "erl_nmgc.h"
+#include "erl_threads.h"
+#include "erl_bif_timer.h"
+#include "erl_instrument.h"
+#include "erl_printf_term.h"
+#include "erl_misc_utils.h"
+#include "packet_parser.h"
+
+#ifdef HIPE
+#include "hipe_mode_switch.h" /* for hipe_mode_switch_init() */
+#include "hipe_signal.h" /* for hipe_signal_init() */
+#endif
+
+#ifdef HAVE_SYS_RESOURCE_H
+# include <sys/resource.h>
+#endif
+
+/*
+ * Note about VxWorks: All variables must be initialized by executable code,
+ * not by an initializer. Otherwise a new instance of the emulator will
+ * inherit previous values.
+ */
+
+extern void erl_crash_dump_v(char *, int, char *, va_list);
+#ifdef __WIN32__
+extern void ConNormalExit(void);
+extern void ConWaitForExit(void);
+#endif
+
+#define ERTS_MIN_COMPAT_REL 7
+
+#ifdef ERTS_SMP
+erts_smp_atomic_t erts_writing_erl_crash_dump;
+#else
+volatile int erts_writing_erl_crash_dump = 0;
+#endif
+int erts_initialized = 0;
+
+#if defined(USE_THREADS) && !defined(ERTS_SMP)
+static erts_tid_t main_thread;
+#endif
+
+erts_cpu_info_t *erts_cpuinfo;
+
+int erts_use_sender_punish;
+
+/*
+ * Configurable parameters.
+ */
+
+Uint display_items; /* no of items to display in traces etc */
+Uint display_loads; /* print info about loaded modules */
+int H_MIN_SIZE; /* The minimum heap grain */
+
+Uint32 erts_debug_flags; /* Debug flags. */
+#ifdef ERTS_OPCODE_COUNTER_SUPPORT
+int count_instructions;
+#endif
+int erts_backtrace_depth; /* How many functions to show in a backtrace
+ * in error codes.
+ */
+
+int erts_async_max_threads; /* number of threads for async support */
+int erts_async_thread_suggested_stack_size;
+erts_smp_atomic_t erts_max_gen_gcs;
+
+Eterm erts_error_logger_warnings; /* What to map warning logs to, am_error,
+ am_info or am_warning, am_error is
+ the default for BC */
+
+int erts_compat_rel;
+
+static int use_multi_run_queue;
+static int no_schedulers;
+static int no_schedulers_online;
+
+#ifdef DEBUG
+Uint32 verbose; /* See erl_debug.h for information about verbose */
+#endif
+
+int erts_disable_tolerant_timeofday; /* Time correction can be disabled
+				      * if it is not needed and/or is
+				      * too slow.
+				      */
+
+int erts_modified_timing_level;
+
+int erts_no_crash_dump = 0; /* Use -d to suppress crash dump. */
+
+/*
+ * Other global variables.
+ */
+
+ErtsModifiedTimings erts_modified_timings[] = {
+ /* 0 */ {make_small(0), CONTEXT_REDS, INPUT_REDUCTIONS},
+ /* 1 */ {make_small(0), 2*CONTEXT_REDS, 2*INPUT_REDUCTIONS},
+ /* 2 */ {make_small(0), CONTEXT_REDS/2, INPUT_REDUCTIONS/2},
+ /* 3 */ {make_small(0), 3*CONTEXT_REDS, 3*INPUT_REDUCTIONS},
+ /* 4 */ {make_small(0), CONTEXT_REDS/3, 3*INPUT_REDUCTIONS},
+ /* 5 */ {make_small(0), 4*CONTEXT_REDS, INPUT_REDUCTIONS/2},
+ /* 6 */ {make_small(1), CONTEXT_REDS/4, 2*INPUT_REDUCTIONS},
+ /* 7 */ {make_small(1), 5*CONTEXT_REDS, INPUT_REDUCTIONS/3},
+ /* 8 */ {make_small(10), CONTEXT_REDS/5, 3*INPUT_REDUCTIONS},
+ /* 9 */ {make_small(10), 6*CONTEXT_REDS, INPUT_REDUCTIONS/4}
+};
+
+#define ERTS_MODIFIED_TIMING_LEVELS \
+ (sizeof(erts_modified_timings)/sizeof(ErtsModifiedTimings))
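+
+/*
+ * Example (illustration): "-T 6" selects row 6 of the table above, i.e.
+ * a quarter of the normal context reductions and twice the normal input
+ * reductions, with make_small(1) as the first field.
+ */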
+
+Export *erts_delay_trap = NULL;
+
+int erts_use_r9_pids_ports;
+
+#ifdef HYBRID
+Eterm *global_heap;
+Eterm *global_hend;
+Eterm *global_htop;
+Eterm *global_saved_htop;
+Eterm *global_old_heap;
+Eterm *global_old_hend;
+ErlOffHeap erts_global_offheap;
+Uint global_heap_sz = SH_DEFAULT_SIZE;
+
+#ifndef INCREMENTAL
+Eterm *global_high_water;
+Eterm *global_old_htop;
+#endif
+
+Uint16 global_gen_gcs;
+Uint16 global_max_gen_gcs;
+Uint global_gc_flags;
+
+Uint global_heap_min_sz = SH_DEFAULT_SIZE;
+#endif
+
+int ignore_break;
+int replace_intr;
+
+static ERTS_INLINE int
+has_prefix(const char *prefix, const char *string)
+{
+ int i;
+ for (i = 0; prefix[i]; i++)
+ if (prefix[i] != string[i])
+ return 0;
+ return 1;
+}
+
+static char*
+progname(char *fullname)
+{
+ int i;
+
+ i = strlen(fullname);
+ while (i >= 0) {
+ if ((fullname[i] != '/') && (fullname[i] != '\\'))
+ i--;
+ else
+ break;
+ }
+ return fullname+i+1;
+}
+
+static int
+this_rel_num(void)
+{
+ static int this_rel = -1;
+
+ if (this_rel < 1) {
+ int i;
+ char this_rel_str[] = ERLANG_OTP_RELEASE;
+
+ i = 0;
+ while (this_rel_str[i] && !isdigit((int) this_rel_str[i]))
+ i++;
+ this_rel = atoi(&this_rel_str[i]);
+ if (this_rel < 1)
+ erl_exit(-1, "Unexpected ERLANG_OTP_RELEASE format\n");
+ }
+ return this_rel;
+}
+
+/*
+ * Common error printout function, all error messages
+ * that don't go to the error logger go through here.
+ */
+
+void erl_error(char *fmt, va_list args)
+{
+ erts_vfprintf(stderr, fmt, args);
+}
+
+static void early_init(int *argc, char **argv);
+
+void
+erts_short_init(void)
+{
+ early_init(NULL, NULL);
+ erl_init();
+ erts_initialized = 1;
+}
+
+void
+erl_init(void)
+{
+ init_benchmarking();
+
+#ifdef ERTS_SMP
+ erts_system_block_init();
+#endif
+
+ erts_init_monitors();
+ erts_init_gc();
+ init_time();
+ erts_init_process();
+ erts_init_scheduling(use_multi_run_queue,
+ no_schedulers,
+ no_schedulers_online);
+
+ H_MIN_SIZE = erts_next_heap_size(H_MIN_SIZE, 0);
+
+ erts_init_trace();
+ erts_init_binary();
+ erts_init_bits();
+ erts_init_fun_table();
+ init_atom_table();
+ init_export_table();
+ init_module_table();
+ init_register_table();
+ init_message();
+ erts_bif_info_init();
+ erts_ddll_init();
+ init_emulator();
+ erts_bp_init();
+ init_db(); /* Must be after init_emulator */
+ erts_bif_timer_init();
+ erts_init_node_tables();
+ init_dist();
+ erl_drv_thr_init();
+ init_io();
+ init_copy();
+ init_load();
+ erts_init_bif();
+ erts_init_bif_chksum();
+ erts_init_bif_re();
+ erts_init_unicode(); /* after RE to get access to PCRE unicode */
+ erts_delay_trap = erts_export_put(am_erlang, am_delay_trap, 2);
+ erts_late_init_process();
+#if HAVE_ERTS_MSEG
+ erts_mseg_late_init(); /* Must be after timer (init_time()) and thread
+ initializations */
+#endif
+#ifdef HIPE
+ hipe_mode_switch_init(); /* Must be after init_load/beam_catches/init */
+#endif
+#ifdef _OSE_
+ erl_sys_init_final();
+#endif
+ packet_parser_init();
+}
+
+static void
+init_shared_memory(int argc, char **argv)
+{
+#ifdef HYBRID
+ int arg_size = 0;
+
+ global_heap_sz = erts_next_heap_size(global_heap_sz,0);
+
+ /* Make sure arguments will fit on the heap, no one else will check! */
+ while (argc--)
+ arg_size += 2 + strlen(argv[argc]);
+ if (global_heap_sz < arg_size)
+ global_heap_sz = erts_next_heap_size(arg_size,1);
+
+#ifndef INCREMENTAL
+ global_heap = (Eterm *) ERTS_HEAP_ALLOC(ERTS_ALC_T_HEAP,
+ sizeof(Eterm) * global_heap_sz);
+ global_hend = global_heap + global_heap_sz;
+ global_htop = global_heap;
+ global_high_water = global_heap;
+ global_old_hend = global_old_htop = global_old_heap = NULL;
+#endif
+
+ global_gen_gcs = 0;
+ global_max_gen_gcs = erts_smp_atomic_read(&erts_max_gen_gcs);
+ global_gc_flags = erts_default_process_flags;
+
+ erts_global_offheap.mso = NULL;
+#ifndef HYBRID /* FIND ME! */
+ erts_global_offheap.funs = NULL;
+#endif
+ erts_global_offheap.overhead = 0;
+#endif
+
+#ifdef INCREMENTAL
+ erts_init_incgc();
+#endif
+}
+
+
+/*
+ * Create the very first process.
+ */
+
+void
+erts_first_process(Eterm modname, void* code, unsigned size, int argc, char** argv)
+{
+ int i;
+ Eterm args;
+ Eterm pid;
+ Eterm* hp;
+ Process parent;
+ Process* p;
+ ErlSpawnOpts so;
+
+ if (erts_find_function(modname, am_start, 1) == NULL) {
+ char sbuf[256];
+ Atom* ap;
+
+ ap = atom_tab(atom_val(modname));
+ memcpy(sbuf, ap->name, ap->len);
+ sbuf[ap->len] = '\0';
+ erl_exit(5, "No function %s:start/1\n", sbuf);
+ }
+
+ /*
+ * We need a dummy parent process to be able to call erl_create_process().
+ */
+ erts_init_empty_process(&parent);
+ hp = HAlloc(&parent, argc*2 + 4);
+ args = NIL;
+ for (i = argc-1; i >= 0; i--) {
+ int len = sys_strlen(argv[i]);
+ args = CONS(hp, new_binary(&parent, (byte*)argv[i], len), args);
+ hp += 2;
+ }
+ args = CONS(hp, new_binary(&parent, code, size), args);
+ hp += 2;
+ args = CONS(hp, args, NIL);
+
+ so.flags = 0;
+ pid = erl_create_process(&parent, modname, am_start, args, &so);
+ p = process_tab[internal_pid_index(pid)];
+ p->group_leader = pid;
+
+ erts_cleanup_empty_process(&parent);
+}
+
+/*
+ * XXX Old way of starting. Hopefully soon obsolete.
+ */
+
+static void
+erl_first_process_otp(char* modname, void* code, unsigned size, int argc, char** argv)
+{
+ int i;
+ Eterm start_mod;
+ Eterm args;
+ Eterm* hp;
+ Process parent;
+ ErlSpawnOpts so;
+ Eterm env;
+
+ start_mod = am_atom_put(modname, sys_strlen(modname));
+ if (erts_find_function(start_mod, am_start, 2) == NULL) {
+ erl_exit(5, "No function %s:start/2\n", modname);
+ }
+
+ /*
+ * We need a dummy parent process to be able to call erl_create_process().
+ */
+
+ erts_init_empty_process(&parent);
+ erts_smp_proc_lock(&parent, ERTS_PROC_LOCK_MAIN);
+ hp = HAlloc(&parent, argc*2 + 4);
+ args = NIL;
+ for (i = argc-1; i >= 0; i--) {
+ int len = sys_strlen(argv[i]);
+ args = CONS(hp, new_binary(&parent, (byte*)argv[i], len), args);
+ hp += 2;
+ }
+ env = new_binary(&parent, code, size);
+ args = CONS(hp, args, NIL);
+ hp += 2;
+ args = CONS(hp, env, args);
+
+ so.flags = 0;
+ (void) erl_create_process(&parent, start_mod, am_start, args, &so);
+ erts_smp_proc_unlock(&parent, ERTS_PROC_LOCK_MAIN);
+ erts_cleanup_empty_process(&parent);
+}
+
+Eterm
+erts_preloaded(Process* p)
+{
+ Eterm previous;
+ int j;
+ int need;
+ Eterm mod;
+ Eterm* hp;
+ char* name;
+ const Preload *preload = sys_preloaded();
+
+ j = 0;
+ while (preload[j].name != NULL) {
+ j++;
+ }
+ previous = NIL;
+ need = 2*j;
+ hp = HAlloc(p, need);
+ j = 0;
+ while ((name = preload[j].name) != NULL) {
+ mod = am_atom_put(name, sys_strlen(name));
+ previous = CONS(hp, mod, previous);
+ hp += 2;
+ j++;
+ }
+ return previous;
+}
+
+
+/* static variables that must not change (use same values at restart) */
+static char* program;
+static char* init = "init";
+static char* boot = "boot";
+static int boot_argc;
+static char** boot_argv;
+
+static char *
+get_arg(char* rest, char* next, int* ip)
+{
+ if (*rest == '\0') {
+ if (next == NULL) {
+ erts_fprintf(stderr, "too few arguments\n");
+ erts_usage();
+ }
+ (*ip)++;
+ return next;
+ }
+ return rest;
+}
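+
+/*
+ * get_arg() accepts both joined and separated argument forms
+ * (illustration): "-h512" returns "512" from rest, while "-h 512"
+ * consumes the next argv element and advances the index.
+ */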
+
+static void
+load_preloaded(void)
+{
+ int i;
+ int res;
+ Preload* preload_p;
+ Eterm module_name;
+ byte* code;
+ char* name;
+ int length;
+
+ if ((preload_p = sys_preloaded()) == NULL) {
+ return;
+ }
+ i = 0;
+ while ((name = preload_p[i].name) != NULL) {
+ length = preload_p[i].size;
+ module_name = am_atom_put(name, sys_strlen(name));
+ if ((code = sys_preload_begin(&preload_p[i])) == 0)
+ erl_exit(1, "Failed to find preloaded code for module %s\n",
+ name);
+ res = erts_load_module(NULL, 0, NIL, &module_name, code, length);
+ sys_preload_end(&preload_p[i]);
+ if (res < 0)
+ erl_exit(1,"Failed loading preloaded module %s\n", name);
+ i++;
+ }
+}
+
+/* be helpful (or maybe downright rude:-) */
+void erts_usage(void)
+{
+ erts_fprintf(stderr, "Usage: %s [flags] [ -- [init_args] ]\n", progname(program));
+ erts_fprintf(stderr, "The flags are:\n\n");
+
+ /* erts_fprintf(stderr, "-# number set the number of items to be used in traces etc\n"); */
+
+ erts_fprintf(stderr, "-a size suggested stack size in kilo words for threads\n");
+ erts_fprintf(stderr, " in the async-thread pool, valid range is [%d-%d]\n",
+ ERTS_ASYNC_THREAD_MIN_STACK_SIZE,
+ ERTS_ASYNC_THREAD_MAX_STACK_SIZE);
+ erts_fprintf(stderr, "-A number set number of threads in async thread pool,\n");
+ erts_fprintf(stderr, " valid range is [0-%d]\n",
+ ERTS_MAX_NO_OF_ASYNC_THREADS);
+
+ erts_fprintf(stderr, "-B[c|d|i] c to have Ctrl-c interrupt the Erlang shell,\n");
+ erts_fprintf(stderr, " d (or no extra option) to disable the break\n");
+ erts_fprintf(stderr, " handler, i to ignore break signals\n");
+
+ /* erts_fprintf(stderr, "-b func set the boot function (default boot)\n"); */
+
+ erts_fprintf(stderr, "-c disable continuous date/time correction with\n");
+ erts_fprintf(stderr, " respect to uptime\n");
+
+ erts_fprintf(stderr, "-d don't write a crash dump for internally detected errors\n");
+ erts_fprintf(stderr, " (halt(String) will still produce a crash dump)\n");
+
+ erts_fprintf(stderr, "-h number set minimum heap size in words (default %d)\n",
+ H_DEFAULT_SIZE);
+
+ /* erts_fprintf(stderr, "-i module set the boot module (default init)\n"); */
+
+ erts_fprintf(stderr, "-K boolean enable or disable kernel poll\n");
+
+ erts_fprintf(stderr, "-l turn on auto load tracing\n");
+
+ erts_fprintf(stderr, "-M<X> <Y> memory allocator switches,\n");
+ erts_fprintf(stderr, " see the erts_alloc(3) documentation for more info.\n");
+
+ erts_fprintf(stderr, "-P number set maximum number of processes on this node,\n");
+ erts_fprintf(stderr, " valid range is [%d-%d]\n",
+ ERTS_MIN_PROCESSES, ERTS_MAX_PROCESSES);
+ erts_fprintf(stderr, "-R number set compatibility release number,\n");
+ erts_fprintf(stderr, " valid range [%d-%d]\n",
+ ERTS_MIN_COMPAT_REL, this_rel_num());
+
+ erts_fprintf(stderr, "-r force ets memory block to be moved on realloc\n");
+ erts_fprintf(stderr, "-sbt type set scheduler bind type, valid types are:\n");
+ erts_fprintf(stderr, " u|ns|ts|ps|s|nnts|nnps|tnnps|db\n");
+ erts_fprintf(stderr, "-sct cput set cpu topology,\n");
+ erts_fprintf(stderr, " see the erl(1) documentation for more info.\n");
+ erts_fprintf(stderr, "-sss size suggested stack size in kilo words for scheduler threads,\n");
+ erts_fprintf(stderr, " valid range is [%d-%d]\n",
+ ERTS_SCHED_THREAD_MIN_STACK_SIZE,
+ ERTS_SCHED_THREAD_MAX_STACK_SIZE);
+ erts_fprintf(stderr, "-S n1:n2 set number of schedulers (n1), and number of\n");
+ erts_fprintf(stderr, " schedulers online (n2), valid range for both\n");
+    erts_fprintf(stderr, "           numbers is [1-%d]\n",
+ ERTS_MAX_NO_OF_SCHEDULERS);
+ erts_fprintf(stderr, "-T number set modified timing level,\n");
+ erts_fprintf(stderr, " valid range is [0-%d]\n",
+ ERTS_MODIFIED_TIMING_LEVELS-1);
+ erts_fprintf(stderr, "-V print Erlang version\n");
+
+ erts_fprintf(stderr, "-v turn on chatty mode (GCs will be reported etc)\n");
+
+ erts_fprintf(stderr, "-W<i|w> set error logger warnings mapping,\n");
+ erts_fprintf(stderr, " see error_logger documentation for details\n");
+
+ erts_fprintf(stderr, "\n");
+ erts_fprintf(stderr, "Note that if the emulator is started with erlexec (typically\n");
+ erts_fprintf(stderr, "from the erl script), these flags should be specified with +.\n");
+ erts_fprintf(stderr, "\n\n");
+ erl_exit(-1, "");
+}
+
+static void
+early_init(int *argc, char **argv) /*
+ * Only put things here which are
+				   * really important to initialize
+ * early!
+ */
+{
+ ErtsAllocInitOpts alloc_opts = ERTS_ALLOC_INIT_DEF_OPTS_INITER;
+ int ncpu;
+ int ncpuonln;
+ int ncpuavail;
+ int schdlrs;
+ int schdlrs_onln;
+ use_multi_run_queue = 1;
+ erts_printf_eterm_func = erts_printf_term;
+ erts_disable_tolerant_timeofday = 0;
+ display_items = 200;
+ display_loads = 0;
+ erts_backtrace_depth = DEFAULT_BACKTRACE_SIZE;
+ erts_async_max_threads = 0;
+ erts_async_thread_suggested_stack_size = ERTS_ASYNC_THREAD_MIN_STACK_SIZE;
+ H_MIN_SIZE = H_DEFAULT_SIZE;
+
+ erts_initialized = 0;
+
+ erts_use_sender_punish = 1;
+
+ erts_cpuinfo = erts_cpu_info_create();
+
+#ifdef ERTS_SMP
+ ncpu = erts_get_cpu_configured(erts_cpuinfo);
+ ncpuonln = erts_get_cpu_online(erts_cpuinfo);
+ ncpuavail = erts_get_cpu_available(erts_cpuinfo);
+#else
+ ncpu = 1;
+ ncpuonln = 1;
+ ncpuavail = 1;
+#endif
+
+ ignore_break = 0;
+ replace_intr = 0;
+ program = argv[0];
+
+ erts_modified_timing_level = -1;
+
+ erts_compat_rel = this_rel_num();
+
+ erts_use_r9_pids_ports = 0;
+
+ erts_sys_pre_init();
+
+#ifdef ERTS_ENABLE_LOCK_CHECK
+ erts_lc_init();
+#endif
+#ifdef ERTS_SMP
+ erts_smp_atomic_init(&erts_writing_erl_crash_dump, 0L);
+#else
+ erts_writing_erl_crash_dump = 0;
+#endif
+
+ erts_smp_atomic_init(&erts_max_gen_gcs, (long)((Uint16) -1));
+
+ erts_pre_init_process();
+#if defined(USE_THREADS) && !defined(ERTS_SMP)
+ main_thread = erts_thr_self();
+#endif
+
+ /*
+ * We need to know the number of schedulers to use before we
+ * can initialize the allocators.
+ */
+ no_schedulers = (Uint) (ncpu > 0 ? ncpu : 1);
+ no_schedulers_online = (ncpuavail > 0
+ ? ncpuavail
+ : (ncpuonln > 0 ? ncpuonln : no_schedulers));
+
+ schdlrs = no_schedulers;
+ schdlrs_onln = no_schedulers_online;
+
+ if (argc && argv) {
+ int i = 1;
+ while (i < *argc) {
+ if (strcmp(argv[i], "--") == 0) { /* end of emulator options */
+ i++;
+ break;
+ }
+ if (argv[i][0] == '-') {
+ switch (argv[i][1]) {
+ case 'S' : {
+ int tot, onln;
+ char *arg = get_arg(argv[i]+2, argv[i+1], &i);
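+		    /*
+		     * Accepted forms (illustration): "-S 4:2" gives 4
+		     * schedulers with 2 online, "-S 4" caps online at the
+		     * detected value, "-S :2" keeps the total; a value
+		     * <= 0 is taken relative to the detected count.
+		     */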
+ switch (sscanf(arg, "%d:%d", &tot, &onln)) {
+ case 0:
+ switch (sscanf(arg, ":%d", &onln)) {
+ case 1:
+ tot = no_schedulers;
+ goto chk_S;
+ default:
+ goto bad_S;
+ }
+ case 1:
+			onln = tot < schdlrs_onln ? tot : schdlrs_onln;
+			/* fall through */
+ case 2:
+ chk_S:
+ if (tot > 0)
+ schdlrs = tot;
+ else
+ schdlrs = no_schedulers + tot;
+ if (onln > 0)
+ schdlrs_onln = onln;
+ else
+ schdlrs_onln = no_schedulers_online + onln;
+ if (schdlrs < 1 || ERTS_MAX_NO_OF_SCHEDULERS < schdlrs) {
+ erts_fprintf(stderr,
+ "bad amount of schedulers %d\n",
+ tot);
+ erts_usage();
+ }
+ if (schdlrs_onln < 1 || schdlrs < schdlrs_onln) {
+ erts_fprintf(stderr,
+ "bad amount of schedulers online %d "
+ "(total amount of schedulers %d)\n",
+ schdlrs_onln, schdlrs);
+ erts_usage();
+ }
+ break;
+ default:
+ bad_S:
+ erts_fprintf(stderr,
+ "bad amount of schedulers %s\n",
+ arg);
+ erts_usage();
+ break;
+ }
+
+ VERBOSE(DEBUG_SYSTEM,
+ ("using %d:%d scheduler(s)\n", tot, onln));
+ break;
+ }
+ default:
+ break;
+ }
+ }
+ i++;
+ }
+ }
+
+#ifdef ERTS_SMP
+ no_schedulers = schdlrs;
+ no_schedulers_online = schdlrs_onln;
+
+ erts_no_schedulers = (Uint) no_schedulers;
+#endif
+
+ erts_alloc_init(argc, argv, &alloc_opts); /* Handles (and removes)
+ -M flags. */
+
+ erts_early_init_scheduling(); /* Require allocators */
+ erts_init_utils(); /* Require allocators */
+
+#ifdef ERTS_ENABLE_LOCK_CHECK
+ erts_lc_late_init();
+#endif
+
+#if defined(HIPE)
+ hipe_signal_init(); /* must be done very early */
+#endif
+ erl_sys_init();
+
+ erl_sys_args(argc, argv);
+
+ erts_ets_realloc_always_moves = 0;
+
+}
+
+#ifndef ERTS_SMP
+static void set_main_stack_size(void)
+{
+ if (erts_sched_thread_suggested_stack_size > 0) {
+# if HAVE_DECL_GETRLIMIT && HAVE_DECL_SETRLIMIT && HAVE_DECL_RLIMIT_STACK
+ struct rlimit rl;
+ int bytes = erts_sched_thread_suggested_stack_size * sizeof(Uint) * 1024;
+ if (getrlimit(RLIMIT_STACK, &rl) != 0 ||
+ (rl.rlim_cur = bytes, setrlimit(RLIMIT_STACK, &rl) != 0)) {
+ erts_fprintf(stderr, "failed to set stack size for scheduler "
+ "thread to %d bytes\n", bytes);
+ erts_usage();
+ }
+# else
+ erts_fprintf(stderr, "no OS support for dynamic stack size limit\n");
+ erts_usage();
+# endif
+ }
+}
+#endif
+
+void
+erl_start(int argc, char **argv)
+{
+ int i = 1;
+ char* arg=NULL;
+ char* Parg = NULL;
+ int have_break_handler = 1;
+ char envbuf[21]; /* enough for any 64-bit integer */
+ size_t envbufsz;
+ int async_max_threads = erts_async_max_threads;
+
+ early_init(&argc, argv);
+
+ envbufsz = sizeof(envbuf);
+ if (erts_sys_getenv(ERL_MAX_ETS_TABLES_ENV, envbuf, &envbufsz) == 0)
+ user_requested_db_max_tabs = atoi(envbuf);
+ else
+ user_requested_db_max_tabs = 0;
+
+ envbufsz = sizeof(envbuf);
+ if (erts_sys_getenv("ERL_FULLSWEEP_AFTER", envbuf, &envbufsz) == 0) {
+ Uint16 max_gen_gcs = atoi(envbuf);
+ erts_smp_atomic_set(&erts_max_gen_gcs, (long) max_gen_gcs);
+ }
+
+ envbufsz = sizeof(envbuf);
+ if (erts_sys_getenv("ERL_THREAD_POOL_SIZE", envbuf, &envbufsz) == 0) {
+ async_max_threads = atoi(envbuf);
+ }
+
+
+#ifdef DEBUG
+ verbose = DEBUG_DEFAULT;
+#endif
+
+ erts_error_logger_warnings = am_error;
+
+ while (i < argc) {
+ if (argv[i][0] != '-') {
+ erts_usage();
+ }
+ if (strcmp(argv[i], "--") == 0) { /* end of emulator options */
+ i++;
+ break;
+ }
+ switch (argv[i][1]) {
+
+ /*
+ * NOTE: -M flags are handled (and removed from argv) by
+ * erts_alloc_init().
+ *
+	     * The -d, -m, -S, -t, and -T flags were removed in
+	     * Erlang 5.3/OTP R9C.
+	     *
+	     * -S and -T have been reused in Erlang 5.5/OTP R11B.
+	     *
+	     * -d has been reused in patch R12B-4.
+ */
+
+ case '#' :
+ arg = get_arg(argv[i]+2, argv[i+1], &i);
+ if ((display_items = atoi(arg)) == 0) {
+ erts_fprintf(stderr, "bad display items%s\n", arg);
+ erts_usage();
+ }
+ VERBOSE(DEBUG_SYSTEM,
+ ("using display items %d\n",display_items));
+ break;
+
+ case 'l':
+ display_loads++;
+ break;
+
+ case 'v':
+#ifdef DEBUG
+ if (argv[i][2] == '\0') {
+ verbose |= DEBUG_SYSTEM;
+ } else {
+ char *ch;
+ for (ch = argv[i]+2; *ch != '\0'; ch++) {
+ switch (*ch) {
+ case 's': verbose |= DEBUG_SYSTEM; break;
+ case 'g': verbose |= DEBUG_PRIVATE_GC; break;
+ case 'h': verbose |= DEBUG_HYBRID_GC; break;
+ case 'M': verbose |= DEBUG_MEMORY; break;
+ case 'a': verbose |= DEBUG_ALLOCATION; break;
+ case 't': verbose |= DEBUG_THREADS; break;
+ case 'p': verbose |= DEBUG_PROCESSES; break;
+ case 'm': verbose |= DEBUG_MESSAGES; break;
+ default : erts_fprintf(stderr,"Unknown verbose option: %c\n",*ch);
+ }
+ }
+ }
+ erts_printf("Verbose level: ");
+ if (verbose & DEBUG_SYSTEM) erts_printf("SYSTEM ");
+ if (verbose & DEBUG_PRIVATE_GC) erts_printf("PRIVATE_GC ");
+ if (verbose & DEBUG_HYBRID_GC) erts_printf("HYBRID_GC ");
+ if (verbose & DEBUG_MEMORY) erts_printf("PARANOID_MEMORY ");
+ if (verbose & DEBUG_ALLOCATION) erts_printf("ALLOCATION ");
+ if (verbose & DEBUG_THREADS) erts_printf("THREADS ");
+ if (verbose & DEBUG_PROCESSES) erts_printf("PROCESSES ");
+ if (verbose & DEBUG_MESSAGES) erts_printf("MESSAGES ");
+ erts_printf("\n");
+#else
+ erts_fprintf(stderr, "warning: -v (only in debug compiled code)\n");
+#endif
+ break;
+ case 'V' :
+ {
+ char tmp[256];
+
+ tmp[0] = tmp[1] = '\0';
+#ifdef DEBUG
+ strcat(tmp, ",DEBUG");
+#endif
+#ifdef ERTS_SMP
+ strcat(tmp, ",SMP");
+#endif
+#ifdef USE_THREADS
+ strcat(tmp, ",ASYNC_THREADS");
+#endif
+#ifdef HIPE
+ strcat(tmp, ",HIPE");
+#endif
+#ifdef INCREMENTAL
+ strcat(tmp, ",INCREMENTAL_GC");
+#endif
+#ifdef HYBRID
+ strcat(tmp, ",HYBRID");
+#endif
+ erts_fprintf(stderr, "Erlang ");
+ if (tmp[1]) {
+ erts_fprintf(stderr, "(%s) ", tmp+1);
+ }
+ erts_fprintf(stderr, "(" EMULATOR ") emulator version "
+ ERLANG_VERSION "\n");
+ erl_exit(0, "");
+ }
+ break;
+
+ case 'H': /* undocumented */
+ fprintf(stderr, "The undocumented +H option has been removed (R10B-6).\n\n");
+ break;
+
+ case 'h':
+ /* set default heap size */
+ arg = get_arg(argv[i]+2, argv[i+1], &i);
+ if ((H_MIN_SIZE = atoi(arg)) <= 0) {
+ erts_fprintf(stderr, "bad heap size %s\n", arg);
+ erts_usage();
+ }
+ VERBOSE(DEBUG_SYSTEM,
+ ("using minimum heap size %d\n",H_MIN_SIZE));
+ break;
+
+ case 'd':
+ /*
+ * Never produce crash dumps for internally detected
+ * errors; only produce a core dump. (Generation of
+ * crash dumps is destructive and makes it impossible
+ * to inspect the contents of process heaps in the
+ * core dump.)
+ */
+ erts_no_crash_dump = 1;
+ break;
+
+ case 'e':
+ /* set maximum number of ets tables */
+ arg = get_arg(argv[i]+2, argv[i+1], &i);
+ if (( user_requested_db_max_tabs = atoi(arg) ) < 0) {
+ erts_fprintf(stderr, "bad maximum number of ets tables %s\n", arg);
+ erts_usage();
+ }
+ VERBOSE(DEBUG_SYSTEM,
+ ("using maximum number of ets tables %d\n",
+ user_requested_db_max_tabs));
+ break;
+
+ case 'i':
+ /* define name of module for initial function */
+ init = get_arg(argv[i]+2, argv[i+1], &i);
+ break;
+
+ case 'b':
+ /* define name of initial function */
+ boot = get_arg(argv[i]+2, argv[i+1], &i);
+ break;
+
+ case 'B':
+ if (argv[i][2] == 'i') /* +Bi */
+ ignore_break = 1;
+ else if (argv[i][2] == 'c') /* +Bc */
+ replace_intr = 1;
+ else if (argv[i][2] == 'd') /* +Bd */
+ have_break_handler = 0;
+ else if (argv[i+1][0] == 'i') { /* +B i */
+ get_arg(argv[i]+2, argv[i+1], &i);
+ ignore_break = 1;
+ }
+ else if (argv[i+1][0] == 'c') { /* +B c */
+ get_arg(argv[i]+2, argv[i+1], &i);
+ replace_intr = 1;
+ }
+ else if (argv[i+1][0] == 'd') { /* +B d */
+ get_arg(argv[i]+2, argv[i+1], &i);
+ have_break_handler = 0;
+ }
+ else /* +B */
+ have_break_handler = 0;
+ break;
+
+ case 'K':
+ /* If kernel poll support is present,
+ erl_sys_args() will remove the K parameter
+ and value */
+ get_arg(argv[i]+2, argv[i+1], &i);
+		erts_fprintf(stderr,
+			     "kernel-poll not supported; \"K\" parameter ignored\n");
+ break;
+
+ case 'P':
+ /* set maximum number of processes */
+ Parg = get_arg(argv[i]+2, argv[i+1], &i);
+ erts_max_processes = atoi(Parg);
+	    /* The check of the result is delayed until later, since +R
+	       may be given after +P. */
+ break;
+
+ case 'S' : /* Was handled in early_init() just read past it */
+ (void) get_arg(argv[i]+2, argv[i+1], &i);
+ break;
+
+ case 's' : {
+ char *estr;
+ int res;
+ char *sub_param = argv[i]+2;
+ if (has_prefix("bt", sub_param)) {
+ arg = get_arg(sub_param+2, argv[i+1], &i);
+ res = erts_init_scheduler_bind_type(arg);
+ if (res != ERTS_INIT_SCHED_BIND_TYPE_SUCCESS) {
+ switch (res) {
+ case ERTS_INIT_SCHED_BIND_TYPE_NOT_SUPPORTED:
+ estr = "not supported";
+ break;
+ case ERTS_INIT_SCHED_BIND_TYPE_ERROR_NO_CPU_TOPOLOGY:
+ estr = "no cpu topology available";
+ break;
+ case ERTS_INIT_SCHED_BIND_TYPE_ERROR_NO_BAD_TYPE:
+ estr = "invalid type";
+ break;
+ default:
+ estr = "undefined error";
+ break;
+ }
+ erts_fprintf(stderr,
+ "setting scheduler bind type '%s' failed: %s\n",
+ arg,
+ estr);
+ erts_usage();
+ }
+ }
+ else if (has_prefix("ct", sub_param)) {
+ arg = get_arg(sub_param+2, argv[i+1], &i);
+ res = erts_init_cpu_topology(arg);
+ if (res != ERTS_INIT_CPU_TOPOLOGY_OK) {
+ switch (res) {
+ case ERTS_INIT_CPU_TOPOLOGY_INVALID_ID:
+ estr = "invalid identifier";
+ break;
+ case ERTS_INIT_CPU_TOPOLOGY_INVALID_ID_RANGE:
+ estr = "invalid identifier range";
+ break;
+ case ERTS_INIT_CPU_TOPOLOGY_INVALID_HIERARCHY:
+ estr = "invalid hierarchy";
+ break;
+ case ERTS_INIT_CPU_TOPOLOGY_INVALID_ID_TYPE:
+ estr = "invalid identifier type";
+ break;
+ case ERTS_INIT_CPU_TOPOLOGY_INVALID_NODES:
+ estr = "invalid nodes declaration";
+ break;
+ case ERTS_INIT_CPU_TOPOLOGY_MISSING_LID:
+ estr = "missing logical identifier";
+ break;
+ case ERTS_INIT_CPU_TOPOLOGY_NOT_UNIQUE_LIDS:
+ estr = "not unique logical identifiers";
+ break;
+ case ERTS_INIT_CPU_TOPOLOGY_NOT_UNIQUE_ENTITIES:
+ estr = "not unique entities";
+ break;
+ case ERTS_INIT_CPU_TOPOLOGY_MISSING:
+ estr = "missing cpu topology";
+ break;
+ default:
+ estr = "undefined error";
+ break;
+ }
+ erts_fprintf(stderr,
+ "bad cpu topology '%s': %s\n",
+ arg,
+ estr);
+ erts_usage();
+ }
+ }
+ else if (sys_strcmp("mrq", sub_param) == 0)
+ use_multi_run_queue = 1;
+ else if (sys_strcmp("srq", sub_param) == 0)
+ use_multi_run_queue = 0;
+ else if (sys_strcmp("nsp", sub_param) == 0)
+ erts_use_sender_punish = 0;
+ else if (has_prefix("ss", sub_param)) {
+ /* suggested stack size (Kilo Words) for scheduler threads */
+ arg = get_arg(sub_param+2, argv[i+1], &i);
+ erts_sched_thread_suggested_stack_size = atoi(arg);
+
+ if ((erts_sched_thread_suggested_stack_size
+ < ERTS_SCHED_THREAD_MIN_STACK_SIZE)
+ || (erts_sched_thread_suggested_stack_size >
+ ERTS_SCHED_THREAD_MAX_STACK_SIZE)) {
+ erts_fprintf(stderr, "bad stack size for scheduler threads %s\n",
+ arg);
+ erts_usage();
+ }
+ VERBOSE(DEBUG_SYSTEM,
+ ("suggested scheduler thread stack size %d kilo words\n",
+ erts_sched_thread_suggested_stack_size));
+ }
+ else {
+ erts_fprintf(stderr, "bad scheduling option %s\n", argv[i]);
+ erts_usage();
+ }
+ break;
+ }
+ case 'T' :
+ arg = get_arg(argv[i]+2, argv[i+1], &i);
+ errno = 0;
+ erts_modified_timing_level = atoi(arg);
+ if ((erts_modified_timing_level == 0 && errno != 0)
+ || erts_modified_timing_level < 0
+ || erts_modified_timing_level >= ERTS_MODIFIED_TIMING_LEVELS) {
+ erts_fprintf(stderr, "bad modified timing level %s\n", arg);
+ erts_usage();
+ }
+ else {
+ VERBOSE(DEBUG_SYSTEM,
+ ("using modified timing level %d\n",
+ erts_modified_timing_level));
+ }
+
+ break;
+
+ case 'R': {
+ /* set compatibility release */
+
+ arg = get_arg(argv[i]+2, argv[i+1], &i);
+ erts_compat_rel = atoi(arg);
+
+ if (erts_compat_rel < ERTS_MIN_COMPAT_REL
+ || erts_compat_rel > this_rel_num()) {
+ erts_fprintf(stderr, "bad compatibility release number %s\n", arg);
+ erts_usage();
+ }
+
+ ASSERT(ERTS_MIN_COMPAT_REL >= 7);
+ switch (erts_compat_rel) {
+ case 7:
+ case 8:
+ case 9:
+ erts_use_r9_pids_ports = 1;
+ default:
+ break;
+ }
+
+ break;
+ }
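+
+ /*
+ * Illustrative: "erl +R 9" lets this node interoperate with an
+ * R9 node by selecting the old, smaller pid/port representation
+ * (erts_use_r9_pids_ports above).
+ */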
+
+ case 'A':
+ /* set number of threads in thread pool */
+ arg = get_arg(argv[i]+2, argv[i+1], &i);
+ if (((async_max_threads = atoi(arg)) < 0) ||
+ (async_max_threads > ERTS_MAX_NO_OF_ASYNC_THREADS)) {
+ erts_fprintf(stderr, "bad number of async threads %s\n", arg);
+ erts_usage();
+ }
+
+ VERBOSE(DEBUG_SYSTEM, ("using %d async-threads\n",
+ async_max_threads));
+ break;
+
+ case 'a':
+ /* suggested stack size (kilo words) for threads in thread pool */
+ arg = get_arg(argv[i]+2, argv[i+1], &i);
+ erts_async_thread_suggested_stack_size = atoi(arg);
+
+ if ((erts_async_thread_suggested_stack_size
+ < ERTS_ASYNC_THREAD_MIN_STACK_SIZE)
+ || (erts_async_thread_suggested_stack_size >
+ ERTS_ASYNC_THREAD_MAX_STACK_SIZE)) {
+ erts_fprintf(stderr, "bad stack size for async threads %s\n",
+ arg);
+ erts_usage();
+ }
+
+ VERBOSE(DEBUG_SYSTEM,
+ ("suggested async-thread stack size %d kilo words\n",
+ erts_async_thread_suggested_stack_size));
+ break;
+
+ case 'r':
+ erts_ets_realloc_always_moves = 1;
+ break;
+ case 'n': /* XXX obsolete */
+ break;
+ case 'c':
+ if (argv[i][2] == 0) { /* -c: documented option */
+ erts_disable_tolerant_timeofday = 1;
+ }
+#ifdef ERTS_OPCODE_COUNTER_SUPPORT
+ else if (argv[i][2] == 'i') { /* -ci: undocumented option */
+ count_instructions = 1;
+ }
+#endif
+ break;
+ case 'W':
+ arg = get_arg(argv[i]+2, argv[i+1], &i);
+ switch (arg[0]) {
+ case 'i':
+ erts_error_logger_warnings = am_info;
+ break;
+ case 'w':
+ erts_error_logger_warnings = am_warning;
+ break;
+ case 'e': /* The default */
+ erts_error_logger_warnings = am_error;
+ break;
+ default:
+ erts_fprintf(stderr, "unrecognized warning_map option %s\n", arg);
+ erts_usage();
+ }
+ break;
+
+ default:
+ erts_fprintf(stderr, "%s unknown flag %s\n", argv[0], argv[i]);
+ erts_usage();
+ }
+ i++;
+ }
+
+#ifdef USE_THREADS
+ erts_async_max_threads = async_max_threads;
+#endif
+
+ /* Delayed check of +P flag */
+ if (erts_max_processes < ERTS_MIN_PROCESSES
+ || erts_max_processes > ERTS_MAX_PROCESSES
+ || (erts_use_r9_pids_ports
+ && erts_max_processes > ERTS_MAX_R9_PROCESSES)) {
+ erts_fprintf(stderr, "bad number of processes %s\n", Parg);
+ erts_usage();
+ }
+
+ /* Restart will not reinstall the break handler */
+#ifdef __WIN32__
+ if (ignore_break)
+ erts_set_ignore_break();
+ else if (replace_intr)
+ erts_replace_intr();
+ else
+ init_break_handler();
+#else
+ if (ignore_break)
+ erts_set_ignore_break();
+ else if (have_break_handler)
+ init_break_handler();
+ if (replace_intr)
+ erts_replace_intr();
+#endif
+
+ boot_argc = argc - i; /* Number of arguments to init */
+ boot_argv = &argv[i];
+
+ erl_init();
+
+ init_shared_memory(boot_argc, boot_argv);
+ load_preloaded();
+
+ erts_initialized = 1;
+
+ erl_first_process_otp("otp_ring0", NULL, 0, boot_argc, boot_argv);
+
+#ifdef ERTS_SMP
+ erts_start_schedulers();
+ /* Let system-specific code decide what to do with the main thread... */
+
+ erts_sys_main_thread(); /* May or may not return! */
+#else
+ set_main_stack_size();
+ process_main();
+#endif
+}
+
+
+#ifdef USE_THREADS
+
+__decl_noreturn void erts_thr_fatal_error(int err, char *what)
+{
+ char *errstr = err ? strerror(err) : NULL;
+ erts_fprintf(stderr,
+ "Failed to %s: %s%s(%d)\n",
+ what,
+ errstr ? errstr : "",
+ errstr ? " " : "",
+ err);
+ abort();
+}
+
+#endif
+
+static void
+system_cleanup(int exit_code)
+{
+ /* No cleanup wanted if ...
+ * 1. we are about to do an abnormal exit,
+ * 2. we haven't finished initializing, or
+ * 3. a thread other than the main thread is performing the exit
+ * (in the threaded non-SMP case).
+ */
+
+ if (exit_code != 0
+ || !erts_initialized
+#if defined(USE_THREADS) && !defined(ERTS_SMP)
+ || !erts_equal_tids(main_thread, erts_thr_self())
+#endif
+ )
+ return;
+
+#ifdef ERTS_SMP
+#ifdef ERTS_ENABLE_LOCK_CHECK
+ erts_lc_check_exact(NULL, 0);
+#endif
+ erts_smp_block_system(ERTS_BS_FLG_ALLOW_GC); /* We never release it... */
+#endif
+
+#ifdef HYBRID
+ if (ma_src_stack) erts_free(ERTS_ALC_T_OBJECT_STACK,
+ (void *)ma_src_stack);
+ if (ma_dst_stack) erts_free(ERTS_ALC_T_OBJECT_STACK,
+ (void *)ma_dst_stack);
+ if (ma_offset_stack) erts_free(ERTS_ALC_T_OBJECT_STACK,
+ (void *)ma_offset_stack);
+ ma_src_stack = NULL;
+ ma_dst_stack = NULL;
+ ma_offset_stack = NULL;
+ erts_cleanup_offheap(&erts_global_offheap);
+#endif
+
+#if defined(HYBRID) && !defined(INCREMENTAL)
+ if (global_heap) {
+ ERTS_HEAP_FREE(ERTS_ALC_T_HEAP,
+ (void*) global_heap,
+ sizeof(Eterm) * global_heap_sz);
+ }
+ global_heap = NULL;
+#endif
+
+#ifdef INCREMENTAL
+ erts_cleanup_incgc();
+#endif
+
+#if defined(USE_THREADS) && !defined(ERTS_SMP)
+ exit_async();
+#endif
+#if HAVE_ERTS_MSEG
+ erts_mseg_exit();
+#endif
+
+ /*
+ * A lot more cleaning could/should have been done...
+ */
+
+}
+
+/*
+ * Common exit function, all exits from the system go through here.
+ * n <= 0 -> normal exit with status n;
+ * n = 127 -> Erlang crash dump produced, exit with status 1;
+ * other positive n -> Erlang crash dump and core dump produced.
+ */
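+
+/*
+ * Illustrative call sites (not from this file): erl_exit(0, "") performs
+ * a clean shutdown with status 0, while something like
+ * erl_exit(1, "Cannot allocate %d bytes\n", size) writes a crash dump
+ * and then abort()s, producing a core dump as described above.
+ */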
+
+__decl_noreturn void erl_exit0(char *file, int line, int n, char *fmt,...)
+{
+ unsigned int an;
+ va_list args;
+
+ va_start(args, fmt);
+
+ save_statistics();
+
+ system_cleanup(n);
+
+ an = abs(n);
+
+ if (erts_mtrace_enabled)
+ erts_mtrace_exit((Uint32) an);
+
+ /* Produce an Erlang crash dump on error */
+ if (n > 0 && erts_initialized &&
+ (erts_no_crash_dump == 0 || n == ERTS_DUMP_EXIT)) {
+ erl_crash_dump_v(file, line, fmt, args);
+ }
+
+ /* Reinitialize the va_list; it may have been consumed above */
+ va_end(args);
+ va_start(args, fmt);
+
+ if (fmt != NULL && *fmt != '\0')
+ erl_error(fmt, args); /* Print error message. */
+ va_end(args);
+#ifdef __WIN32__
+ if(n > 0) ConWaitForExit();
+ else ConNormalExit();
+#endif
+#if !defined(__WIN32__) && !defined(VXWORKS) && !defined(_OSE_)
+ sys_tty_reset();
+#endif
+
+ if (n == ERTS_INTR_EXIT)
+ exit(0);
+ else if (n == 127)
+ ERTS_EXIT_AFTER_DUMP(1);
+ else if (n > 0 || n == ERTS_ABORT_EXIT)
+ abort();
+ exit(an);
+}
+
+__decl_noreturn void erl_exit(int n, char *fmt,...)
+{
+ unsigned int an;
+ va_list args;
+
+ va_start(args, fmt);
+
+ save_statistics();
+
+ system_cleanup(n);
+
+ an = abs(n);
+
+ if (erts_mtrace_enabled)
+ erts_mtrace_exit((Uint32) an);
+
+ /* Produce an Erlang crash dump on error */
+ if (n > 0 && erts_initialized &&
+ (erts_no_crash_dump == 0 || n == ERTS_DUMP_EXIT)) {
+ erl_crash_dump_v((char*) NULL, 0, fmt, args);
+ }
+
+ /* Reinitialize the va_list; it may have been consumed above */
+ va_end(args);
+ va_start(args, fmt);
+
+ if (fmt != NULL && *fmt != '\0')
+ erl_error(fmt, args); /* Print error message. */
+ va_end(args);
+#ifdef __WIN32__
+ if(n > 0) ConWaitForExit();
+ else ConNormalExit();
+#endif
+#if !defined(__WIN32__) && !defined(VXWORKS) && !defined(_OSE_)
+ sys_tty_reset();
+#endif
+
+ if (n == ERTS_INTR_EXIT)
+ exit(0);
+ else if (n == ERTS_DUMP_EXIT)
+ ERTS_EXIT_AFTER_DUMP(1);
+ else if (n > 0 || n == ERTS_ABORT_EXIT)
+ abort();
+ exit(an);
+}
+
diff --git a/erts/emulator/beam/erl_instrument.c b/erts/emulator/beam/erl_instrument.c
new file mode 100644
index 0000000000..3f022f92b8
--- /dev/null
+++ b/erts/emulator/beam/erl_instrument.c
@@ -0,0 +1,1221 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2003-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+
+#include "global.h"
+#include "big.h"
+#include "erl_instrument.h"
+#include "erl_threads.h"
+
+typedef union { long l; double d; } Align_t;
+
+typedef struct {
+ Uint size;
+#ifdef VALGRIND
+ void* valgrind_leak_suppressor;
+#endif
+ Align_t mem[1];
+} StatBlock_t;
+
+#define STAT_BLOCK_HEADER_SIZE (sizeof(StatBlock_t) - sizeof(Align_t))
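+
+/*
+ * Layout sketch for the header-size arithmetic above: 'mem' acts as a
+ * flexible-array-style placeholder, so user data starts at offset
+ * STAT_BLOCK_HEADER_SIZE and Align_t keeps it maximally aligned:
+ *
+ *     [ size | (valgrind ptr) | padding ][ user data ... ]
+ *     ^ raw block                        ^ pointer handed to the caller
+ */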
+
+typedef struct MapStatBlock_t_ MapStatBlock_t;
+struct MapStatBlock_t_ {
+ Uint size;
+ ErtsAlcType_t type_no;
+ Eterm pid;
+ MapStatBlock_t *prev;
+ MapStatBlock_t *next;
+ Align_t mem[1];
+};
+
+#define MAP_STAT_BLOCK_HEADER_SIZE (sizeof(MapStatBlock_t) - sizeof(Align_t))
+
+typedef struct {
+ Uint size;
+ Uint max_size;
+ Uint max_size_ever;
+
+ Uint blocks;
+ Uint max_blocks;
+ Uint max_blocks_ever;
+} Stat_t;
+
+static erts_mtx_t instr_mutex;
+static erts_mtx_t instr_x_mutex;
+
+int erts_instr_memory_map;
+int erts_instr_stat;
+
+static ErtsAllocatorFunctions_t real_allctrs[ERTS_ALC_A_MAX+1];
+
+struct stats_ {
+ Stat_t tot;
+ Stat_t a[ERTS_ALC_A_MAX+1];
+ Stat_t *ap[ERTS_ALC_A_MAX+1];
+ Stat_t c[ERTS_ALC_C_MAX+1];
+ Stat_t n[ERTS_ALC_N_MAX+1];
+};
+
+static struct stats_ *stats;
+
+static MapStatBlock_t *mem_anchor;
+
+static Eterm *am_tot;
+static Eterm *am_n;
+static Eterm *am_a;
+static Eterm *am_c;
+
+static int atoms_initialized;
+
+static struct {
+ Eterm total;
+ Eterm allocators;
+ Eterm classes;
+ Eterm types;
+ Eterm sizes;
+ Eterm blocks;
+ Eterm instr_hdr;
+#ifdef DEBUG
+ Eterm end_of_atoms;
+#endif
+} am;
+
+static ERTS_INLINE void atom_init(Eterm *atom, const char *name)
+{
+ *atom = am_atom_put((char *) name, strlen(name));
+}
+#define AM_INIT(AM) atom_init(&am.AM, #AM)
+
+static void
+init_atoms(void)
+{
+#ifdef DEBUG
+ Eterm *atom;
+ for (atom = (Eterm *) &am; atom <= &am.end_of_atoms; atom++) {
+ *atom = THE_NON_VALUE;
+ }
+#endif
+
+ AM_INIT(total);
+ AM_INIT(allocators);
+ AM_INIT(classes);
+ AM_INIT(types);
+ AM_INIT(sizes);
+ AM_INIT(blocks);
+ AM_INIT(instr_hdr);
+
+#ifdef DEBUG
+ for (atom = (Eterm *) &am; atom < &am.end_of_atoms; atom++) {
+ ASSERT(*atom != THE_NON_VALUE);
+ }
+#endif
+
+ atoms_initialized = 1;
+}
+
+#undef AM_INIT
+
+static void
+init_am_tot(void)
+{
+ am_tot = (Eterm *) erts_alloc(ERTS_ALC_T_INSTR_INFO,
+ sizeof(Eterm));
+ atom_init(am_tot, "total");
+}
+
+
+static void
+init_am_n(void)
+{
+ int i;
+ am_n = (Eterm *) erts_alloc(ERTS_ALC_T_INSTR_INFO,
+ (ERTS_ALC_N_MAX+1)*sizeof(Eterm));
+
+ for (i = ERTS_ALC_N_MIN; i <= ERTS_ALC_N_MAX; i++) {
+ atom_init(&am_n[i], ERTS_ALC_N2TD(i));
+ }
+
+}
+
+static void
+init_am_c(void)
+{
+ int i;
+ am_c = (Eterm *) erts_alloc(ERTS_ALC_T_INSTR_INFO,
+ (ERTS_ALC_C_MAX+1)*sizeof(Eterm));
+
+ for (i = ERTS_ALC_C_MIN; i <= ERTS_ALC_C_MAX; i++) {
+ atom_init(&am_c[i], ERTS_ALC_C2CD(i));
+ }
+
+}
+
+static void
+init_am_a(void)
+{
+ int i;
+ am_a = (Eterm *) erts_alloc(ERTS_ALC_T_INSTR_INFO,
+ (ERTS_ALC_A_MAX+1)*sizeof(Eterm));
+
+ for (i = ERTS_ALC_A_MIN; i <= ERTS_ALC_A_MAX; i++) {
+ atom_init(&am_a[i], ERTS_ALC_A2AD(i));
+ }
+
+}
+
+static ERTS_INLINE void
+stat_upd_alloc(ErtsAlcType_t n, Uint size)
+{
+ ErtsAlcType_t t = ERTS_ALC_N2T(n);
+ ErtsAlcType_t a = ERTS_ALC_T2A(t);
+ ErtsAlcType_t c = ERTS_ALC_T2C(t);
+
+ stats->ap[a]->size += size;
+ if (stats->ap[a]->max_size < stats->ap[a]->size)
+ stats->ap[a]->max_size = stats->ap[a]->size;
+
+ stats->c[c].size += size;
+ if (stats->c[c].max_size < stats->c[c].size)
+ stats->c[c].max_size = stats->c[c].size;
+
+ stats->n[n].size += size;
+ if (stats->n[n].max_size < stats->n[n].size)
+ stats->n[n].max_size = stats->n[n].size;
+
+ stats->tot.size += size;
+ if (stats->tot.max_size < stats->tot.size)
+ stats->tot.max_size = stats->tot.size;
+
+ stats->ap[a]->blocks++;
+ if (stats->ap[a]->max_blocks < stats->ap[a]->blocks)
+ stats->ap[a]->max_blocks = stats->ap[a]->blocks;
+
+ stats->c[c].blocks++;
+ if (stats->c[c].max_blocks < stats->c[c].blocks)
+ stats->c[c].max_blocks = stats->c[c].blocks;
+
+ stats->n[n].blocks++;
+ if (stats->n[n].max_blocks < stats->n[n].blocks)
+ stats->n[n].max_blocks = stats->n[n].blocks;
+
+ stats->tot.blocks++;
+ if (stats->tot.max_blocks < stats->tot.blocks)
+ stats->tot.max_blocks = stats->tot.blocks;
+
+}
+
+
+static ERTS_INLINE void
+stat_upd_free(ErtsAlcType_t n, Uint size)
+{
+ ErtsAlcType_t t = ERTS_ALC_N2T(n);
+ ErtsAlcType_t a = ERTS_ALC_T2A(t);
+ ErtsAlcType_t c = ERTS_ALC_T2C(t);
+
+ ASSERT(stats->ap[a]->size >= size);
+ stats->ap[a]->size -= size;
+
+ ASSERT(stats->c[c].size >= size);
+ stats->c[c].size -= size;
+
+ ASSERT(stats->n[n].size >= size);
+ stats->n[n].size -= size;
+
+ ASSERT(stats->tot.size >= size);
+ stats->tot.size -= size;
+
+ ASSERT(stats->ap[a]->blocks > 0);
+ stats->ap[a]->blocks--;
+
+ ASSERT(stats->c[c].blocks > 0);
+ stats->c[c].blocks--;
+
+ ASSERT(stats->n[n].blocks > 0);
+ stats->n[n].blocks--;
+
+ ASSERT(stats->tot.blocks > 0);
+ stats->tot.blocks--;
+
+}
+
+
+static ERTS_INLINE void
+stat_upd_realloc(ErtsAlcType_t n, Uint size, Uint old_size)
+{
+ if (old_size)
+ stat_upd_free(n, old_size);
+ stat_upd_alloc(n, size);
+}
+
+/*
+ * stat instrumentation callback functions
+ */
+
+static void *
+stat_alloc(ErtsAlcType_t n, void *extra, Uint size)
+{
+ ErtsAllocatorFunctions_t *real_af = (ErtsAllocatorFunctions_t *) extra;
+ Uint ssize;
+ void *res;
+
+ erts_mtx_lock(&instr_mutex);
+
+ ssize = size + STAT_BLOCK_HEADER_SIZE;
+ res = (*real_af->alloc)(n, real_af->extra, ssize);
+ if (res) {
+ stat_upd_alloc(n, size);
+ ((StatBlock_t *) res)->size = size;
+#ifdef VALGRIND
+ /* Suppress "possibly leaks" by storing an actual dummy pointer
+ to the _start_ of the allocated block.*/
+ ((StatBlock_t *) res)->valgrind_leak_suppressor = res;
+#endif
+ res = (void *) ((StatBlock_t *) res)->mem;
+ }
+
+ erts_mtx_unlock(&instr_mutex);
+
+ return res;
+}
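+
+/*
+ * Pointer arithmetic sketch for the wrappers in this file: the caller is
+ * handed p = block + STAT_BLOCK_HEADER_SIZE, so stat_realloc() and
+ * stat_free() recover the raw block with
+ * ((char *) p) - STAT_BLOCK_HEADER_SIZE before consulting the recorded
+ * size.
+ */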
+
+static void *
+stat_realloc(ErtsAlcType_t n, void *extra, void *ptr, Uint size)
+{
+ ErtsAllocatorFunctions_t *real_af = (ErtsAllocatorFunctions_t *) extra;
+ Uint old_size;
+ Uint ssize;
+ void *sptr;
+ void *res;
+
+ erts_mtx_lock(&instr_mutex);
+
+ if (ptr) {
+ sptr = (void *) (((char *) ptr) - STAT_BLOCK_HEADER_SIZE);
+ old_size = ((StatBlock_t *) sptr)->size;
+ }
+ else {
+ sptr = NULL;
+ old_size = 0;
+ }
+
+ ssize = size + STAT_BLOCK_HEADER_SIZE;
+ res = (*real_af->realloc)(n, real_af->extra, sptr, ssize);
+ if (res) {
+ stat_upd_realloc(n, size, old_size);
+ ((StatBlock_t *) res)->size = size;
+#ifdef VALGRIND
+ ((StatBlock_t *) res)->valgrind_leak_suppressor = res;
+#endif
+ res = (void *) ((StatBlock_t *) res)->mem;
+ }
+
+ erts_mtx_unlock(&instr_mutex);
+
+ return res;
+}
+
+static void
+stat_free(ErtsAlcType_t n, void *extra, void *ptr)
+{
+ ErtsAllocatorFunctions_t *real_af = (ErtsAllocatorFunctions_t *) extra;
+ void *sptr;
+
+ erts_mtx_lock(&instr_mutex);
+
+ if (ptr) {
+ sptr = (void *) (((char *) ptr) - STAT_BLOCK_HEADER_SIZE);
+ stat_upd_free(n, ((StatBlock_t *) sptr)->size);
+ }
+ else {
+ sptr = NULL;
+ }
+
+ (*real_af->free)(n, real_af->extra, sptr);
+
+ erts_mtx_unlock(&instr_mutex);
+
+}
+
+/*
+ * map stat instrumentation callback functions
+ */
+
+static void *
+map_stat_alloc(ErtsAlcType_t n, void *extra, Uint size)
+{
+ ErtsAllocatorFunctions_t *real_af = (ErtsAllocatorFunctions_t *) extra;
+ Uint msize;
+ void *res;
+
+ erts_mtx_lock(&instr_mutex);
+
+ msize = size + MAP_STAT_BLOCK_HEADER_SIZE;
+ res = (*real_af->alloc)(n, real_af->extra, msize);
+ if (res) {
+ MapStatBlock_t *mb = (MapStatBlock_t *) res;
+ stat_upd_alloc(n, size);
+
+ mb->size = size;
+ mb->type_no = n;
+ mb->pid = erts_get_current_pid();
+
+ mb->prev = NULL;
+ mb->next = mem_anchor;
+ if (mem_anchor)
+ mem_anchor->prev = mb;
+ mem_anchor = mb;
+
+ res = (void *) mb->mem;
+ }
+
+ erts_mtx_unlock(&instr_mutex);
+
+ return res;
+}
+
+static void *
+map_stat_realloc(ErtsAlcType_t n, void *extra, void *ptr, Uint size)
+{
+ ErtsAllocatorFunctions_t *real_af = (ErtsAllocatorFunctions_t *) extra;
+ Uint old_size;
+ Uint msize;
+ void *mptr;
+ void *res;
+
+ erts_mtx_lock(&instr_x_mutex);
+ erts_mtx_lock(&instr_mutex);
+
+ if (ptr) {
+ mptr = (void *) (((char *) ptr) - MAP_STAT_BLOCK_HEADER_SIZE);
+ old_size = ((MapStatBlock_t *) mptr)->size;
+ }
+ else {
+ mptr = NULL;
+ old_size = 0;
+ }
+
+ msize = size + MAP_STAT_BLOCK_HEADER_SIZE;
+ res = (*real_af->realloc)(n, real_af->extra, mptr, msize);
+ if (res) {
+ MapStatBlock_t *mb = (MapStatBlock_t *) res;
+
+ mb->size = size;
+ mb->type_no = n;
+ mb->pid = erts_get_current_pid();
+
+ stat_upd_realloc(n, size, old_size);
+
+ if (mptr != res) {
+
+ if (mptr) {
+ if (mb->prev)
+ mb->prev->next = mb;
+ else {
+ ASSERT(mem_anchor == (MapStatBlock_t *) mptr);
+ mem_anchor = mb;
+ }
+ if (mb->next)
+ mb->next->prev = mb;
+ }
+ else {
+ mb->prev = NULL;
+ mb->next = mem_anchor;
+ if (mem_anchor)
+ mem_anchor->prev = mb;
+ mem_anchor = mb;
+ }
+
+ }
+
+ res = (void *) mb->mem;
+ }
+
+ erts_mtx_unlock(&instr_mutex);
+ erts_mtx_unlock(&instr_x_mutex);
+
+ return res;
+}
+
+static void
+map_stat_free(ErtsAlcType_t n, void *extra, void *ptr)
+{
+ ErtsAllocatorFunctions_t *real_af = (ErtsAllocatorFunctions_t *) extra;
+ void *mptr;
+
+ erts_mtx_lock(&instr_x_mutex);
+ erts_mtx_lock(&instr_mutex);
+
+ if (ptr) {
+ MapStatBlock_t *mb;
+
+ mptr = (void *) (((char *) ptr) - MAP_STAT_BLOCK_HEADER_SIZE);
+ mb = (MapStatBlock_t *) mptr;
+
+ stat_upd_free(n, mb->size);
+
+ if (mb->prev)
+ mb->prev->next = mb->next;
+ else
+ mem_anchor = mb->next;
+ if (mb->next)
+ mb->next->prev = mb->prev;
+ }
+ else {
+ mptr = NULL;
+ }
+
+ (*real_af->free)(n, real_af->extra, mptr);
+
+ erts_mtx_unlock(&instr_mutex);
+ erts_mtx_unlock(&instr_x_mutex);
+
+}
+
+static void dump_memory_map_to_stream(FILE *fp)
+{
+ ErtsAlcType_t n;
+ MapStatBlock_t *bp;
+ int lock = !ERTS_IS_CRASH_DUMPING;
+ if (lock)
+ erts_mtx_lock(&instr_mutex);
+
+ /* Write header */
+
+ fprintf(fp,
+ "{instr_hdr,\n"
+ " %lu,\n"
+ " %lu,\n"
+ " {",
+ (unsigned long) ERTS_INSTR_VSN,
+ (unsigned long) MAP_STAT_BLOCK_HEADER_SIZE);
+
+#if ERTS_ALC_N_MIN != 1
+#error ERTS_ALC_N_MIN is not 1
+#endif
+
+ for (n = ERTS_ALC_N_MIN; n <= ERTS_ALC_N_MAX; n++) {
+ ErtsAlcType_t t = ERTS_ALC_N2T(n);
+ ErtsAlcType_t a = ERTS_ALC_T2A(t);
+ ErtsAlcType_t c = ERTS_ALC_T2C(t);
+ const char *astr;
+
+ if (erts_allctrs_info[a].enabled)
+ astr = ERTS_ALC_A2AD(a);
+ else
+ astr = ERTS_ALC_A2AD(ERTS_ALC_A_SYSTEM);
+
+ fprintf(fp,
+ "%s{%s,%s,%s}%s",
+ (n == ERTS_ALC_N_MIN) ? "" : " ",
+ ERTS_ALC_N2TD(n),
+ astr,
+ ERTS_ALC_C2CD(c),
+ (n == ERTS_ALC_N_MAX) ? "" : ",\n");
+ }
+
+ fprintf(fp, "}}.\n");
+
+ /* Write memory data */
+ for (bp = mem_anchor; bp; bp = bp->next) {
+ if (is_internal_pid(bp->pid))
+ fprintf(fp,
+ "{%lu, %lu, %lu, {%lu,%lu,%lu}}.\n",
+ (Uint) bp->type_no,
+ (Uint) bp->mem,
+ (Uint) bp->size,
+ (Uint) pid_channel_no(bp->pid),
+ (Uint) pid_number(bp->pid),
+ (Uint) pid_serial(bp->pid));
+ else
+ fprintf(fp,
+ "{%lu, %lu, %lu, undefined}.\n",
+ (Uint) bp->type_no,
+ (Uint) bp->mem,
+ (Uint) bp->size);
+ }
+
+ if (lock)
+ erts_mtx_unlock(&instr_mutex);
+}
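+
+/*
+ * Illustrative dump shape (all concrete values invented): one header term
+ * followed by one term per live block, terminated with ".\n" so the file
+ * can be read back with file:consult/1:
+ *
+ *     {instr_hdr, Vsn, HdrSz, {{Type,Allocator,Class}, ...}}.
+ *     {TypeNo, Address, Size, {Channel,Number,Serial}}.
+ *     {TypeNo, Address, Size, undefined}.
+ *
+ * The pid tuple is present for blocks allocated on behalf of an internal
+ * process and 'undefined' otherwise.
+ */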
+
+int erts_instr_dump_memory_map_to_fd(int fd)
+{
+ char buf[BUFSIZ];
+ FILE *f;
+
+ if (!erts_instr_memory_map)
+ return 0;
+
+ f = fdopen(fd, "w");
+ if (f == NULL)
+ return 0;
+
+ /* Avoid allocating memory; we may have run out of it at this point. */
+ setbuf(f, buf);
+
+ dump_memory_map_to_stream(f);
+ fflush(f);
+ return 1;
+}
+
+int erts_instr_dump_memory_map(const char *name)
+{
+ FILE *f;
+
+ if (!erts_instr_memory_map)
+ return 0;
+
+ f = fopen(name, "w");
+ if (f == NULL)
+ return 0;
+
+ dump_memory_map_to_stream(f);
+
+ fclose(f);
+ return 1;
+}
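+
+/*
+ * Usage sketch (illustrative): both dump entry points are no-ops unless
+ * memory-map instrumentation was enabled at emulator start (the
+ * "+Mim true" allocator flag), e.g.:
+ *
+ *     if (!erts_instr_dump_memory_map("memory.dump"))
+ *         erts_fprintf(stderr, "memory map instrumentation is off\n");
+ */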
+
+Eterm erts_instr_get_memory_map(Process *proc)
+{
+ MapStatBlock_t *org_mem_anchor;
+ Eterm hdr_tuple, md_list, res;
+ Eterm *hp;
+ Uint hsz;
+ MapStatBlock_t *bp;
+#ifdef DEBUG
+ Eterm *end_hp;
+#endif
+
+ if (!erts_instr_memory_map)
+ return am_false;
+
+ if (!atoms_initialized)
+ init_atoms();
+ if (!am_n)
+ init_am_n();
+ if (!am_c)
+ init_am_c();
+ if (!am_a)
+ init_am_a();
+
+ erts_mtx_lock(&instr_x_mutex);
+ erts_mtx_lock(&instr_mutex);
+
+ /* Header size */
+ hsz = 5 + 1 + (ERTS_ALC_N_MAX+1-ERTS_ALC_N_MIN)*(1 + 4);
+
+ /* Memory data list */
+ for (bp = mem_anchor; bp; bp = bp->next) {
+ if (is_internal_pid(bp->pid)) {
+#if (_PID_NUM_SIZE - 1 > MAX_SMALL)
+ if (internal_pid_number(bp->pid) > MAX_SMALL)
+ hsz += BIG_UINT_HEAP_SIZE;
+#endif
+#if (_PID_SER_SIZE - 1 > MAX_SMALL)
+ if (internal_pid_serial(bp->pid) > MAX_SMALL)
+ hsz += BIG_UINT_HEAP_SIZE;
+#endif
+ hsz += 4;
+ }
+
+ if ((Uint) bp->mem > MAX_SMALL)
+ hsz += BIG_UINT_HEAP_SIZE;
+ if (bp->size > MAX_SMALL)
+ hsz += BIG_UINT_HEAP_SIZE;
+
+ hsz += 5 + 2;
+ }
+
+ hsz += 3; /* Root tuple */
+
+ org_mem_anchor = mem_anchor;
+ mem_anchor = NULL;
+
+ erts_mtx_unlock(&instr_mutex);
+
+ hp = HAlloc(proc, hsz); /* May end up calling map_stat_alloc() */
+
+ erts_mtx_lock(&instr_mutex);
+
+#ifdef DEBUG
+ end_hp = hp + hsz;
+#endif
+
+ { /* Build header */
+ ErtsAlcType_t n;
+ Eterm type_map;
+ Uint *hp2 = hp;
+#ifdef DEBUG
+ Uint *hp2_end;
+#endif
+
+ hp += (ERTS_ALC_N_MAX + 1 - ERTS_ALC_N_MIN)*4;
+
+#ifdef DEBUG
+ hp2_end = hp;
+#endif
+
+ type_map = make_tuple(hp);
+ *(hp++) = make_arityval(ERTS_ALC_N_MAX + 1 - ERTS_ALC_N_MIN);
+
+ for (n = ERTS_ALC_N_MIN; n <= ERTS_ALC_N_MAX; n++) {
+ ErtsAlcType_t t = ERTS_ALC_N2T(n);
+ ErtsAlcType_t a = ERTS_ALC_T2A(t);
+ ErtsAlcType_t c = ERTS_ALC_T2C(t);
+
+ if (!erts_allctrs_info[a].enabled)
+ a = ERTS_ALC_A_SYSTEM;
+
+ *(hp++) = TUPLE3(hp2, am_n[n], am_a[a], am_c[c]);
+ hp2 += 4;
+ }
+
+ ASSERT(hp2 == hp2_end);
+
+ hdr_tuple = TUPLE4(hp,
+ am.instr_hdr,
+ make_small(ERTS_INSTR_VSN),
+ make_small(MAP_STAT_BLOCK_HEADER_SIZE),
+ type_map);
+
+ hp += 5;
+ }
+
+ /* Build memory data list */
+
+ for (md_list = NIL, bp = org_mem_anchor; bp; bp = bp->next) {
+ Eterm tuple;
+ Eterm type;
+ Eterm ptr;
+ Eterm size;
+ Eterm pid;
+
+ if (is_not_internal_pid(bp->pid))
+ pid = am_undefined;
+ else {
+ Eterm c;
+ Eterm n;
+ Eterm s;
+
+#if (ERST_INTERNAL_CHANNEL_NO > MAX_SMALL)
+#error Oversized internal channel number
+#endif
+ c = make_small(ERST_INTERNAL_CHANNEL_NO);
+
+#if (_PID_NUM_SIZE - 1 > MAX_SMALL)
+ if (internal_pid_number(bp->pid) > MAX_SMALL) {
+ n = uint_to_big(internal_pid_number(bp->pid), hp);
+ hp += BIG_UINT_HEAP_SIZE;
+ }
+ else
+#endif
+ n = make_small(internal_pid_number(bp->pid));
+
+#if (_PID_SER_SIZE - 1 > MAX_SMALL)
+ if (internal_pid_serial(bp->pid) > MAX_SMALL) {
+ s = uint_to_big(internal_pid_serial(bp->pid), hp);
+ hp += BIG_UINT_HEAP_SIZE;
+ }
+ else
+#endif
+ s = make_small(internal_pid_serial(bp->pid));
+ pid = TUPLE3(hp, c, n, s);
+ hp += 4;
+ }
+
+
+#if ERTS_ALC_N_MAX > MAX_SMALL
+#error Oversized memory type number
+#endif
+ type = make_small(bp->type_no);
+
+ if ((Uint) bp->mem > MAX_SMALL) {
+ ptr = uint_to_big((Uint) bp->mem, hp);
+ hp += BIG_UINT_HEAP_SIZE;
+ }
+ else
+ ptr = make_small((Uint) bp->mem);
+
+ if (bp->size > MAX_SMALL) {
+ size = uint_to_big(bp->size, hp);
+ hp += BIG_UINT_HEAP_SIZE;
+ }
+ else
+ size = make_small(bp->size);
+
+ tuple = TUPLE4(hp, type, ptr, size, pid);
+ hp += 5;
+
+ md_list = CONS(hp, tuple, md_list);
+ hp += 2;
+ }
+
+ res = TUPLE2(hp, hdr_tuple, md_list);
+
+ ASSERT(hp + 3 == end_hp);
+
+ if (mem_anchor) {
+ for (bp = mem_anchor; bp->next; bp = bp->next);
+
+ ASSERT(org_mem_anchor);
+ org_mem_anchor->prev = bp;
+ bp->next = org_mem_anchor;
+ }
+ else {
+ mem_anchor = org_mem_anchor;
+ }
+
+ erts_mtx_unlock(&instr_mutex);
+ erts_mtx_unlock(&instr_x_mutex);
+
+ return res;
+}
+
+static ERTS_INLINE void
+begin_new_max_period(Stat_t *stat, int min, int max)
+{
+ int i;
+ for (i = min; i <= max; i++) {
+ stat[i].max_size = stat[i].size;
+ stat[i].max_blocks = stat[i].blocks;
+ }
+}
+
+static ERTS_INLINE void
+update_max_ever_values(Stat_t *stat, int min, int max)
+{
+ int i;
+ for (i = min; i <= max; i++) {
+ if (stat[i].max_size_ever < stat[i].max_size)
+ stat[i].max_size_ever = stat[i].max_size;
+ if (stat[i].max_blocks_ever < stat[i].max_blocks)
+ stat[i].max_blocks_ever = stat[i].max_blocks;
+ }
+}
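+
+/*
+ * The two helpers above implement two-level maximum tracking: max_size and
+ * max_blocks cover the current measurement period and are reset by
+ * begin_new_max_period(), while max_size_ever/max_blocks_ever only ever
+ * grow. For example (invented numbers), sizes 10, 50, 30 within one period
+ * leave max_size at 50; starting a new period resets max_size to the
+ * current 30 but max_size_ever stays 50.
+ */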
+
+#define bld_string erts_bld_string
+#define bld_tuple erts_bld_tuple
+#define bld_tuplev erts_bld_tuplev
+#define bld_list erts_bld_list
+#define bld_2tup_list erts_bld_2tup_list
+#define bld_uint erts_bld_uint
+
+Eterm
+erts_instr_get_stat(Process *proc, Eterm what, int begin_max_period)
+{
+ int i, len, max, min, allctr;
+ Eterm *names, *values, res;
+ Uint arr_size, stat_size, hsz, *hszp, *hp, **hpp;
+ Stat_t *stat_src, *stat;
+
+ if (!erts_instr_stat)
+ return am_false;
+
+ if (!atoms_initialized)
+ init_atoms();
+
+ if (what == am.total) {
+ min = 0;
+ max = 0;
+ allctr = 0;
+ stat_size = sizeof(Stat_t);
+ stat_src = &stats->tot;
+ if (!am_tot)
+ init_am_tot();
+ names = am_tot;
+ }
+ else if (what == am.allocators) {
+ min = ERTS_ALC_A_MIN;
+ max = ERTS_ALC_A_MAX;
+ allctr = 1;
+ stat_size = sizeof(Stat_t)*(ERTS_ALC_A_MAX+1);
+ stat_src = stats->a;
+ if (!am_a)
+ init_am_a();
+ names = am_a;
+ }
+ else if (what == am.classes) {
+ min = ERTS_ALC_C_MIN;
+ max = ERTS_ALC_C_MAX;
+ allctr = 0;
+ stat_size = sizeof(Stat_t)*(ERTS_ALC_C_MAX+1);
+ stat_src = stats->c;
+ if (!am_c)
+ init_am_c();
+ names = &am_c[ERTS_ALC_C_MIN];
+ }
+ else if (what == am.types) {
+ min = ERTS_ALC_N_MIN;
+ max = ERTS_ALC_N_MAX;
+ allctr = 0;
+ stat_size = sizeof(Stat_t)*(ERTS_ALC_N_MAX+1);
+ stat_src = stats->n;
+ if (!am_n)
+ init_am_n();
+ names = &am_n[ERTS_ALC_N_MIN];
+ }
+ else {
+ return THE_NON_VALUE;
+ }
+
+ stat = (Stat_t *) erts_alloc(ERTS_ALC_T_TMP, stat_size);
+
+ arr_size = (max - min + 1)*sizeof(Eterm);
+
+ if (allctr)
+ names = (Eterm *) erts_alloc(ERTS_ALC_T_TMP, arr_size);
+
+ values = (Eterm *) erts_alloc(ERTS_ALC_T_TMP, arr_size);
+
+ erts_mtx_lock(&instr_mutex);
+
+ update_max_ever_values(stat_src, min, max);
+
+ sys_memcpy((void *) stat, (void *) stat_src, stat_size);
+
+ if (begin_max_period)
+ begin_new_max_period(stat_src, min, max);
+
+ erts_mtx_unlock(&instr_mutex);
+
+ hsz = 0;
+ hszp = &hsz;
+ hpp = NULL;
+
+ restart_bld:
+
+ len = 0;
+ for (i = min; i <= max; i++) {
+ if (!allctr || erts_allctrs_info[i].enabled) {
+ Eterm s[2];
+
+ if (allctr)
+ names[len] = am_a[i];
+
+ s[0] = bld_tuple(hpp, hszp, 4,
+ am.sizes,
+ bld_uint(hpp, hszp, stat[i].size),
+ bld_uint(hpp, hszp, stat[i].max_size),
+ bld_uint(hpp, hszp, stat[i].max_size_ever));
+
+ s[1] = bld_tuple(hpp, hszp, 4,
+ am.blocks,
+ bld_uint(hpp, hszp, stat[i].blocks),
+ bld_uint(hpp, hszp, stat[i].max_blocks),
+ bld_uint(hpp, hszp, stat[i].max_blocks_ever));
+
+ values[len] = bld_list(hpp, hszp, 2, s);
+
+ len++;
+ }
+ }
+
+ res = bld_2tup_list(hpp, hszp, len, names, values);
+
+ if (!hpp) {
+ hp = HAlloc(proc, hsz);
+ hszp = NULL;
+ hpp = &hp;
+ goto restart_bld;
+ }
+
+ erts_free(ERTS_ALC_T_TMP, (void *) stat);
+ erts_free(ERTS_ALC_T_TMP, (void *) values);
+ if (allctr)
+ erts_free(ERTS_ALC_T_TMP, (void *) names);
+
+ return res;
+}
+
+static void
+dump_stat_to_stream(FILE *fp, int begin_max_period)
+{
+ ErtsAlcType_t i, a_max, a_min;
+
+ erts_mtx_lock(&instr_mutex);
+
+ fprintf(fp,
+ "{instr_vsn,%lu}.\n",
+ (unsigned long) ERTS_INSTR_VSN);
+
+ update_max_ever_values(&stats->tot, 0, 0);
+
+ fprintf(fp,
+ "{total,[{total,[{sizes,%lu,%lu,%lu},{blocks,%lu,%lu,%lu}]}]}.\n",
+ stats->tot.size,
+ stats->tot.max_size,
+ stats->tot.max_size_ever,
+ stats->tot.blocks,
+ stats->tot.max_blocks,
+ stats->tot.max_blocks_ever);
+
+ a_max = 0;
+ a_min = ~0;
+ for (i = ERTS_ALC_A_MIN; i <= ERTS_ALC_A_MAX; i++) {
+ if (erts_allctrs_info[i].enabled) {
+ if (a_min > i)
+ a_min = i;
+ if (a_max < i)
+ a_max = i;
+ }
+ }
+
+ ASSERT(ERTS_ALC_A_MIN <= a_min && a_min <= ERTS_ALC_A_MAX);
+ ASSERT(ERTS_ALC_A_MIN <= a_max && a_max <= ERTS_ALC_A_MAX);
+ ASSERT(a_min <= a_max);
+
+ update_max_ever_values(stats->a, a_min, a_max);
+
+ for (i = ERTS_ALC_A_MIN; i <= ERTS_ALC_A_MAX; i++) {
+ if (erts_allctrs_info[i].enabled) {
+ fprintf(fp,
+ "%s{%s,[{sizes,%lu,%lu,%lu},{blocks,%lu,%lu,%lu}]}%s",
+ i == a_min ? "{allocators,\n [" : " ",
+ ERTS_ALC_A2AD(i),
+ stats->a[i].size,
+ stats->a[i].max_size,
+ stats->a[i].max_size_ever,
+ stats->a[i].blocks,
+ stats->a[i].max_blocks,
+ stats->a[i].max_blocks_ever,
+ i == a_max ? "]}.\n" : ",\n");
+ }
+ }
+
+ update_max_ever_values(stats->c, ERTS_ALC_C_MIN, ERTS_ALC_C_MAX);
+
+ for (i = ERTS_ALC_C_MIN; i <= ERTS_ALC_C_MAX; i++) {
+ fprintf(fp,
+ "%s{%s,[{sizes,%lu,%lu,%lu},{blocks,%lu,%lu,%lu}]}%s",
+ i == ERTS_ALC_C_MIN ? "{classes,\n [" : " ",
+ ERTS_ALC_C2CD(i),
+ stats->c[i].size,
+ stats->c[i].max_size,
+ stats->c[i].max_size_ever,
+ stats->c[i].blocks,
+ stats->c[i].max_blocks,
+ stats->c[i].max_blocks_ever,
+ i == ERTS_ALC_C_MAX ? "]}.\n" : ",\n" );
+ }
+
+ update_max_ever_values(stats->n, ERTS_ALC_N_MIN, ERTS_ALC_N_MAX);
+
+ for (i = ERTS_ALC_N_MIN; i <= ERTS_ALC_N_MAX; i++) {
+ fprintf(fp,
+ "%s{%s,[{sizes,%lu,%lu,%lu},{blocks,%lu,%lu,%lu}]}%s",
+ i == ERTS_ALC_N_MIN ? "{types,\n [" : " ",
+ ERTS_ALC_N2TD(i),
+ stats->n[i].size,
+ stats->n[i].max_size,
+ stats->n[i].max_size_ever,
+ stats->n[i].blocks,
+ stats->n[i].max_blocks,
+ stats->n[i].max_blocks_ever,
+ i == ERTS_ALC_N_MAX ? "]}.\n" : ",\n" );
+ }
+
+ if (begin_max_period) {
+ begin_new_max_period(&stats->tot, 0, 0);
+ begin_new_max_period(stats->a, a_min, a_max);
+ begin_new_max_period(stats->c, ERTS_ALC_C_MIN, ERTS_ALC_C_MAX);
+ begin_new_max_period(stats->n, ERTS_ALC_N_MIN, ERTS_ALC_N_MAX);
+ }
+
+ erts_mtx_unlock(&instr_mutex);
+
+}
+
+int erts_instr_dump_stat_to_fd(int fd, int begin_max_period)
+{
+ char buf[BUFSIZ];
+ FILE *fp;
+
+ if (!erts_instr_stat)
+ return 0;
+
+ fp = fdopen(fd, "w");
+ if (fp == NULL)
+ return 0;
+
+ /* Avoid allocating memory; we may have run out of it at this point. */
+ setbuf(fp, buf);
+
+ dump_stat_to_stream(fp, begin_max_period);
+ fflush(fp);
+ return 1;
+}
+
+int erts_instr_dump_stat(const char *name, int begin_max_period)
+{
+ FILE *file;
+
+ if (!erts_instr_stat)
+ return 0;
+
+ file = fopen(name, "w");
+ if (file == NULL)
+ return 0;
+
+ dump_stat_to_stream(file, begin_max_period);
+
+ fclose(file);
+ return 1;
+}
+
+
+Uint
+erts_instr_get_total(void)
+{
+ return erts_instr_stat ? stats->tot.size : 0;
+}
+
+Uint
+erts_instr_get_max_total(void)
+{
+ if (erts_instr_stat) {
+ update_max_ever_values(&stats->tot, 0, 0);
+ return stats->tot.max_size_ever;
+ }
+ return 0;
+}
+
+Eterm
+erts_instr_get_type_info(Process *proc)
+{
+ Eterm res, *tpls;
+ Uint hsz, *hszp, *hp, **hpp;
+ ErtsAlcType_t n;
+
+ if (!am_n)
+ init_am_n();
+ if (!am_a)
+ init_am_a();
+ if (!am_c)
+ init_am_c();
+
+ tpls = (Eterm *) erts_alloc(ERTS_ALC_T_TMP,
+ (ERTS_ALC_N_MAX-ERTS_ALC_N_MIN+1)
+ * sizeof(Eterm));
+ hsz = 0;
+ hszp = &hsz;
+ hpp = NULL;
+
+ restart_bld:
+
+#if ERTS_ALC_N_MIN != 1
+#error ERTS_ALC_N_MIN is not 1
+#endif
+
+ for (n = ERTS_ALC_N_MIN; n <= ERTS_ALC_N_MAX; n++) {
+ ErtsAlcType_t t = ERTS_ALC_N2T(n);
+ ErtsAlcType_t a = ERTS_ALC_T2A(t);
+ ErtsAlcType_t c = ERTS_ALC_T2C(t);
+
+ if (!erts_allctrs_info[a].enabled)
+ a = ERTS_ALC_A_SYSTEM;
+
+ tpls[n - ERTS_ALC_N_MIN]
+ = bld_tuple(hpp, hszp, 3, am_n[n], am_a[a], am_c[c]);
+ }
+
+ res = bld_tuplev(hpp, hszp, ERTS_ALC_N_MAX-ERTS_ALC_N_MIN+1, tpls);
+
+ if (!hpp) {
+ hp = HAlloc(proc, hsz);
+ hszp = NULL;
+ hpp = &hp;
+ goto restart_bld;
+ }
+
+ erts_free(ERTS_ALC_T_TMP, tpls);
+
+ return res;
+}
+
+Uint
+erts_instr_init(int stat, int map_stat)
+{
+ int i;
+
+ am_tot = NULL;
+ am_n = NULL;
+ am_c = NULL;
+ am_a = NULL;
+
+ erts_instr_memory_map = 0;
+ erts_instr_stat = 0;
+ atoms_initialized = 0;
+
+ if (!stat && !map_stat)
+ return 0;
+
+ stats = erts_alloc(ERTS_ALC_T_INSTR_INFO, sizeof(struct stats_));
+
+ erts_mtx_init(&instr_mutex, "instr");
+
+ mem_anchor = NULL;
+
+ /* Install instrumentation functions */
+ ASSERT(sizeof(erts_allctrs) == sizeof(real_allctrs));
+
+ sys_memcpy((void *)real_allctrs,(void *)erts_allctrs,sizeof(erts_allctrs));
+
+ sys_memzero((void *) &stats->tot, sizeof(Stat_t));
+ sys_memzero((void *) stats->a, sizeof(Stat_t)*(ERTS_ALC_A_MAX+1));
+ sys_memzero((void *) stats->c, sizeof(Stat_t)*(ERTS_ALC_C_MAX+1));
+ sys_memzero((void *) stats->n, sizeof(Stat_t)*(ERTS_ALC_N_MAX+1));
+
+ for (i = ERTS_ALC_A_MIN; i <= ERTS_ALC_A_MAX; i++) {
+ if (erts_allctrs_info[i].enabled)
+ stats->ap[i] = &stats->a[i];
+ else
+ stats->ap[i] = &stats->a[ERTS_ALC_A_SYSTEM];
+ }
+
+ if (map_stat) {
+
+ erts_mtx_init(&instr_x_mutex, "instr_x");
+
+ erts_instr_memory_map = 1;
+ erts_instr_stat = 1;
+ for (i = ERTS_ALC_A_MIN; i <= ERTS_ALC_A_MAX; i++) {
+ erts_allctrs[i].alloc = map_stat_alloc;
+ erts_allctrs[i].realloc = map_stat_realloc;
+ erts_allctrs[i].free = map_stat_free;
+ erts_allctrs[i].extra = (void *) &real_allctrs[i];
+ }
+ return MAP_STAT_BLOCK_HEADER_SIZE;
+ }
+ else {
+ erts_instr_stat = 1;
+ for (i = ERTS_ALC_A_MIN; i <= ERTS_ALC_A_MAX; i++) {
+ erts_allctrs[i].alloc = stat_alloc;
+ erts_allctrs[i].realloc = stat_realloc;
+ erts_allctrs[i].free = stat_free;
+ erts_allctrs[i].extra = (void *) &real_allctrs[i];
+ }
+ return STAT_BLOCK_HEADER_SIZE;
+ }
+
+}
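+
+/*
+ * Usage sketch (illustrative): erts_instr_init() runs once during emulator
+ * start-up, before the wrapped allocators see any traffic, and the return
+ * value tells the caller how many header bytes each block will carry:
+ *
+ *     Uint extra_hdr = erts_instr_init(want_stat, want_map_stat);
+ *
+ * As the code above shows, map_stat != 0 implies plain stat as well.
+ */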
+
diff --git a/erts/emulator/beam/erl_instrument.h b/erts/emulator/beam/erl_instrument.h
new file mode 100644
index 0000000000..37b9b67139
--- /dev/null
+++ b/erts/emulator/beam/erl_instrument.h
@@ -0,0 +1,41 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2003-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+#ifndef ERL_INSTRUMENT_H__
+#define ERL_INSTRUMENT_H__
+
+#include "erl_mtrace.h"
+
+#define ERTS_INSTR_VSN 2
+
+extern int erts_instr_memory_map;
+extern int erts_instr_stat;
+
+Uint erts_instr_init(int stat, int map_stat);
+int erts_instr_dump_memory_map_to_fd(int fd);
+int erts_instr_dump_memory_map(const char *name);
+Eterm erts_instr_get_memory_map(Process *process);
+int erts_instr_dump_stat_to_fd(int fd, int begin_max_period);
+int erts_instr_dump_stat(const char *name, int begin_max_period);
+Eterm erts_instr_get_stat(Process *proc, Eterm what, int begin_max_period);
+Eterm erts_instr_get_type_info(Process *proc);
+Uint erts_instr_get_total(void);
+Uint erts_instr_get_max_total(void);
+
+#endif
diff --git a/erts/emulator/beam/erl_lock_check.c b/erts/emulator/beam/erl_lock_check.c
new file mode 100644
index 0000000000..25f1d420d1
--- /dev/null
+++ b/erts/emulator/beam/erl_lock_check.c
@@ -0,0 +1,1307 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2005-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Description: A lock checker that checks that each thread acquires
+ * locks according to a predefined global lock order. The
+ * global lock order is used to prevent deadlocks. If the
+ * lock order is violated, an error message is printed
+ * and the emulator aborts. The lock checker is only
+ * intended to be enabled when debugging.
+ *
+ * Author: Rickard Green
+ */
+
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+
+/* Needed for VxWorks va_arg */
+#include "sys.h"
+
+#ifdef ERTS_ENABLE_LOCK_CHECK
+
+#include "erl_lock_check.h"
+#include "erl_term.h"
+#include "erl_threads.h"
+
+typedef struct {
+ char *name;
+ char *internal_order;
+} erts_lc_lock_order_t;
+
+/*
+ * Global lock order for locks in the emulator.
+ *
+ * Locks early (low indexes) in the 'erts_lock_order' array should be
+ * locked before locks late (high indexes) in the array. Each lock has
+ * a name which is set on initialization. If multiple locks with the
+ * same name are used, either an immediate Erlang term (e.g. internal
+ * pid) or the address of the lock is used for internal lock order.
+ * The immediate Erlang term used for internal lock order is also set
+ * on initialization. Locks with small immediate Erlang terms should
+ * be locked before locks with large immediate Erlang terms, and
+ * locks with small addresses should be locked before locks with
+ * large addresses. The immediate terms and addresses (boxed pointers)
+ * are compared as unsigned integers not as Erlang terms.
+ *
+ * Once a spinlock or rw(spin)lock has been locked, the thread is not
+ * allowed to lock mutexes, rwmutexes or process locks until all
+ * spinlocks and rwlocks have been unlocked. This restriction is not
+ * reflected by the lock order below, but the lock checker will still
+ * check for violations of this restriction.
+ */
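+/*
+ * A concrete reading of the order (illustrative): a "proc_main" lock must
+ * be taken before a "proc_msgq" lock, since it appears earlier in the
+ * array below, and two "proc_main" locks are ordered by their pid terms
+ * compared as unsigned integers, e.g. the lock for <0.10.0> before the
+ * one for <0.42.0>.
+ */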
+static erts_lc_lock_order_t erts_lock_order[] = {
+ /*
+ * "Lock name" "Internal lock order
+ * description (NULL
+ * if only one lock uses
+ * the lock name)"
+ */
+#ifdef ERTS_SMP
+ { "driver_lock", "driver_name" },
+ { "port_lock", "port_id" },
+#endif
+ { "port_data_lock", "address" },
+#ifdef ERTS_SMP
+ { "bif_timers", NULL },
+ { "reg_tab", NULL },
+ { "migration_info_update", NULL },
+ { "proc_main", "pid" },
+ { "nodes_monitors", NULL },
+ { "driver_list", NULL },
+ { "proc_link", "pid" },
+ { "proc_msgq", "pid" },
+ { "dist_entry", "address" },
+ { "dist_entry_links", "address" },
+ { "proc_status", "pid" },
+ { "proc_tab", NULL },
+ { "ports_snapshot", NULL },
+ { "db_tab", "address" },
+ { "db_tab_fix", "address" },
+ { "meta_name_tab", "address" },
+ { "meta_main_tab_slot", "address" },
+ { "meta_main_tab_main", NULL },
+ { "db_hash_slot", "address" },
+ { "node_table", NULL },
+ { "dist_table", NULL },
+ { "sys_tracers", NULL },
+ { "trace_pattern", NULL },
+ { "module_tab", NULL },
+ { "export_tab", NULL },
+ { "fun_tab", NULL },
+ { "environ", NULL },
+#endif
+ { "asyncq", "address" },
+#ifndef ERTS_SMP
+ { "async_ready", NULL },
+#endif
+ { "efile_drv", "address" },
+#if defined(ENABLE_CHILD_WAITER_THREAD) || defined(ERTS_SMP)
+ { "child_status", NULL },
+#endif
+#ifdef __WIN32__
+ { "sys_driver_data_lock", NULL },
+#endif
+ { "drv_ev_state_grow", NULL, },
+ { "drv_ev_state", "address" },
+ { "safe_hash", "address" },
+ { "pollset_rm_list", NULL },
+ { "removed_fd_pre_alloc_lock", NULL },
+ { "state_prealloc", NULL },
+ { "schdlr_sspnd", NULL },
+ { "cpu_bind", NULL },
+ { "run_queue", "address" },
+ { "pollset", "address" },
+#ifdef __WIN32__
+ { "pollwaiter", "address" },
+ { "break_waiter_lock", NULL },
+#endif /* __WIN32__ */
+ { "alcu_init_atoms", NULL },
+ { "mseg_init_atoms", NULL },
+ { "drv_tsd", NULL },
+#ifdef ERTS_SMP
+ { "sys_msg_q", NULL },
+ { "atom_tab", NULL },
+ { "make_ref", NULL },
+ { "misc_op_list_pre_alloc_lock", "address" },
+ { "message_pre_alloc_lock", "address" },
+ { "ptimer_pre_alloc_lock", "address", },
+ { "btm_pre_alloc_lock", NULL, },
+ { "dist_entry_out_queue", "address" },
+#endif
+ { "mtrace_op", NULL },
+ { "instr_x", NULL },
+ { "instr", NULL },
+ { "fix_alloc", "index" },
+ { "alcu_allocator", "index" },
+ { "mseg", NULL },
+#ifdef ERTS_SMP
+ { "port_task_pre_alloc_lock", "address" },
+ { "port_taskq_pre_alloc_lock", "address" },
+ { "proclist_pre_alloc_lock", "address" },
+ { "port_tasks_lock", NULL },
+ { "get_free_port", NULL },
+ { "port_state", "address" },
+ { "xports_list_pre_alloc_lock", "address" },
+ { "inet_buffer_stack_lock", NULL },
+ { "gc_info", NULL },
+ { "io_wake", NULL },
+ { "timer_wheel", NULL },
+ { "system_block", NULL },
+ { "timeofday", NULL },
+ { "breakpoints", NULL },
+ { "pollsets_lock", NULL },
+ { "async_id", NULL },
+ { "pix_lock", "address" },
+ { "run_queues_lists", NULL },
+ { "sched_stat", NULL },
+#endif
+ { "alloc_thr_ix_lock", NULL },
+#ifdef ERTS_SMP
+ { "proc_lck_wtr_alloc", NULL },
+#endif
+#ifdef __WIN32__
+#ifdef DEBUG
+ { "save_ops_lock", NULL },
+#endif
+#endif
+ { "mtrace_buf", NULL }
+};
+
+#define ERTS_LOCK_ORDER_SIZE \
+ (sizeof(erts_lock_order)/sizeof(erts_lc_lock_order_t))
+
+#define LOCK_IS_TYPE_ORDER_VIOLATION(LCK_FLG, LCKD_FLG) \
+ (((LCKD_FLG) & (ERTS_LC_FLG_LT_SPINLOCK|ERTS_LC_FLG_LT_RWSPINLOCK)) \
+ && ((LCK_FLG) \
+ & ERTS_LC_FLG_LT_ALL \
+ & ~(ERTS_LC_FLG_LT_SPINLOCK|ERTS_LC_FLG_LT_RWSPINLOCK)))
+
+static char *
+lock_type(Uint16 flags)
+{
+ switch (flags & ERTS_LC_FLG_LT_ALL) {
+ case ERTS_LC_FLG_LT_SPINLOCK: return "[spinlock]";
+ case ERTS_LC_FLG_LT_RWSPINLOCK: return "[rw(spin)lock]";
+ case ERTS_LC_FLG_LT_MUTEX: return "[mutex]";
+ case ERTS_LC_FLG_LT_RWMUTEX: return "[rwmutex]";
+ case ERTS_LC_FLG_LT_PROCLOCK: return "[proclock]";
+ default: return "";
+ }
+}
+
+static char *
+rw_op_str(Uint16 flags)
+{
+ switch (flags & ERTS_LC_FLG_LO_READ_WRITE) {
+ case ERTS_LC_FLG_LO_READ_WRITE:
+ return " (rw)";
+ case ERTS_LC_FLG_LO_READ:
+ return " (r)";
+ case ERTS_LC_FLG_LO_WRITE:
+ erts_fprintf(stderr, "\nInternal error\n");
+ abort();
+ default:
+ break;
+ }
+ return "";
+}
+
+typedef struct erts_lc_locked_lock_t_ erts_lc_locked_lock_t;
+struct erts_lc_locked_lock_t_ {
+ erts_lc_locked_lock_t *next;
+ erts_lc_locked_lock_t *prev;
+ Eterm extra;
+ Sint16 id;
+ Uint16 flags;
+};
+
+typedef struct {
+ erts_lc_locked_lock_t *first;
+ erts_lc_locked_lock_t *last;
+} erts_lc_locked_lock_list_t;
+
+typedef struct erts_lc_locked_locks_t_ erts_lc_locked_locks_t;
+struct erts_lc_locked_locks_t_ {
+ char *thread_name;
+ erts_tid_t tid;
+ erts_lc_locked_locks_t *next;
+ erts_lc_locked_locks_t *prev;
+ erts_lc_locked_lock_list_t locked;
+ erts_lc_locked_lock_list_t required;
+};
+
+typedef union erts_lc_free_block_t_ erts_lc_free_block_t;
+union erts_lc_free_block_t_ {
+ erts_lc_free_block_t *next;
+ erts_lc_locked_lock_t lock;
+};
+
+static ethr_tsd_key locks_key;
+
+static erts_lc_locked_locks_t *erts_locked_locks;
+
+static erts_lc_free_block_t *free_blocks;
+
+#ifdef ERTS_LC_STATIC_ALLOC
+#define ERTS_LC_FB_CHUNK_SIZE 10000
+#else
+#define ERTS_LC_FB_CHUNK_SIZE 10
+#endif
+
+#ifdef ETHR_HAVE_NATIVE_LOCKS
+static ethr_spinlock_t free_blocks_lock;
+#define ERTS_LC_LOCK ethr_spin_lock
+#define ERTS_LC_UNLOCK ethr_spin_unlock
+#else
+static ethr_mutex free_blocks_lock;
+#define ERTS_LC_LOCK ethr_mutex_lock
+#define ERTS_LC_UNLOCK ethr_mutex_unlock
+#endif
+
+static ERTS_INLINE void
+lc_lock(void)
+{
+ if (ERTS_LC_LOCK(&free_blocks_lock) != 0)
+ abort();
+}
+
+static ERTS_INLINE void
+lc_unlock(void)
+{
+ if (ERTS_LC_UNLOCK(&free_blocks_lock) != 0)
+ abort();
+}
+
+static ERTS_INLINE void lc_free(void *p)
+{
+ erts_lc_free_block_t *fb = (erts_lc_free_block_t *) p;
+#ifdef DEBUG
+ memset((void *) p, 0xdf, sizeof(erts_lc_free_block_t));
+#endif
+ lc_lock();
+ fb->next = free_blocks;
+ free_blocks = fb;
+ lc_unlock();
+}
+
+#ifdef ERTS_LC_STATIC_ALLOC
+
+static void *lc_core_alloc(void)
+{
+ lc_unlock();
+ erts_fprintf(stderr, "Lock checker out of memory!\n");
+ abort();
+}
+
+#else
+
+static void *lc_core_alloc(void)
+{
+ int i;
+ erts_lc_free_block_t *fbs;
+ lc_unlock();
+ fbs = (erts_lc_free_block_t *) malloc(sizeof(erts_lc_free_block_t)
+ * ERTS_LC_FB_CHUNK_SIZE);
+ if (!fbs) {
+ erts_fprintf(stderr, "Lock checker failed to allocate memory!\n");
+ abort();
+ }
+ for (i = 1; i < ERTS_LC_FB_CHUNK_SIZE - 1; i++) {
+#ifdef DEBUG
+ memset((void *) &fbs[i], 0xdf, sizeof(erts_lc_free_block_t));
+#endif
+ fbs[i].next = &fbs[i+1];
+ }
+#ifdef DEBUG
+ memset((void *) &fbs[ERTS_LC_FB_CHUNK_SIZE-1],
+ 0xdf, sizeof(erts_lc_free_block_t));
+#endif
+ lc_lock();
+ fbs[ERTS_LC_FB_CHUNK_SIZE-1].next = free_blocks;
+ free_blocks = &fbs[1];
+ return (void *) &fbs[0];
+}
+
+#endif
+
+static ERTS_INLINE void *lc_alloc(void)
+{
+ void *res;
+ lc_lock();
+ if (!free_blocks)
+ res = lc_core_alloc();
+ else {
+ res = (void *) free_blocks;
+ free_blocks = free_blocks->next;
+ }
+ lc_unlock();
+ return res;
+}
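+
+/*
+ * Pool sketch: when the free list is empty, lc_core_alloc() (in the
+ * non-static build) malloc()s a chunk of ERTS_LC_FB_CHUNK_SIZE blocks,
+ * returns fbs[0] to the caller and threads fbs[1]..fbs[CHUNK-1] onto
+ * free_blocks, so later lc_alloc() calls are a lock-protected list pop.
+ */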
+
+
+static erts_lc_locked_locks_t *
+create_locked_locks(char *thread_name)
+{
+ erts_lc_locked_locks_t *l_lcks = malloc(sizeof(erts_lc_locked_locks_t));
+ if (!l_lcks)
+ abort();
+
+ l_lcks->thread_name = strdup(thread_name ? thread_name : "unknown");
+ if (!l_lcks->thread_name)
+ abort();
+
+ l_lcks->tid = erts_thr_self();
+ l_lcks->required.first = NULL;
+ l_lcks->required.last = NULL;
+ l_lcks->locked.first = NULL;
+ l_lcks->locked.last = NULL;
+ l_lcks->prev = NULL;
+ lc_lock();
+ l_lcks->next = erts_locked_locks;
+ if (erts_locked_locks)
+ erts_locked_locks->prev = l_lcks;
+ erts_locked_locks = l_lcks;
+ lc_unlock();
+ erts_tsd_set(locks_key, (void *) l_lcks);
+ return l_lcks;
+}
+
+static void
+destroy_locked_locks(erts_lc_locked_locks_t *l_lcks)
+{
+ ASSERT(l_lcks->thread_name);
+ free((void *) l_lcks->thread_name);
+ ASSERT(l_lcks->required.first == NULL);
+ ASSERT(l_lcks->required.last == NULL);
+ ASSERT(l_lcks->locked.first == NULL);
+ ASSERT(l_lcks->locked.last == NULL);
+
+ lc_lock();
+ if (l_lcks->prev)
+ l_lcks->prev->next = l_lcks->next;
+ else {
+ ASSERT(erts_locked_locks == l_lcks);
+ erts_locked_locks = l_lcks->next;
+ }
+
+ if (l_lcks->next)
+ l_lcks->next->prev = l_lcks->prev;
+ lc_unlock();
+
+ free((void *) l_lcks);
+
+}
+
+static ERTS_INLINE erts_lc_locked_locks_t *
+get_my_locked_locks(void)
+{
+ return erts_tsd_get(locks_key);
+}
+
+static ERTS_INLINE erts_lc_locked_locks_t *
+make_my_locked_locks(void)
+{
+ erts_lc_locked_locks_t *l_lcks = get_my_locked_locks();
+ if (l_lcks)
+ return l_lcks;
+ else
+ return create_locked_locks(NULL);
+}
+
+static ERTS_INLINE erts_lc_locked_lock_t *
+new_locked_lock(erts_lc_lock_t *lck, Uint16 op_flags)
+{
+ erts_lc_locked_lock_t *l_lck = (erts_lc_locked_lock_t *) lc_alloc();
+ l_lck->next = NULL;
+ l_lck->prev = NULL;
+ l_lck->id = lck->id;
+ l_lck->extra = lck->extra;
+ l_lck->flags = lck->flags | op_flags;
+ return l_lck;
+}
+
+static void
+print_lock2(char *prefix, Sint16 id, Eterm extra, Uint16 flags, char *suffix)
+{
+ char *lname = (0 <= id && id < ERTS_LOCK_ORDER_SIZE
+ ? erts_lock_order[id].name
+ : "unknown");
+ if (is_boxed(extra))
+ erts_fprintf(stderr,
+ "%s'%s:%p%s'%s%s",
+ prefix,
+ lname,
+ boxed_val(extra),
+ lock_type(flags),
+ rw_op_str(flags),
+ suffix);
+ else
+ erts_fprintf(stderr,
+ "%s'%s:%T%s'%s%s",
+ prefix,
+ lname,
+ extra,
+ lock_type(flags),
+ rw_op_str(flags),
+ suffix);
+}
+
+static void
+print_lock(char *prefix, erts_lc_lock_t *lck, char *suffix)
+{
+ print_lock2(prefix, lck->id, lck->extra, lck->flags, suffix);
+}
+
+static void
+print_curr_locks(erts_lc_locked_locks_t *l_lcks)
+{
+ erts_lc_locked_lock_t *l_lck;
+ if (!l_lcks)
+ erts_fprintf(stderr,
+ "Currently no locks are locked by this thread.\n");
+ else if (!l_lcks->locked.first)
+ erts_fprintf(stderr,
+ "Currently no locks are locked by the %s thread.\n",
+ l_lcks->thread_name);
+ else {
+ erts_fprintf(stderr,
+ "Currently these locks are locked by the %s thread:\n",
+ l_lcks->thread_name);
+ for (l_lck = l_lcks->locked.first; l_lck; l_lck = l_lck->next)
+ print_lock2(" ", l_lck->id, l_lck->extra, l_lck->flags, "\n");
+ }
+}
+
+static void
+print_lock_order(void)
+{
+ int i;
+ erts_fprintf(stderr, "Lock order:\n");
+ for (i = 1; i < ERTS_LOCK_ORDER_SIZE; i++) {
+ if (erts_lock_order[i].internal_order)
+ erts_fprintf(stderr,
+ " %s:%s\n",
+ erts_lock_order[i].name,
+ erts_lock_order[i].internal_order);
+ else
+ erts_fprintf(stderr, " %s\n", erts_lock_order[i].name);
+ }
+}
+
+static void
+uninitialized_lock(void)
+{
+ erts_fprintf(stderr, "Performing operations on uninitialized lock!\n");
+ print_curr_locks(get_my_locked_locks());
+ abort();
+}
+
+static void
+lock_twice(char *prefix, erts_lc_locked_locks_t *l_lcks, erts_lc_lock_t *lck,
+ Uint16 op_flags)
+{
+ erts_fprintf(stderr, "%s%s", prefix, rw_op_str(op_flags));
+ print_lock(" ", lck, " lock which is already locked by thread!\n");
+ print_curr_locks(l_lcks);
+ abort();
+}
+
+static void
+unlock_op_mismatch(erts_lc_locked_locks_t *l_lcks, erts_lc_lock_t *lck,
+ Uint16 op_flags)
+{
+ erts_fprintf(stderr, "Unlocking%s ", rw_op_str(op_flags));
+ print_lock("", lck, " lock which mismatch previous lock operation!\n");
+ print_curr_locks(l_lcks);
+ abort();
+}
+
+static void
+unlock_of_not_locked(erts_lc_locked_locks_t *l_lcks, erts_lc_lock_t *lck)
+{
+ print_lock("Unlocking ", lck, " lock which is not locked by thread!\n");
+ print_curr_locks(l_lcks);
+ abort();
+}
+
+static void
+lock_order_violation(erts_lc_locked_locks_t *l_lcks, erts_lc_lock_t *lck)
+{
+ print_lock("Lock order violation occured when locking ", lck, "!\n");
+ print_curr_locks(l_lcks);
+ print_lock_order();
+ abort();
+}
+
+static void
+type_order_violation(char *op, erts_lc_locked_locks_t *l_lcks,
+ erts_lc_lock_t *lck)
+{
+ erts_fprintf(stderr, "Lock type order violation occured when ");
+ print_lock(op, lck, "!\n");
+ ASSERT(l_lcks);
+ print_curr_locks(l_lcks);
+ abort();
+}
+
+static void
+lock_mismatch(erts_lc_locked_locks_t *l_lcks, int exact,
+ int failed_have, erts_lc_lock_t *have, int have_len,
+ int failed_have_not, erts_lc_lock_t *have_not, int have_not_len)
+{
+ int i;
+ erts_fprintf(stderr, "Lock mismatch found!\n");
+ if (failed_have >= 0) {
+ ASSERT(have && have_len > failed_have);
+ print_lock2("At least the ",
+ have[failed_have].id, have[failed_have].extra, 0,
+ " lock is not locked when it should have been\n");
+ }
+ else if (failed_have_not >= 0) {
+ ASSERT(have_not && have_not_len > failed_have_not);
+ print_lock2("At least the ",
+ have_not[failed_have_not].id,
+ have_not[failed_have_not].extra,
+ 0,
+ " lock is locked when it should not have been\n");
+ }
+ if (exact) {
+ if (!have || have_len <= 0)
+ erts_fprintf(stderr,
+ "Thread should not have any locks locked at all\n");
+ else {
+ erts_fprintf(stderr,
+ "Thread should have these and only these locks "
+ "locked:\n");
+ for (i = 0; i < have_len; i++)
+ print_lock2(" ", have[i].id, have[i].extra, 0, "\n");
+ }
+ }
+ else {
+ if (have && have_len > 0) {
+ erts_fprintf(stderr,
+ "Thread should at least have these locks locked:\n");
+ for (i = 0; i < have_len; i++)
+ print_lock2(" ", have[i].id, have[i].extra, 0, "\n");
+ }
+ if (have_not && have_not_len > 0) {
+ erts_fprintf(stderr,
+ "Thread should at least not have these locks "
+ "locked:\n");
+ for (i = 0; i < have_not_len; i++)
+ print_lock2(" ", have_not[i].id, have_not[i].extra, 0, "\n");
+ }
+ }
+ print_curr_locks(l_lcks);
+ abort();
+}
+
+static void
+unlock_of_required_lock(erts_lc_locked_locks_t *l_lcks, erts_lc_lock_t *lck)
+{
+ print_lock("Unlocking required ", lck, " lock!\n");
+ print_curr_locks(l_lcks);
+ abort();
+}
+
+static void
+unrequire_of_not_required_lock(erts_lc_locked_locks_t *l_lcks, erts_lc_lock_t *lck)
+{
+ print_lock("Unrequire on ", lck, " lock not required!\n");
+ print_curr_locks(l_lcks);
+ abort();
+}
+
+static void
+require_twice(erts_lc_locked_locks_t *l_lcks, erts_lc_lock_t *lck)
+{
+ print_lock("Require on ", lck, " lock already required!\n");
+ print_curr_locks(l_lcks);
+ abort();
+}
+
+static void
+required_not_locked(erts_lc_locked_locks_t *l_lcks, erts_lc_lock_t *lck)
+{
+ print_lock("Required ", lck, " lock not locked!\n");
+ print_curr_locks(l_lcks);
+ abort();
+}
+
+
+static void
+thread_exit_handler(void)
+{
+ erts_lc_locked_locks_t *l_lcks = get_my_locked_locks();
+ if (l_lcks) {
+ if (l_lcks->locked.first) {
+ erts_fprintf(stderr,
+ "Thread exiting while having locked locks!\n");
+ print_curr_locks(l_lcks);
+ abort();
+ }
+ destroy_locked_locks(l_lcks);
+ /* erts_tsd_set(locks_key, NULL); */
+ }
+}
+
+void
+erts_lc_set_thread_name(char *thread_name)
+{
+ erts_lc_locked_locks_t *l_lcks = get_my_locked_locks();
+ if (!l_lcks)
+ (void) create_locked_locks(thread_name);
+ else {
+ ASSERT(l_lcks->thread_name);
+ free((void *) l_lcks->thread_name);
+ l_lcks->thread_name = strdup(thread_name ? thread_name : "unknown");
+ if (!l_lcks->thread_name)
+ abort();
+ }
+}
+
+int
+erts_lc_assert_failed(char *file, int line, char *assertion)
+{
+ erts_fprintf(stderr, "%s:%d: Lock check assertion \"%s\" failed!\n",
+ file, line, assertion);
+ print_curr_locks(get_my_locked_locks());
+ abort();
+ return 0;
+}
+
+void erts_lc_fail(char *fmt, ...)
+{
+ va_list args;
+ erts_fprintf(stderr, "Lock check failed: ");
+ va_start(args, fmt);
+ erts_vfprintf(stderr, fmt, args);
+ va_end(args);
+ erts_fprintf(stderr, "\n");
+ print_curr_locks(get_my_locked_locks());
+ abort();
+}
+
+
+Sint16
+erts_lc_get_lock_order_id(char *name)
+{
+ int i;
+
+ if (!name || name[0] == '\0')
+ erts_fprintf(stderr, "Missing lock name\n");
+ else {
+ for (i = 0; i < ERTS_LOCK_ORDER_SIZE; i++)
+ if (strcmp(erts_lock_order[i].name, name) == 0)
+ return i;
+ erts_fprintf(stderr,
+ "Lock name '%s' missing in lock order "
+ "(update erl_lock_check.c)\n",
+ name);
+ }
+ abort();
+ return (Sint16) -1;
+}
+
+
+static int
+find_lock(erts_lc_locked_lock_t **l_lcks, erts_lc_lock_t *lck)
+{
+ erts_lc_locked_lock_t *l_lck = *l_lcks;
+
+ if (l_lck) {
+ if (l_lck->id == lck->id && l_lck->extra == lck->extra) {
+ if ((l_lck->flags & lck->flags) == lck->flags)
+ return 1;
+ return 0;
+ }
+ else if (l_lck->id < lck->id
+ || (l_lck->id == lck->id
+ && l_lck->extra < lck->extra)) {
+ for (l_lck = l_lck->next; l_lck; l_lck = l_lck->next) {
+ if (l_lck->id > lck->id
+ || (l_lck->id == lck->id
+ && l_lck->extra >= lck->extra)) {
+ *l_lcks = l_lck;
+ if (l_lck->id == lck->id
+ && l_lck->extra == lck->extra
+ && ((l_lck->flags & lck->flags) == lck->flags))
+ return 1;
+ return 0;
+ }
+ }
+ }
+ else {
+ for (l_lck = l_lck->prev; l_lck; l_lck = l_lck->prev) {
+ if (l_lck->id < lck->id
+ || (l_lck->id == lck->id
+ && l_lck->extra <= lck->extra)) {
+ *l_lcks = l_lck;
+ if (l_lck->id == lck->id
+ && l_lck->extra == lck->extra
+ && ((l_lck->flags & lck->flags) == lck->flags))
+ return 1;
+ return 0;
+ }
+ }
+ }
+ }
+ return 0;
+}
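+
+/*
+ * Search sketch: the per-thread locked-lock list is kept sorted on
+ * (id, extra), and find_lock()/find_id() move a caller-held cursor in
+ * either direction from its last position, so checking an array of locks
+ * that is itself sorted costs linear time overall.
+ */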
+
+static int
+find_id(erts_lc_locked_lock_t **l_lcks, Sint16 id)
+{
+ erts_lc_locked_lock_t *l_lck = *l_lcks;
+
+ if (l_lck) {
+ if (l_lck->id == id)
+ return 1;
+ else if (l_lck->id < id) {
+ for (l_lck = l_lck->next; l_lck; l_lck = l_lck->next) {
+ if (l_lck->id >= id) {
+ *l_lcks = l_lck;
+ if (l_lck->id == id)
+ return 1;
+ return 0;
+ }
+ }
+ }
+ else {
+ for (l_lck = l_lck->prev; l_lck; l_lck = l_lck->prev) {
+ if (l_lck->id <= id) {
+ *l_lcks = l_lck;
+ if (l_lck->id == id)
+ return 1;
+ return 0;
+ }
+ }
+ }
+ }
+ return 0;
+}
+
+void
+erts_lc_have_locks(int *resv, erts_lc_lock_t *locks, int len)
+{
+ erts_lc_locked_locks_t *l_lcks = get_my_locked_locks();
+ int i;
+
+ if (!l_lcks) {
+ for (i = 0; i < len; i++)
+ resv[i] = 0;
+ }
+ else {
+ erts_lc_locked_lock_t *l_lck = l_lcks->locked.first;
+ for (i = 0; i < len; i++)
+ resv[i] = find_lock(&l_lck, &locks[i]);
+ }
+}
+
+void
+erts_lc_have_lock_ids(int *resv, int *ids, int len)
+{
+ erts_lc_locked_locks_t *l_lcks = get_my_locked_locks();
+ int i;
+
+ if (!l_lcks) {
+ for (i = 0; i < len; i++)
+ resv[i] = 0;
+ }
+ else {
+ erts_lc_locked_lock_t *l_lck = l_lcks->locked.first;
+ for (i = 0; i < len; i++)
+ resv[i] = find_id(&l_lck, ids[i]);
+ }
+}
+
+void
+erts_lc_check(erts_lc_lock_t *have, int have_len,
+ erts_lc_lock_t *have_not, int have_not_len)
+{
+ int i;
+ erts_lc_locked_locks_t *l_lcks = get_my_locked_locks();
+ erts_lc_locked_lock_t *l_lck;
+
+ if (have && have_len > 0) {
+ if (!l_lcks)
+ lock_mismatch(NULL, 0,
+ -1, have, have_len,
+ -1, have_not, have_not_len);
+ l_lck = l_lcks->locked.first;
+ for (i = 0; i < have_len; i++) {
+ if (!find_lock(&l_lck, &have[i]))
+ lock_mismatch(l_lcks, 0,
+ i, have, have_len,
+ -1, have_not, have_not_len);
+ }
+ }
+ if (have_not && have_not_len > 0 && l_lcks) {
+ l_lck = l_lcks->locked.first;
+ for (i = 0; i < have_not_len; i++) {
+ if (find_lock(&l_lck, &have_not[i]))
+ lock_mismatch(l_lcks, 0,
+ -1, have, have_len,
+ i, have_not, have_not_len);
+ }
+ }
+}
+
+void
+erts_lc_check_exact(erts_lc_lock_t *have, int have_len)
+{
+ erts_lc_locked_locks_t *l_lcks = get_my_locked_locks();
+ if (!l_lcks) {
+ if (have && have_len > 0)
+ lock_mismatch(NULL, 1,
+ -1, have, have_len,
+ -1, NULL, 0);
+ }
+ else {
+ int i;
+ erts_lc_locked_lock_t *l_lck = l_lcks->locked.first;
+ for (i = 0; i < have_len; i++) {
+ if (!find_lock(&l_lck, &have[i]))
+ lock_mismatch(l_lcks, 1,
+ i, have, have_len,
+ -1, NULL, 0);
+ }
+ for (i = 0, l_lck = l_lcks->locked.first; l_lck; l_lck = l_lck->next)
+ i++;
+ if (i != have_len)
+ lock_mismatch(l_lcks, 1,
+ -1, have, have_len,
+ -1, NULL, 0);
+ }
+}
+
+int
+erts_lc_trylock_force_busy_flg(erts_lc_lock_t *lck, Uint16 op_flags)
+{
+#ifdef ERTS_LC_DO_NOT_FORCE_BUSY_TRYLOCK_ON_LOCK_ORDER_VIOLATION
+ return 0;
+#else
+ /*
+ * Force the trylock to report busy when taking the lock would
+ * violate the lock order. This is done to make sure that the
+ * caller can handle the situation without actually causing a
+ * lock order violation.
+ */
+ erts_lc_locked_locks_t *l_lcks;
+
+ if (lck->inited != ERTS_LC_INITITALIZED)
+ uninitialized_lock();
+
+ if (lck->id < 0)
+ return 0;
+
+ l_lcks = get_my_locked_locks();
+
+ if (!l_lcks || !l_lcks->locked.first) {
+ ASSERT(!l_lcks || !l_lcks->locked.last);
+ return 0;
+ }
+ else {
+ erts_lc_locked_lock_t *tl_lck;
+
+ ASSERT(l_lcks->locked.last);
+
+#if 0 /* Ok when trylocking I guess... */
+ if (LOCK_IS_TYPE_ORDER_VIOLATION(lck->flags, l_lcks->locked.last->flags))
+ type_order_violation("trylocking ", l_lcks, lck);
+#endif
+
+ if (l_lcks->locked.last->id < lck->id
+ || (l_lcks->locked.last->id == lck->id
+ && l_lcks->locked.last->extra < lck->extra))
+ return 0;
+
+ /*
+ * Lock order violation
+ */
+
+
+ /* Check that we are not trying to lock this lock twice */
+ for (tl_lck = l_lcks->locked.last; tl_lck; tl_lck = tl_lck->prev) {
+ if (tl_lck->id < lck->id
+ || (tl_lck->id == lck->id && tl_lck->extra <= lck->extra)) {
+ if (tl_lck->id == lck->id && tl_lck->extra == lck->extra)
+ lock_twice("Trylocking", l_lcks, lck, op_flags);
+ break;
+ }
+ }
+
+#ifndef ERTS_LC_ALLWAYS_FORCE_BUSY_TRYLOCK_ON_LOCK_ORDER_VIOLATION
+ /* We only force busy if a lock order violation would occur,
+ and then only on even milliseconds. */
+ {
+ erts_thr_timeval_t time;
+ erts_thr_time_now(&time);
+
+ if ((time.tv_nsec / 1000000) & 1)
+ return 0;
+ }
+#endif
+
+ return 1;
+ }
+#endif
+}
+
+void erts_lc_trylock_flg(int locked, erts_lc_lock_t *lck, Uint16 op_flags)
+{
+ erts_lc_locked_locks_t *l_lcks;
+ erts_lc_locked_lock_t *l_lck;
+
+ if (lck->inited != ERTS_LC_INITITALIZED)
+ uninitialized_lock();
+
+ if (lck->id < 0)
+ return;
+
+ l_lcks = make_my_locked_locks();
+ l_lck = locked ? new_locked_lock(lck, op_flags) : NULL;
+
+ if (!l_lcks->locked.last) {
+ ASSERT(!l_lcks->locked.first);
+ if (locked)
+ l_lcks->locked.first = l_lcks->locked.last = l_lck;
+ }
+ else {
+ erts_lc_locked_lock_t *tl_lck;
+#if 0 /* Ok when trylocking I guess... */
+ if (LOCK_IS_TYPE_ORDER_VIOLATION(lck->flags, l_lcks->locked.last->flags))
+ type_order_violation("trylocking ", l_lcks, lck);
+#endif
+
+ for (tl_lck = l_lcks->locked.last; tl_lck; tl_lck = tl_lck->prev) {
+ if (tl_lck->id < lck->id
+ || (tl_lck->id == lck->id && tl_lck->extra <= lck->extra)) {
+ if (tl_lck->id == lck->id && tl_lck->extra == lck->extra)
+ lock_twice("Trylocking", l_lcks, lck, op_flags);
+ if (locked) {
+ l_lck->next = tl_lck->next;
+ l_lck->prev = tl_lck;
+ if (tl_lck->next)
+ tl_lck->next->prev = l_lck;
+ else
+ l_lcks->locked.last = l_lck;
+ tl_lck->next = l_lck;
+ }
+ return;
+ }
+ }
+
+ if (locked) {
+ l_lck->next = l_lcks->locked.first;
+ l_lcks->locked.first->prev = l_lck;
+ l_lcks->locked.first = l_lck;
+ }
+ }
+
+}
+
+void erts_lc_require_lock_flg(erts_lc_lock_t *lck, Uint16 op_flags)
+{
+ erts_lc_locked_locks_t *l_lcks = make_my_locked_locks();
+ erts_lc_locked_lock_t *l_lck = l_lcks->locked.first;
+ if (!find_lock(&l_lck, lck))
+ required_not_locked(l_lcks, lck);
+ l_lck = new_locked_lock(lck, op_flags);
+ if (!l_lcks->required.last) {
+ ASSERT(!l_lcks->required.first);
+ l_lck->next = l_lck->prev = NULL;
+ l_lcks->required.first = l_lcks->required.last = l_lck;
+ }
+ else {
+ erts_lc_locked_lock_t *l_lck2;
+ ASSERT(l_lcks->required.first);
+ for (l_lck2 = l_lcks->required.last;
+ l_lck2;
+ l_lck2 = l_lck2->prev) {
+ if (l_lck2->id < lck->id
+ || (l_lck2->id == lck->id && l_lck2->extra < lck->extra))
+ break;
+ else if (l_lck2->id == lck->id && l_lck2->extra == lck->extra)
+ require_twice(l_lcks, lck);
+ }
+ if (!l_lck2) {
+ l_lck->next = l_lcks->required.first;
+ l_lck->prev = NULL;
+ l_lcks->required.first->prev = l_lck;
+ l_lcks->required.first = l_lck;
+ }
+ else {
+ l_lck->next = l_lck2->next;
+ if (l_lck->next) {
+ ASSERT(l_lcks->required.last != l_lck2);
+ l_lck->next->prev = l_lck;
+ }
+ else {
+ ASSERT(l_lcks->required.last == l_lck2);
+ l_lcks->required.last = l_lck;
+ }
+ l_lck->prev = l_lck2;
+ l_lck2->next = l_lck;
+ }
+ }
+}
+
+void erts_lc_unrequire_lock_flg(erts_lc_lock_t *lck, Uint16 op_flags)
+{
+ erts_lc_locked_locks_t *l_lcks = make_my_locked_locks();
+ erts_lc_locked_lock_t *l_lck = l_lcks->locked.first;
+ if (!find_lock(&l_lck, lck))
+ required_not_locked(l_lcks, lck);
+ l_lck = l_lcks->required.first;
+ if (!find_lock(&l_lck, lck))
+ unrequire_of_not_required_lock(l_lcks, lck);
+ if (l_lck->prev) {
+ ASSERT(l_lcks->required.first != l_lck);
+ l_lck->prev->next = l_lck->next;
+ }
+ else {
+ ASSERT(l_lcks->required.first == l_lck);
+ l_lcks->required.first = l_lck->next;
+ }
+ if (l_lck->next) {
+ ASSERT(l_lcks->required.last != l_lck);
+ l_lck->next->prev = l_lck->prev;
+ }
+ else {
+ ASSERT(l_lcks->required.last == l_lck);
+ l_lcks->required.last = l_lck->prev;
+ }
+ lc_free((void *) l_lck);
+}
+
+void erts_lc_lock_flg(erts_lc_lock_t *lck, Uint16 op_flags)
+{
+ erts_lc_locked_locks_t *l_lcks;
+ erts_lc_locked_lock_t *l_lck;
+
+ if (lck->inited != ERTS_LC_INITITALIZED)
+ uninitialized_lock();
+
+ if (lck->id < 0)
+ return;
+
+ l_lcks = make_my_locked_locks();
+ l_lck = new_locked_lock(lck, op_flags);
+
+ if (!l_lcks->locked.last) {
+ ASSERT(!l_lcks->locked.first);
+ l_lcks->locked.last = l_lcks->locked.first = l_lck;
+ }
+ else if (l_lcks->locked.last->id < lck->id
+ || (l_lcks->locked.last->id == lck->id
+ && l_lcks->locked.last->extra < lck->extra)) {
+ if (LOCK_IS_TYPE_ORDER_VIOLATION(lck->flags, l_lcks->locked.last->flags))
+ type_order_violation("locking ", l_lcks, lck);
+ l_lck->prev = l_lcks->locked.last;
+ l_lcks->locked.last->next = l_lck;
+ l_lcks->locked.last = l_lck;
+ }
+ else if (l_lcks->locked.last->id == lck->id && l_lcks->locked.last->extra == lck->extra)
+ lock_twice("Locking", l_lcks, lck, op_flags);
+ else
+ lock_order_violation(l_lcks, lck);
+}
+
+void erts_lc_unlock_flg(erts_lc_lock_t *lck, Uint16 op_flags)
+{
+ erts_lc_locked_locks_t *l_lcks;
+ erts_lc_locked_lock_t *l_lck;
+
+ if (lck->inited != ERTS_LC_INITITALIZED)
+ uninitialized_lock();
+
+ if (lck->id < 0)
+ return;
+
+ l_lcks = get_my_locked_locks();
+
+ if (l_lcks) {
+ l_lck = l_lcks->required.first;
+ if (find_lock(&l_lck, lck))
+ unlock_of_required_lock(l_lcks, lck);
+ }
+
+ for (l_lck = l_lcks ? l_lcks->locked.last : NULL; l_lck; l_lck = l_lck->prev) {
+ if (l_lck->id == lck->id && l_lck->extra == lck->extra) {
+ if ((l_lck->flags & ERTS_LC_FLG_LO_ALL) != op_flags)
+ unlock_op_mismatch(l_lcks, lck, op_flags);
+ if (l_lck->prev)
+ l_lck->prev->next = l_lck->next;
+ else
+ l_lcks->locked.first = l_lck->next;
+ if (l_lck->next)
+ l_lck->next->prev = l_lck->prev;
+ else
+ l_lcks->locked.last = l_lck->prev;
+ lc_free((void *) l_lck);
+ return;
+ }
+ }
+
+ unlock_of_not_locked(l_lcks, lck);
+}
+
+void erts_lc_might_unlock_flg(erts_lc_lock_t *lck, Uint16 op_flags)
+{
+ erts_lc_locked_locks_t *l_lcks;
+ erts_lc_locked_lock_t *l_lck;
+
+ if (lck->inited != ERTS_LC_INITITALIZED)
+ uninitialized_lock();
+
+ if (lck->id < 0)
+ return;
+
+ l_lcks = get_my_locked_locks();
+
+ if (l_lcks) {
+ l_lck = l_lcks->required.first;
+ if (find_lock(&l_lck, lck))
+ unlock_of_required_lock(l_lcks, lck);
+ }
+
+ l_lck = l_lcks ? l_lcks->locked.first : NULL;
+ if (!find_lock(&l_lck, lck))
+ unlock_of_not_locked(l_lcks, lck);
+}
+
+int
+erts_lc_trylock_force_busy(erts_lc_lock_t *lck)
+{
+ return erts_lc_trylock_force_busy_flg(lck, 0);
+}
+
+void
+erts_lc_trylock(int locked, erts_lc_lock_t *lck)
+{
+ erts_lc_trylock_flg(locked, lck, 0);
+}
+
+void
+erts_lc_lock(erts_lc_lock_t *lck)
+{
+ erts_lc_lock_flg(lck, 0);
+}
+
+void
+erts_lc_unlock(erts_lc_lock_t *lck)
+{
+ erts_lc_unlock_flg(lck, 0);
+}
+
+void erts_lc_might_unlock(erts_lc_lock_t *lck)
+{
+ erts_lc_might_unlock_flg(lck, 0);
+}
+
+void erts_lc_require_lock(erts_lc_lock_t *lck)
+{
+ erts_lc_require_lock_flg(lck, 0);
+}
+
+void erts_lc_unrequire_lock(erts_lc_lock_t *lck)
+{
+ erts_lc_unrequire_lock_flg(lck, 0);
+}
+
+void
+erts_lc_init_lock(erts_lc_lock_t *lck, char *name, Uint16 flags)
+{
+ lck->id = erts_lc_get_lock_order_id(name);
+ lck->extra = make_boxed(&lck->extra);
+ lck->flags = flags;
+ lck->inited = ERTS_LC_INITITALIZED;
+}
+
+void
+erts_lc_init_lock_x(erts_lc_lock_t *lck, char *name, Uint16 flags, Eterm extra)
+{
+ lck->id = erts_lc_get_lock_order_id(name);
+ lck->extra = extra;
+ lck->flags = flags;
+ lck->inited = ERTS_LC_INITITALIZED;
+}
+
+void
+erts_lc_destroy_lock(erts_lc_lock_t *lck)
+{
+ if (lck->inited != ERTS_LC_INITITALIZED)
+ uninitialized_lock();
+
+ lck->inited = 0;
+ lck->id = -1;
+ lck->extra = THE_NON_VALUE;
+ lck->flags = 0;
+}
+
+void
+erts_lc_init(void)
+{
+#ifdef ERTS_LC_STATIC_ALLOC
+ int i;
+ static erts_lc_free_block_t fbs[ERTS_LC_FB_CHUNK_SIZE];
+ for (i = 0; i < ERTS_LC_FB_CHUNK_SIZE - 1; i++) {
+#ifdef DEBUG
+ memset((void *) &fbs[i], 0xdf, sizeof(erts_lc_free_block_t));
+#endif
+ fbs[i].next = &fbs[i+1];
+ }
+#ifdef DEBUG
+ memset((void *) &fbs[ERTS_LC_FB_CHUNK_SIZE-1],
+ 0xdf, sizeof(erts_lc_free_block_t));
+#endif
+ fbs[ERTS_LC_FB_CHUNK_SIZE-1].next = NULL;
+ free_blocks = &fbs[0];
+#else /* #ifdef ERTS_LC_STATIC_ALLOC */
+ free_blocks = NULL;
+#endif /* #ifdef ERTS_LC_STATIC_ALLOC */
+
+#ifdef ETHR_HAVE_NATIVE_LOCKS
+ if (ethr_spinlock_init(&free_blocks_lock) != 0)
+ abort();
+#else
+ if (ethr_mutex_init(&free_blocks_lock) != 0)
+ abort();
+#endif
+
+ erts_tsd_key_create(&locks_key);
+}
+
+void
+erts_lc_late_init(void)
+{
+ erts_thr_install_exit_handler(thread_exit_handler);
+}
+
+
+/*
+ * erts_lc_pll(): print locked locks...
+ */
+void
+erts_lc_pll(void)
+{
+ print_curr_locks(get_my_locked_locks());
+}
+
+
+#endif /* #ifdef ERTS_ENABLE_LOCK_CHECK */
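
The checker enforces a single invariant throughout erl_lock_check.c: within a thread, locks must be acquired in strictly increasing (id, extra) order, where id is the lock class's position in the global lock order and extra distinguishes instances of the same class. A minimal standalone sketch of that comparison; the type and function names here are illustrative, not part of the ERTS API:

    #include <stdio.h>

    /* Illustrative stand-in for the checker's (id, extra) key. */
    typedef struct { short id; long extra; } lc_key;

    /* A new lock may be taken only if it sorts strictly after the lock
     * most recently acquired by this thread. */
    static int lc_order_ok(const lc_key *last, const lc_key *new_lck)
    {
        if (!last)
            return 1;                        /* no locks held yet */
        if (last->id != new_lck->id)
            return last->id < new_lck->id;   /* primary key: order id */
        return last->extra < new_lck->extra; /* tie-break on instance */
    }

    int main(void)
    {
        lc_key held = { 3, 100 };
        lc_key next = { 3,  50 };

        printf("order ok: %d\n", lc_order_ok(&held, &next)); /* prints 0 */
        return 0;
    }
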
diff --git a/erts/emulator/beam/erl_lock_check.h b/erts/emulator/beam/erl_lock_check.h
new file mode 100644
index 0000000000..d5e2ede9ac
--- /dev/null
+++ b/erts/emulator/beam/erl_lock_check.h
@@ -0,0 +1,117 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2005-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Description: A lock checker that checks that each thread acquires
+ * locks according to a predefined global lock order. The
+ * global lock order is used to prevent deadlocks. If the
+ * lock order is violated, an error message is printed
+ * and the emulator aborts. The lock checker is only
+ * intended to be enabled when debugging.
+ *
+ * Author: Rickard Green
+ */
+
+#include "sys.h"
+
+#ifndef ERTS_LOCK_CHECK_H__
+#define ERTS_LOCK_CHECK_H__
+
+#ifdef ERTS_ENABLE_LOCK_CHECK
+
+typedef struct {
+ int inited;
+ Sint16 id;
+ Uint16 flags;
+ Eterm extra;
+} erts_lc_lock_t;
+
+#define ERTS_LC_INITITALIZED 0x7f7f7f7f
+
+
+#define ERTS_LC_FLG_LT_SPINLOCK (((Uint16) 1) << 0)
+#define ERTS_LC_FLG_LT_RWSPINLOCK (((Uint16) 1) << 1)
+#define ERTS_LC_FLG_LT_MUTEX (((Uint16) 1) << 2)
+#define ERTS_LC_FLG_LT_RWMUTEX (((Uint16) 1) << 3)
+#define ERTS_LC_FLG_LT_PROCLOCK (((Uint16) 1) << 4)
+
+#define ERTS_LC_FLG_LO_READ (((Uint16) 1) << 5)
+#define ERTS_LC_FLG_LO_WRITE (((Uint16) 1) << 6)
+
+#define ERTS_LC_FLG_LO_READ_WRITE (ERTS_LC_FLG_LO_READ \
+ | ERTS_LC_FLG_LO_WRITE)
+
+#define ERTS_LC_FLG_LT_ALL (ERTS_LC_FLG_LT_SPINLOCK \
+ | ERTS_LC_FLG_LT_RWSPINLOCK \
+ | ERTS_LC_FLG_LT_MUTEX \
+ | ERTS_LC_FLG_LT_RWMUTEX \
+ | ERTS_LC_FLG_LT_PROCLOCK)
+
+#define ERTS_LC_FLG_LO_ALL (ERTS_LC_FLG_LO_READ \
+ | ERTS_LC_FLG_LO_WRITE)
+
+
+#define ERTS_LC_LOCK_INIT(ID, X, F) {ERTS_LC_INITITALIZED, (ID), (F), (X)}
+
+void erts_lc_init(void);
+void erts_lc_late_init(void);
+Sint16 erts_lc_get_lock_order_id(char *name);
+void erts_lc_check(erts_lc_lock_t *have, int have_len,
+ erts_lc_lock_t *have_not, int have_not_len);
+void erts_lc_check_exact(erts_lc_lock_t *have, int have_len);
+void erts_lc_have_locks(int *resv, erts_lc_lock_t *lcks, int len);
+void erts_lc_have_lock_ids(int *resv, int *ids, int len);
+int erts_lc_trylock_force_busy_flg(erts_lc_lock_t *lck, Uint16 op_flags);
+void erts_lc_trylock_flg(int locked, erts_lc_lock_t *lck, Uint16 op_flags);
+void erts_lc_lock_flg(erts_lc_lock_t *lck, Uint16 op_flags);
+void erts_lc_unlock_flg(erts_lc_lock_t *lck, Uint16 op_flags);
+void erts_lc_might_unlock_flg(erts_lc_lock_t *lck, Uint16 op_flags);
+int erts_lc_trylock_force_busy(erts_lc_lock_t *lck);
+void erts_lc_trylock(int locked, erts_lc_lock_t *lck);
+void erts_lc_lock(erts_lc_lock_t *lck);
+void erts_lc_unlock(erts_lc_lock_t *lck);
+void erts_lc_might_unlock(erts_lc_lock_t *lck);
+void erts_lc_init_lock(erts_lc_lock_t *lck, char *name, Uint16 flags);
+void erts_lc_init_lock_x(erts_lc_lock_t *lck, char *name, Uint16 flags, Eterm extra);
+void erts_lc_destroy_lock(erts_lc_lock_t *lck);
+void erts_lc_fail(char *fmt, ...);
+int erts_lc_assert_failed(char *file, int line, char *assertion);
+void erts_lc_set_thread_name(char *thread_name);
+void erts_lc_pll(void);
+
+void erts_lc_require_lock_flg(erts_lc_lock_t *lck, Uint16 op_flags);
+void erts_lc_unrequire_lock_flg(erts_lc_lock_t *lck, Uint16 op_flags);
+
+void erts_lc_require_lock(erts_lc_lock_t *lck);
+void erts_lc_unrequire_lock(erts_lc_lock_t *lck);
+
+
+#define ERTS_LC_ASSERT(A) \
+ ((void) ((A) ? 1 : erts_lc_assert_failed(__FILE__, __LINE__, #A)))
+#ifdef ERTS_SMP
+#define ERTS_SMP_LC_ASSERT(A) ERTS_LC_ASSERT(A)
+#else
+#define ERTS_SMP_LC_ASSERT(A) ((void) 1)
+#endif
+#else /* #ifdef ERTS_ENABLE_LOCK_CHECK */
+#define ERTS_SMP_LC_ASSERT(A) ((void) 1)
+#define ERTS_LC_ASSERT(A) ((void) 1)
+#endif /* #ifdef ERTS_ENABLE_LOCK_CHECK */
+
+#endif /* #ifndef ERTS_LOCK_CHECK_H__ */
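
A hedged sketch of how a subsystem could drive the API declared above. The lock name "my_tab" and the wrapper functions are hypothetical; real callers normally reach the checker through the locking wrappers in erl_threads.h rather than calling it directly. Note that any name passed to erts_lc_init_lock() must also appear in erts_lock_order[] in erl_lock_check.c, or erts_lc_get_lock_order_id() aborts the emulator:

    #include "erl_lock_check.h"

    #ifdef ERTS_ENABLE_LOCK_CHECK
    static erts_lc_lock_t my_tab_lc;  /* hypothetical subsystem lock */

    static void my_tab_lc_init(void)
    {
        erts_lc_init_lock(&my_tab_lc, "my_tab", ERTS_LC_FLG_LT_MUTEX);
    }

    static void my_tab_lc_lock(void)
    {
        erts_lc_lock(&my_tab_lc);   /* verifies lock order, records the lock */
        /* ... acquire the underlying mutex here ... */
    }

    static void my_tab_lc_unlock(void)
    {
        /* ... release the underlying mutex here ... */
        erts_lc_unlock(&my_tab_lc); /* removes the lock from this thread's list */
    }
    #endif
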
diff --git a/erts/emulator/beam/erl_lock_count.c b/erts/emulator/beam/erl_lock_count.c
new file mode 100644
index 0000000000..6211983f4b
--- /dev/null
+++ b/erts/emulator/beam/erl_lock_count.c
@@ -0,0 +1,675 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2008-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Description: Statistics for locks.
+ *
+ * Author: Björn-Egil Dahlberg
+ * Date: 2008-07-03
+ */
+
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+
+/* Needed for VxWorks va_arg */
+#include "sys.h"
+
+#ifdef ERTS_ENABLE_LOCK_COUNT
+
+#include "erl_lock_count.h"
+#include "ethread.h"
+#include "erl_term.h"
+#include "atom.h"
+#include <stdio.h>
+
+/* globals, don't access these without locks or blocks */
+
+ethr_mutex lcnt_data_lock;
+erts_lcnt_data_t *erts_lcnt_data;
+Uint16 erts_lcnt_rt_options;
+erts_lcnt_time_t timer_start;
+const char *str_undefined = "undefined";
+
+static ethr_tsd_key lcnt_thr_data_key;
+static int lcnt_n_thr;
+static erts_lcnt_thread_data_t *lcnt_thread_data[1024];
+
+/* local functions */
+
+static ERTS_INLINE void lcnt_lock(void) {
+ ethr_mutex_lock(&lcnt_data_lock);
+}
+
+static ERTS_INLINE void lcnt_unlock(void) {
+ ethr_mutex_unlock(&lcnt_data_lock);
+}
+
+
+static char* lcnt_lock_type(Uint16 flag) {
+ switch(flag & ERTS_LCNT_LT_ALL) {
+ case ERTS_LCNT_LT_SPINLOCK: return "spinlock";
+ case ERTS_LCNT_LT_RWSPINLOCK: return "rw_spinlock";
+ case ERTS_LCNT_LT_MUTEX: return "mutex";
+ case ERTS_LCNT_LT_RWMUTEX: return "rw_mutex";
+ case ERTS_LCNT_LT_PROCLOCK: return "proclock";
+ default: return "";
+ }
+}
+
+static void lcnt_clear_stats(erts_lcnt_lock_stats_t *stats) {
+ ethr_atomic_set(&stats->tries, 0);
+ ethr_atomic_set(&stats->colls, 0);
+ stats->timer.s = 0;
+ stats->timer.ns = 0;
+ stats->timer_n = 0;
+ stats->file = (char *)str_undefined;
+ stats->line = 0;
+}
+
+static void lcnt_time(erts_lcnt_time_t *time) {
+#ifdef HAVE_GETHRTIME
+ SysHrTime hr_time;
+ hr_time = sys_gethrtime();
+ time->s = (unsigned long)(hr_time / 1000000000LL);
+ time->ns = (unsigned long)(hr_time - 1000000000LL*time->s);
+#else
+ SysTimeval tv;
+ sys_gettimeofday(&tv);
+ time->s = tv.tv_sec;
+ time->ns = tv.tv_usec*1000LL;
+#endif
+}
+
+static void lcnt_time_diff(erts_lcnt_time_t *d, erts_lcnt_time_t *t1, erts_lcnt_time_t *t0) {
+ long ds;
+ long dns;
+
+ ds = t1->s - t0->s;
+ dns = t1->ns - t0->ns;
+
+ /* the nanosecond difference can never exceed one second in magnitude */
+
+ if (dns < 0) {
+ ds -= 1;
+ dns += 1000000000LL;
+ }
+
+ d->s = ds;
+ d->ns = dns;
+}
+
+/* difference d must be positive */
+
+static void lcnt_time_add(erts_lcnt_time_t *t, erts_lcnt_time_t *d) {
+ unsigned long ngns = 0;
+
+ t->s += d->s;
+ t->ns += d->ns;
+
+ ngns = t->ns / 1000000000LL;
+ t->ns = t->ns % 1000000000LL;
+
+ t->s += ngns;
+}
+
+static erts_lcnt_thread_data_t *lcnt_thread_data_alloc(void) {
+ erts_lcnt_thread_data_t *eltd;
+
+ eltd = (erts_lcnt_thread_data_t*)malloc(sizeof(erts_lcnt_thread_data_t));
+ eltd->timer_set = 0;
+ eltd->lock_in_conflict = 0;
+
+ eltd->id = lcnt_n_thr++;
+ /* store the thread data in the global array */
+ lcnt_thread_data[eltd->id] = eltd;
+
+ return eltd;
+}
+
+static erts_lcnt_thread_data_t *lcnt_get_thread_data(void) {
+ return (erts_lcnt_thread_data_t *)ethr_tsd_get(lcnt_thr_data_key);
+}
+
+
+/* debug */
+
+#if 0
+static char* lock_opt(Uint16 flag) {
+ if ((flag & ERTS_LCNT_LO_WRITE) && (flag & ERTS_LCNT_LO_READ)) return "rw";
+ if (flag & ERTS_LCNT_LO_READ ) return "r ";
+ if (flag & ERTS_LCNT_LO_WRITE) return " w";
+ return "--";
+}
+
+static void print_lock_x(erts_lcnt_lock_t *lock, Uint16 flag, char *action, char *extra) {
+ long int colls = 0, tries = 0, w_state, r_state; /* colls/tries are read before the per-stats loop */
+ erts_lcnt_lock_stats_t *stats = NULL;
+
+ float rate;
+ char *type;
+ int i;
+
+ type = lcnt_lock_type(lock->flag);
+ ethr_atomic_read(&lock->r_state, &r_state);
+ ethr_atomic_read(&lock->w_state, &w_state);
+
+ if (tries > 0) rate = (float)(colls/(float)tries)*100;
+ else rate = 0.0f;
+
+ if (lock->flag & flag) {
+ erts_printf("%20s [%30s] [r/w state %4ld/%4ld] id %T %s\r\n",
+ action,
+ lock->name,
+ r_state,
+ w_state,
+ lock->id,
+ extra);
+
+ for(i = 0; i < lock->n_stats; i++) {
+ stats = &(lock->stats[i]);
+ ethr_atomic_read(&stats->tries, &tries);
+ ethr_atomic_read(&stats->colls, &colls);
+ fprintf(stderr, "%69s:%5d [tries %9ld] [colls %9ld] [timer_n %8ld] [timer %4ld s %6ld us]\r\n",
+ stats->file,
+ stats->line,
+ tries,
+ colls,
+ stats->timer_n,
+ stats->timer.s,
+ (unsigned long)stats->timer.ns/1000);
+ }
+ fprintf(stderr, "\r\n");
+ }
+}
+
+static void print_lock(erts_lcnt_lock_t *lock, char *action) {
+ print_lock_x(lock, ERTS_LCNT_LT_ALL, action, "");
+}
+
+#endif
+
+static erts_lcnt_lock_stats_t *lcnt_get_lock_stats(erts_lcnt_lock_t *lock, char *file, unsigned int line) {
+ unsigned int i;
+ erts_lcnt_lock_stats_t *stats = NULL;
+
+ for (i = 0; i < lock->n_stats; i++) {
+ if ((lock->stats[i].file == file) && (lock->stats[i].line == line)) {
+ return &(lock->stats[i]);
+ }
+ }
+ if (lock->n_stats < ERTS_LCNT_MAX_LOCK_LOCATIONS) {
+ stats = &lock->stats[lock->n_stats];
+ lock->n_stats++;
+
+ stats->file = file;
+ stats->line = line;
+ return stats;
+ }
+ return &lock->stats[0];
+
+}
+
+static void lcnt_update_stats(erts_lcnt_lock_stats_t *stats, int lock_in_conflict, erts_lcnt_time_t *time_wait) {
+
+ ethr_atomic_inc(&stats->tries);
+
+ /* beware of trylock */
+ if (lock_in_conflict) ethr_atomic_inc(&stats->colls);
+
+ if (time_wait) {
+ lcnt_time_add(&(stats->timer), time_wait);
+ stats->timer_n++;
+ }
+}
+
+/*
+ * interface
+ */
+
+void erts_lcnt_init(void) {
+ erts_lcnt_thread_data_t *eltd = NULL;
+
+ /* init lock */
+ if (ethr_mutex_init(&lcnt_data_lock) != 0) abort();
+
+ /* init tsd */
+ lcnt_n_thr = 0;
+
+ ethr_tsd_key_create(&lcnt_thr_data_key);
+
+ lcnt_lock();
+
+ erts_lcnt_rt_options = ERTS_LCNT_OPT_PROCLOCK;
+
+ eltd = lcnt_thread_data_alloc();
+
+ ethr_tsd_set(lcnt_thr_data_key, eltd);
+
+ /* init lcnt structure */
+ erts_lcnt_data = (erts_lcnt_data_t*)malloc(sizeof(erts_lcnt_data_t));
+ erts_lcnt_data->current_locks = erts_lcnt_list_init();
+ erts_lcnt_data->deleted_locks = erts_lcnt_list_init();
+
+ lcnt_unlock();
+
+ /* set start timer and zero statistics */
+ erts_lcnt_clear_counters();
+}
+
+/* list operations */
+
+/* BEGIN ASSUMPTION: lcnt_data_lock taken */
+
+erts_lcnt_lock_list_t *erts_lcnt_list_init(void) {
+ erts_lcnt_lock_list_t *list;
+
+ list = (erts_lcnt_lock_list_t*)malloc(sizeof(erts_lcnt_lock_list_t));
+ list->head = NULL;
+ list->tail = NULL;
+ list->n = 0;
+ return list;
+}
+
+/* only do this on the list with the deleted locks! */
+void erts_lcnt_list_clear(erts_lcnt_lock_list_t *list) {
+ erts_lcnt_lock_t *lock = NULL,
+ *next = NULL;
+
+ lock = list->head;
+
+ while(lock != NULL) {
+ next = lock->next;
+ free(lock);
+ lock = next;
+ }
+
+ list->head = NULL;
+ list->tail = NULL;
+ list->n = 0;
+}
+
+void erts_lcnt_list_insert(erts_lcnt_lock_list_t *list, erts_lcnt_lock_t *lock) {
+ erts_lcnt_lock_t *tail = NULL;
+
+ tail = list->tail;
+ if (tail) {
+ tail->next = lock;
+ lock->prev = tail;
+ } else {
+ list->head = lock;
+ lock->prev = NULL;
+ ASSERT(!lock->next);
+ }
+ lock->next = NULL;
+ list->tail = lock;
+
+ list->n++;
+}
+
+void erts_lcnt_list_delete(erts_lcnt_lock_list_t *list, erts_lcnt_lock_t *lock) {
+
+ if (lock->next) lock->next->prev = lock->prev;
+ if (lock->prev) lock->prev->next = lock->next;
+ if (list->head == lock) list->head = lock->next;
+ if (list->tail == lock) list->tail = lock->prev;
+
+ lock->prev = NULL;
+ lock->next = NULL;
+ list->n--;
+}
+/* END ASSUMPTION: lcnt_data_lock taken */
+
+
+/* lock operations */
+
+/* interface to erl_threads.h */
+/* only lock on init and destroy; all other operations should use atomics */
+void erts_lcnt_init_lock(erts_lcnt_lock_t *lock, char *name, Uint16 flag ) {
+ erts_lcnt_init_lock_x(lock, name, flag, am_undefined);
+}
+void erts_lcnt_init_lock_x(erts_lcnt_lock_t *lock, char *name, Uint16 flag, Eterm id) {
+ int i;
+ lcnt_lock();
+
+ lock->next = NULL;
+ lock->prev = NULL;
+ lock->flag = flag;
+ lock->name = name;
+ lock->id = id;
+
+ ethr_atomic_init(&lock->r_state, 0);
+ ethr_atomic_init(&lock->w_state, 0);
+
+#ifdef DEBUG
+ ethr_atomic_init(&lock->flowstate, 0);
+#endif
+
+ lock->n_stats = 1;
+
+ for (i = 0; i < ERTS_LCNT_MAX_LOCK_LOCATIONS; i++) {
+ lcnt_clear_stats(&lock->stats[i]);
+ }
+ erts_lcnt_list_insert(erts_lcnt_data->current_locks, lock);
+
+ lcnt_unlock();
+}
+
+void erts_lcnt_destroy_lock(erts_lcnt_lock_t *lock) {
+ erts_lcnt_lock_t *deleted_lock;
+
+ /* copy structure and insert the copy */
+ deleted_lock = (erts_lcnt_lock_t*)malloc(sizeof(erts_lcnt_lock_t));
+
+ lcnt_lock();
+
+ memcpy(deleted_lock, lock, sizeof(erts_lcnt_lock_t));
+ deleted_lock->next = NULL;
+ deleted_lock->prev = NULL;
+
+ erts_lcnt_list_insert(erts_lcnt_data->deleted_locks, deleted_lock);
+
+ /* delete original */
+ erts_lcnt_list_delete(erts_lcnt_data->current_locks, lock);
+
+ lcnt_unlock();
+}
+
+/* lock */
+
+void erts_lcnt_lock_opt(erts_lcnt_lock_t *lock, Uint16 option) {
+ long r_state = 0, w_state = 0;
+ erts_lcnt_thread_data_t *eltd;
+
+ if (erts_lcnt_rt_options & ERTS_LCNT_OPT_SUSPEND) return;
+
+ eltd = lcnt_get_thread_data();
+
+ ASSERT(eltd);
+
+ ethr_atomic_read(&lock->w_state, &w_state);
+
+ if (option & ERTS_LCNT_LO_WRITE) {
+ ethr_atomic_read(&lock->r_state, &r_state);
+ ethr_atomic_inc( &lock->w_state);
+ }
+ if (option & ERTS_LCNT_LO_READ) {
+ ethr_atomic_inc( &lock->r_state);
+ }
+
+ /* we cannot acquire w_lock if either w or r is taken */
+ /* we cannot acquire r_lock if w_lock is taken */
+
+ if ((w_state > 0) || (r_state > 0)){
+ eltd->lock_in_conflict = 1;
+ if (eltd->timer_set == 0) lcnt_time(&eltd->timer);
+ eltd->timer_set++;
+ } else {
+ eltd->lock_in_conflict = 0;
+ }
+}
+
+void erts_lcnt_lock(erts_lcnt_lock_t *lock) {
+ long w_state;
+ erts_lcnt_thread_data_t *eltd;
+
+ if (erts_lcnt_rt_options & ERTS_LCNT_OPT_SUSPEND) return;
+
+ ethr_atomic_read(&lock->w_state, &w_state);
+ ethr_atomic_inc( &lock->w_state);
+
+ eltd = lcnt_get_thread_data();
+
+ ASSERT(eltd);
+
+ if (w_state > 0) {
+ eltd->lock_in_conflict = 1;
+ /* Only set the timer if nobody else has it.
+ * This should only happen when proc_locks acquires several locks
+ * 'atomically'. All other locks will block the thread if w_state > 0,
+ * i.e. locked.
+ */
+ if (eltd->timer_set == 0) lcnt_time(&eltd->timer);
+ eltd->timer_set++;
+
+ } else {
+ eltd->lock_in_conflict = 0;
+ }
+}
+
+/* Used when a lock operation turns out not to have been a real acquisition
+ after all; only the (bad, bad) process locks need this. */
+
+void erts_lcnt_lock_unaquire(erts_lcnt_lock_t *lock) {
+ /* should check if this thread was "waiting" */
+
+ if (erts_lcnt_rt_options & ERTS_LCNT_OPT_SUSPEND) return;
+
+ ethr_atomic_dec( &lock->w_state);
+}
+
+/* erts_lcnt_lock_post
+ * Used when we get a lock (i.e. directly after a lock operation).
+ * If the timer was set, then we had to wait for the lock;
+ * lock_post calculates that wait time.
+ */
+void erts_lcnt_lock_post(erts_lcnt_lock_t *lock) {
+ erts_lcnt_lock_post_x(lock, (char*)str_undefined, 0);
+}
+
+void erts_lcnt_lock_post_x(erts_lcnt_lock_t *lock, char *file, unsigned int line) {
+ erts_lcnt_thread_data_t *eltd;
+ erts_lcnt_time_t timer;
+ erts_lcnt_time_t time_wait;
+ erts_lcnt_lock_stats_t *stats;
+#ifdef DEBUG
+ long flowstate;
+#endif
+
+ if (erts_lcnt_rt_options & ERTS_LCNT_OPT_SUSPEND) return;
+
+#ifdef DEBUG
+ if (!(lock->flag & (ERTS_LCNT_LT_RWMUTEX | ERTS_LCNT_LT_RWSPINLOCK))) {
+ ethr_atomic_read(&lock->flowstate, &flowstate);
+ ASSERT(flowstate == 0);
+ ethr_atomic_inc( &lock->flowstate);
+ }
+#endif
+
+ eltd = lcnt_get_thread_data();
+
+ ASSERT(eltd);
+
+ /* if lock was in conflict, time it */
+
+ stats = lcnt_get_lock_stats(lock, file, line);
+
+ if (eltd->timer_set) {
+ lcnt_time(&timer);
+
+ eltd->timer_set--;
+
+ lcnt_time_diff(&time_wait, &timer, &(eltd->timer));
+ lcnt_update_stats(stats, eltd->lock_in_conflict, &time_wait);
+
+ ASSERT(eltd->timer_set >= 0);
+ } else {
+ lcnt_update_stats(stats, eltd->lock_in_conflict, NULL);
+ }
+
+}
+
+/* unlock */
+
+void erts_lcnt_unlock_opt(erts_lcnt_lock_t *lock, Uint16 option) {
+ if (erts_lcnt_rt_options & ERTS_LCNT_OPT_SUSPEND) return;
+ if (option & ERTS_LCNT_LO_WRITE) ethr_atomic_dec(&lock->w_state);
+ if (option & ERTS_LCNT_LO_READ ) ethr_atomic_dec(&lock->r_state);
+}
+
+void erts_lcnt_unlock(erts_lcnt_lock_t *lock) {
+#ifdef DEBUG
+ long w_state;
+ long flowstate;
+#endif
+ if (erts_lcnt_rt_options & ERTS_LCNT_OPT_SUSPEND) return;
+#ifdef DEBUG
+ /* flowstate */
+ ethr_atomic_read(&lock->flowstate, &flowstate);
+ ASSERT(flowstate == 1);
+ ethr_atomic_dec( &lock->flowstate);
+
+ /* write state */
+ ethr_atomic_read(&lock->w_state, &w_state);
+ ASSERT(w_state > 0);
+#endif
+ ethr_atomic_dec(&lock->w_state);
+}
+
+/* trylock */
+
+void erts_lcnt_trylock_opt(erts_lcnt_lock_t *lock, int res, Uint16 option) {
+ if (erts_lcnt_rt_options & ERTS_LCNT_OPT_SUSPEND) return;
+ /* Determine lock_state via res instead of state */
+ if (res != EBUSY) {
+ if (option & ERTS_LCNT_LO_WRITE) ethr_atomic_inc(&lock->w_state);
+ if (option & ERTS_LCNT_LO_READ ) ethr_atomic_inc(&lock->r_state);
+ lcnt_update_stats(&(lock->stats[0]), 0, NULL);
+ } else {
+ ethr_atomic_inc(&lock->stats[0].tries);
+ ethr_atomic_inc(&lock->stats[0].colls);
+ }
+}
+
+
+void erts_lcnt_trylock(erts_lcnt_lock_t *lock, int res) {
+ /* Determine lock_state via res instead of state */
+#ifdef DEBUG
+ long flowstate;
+#endif
+ if (erts_lcnt_rt_options & ERTS_LCNT_OPT_SUSPEND) return;
+ if (res != EBUSY) {
+
+#ifdef DEBUG
+ ethr_atomic_read(&lock->flowstate, &flowstate);
+ ASSERT(flowstate == 0);
+ ethr_atomic_inc( &lock->flowstate);
+#endif
+ ethr_atomic_inc(&lock->w_state);
+
+ lcnt_update_stats(&(lock->stats[0]), 0, NULL);
+
+ } else {
+ ethr_atomic_inc(&lock->stats[0].tries);
+ ethr_atomic_inc(&lock->stats[0].colls);
+ }
+}
+
+/* thread operations */
+
+static void *lcnt_thr_init(erts_lcnt_thread_data_t *eltd) {
+ void *(*function)(void *);
+ void *argument;
+ void *res;
+ function = eltd->function;
+ argument = eltd->argument;
+
+ ethr_tsd_set(lcnt_thr_data_key, eltd);
+
+ res = (void *)function(argument);
+ free(eltd);
+ return (void *)res;
+}
+
+
+
+int erts_lcnt_thr_create(ethr_tid *tid, void * (*function)(void *), void *arg, ethr_thr_opts *opts) {
+ erts_lcnt_thread_data_t *eltd;
+
+ /* lock for the global thread id update */
+ lcnt_lock();
+ eltd = lcnt_thread_data_alloc();
+ lcnt_unlock();
+
+ eltd->function = function;
+ eltd->argument = arg;
+
+ return ethr_thr_create(tid, (void *)lcnt_thr_init, (void *)eltd, opts);
+}
+
+
+/* bindings for bifs */
+
+Uint16 erts_lcnt_set_rt_opt(Uint16 opt) {
+ Uint16 prev;
+ prev = (erts_lcnt_rt_options & opt);
+ erts_lcnt_rt_options |= opt;
+ return prev;
+}
+
+Uint16 erts_lcnt_clear_rt_opt(Uint16 opt) {
+ Uint16 prev;
+ prev = (erts_lcnt_rt_options & opt);
+ erts_lcnt_rt_options &= ~opt;
+ return prev;
+}
+
+void erts_lcnt_clear_counters(void) {
+ erts_lcnt_lock_t *lock;
+ erts_lcnt_lock_list_t *list;
+ erts_lcnt_lock_stats_t *stats;
+ int i;
+
+ lcnt_lock();
+
+ list = erts_lcnt_data->current_locks;
+
+ for (lock = list->head; lock != NULL; lock = lock->next) {
+ for( i = 0; i < ERTS_LCNT_MAX_LOCK_LOCATIONS; i++) {
+ stats = &lock->stats[i];
+ lcnt_clear_stats(stats);
+ }
+ lock->n_stats = 1;
+ }
+
+ /* empty deleted locks in lock list */
+ erts_lcnt_list_clear(erts_lcnt_data->deleted_locks);
+
+ lcnt_time(&timer_start);
+
+ lcnt_unlock();
+}
+
+erts_lcnt_data_t *erts_lcnt_get_data(void) {
+ erts_lcnt_time_t timer_stop;
+
+ lcnt_lock();
+
+ lcnt_time(&timer_stop);
+ lcnt_time_diff(&(erts_lcnt_data->duration), &timer_stop, &timer_start);
+
+ lcnt_unlock();
+
+ return erts_lcnt_data;
+}
+
+char *erts_lcnt_lock_type(Uint16 type) {
+ return lcnt_lock_type(type);
+}
+
+#endif /* ifdef ERTS_ENABLE_LOCK_COUNT */
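
The (s, ns) bookkeeping in lcnt_time_diff() and lcnt_time_add() above is plain borrow/carry arithmetic in base 10^9. A standalone illustration; the ts_t type and ts_diff() function are local to this example:

    #include <stdio.h>

    typedef struct { long s; long ns; } ts_t; /* local to this example */

    /* Same borrow rule as lcnt_time_diff(): requires t1 >= t0. */
    static ts_t ts_diff(ts_t t1, ts_t t0)
    {
        ts_t d = { t1.s - t0.s, t1.ns - t0.ns };
        if (d.ns < 0) {
            d.s  -= 1;            /* borrow one second ... */
            d.ns += 1000000000L;  /* ... expressed as nanoseconds */
        }
        return d;
    }

    int main(void)
    {
        ts_t t0 = { 10, 900000000L };
        ts_t t1 = { 12, 100000000L };
        ts_t d  = ts_diff(t1, t0);

        printf("%ld.%09ld s\n", d.s, d.ns); /* prints 1.200000000 s */
        return 0;
    }
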
diff --git a/erts/emulator/beam/erl_lock_count.h b/erts/emulator/beam/erl_lock_count.h
new file mode 100644
index 0000000000..8564c36203
--- /dev/null
+++ b/erts/emulator/beam/erl_lock_count.h
@@ -0,0 +1,195 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2008-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Description: Statistics for locks.
+ *
+ * Author: Björn-Egil Dahlberg
+ * Date: 2008-07-03
+ * Abstract:
+ * Internal representation of lock statistics.
+ *
+ * Conceptual representation:
+ * - set name
+ * | - id (the unique lock)
+ * | | - lock type
+ * | | - statistics
+ * | | | - location (file and line number)
+ * | | | - tries
+ * | | | - collisions (including trylock busy)
+ * | | | - timer (time spent in waiting for lock)
+ * | | | - n_timer (collisions excluding trylock busy)
+ *
+ * Each lock instance is uniquely identified by its set (the name) and
+ * its id within that set. For each lock there is a set of statistics
+ * recording where, and with what impact, the lock was acquired.
+ */
+
+#include "sys.h"
+
+#ifndef ERTS_LOCK_COUNT_H__
+#define ERTS_LOCK_COUNT_H__
+
+#ifdef ERTS_ENABLE_LOCK_COUNT
+#include "ethread.h"
+
+#define ERTS_LCNT_MAX_LOCK_LOCATIONS (10)
+
+#define ERTS_LCNT_LT_SPINLOCK (((Uint16) 1) << 0)
+#define ERTS_LCNT_LT_RWSPINLOCK (((Uint16) 1) << 1)
+#define ERTS_LCNT_LT_MUTEX (((Uint16) 1) << 2)
+#define ERTS_LCNT_LT_RWMUTEX (((Uint16) 1) << 3)
+#define ERTS_LCNT_LT_PROCLOCK (((Uint16) 1) << 4)
+#define ERTS_LCNT_LT_ALLOC (((Uint16) 1) << 5)
+
+#define ERTS_LCNT_LO_READ (((Uint16) 1) << 6)
+#define ERTS_LCNT_LO_WRITE (((Uint16) 1) << 7)
+
+#define ERTS_LCNT_LO_READ_WRITE ( ERTS_LCNT_LO_READ \
+ | ERTS_LCNT_LO_WRITE )
+
+#define ERTS_LCNT_LT_ALL ( ERTS_LCNT_LT_SPINLOCK \
+ | ERTS_LCNT_LT_RWSPINLOCK \
+ | ERTS_LCNT_LT_MUTEX \
+ | ERTS_LCNT_LT_RWMUTEX \
+ | ERTS_LCNT_LT_PROCLOCK )
+/* runtime options */
+
+#define ERTS_LCNT_OPT_SUSPEND (((Uint16) 1) << 0)
+#define ERTS_LCNT_OPT_LOCATION (((Uint16) 1) << 1)
+#define ERTS_LCNT_OPT_PROCLOCK (((Uint16) 1) << 2)
+
+typedef struct {
+ unsigned long s;
+ unsigned long ns;
+} erts_lcnt_time_t;
+
+extern erts_lcnt_time_t timer_start;
+
+typedef struct erts_lcnt_lock_stats_s {
+ /* "tries" and "colls" needs to be atomic since
+ * trylock busy does not aquire a lock and there
+ * is no post action to rectify the situation
+ */
+
+ char *file; /* file in which the lock was taken */
+ unsigned int line; /* line number in file */
+
+ ethr_atomic_t tries; /* n tries to get lock */
+ ethr_atomic_t colls; /* n collisions of tries to get lock */
+
+ unsigned long timer_n; /* #times waited for lock */
+ erts_lcnt_time_t timer; /* total wait time for lock */
+} erts_lcnt_lock_stats_t;
+
+/* rw locks use both states; other locks use only w_state */
+typedef struct erts_lcnt_lock_s {
+ char *name; /* lock name */
+ Uint16 flag; /* lock type */
+ Eterm id; /* id if possible */
+
+#ifdef DEBUG
+ ethr_atomic_t flowstate;
+#endif
+
+ /* lock states */
+ ethr_atomic_t w_state; /* 0 not taken, otherwise n threads waiting */
+ ethr_atomic_t r_state; /* 0 not taken, > 0 -> writes will wait */
+
+ /* statistics */
+ unsigned int n_stats;
+ erts_lcnt_lock_stats_t stats[ERTS_LCNT_MAX_LOCK_LOCATIONS]; /* first entry is "undefined" */
+
+ /* chains for list handling */
+ /* links are protected by lcnt_data_lock */
+ struct erts_lcnt_lock_s *prev;
+ struct erts_lcnt_lock_s *next;
+} erts_lcnt_lock_t;
+
+typedef struct {
+ erts_lcnt_lock_t *head;
+ erts_lcnt_lock_t *tail;
+ unsigned long n;
+} erts_lcnt_lock_list_t;
+
+typedef struct {
+ erts_lcnt_time_t duration; /* time since last clear */
+ erts_lcnt_lock_list_t *current_locks;
+ erts_lcnt_lock_list_t *deleted_locks;
+} erts_lcnt_data_t;
+
+typedef struct {
+ int id;
+
+ erts_lcnt_time_t timer; /* timer */
+ int timer_set; /* bool */
+ int lock_in_conflict; /* bool */
+
+ /* function pointer */
+ void *(*function)(void *);
+ void *argument;
+
+} erts_lcnt_thread_data_t;
+
+/* globals */
+
+extern Uint16 erts_lcnt_rt_options;
+
+/* function declarations */
+
+void erts_lcnt_init(void);
+
+/* list operations (local) */
+erts_lcnt_lock_list_t *erts_lcnt_list_init(void);
+
+void erts_lcnt_list_clear( erts_lcnt_lock_list_t *list);
+void erts_lcnt_list_insert(erts_lcnt_lock_list_t *list, erts_lcnt_lock_t *lock);
+void erts_lcnt_list_delete(erts_lcnt_lock_list_t *list, erts_lcnt_lock_t *lock);
+
+/* lock operations (global) */
+void erts_lcnt_init_lock(erts_lcnt_lock_t *lock, char *name, Uint16 flag);
+void erts_lcnt_init_lock_x(erts_lcnt_lock_t *lock, char *name, Uint16 flag, Eterm id);
+void erts_lcnt_destroy_lock(erts_lcnt_lock_t *lock);
+
+void erts_lcnt_lock(erts_lcnt_lock_t *lock);
+void erts_lcnt_lock_opt(erts_lcnt_lock_t *lock, Uint16 option);
+void erts_lcnt_lock_post(erts_lcnt_lock_t *lock);
+void erts_lcnt_lock_post_x(erts_lcnt_lock_t *lock, char *file, unsigned int line);
+void erts_lcnt_lock_unaquire(erts_lcnt_lock_t *lock);
+
+void erts_lcnt_unlock(erts_lcnt_lock_t *lock);
+void erts_lcnt_unlock_opt(erts_lcnt_lock_t *lock, Uint16 option);
+
+void erts_lcnt_trylock_opt(erts_lcnt_lock_t *lock, int res, Uint16 option);
+void erts_lcnt_trylock(erts_lcnt_lock_t *lock, int res);
+
+/* thread operations */
+
+int erts_lcnt_thr_create(ethr_tid *tid, void * (*function)(void *), void *arg, ethr_thr_opts *opts);
+
+/* bif interface */
+
+Uint16 erts_lcnt_set_rt_opt(Uint16 opt);
+Uint16 erts_lcnt_clear_rt_opt(Uint16 opt);
+void erts_lcnt_clear_counters(void);
+char *erts_lcnt_lock_type(Uint16 type);
+erts_lcnt_data_t *erts_lcnt_get_data(void);
+
+#endif /* ifdef ERTS_ENABLE_LOCK_COUNT */
+#endif /* ifndef ERTS_LOCK_COUNT_H__ */
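
From the per-location counters declared above, tooling can derive a collision rate for each lock site as colls over tries, expressed as a percentage; the disabled print_lock_x() debug code in erl_lock_count.c computes the same ratio. A small sketch with illustrative names:

    #include <stdio.h>

    struct loc_stats { long tries; long colls; }; /* illustrative */

    /* Collision rate in percent; mirrors the rate computation in the
     * disabled debug printer in erl_lock_count.c. */
    static double coll_rate(const struct loc_stats *s)
    {
        if (s->tries <= 0)
            return 0.0;
        return 100.0 * (double)s->colls / (double)s->tries;
    }

    int main(void)
    {
        struct loc_stats s = { 1000, 37 };
        printf("%.2f%%\n", coll_rate(&s)); /* prints 3.70% */
        return 0;
    }
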
diff --git a/erts/emulator/beam/erl_math.c b/erts/emulator/beam/erl_math.c
new file mode 100644
index 0000000000..16d4fdc09c
--- /dev/null
+++ b/erts/emulator/beam/erl_math.c
@@ -0,0 +1,233 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 1997-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+
+#include "sys.h"
+#include "erl_vm.h"
+#include "global.h"
+#include "erl_process.h"
+#include "error.h"
+#include "bif.h"
+#include "big.h"
+
+static Eterm
+math_call_1(Process* p, double (*func)(double), Eterm arg1)
+{
+ FloatDef a1;
+ Eterm res;
+ Eterm* hp;
+
+ ERTS_FP_CHECK_INIT(p);
+ if (is_float(arg1)) {
+ GET_DOUBLE(arg1, a1);
+ } else if (is_small(arg1)) {
+ a1.fd = signed_val(arg1);
+ } else if (is_big(arg1)) {
+ if (big_to_double(arg1, &a1.fd) < 0) {
+ badarith:
+ p->freason = BADARITH;
+ return THE_NON_VALUE;
+ }
+ } else {
+ p->freason = BADARG;
+ return THE_NON_VALUE;
+ }
+ a1.fd = (*func)(a1.fd);
+ ERTS_FP_ERROR_THOROUGH(p, a1.fd, goto badarith);
+ hp = HAlloc(p, FLOAT_SIZE_OBJECT);
+ res = make_float(hp);
+ PUT_DOUBLE(a1, hp);
+ return res;
+}
+
+
+static Eterm
+math_call_2(Process* p, double (*func)(double, double), Eterm arg1, Eterm arg2)
+{
+ FloatDef a1;
+ FloatDef a2;
+ Eterm res;
+ Eterm* hp;
+
+ ERTS_FP_CHECK_INIT(p);
+ if (is_float(arg1)) {
+ GET_DOUBLE(arg1, a1);
+ } else if (is_small(arg1)) {
+ a1.fd = signed_val(arg1);
+ } else if (is_big(arg1)) {
+ if (big_to_double(arg1, &a1.fd) < 0) {
+ badarith:
+ p->freason = BADARITH;
+ return THE_NON_VALUE;
+ }
+ } else {
+ p->freason = BADARG;
+ return THE_NON_VALUE;
+ }
+
+ if (is_float(arg2)) {
+ GET_DOUBLE(arg2, a2);
+ } else if (is_small(arg2)) {
+ a2.fd = signed_val(arg2);
+ } else if (is_big(arg2)) {
+ if (big_to_double(arg2, &a2.fd) < 0) {
+ goto badarith;
+ }
+ } else {
+ p->freason = BADARG;
+ return THE_NON_VALUE;
+ }
+
+ a1.fd = (*func)(a1.fd, a2.fd);
+ ERTS_FP_ERROR_THOROUGH(p, a1.fd, goto badarith);
+ hp = HAlloc(p, FLOAT_SIZE_OBJECT);
+ res = make_float(hp);
+ PUT_DOUBLE(a1, hp);
+ return res;
+}
+
+BIF_RETTYPE math_cos_1(BIF_ALIST_1)
+{
+ return math_call_1(BIF_P, cos, BIF_ARG_1);
+}
+
+BIF_RETTYPE math_cosh_1(BIF_ALIST_1)
+{
+ return math_call_1(BIF_P, cosh, BIF_ARG_1);
+}
+
+BIF_RETTYPE math_sin_1(BIF_ALIST_1)
+{
+ return math_call_1(BIF_P, sin, BIF_ARG_1);
+}
+
+BIF_RETTYPE math_sinh_1(BIF_ALIST_1)
+{
+ return math_call_1(BIF_P, sinh, BIF_ARG_1);
+}
+
+BIF_RETTYPE math_tan_1(BIF_ALIST_1)
+{
+ return math_call_1(BIF_P, tan, BIF_ARG_1);
+}
+
+
+BIF_RETTYPE math_tanh_1(BIF_ALIST_1)
+{
+ return math_call_1(BIF_P, tanh, BIF_ARG_1);
+}
+
+
+BIF_RETTYPE math_acos_1(BIF_ALIST_1)
+{
+ return math_call_1(BIF_P, acos, BIF_ARG_1);
+}
+
+BIF_RETTYPE math_acosh_1(BIF_ALIST_1)
+{
+#ifdef NO_ACOSH
+ BIF_ERROR(BIF_P, EXC_UNDEF);
+#else
+ return math_call_1(BIF_P, acosh, BIF_ARG_1);
+#endif
+}
+
+BIF_RETTYPE math_asin_1(BIF_ALIST_1)
+{
+ return math_call_1(BIF_P, asin, BIF_ARG_1);
+}
+
+BIF_RETTYPE math_asinh_1(BIF_ALIST_1)
+{
+#ifdef NO_ASINH
+ BIF_ERROR(BIF_P, EXC_UNDEF);
+#else
+ return math_call_1(BIF_P, asinh, BIF_ARG_1);
+#endif
+}
+
+BIF_RETTYPE math_atan_1(BIF_ALIST_1)
+{
+ return math_call_1(BIF_P, atan, BIF_ARG_1);
+}
+
+BIF_RETTYPE math_atanh_1(BIF_ALIST_1)
+{
+#ifdef NO_ATANH
+ BIF_ERROR(BIF_P, EXC_UNDEF);
+#else
+ return math_call_1(BIF_P, atanh, BIF_ARG_1);
+#endif
+}
+
+BIF_RETTYPE math_erf_1(BIF_ALIST_1)
+{
+#ifdef NO_ERF
+ BIF_ERROR(BIF_P, EXC_UNDEF);
+#else
+ return math_call_1(BIF_P, erf, BIF_ARG_1);
+#endif
+}
+
+BIF_RETTYPE math_erfc_1(BIF_ALIST_1)
+{
+#ifdef NO_ERFC
+ BIF_ERROR(BIF_P, EXC_UNDEF);
+#else
+ return math_call_1(BIF_P, erfc, BIF_ARG_1);
+#endif
+}
+
+BIF_RETTYPE math_exp_1(BIF_ALIST_1)
+{
+ return math_call_1(BIF_P, exp, BIF_ARG_1);
+}
+
+BIF_RETTYPE math_log_1(BIF_ALIST_1)
+{
+ return math_call_1(BIF_P, log, BIF_ARG_1);
+}
+
+
+BIF_RETTYPE math_log10_1(BIF_ALIST_1)
+{
+ return math_call_1(BIF_P, log10, BIF_ARG_1);
+}
+
+BIF_RETTYPE math_sqrt_1(BIF_ALIST_1)
+{
+ return math_call_1(BIF_P, sqrt, BIF_ARG_1);
+}
+
+BIF_RETTYPE math_atan2_2(BIF_ALIST_2)
+{
+ return math_call_2(BIF_P, atan2, BIF_ARG_1, BIF_ARG_2);
+}
+
+BIF_RETTYPE math_pow_2(BIF_ALIST_2)
+{
+ return math_call_2(BIF_P, pow, BIF_ARG_1, BIF_ARG_2);
+}
+
+
+
+
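
math_call_1() and math_call_2() reduce every numeric argument to a C double before calling into libm: floats are unpacked, small integers widened, bignums converted (raising badarith when conversion fails), and anything else rejected with badarg. A plain-C sketch of that dispatch, with the tagged-term machinery replaced by an illustrative struct; bignum handling is elided:

    #include <math.h>
    #include <stdio.h>

    enum arg_kind { ARG_FLOAT, ARG_SMALL, ARG_OTHER }; /* illustrative */

    struct arg { enum arg_kind kind; double f; long i; };

    /* Mirrors the coercion in math_call_1(); bignum conversion
     * (big_to_double) is elided here. Returns 0 on success. */
    static int to_double(struct arg a, double *out)
    {
        switch (a.kind) {
        case ARG_FLOAT: *out = a.f;         return 0;
        case ARG_SMALL: *out = (double)a.i; return 0; /* signed_val() */
        default:        return -1;                    /* badarg */
        }
    }

    int main(void)
    {
        struct arg a = { ARG_SMALL, 0.0, 2 };
        double d;

        if (to_double(a, &d) == 0)
            printf("sqrt(2.0) = %f\n", sqrt(d));
        return 0;
    }
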
diff --git a/erts/emulator/beam/erl_md5.c b/erts/emulator/beam/erl_md5.c
new file mode 100644
index 0000000000..8d0352a367
--- /dev/null
+++ b/erts/emulator/beam/erl_md5.c
@@ -0,0 +1,340 @@
+/*
+ * MD5C.C - RSA Data Security, Inc., MD5 message-digest algorithm
+ */
+
+/* Copyright (C) 1991-2, RSA Data Security, Inc. Created 1991. All
+ * rights reserved.
+ *
+ * License to copy and use this software is granted provided that it
+ * is identified as the "RSA Data Security, Inc. MD5 Message-Digest
+ * Algorithm" in all material mentioning or referencing this software
+ * or this function.
+ *
+ * License is also granted to make and use derivative works provided
+ * that such works are identified as "derived from the RSA Data
+ * Security, Inc. MD5 Message-Digest Algorithm" in all material
+ * mentioning or referencing the derived work.
+ *
+ * RSA Data Security, Inc. makes no representations concerning either
+ * the merchantability of this software or the suitability of this
+ * software for any particular purpose. It is provided "as is"
+ * without express or implied warranty of any kind.
+ *
+ * These notices must be retained in any copies of any part of this
+ * documentation and/or software.
+ */
+
+/* %ExternalCopyright% */
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+
+#include "sys.h"
+#include "erl_vm.h"
+#include "global.h"
+#include "erl_process.h"
+#include "error.h"
+#include "bif.h"
+#include "erl_binary.h"
+
+typedef void *POINTER;
+
+/*
+ * Constants for MD5Transform routine.
+ */
+
+#define S11 7
+#define S12 12
+#define S13 17
+#define S14 22
+#define S21 5
+#define S22 9
+#define S23 14
+#define S24 20
+#define S31 4
+#define S32 11
+#define S33 16
+#define S34 23
+#define S41 6
+#define S42 10
+#define S43 15
+#define S44 21
+
+static void MD5Transform(Uint32 [4], unsigned char [64]);
+static void Encode(unsigned char *, Uint32 *, unsigned int);
+static void Decode(Uint32 *, unsigned char *, unsigned int);
+
+static unsigned char PADDING[64] = {
+ 0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+/*
+ * F, G, H and I are basic MD5 functions.
+ */
+#define F(x, y, z) (((x) & (y)) | ((~x) & (z)))
+#define G(x, y, z) (((x) & (z)) | ((y) & (~z)))
+#define H(x, y, z) ((x) ^ (y) ^ (z))
+#define I(x, y, z) ((y) ^ ((x) | (~z)))
+
+/*
+ * ROTATE_LEFT rotates x left n bits.
+ */
+#define ROTATE_LEFT(x, n) (((x) << (n)) | ((x) >> (32-(n))))
+
+/*
+ * FF, GG, HH, and II transformations for rounds 1, 2, 3, and 4.
+ * Rotation is separate from addition to prevent recomputation.
+ */
+#define FF(a, b, c, d, x, s, ac) { \
+ (a) += F ((b), (c), (d)) + (x) + (Uint32)(ac); \
+ (a) = ROTATE_LEFT ((a), (s)); \
+ (a) += (b); \
+}
+#define GG(a, b, c, d, x, s, ac) { \
+ (a) += G ((b), (c), (d)) + (x) + (Uint32)(ac); \
+ (a) = ROTATE_LEFT ((a), (s)); \
+ (a) += (b); \
+}
+#define HH(a, b, c, d, x, s, ac) { \
+ (a) += H ((b), (c), (d)) + (x) + (Uint32)(ac); \
+ (a) = ROTATE_LEFT ((a), (s)); \
+ (a) += (b); \
+}
+#define II(a, b, c, d, x, s, ac) { \
+ (a) += I ((b), (c), (d)) + (x) + (Uint32)(ac); \
+ (a) = ROTATE_LEFT ((a), (s)); \
+ (a) += (b); \
+}
+
+/*
+ * MD5 initialization. Begins an MD5 operation, writing a new context.
+ */
+void MD5Init(MD5_CTX* context)
+{
+ context->count[0] = context->count[1] = 0;
+
+ /*
+ * Load magic initialization constants.
+ */
+ context->state[0] = 0x67452301;
+ context->state[1] = 0xefcdab89;
+ context->state[2] = 0x98badcfe;
+ context->state[3] = 0x10325476;
+}
+
+/*
+ * MD5 block update operation. Continues an MD5 message-digest
+ * operation, processing another message block, and updating the
+ * context.
+ */
+void MD5Update(MD5_CTX *context, /* context */
+ unsigned char *input, /* input block */
+ unsigned int inputLen) /* length of input block */
+{
+ unsigned int i, index, partLen;
+
+ /*
+ * Compute number of bytes mod 64
+ */
+ index = (unsigned int)((context->count[0] >> 3) & 0x3F);
+
+ /* Update number of bits */
+ if ((context->count[0] += ((Uint32)inputLen << 3))
+ < ((Uint32)inputLen << 3))
+ context->count[1]++;
+ context->count[1] += ((Uint32)inputLen >> 29);
+
+ partLen = 64 - index;
+
+ /*
+ * Transform as many times as possible.
+ */
+ if (inputLen >= partLen) {
+ sys_memcpy
+ ((POINTER)&context->buffer[index], (POINTER)input, partLen);
+ MD5Transform (context->state, context->buffer);
+
+ for (i = partLen; i + 63 < inputLen; i += 64)
+ MD5Transform (context->state, &input[i]);
+
+ index = 0;
+ }
+ else
+ i = 0;
+
+ /*
+ * Buffer remaining input
+ */
+ sys_memcpy((POINTER)&context->buffer[index], (POINTER)&input[i], inputLen-i);
+}
+
+/*
+ * MD5 finalization. Ends an MD5 message-digest operation, writing
+ * the message digest and zeroizing the context.
+ */
+void MD5Final(unsigned char digest[16], /* message digest */
+ MD5_CTX *context) /* context */
+{
+ unsigned char bits[8];
+ unsigned int index, padLen;
+
+ /*
+ * Save number of bits
+ */
+ Encode (bits, context->count, 8);
+
+ /*
+ * Pad out to 56 mod 64.
+ */
+ index = (unsigned int)((context->count[0] >> 3) & 0x3f);
+ padLen = (index < 56) ? (56 - index) : (120 - index);
+ MD5Update (context, PADDING, padLen);
+
+ /*
+ * Append length (before padding)
+ */
+ MD5Update (context, bits, 8);
+
+ /*
+ * Store state in digest
+ */
+ Encode (digest, context->state, 16);
+
+ /*
+ * Zeroize sensitive information.
+ */
+ sys_memset ((POINTER)context, 0, sizeof (*context));
+}
+
+/*
+ * MD5 basic transformation. Transforms state based on block.
+ */
+static void MD5Transform(Uint32 state[4], unsigned char block[64])
+{
+ Uint32 a = state[0], b = state[1], c = state[2], d = state[3], x[16];
+
+ Decode (x, block, 64);
+
+ /* Round 1 */
+ FF (a, b, c, d, x[ 0], S11, 0xd76aa478); /* 1 */
+ FF (d, a, b, c, x[ 1], S12, 0xe8c7b756); /* 2 */
+ FF (c, d, a, b, x[ 2], S13, 0x242070db); /* 3 */
+ FF (b, c, d, a, x[ 3], S14, 0xc1bdceee); /* 4 */
+ FF (a, b, c, d, x[ 4], S11, 0xf57c0faf); /* 5 */
+ FF (d, a, b, c, x[ 5], S12, 0x4787c62a); /* 6 */
+ FF (c, d, a, b, x[ 6], S13, 0xa8304613); /* 7 */
+ FF (b, c, d, a, x[ 7], S14, 0xfd469501); /* 8 */
+ FF (a, b, c, d, x[ 8], S11, 0x698098d8); /* 9 */
+ FF (d, a, b, c, x[ 9], S12, 0x8b44f7af); /* 10 */
+ FF (c, d, a, b, x[10], S13, 0xffff5bb1); /* 11 */
+ FF (b, c, d, a, x[11], S14, 0x895cd7be); /* 12 */
+ FF (a, b, c, d, x[12], S11, 0x6b901122); /* 13 */
+ FF (d, a, b, c, x[13], S12, 0xfd987193); /* 14 */
+ FF (c, d, a, b, x[14], S13, 0xa679438e); /* 15 */
+ FF (b, c, d, a, x[15], S14, 0x49b40821); /* 16 */
+
+ /* Round 2 */
+ GG (a, b, c, d, x[ 1], S21, 0xf61e2562); /* 17 */
+ GG (d, a, b, c, x[ 6], S22, 0xc040b340); /* 18 */
+ GG (c, d, a, b, x[11], S23, 0x265e5a51); /* 19 */
+ GG (b, c, d, a, x[ 0], S24, 0xe9b6c7aa); /* 20 */
+ GG (a, b, c, d, x[ 5], S21, 0xd62f105d); /* 21 */
+ GG (d, a, b, c, x[10], S22, 0x2441453); /* 22 */
+ GG (c, d, a, b, x[15], S23, 0xd8a1e681); /* 23 */
+ GG (b, c, d, a, x[ 4], S24, 0xe7d3fbc8); /* 24 */
+ GG (a, b, c, d, x[ 9], S21, 0x21e1cde6); /* 25 */
+ GG (d, a, b, c, x[14], S22, 0xc33707d6); /* 26 */
+ GG (c, d, a, b, x[ 3], S23, 0xf4d50d87); /* 27 */
+ GG (b, c, d, a, x[ 8], S24, 0x455a14ed); /* 28 */
+ GG (a, b, c, d, x[13], S21, 0xa9e3e905); /* 29 */
+ GG (d, a, b, c, x[ 2], S22, 0xfcefa3f8); /* 30 */
+ GG (c, d, a, b, x[ 7], S23, 0x676f02d9); /* 31 */
+ GG (b, c, d, a, x[12], S24, 0x8d2a4c8a); /* 32 */
+
+ /* Round 3 */
+ HH (a, b, c, d, x[ 5], S31, 0xfffa3942); /* 33 */
+ HH (d, a, b, c, x[ 8], S32, 0x8771f681); /* 34 */
+ HH (c, d, a, b, x[11], S33, 0x6d9d6122); /* 35 */
+ HH (b, c, d, a, x[14], S34, 0xfde5380c); /* 36 */
+ HH (a, b, c, d, x[ 1], S31, 0xa4beea44); /* 37 */
+ HH (d, a, b, c, x[ 4], S32, 0x4bdecfa9); /* 38 */
+ HH (c, d, a, b, x[ 7], S33, 0xf6bb4b60); /* 39 */
+ HH (b, c, d, a, x[10], S34, 0xbebfbc70); /* 40 */
+ HH (a, b, c, d, x[13], S31, 0x289b7ec6); /* 41 */
+ HH (d, a, b, c, x[ 0], S32, 0xeaa127fa); /* 42 */
+ HH (c, d, a, b, x[ 3], S33, 0xd4ef3085); /* 43 */
+ HH (b, c, d, a, x[ 6], S34, 0x4881d05); /* 44 */
+ HH (a, b, c, d, x[ 9], S31, 0xd9d4d039); /* 45 */
+ HH (d, a, b, c, x[12], S32, 0xe6db99e5); /* 46 */
+ HH (c, d, a, b, x[15], S33, 0x1fa27cf8); /* 47 */
+ HH (b, c, d, a, x[ 2], S34, 0xc4ac5665); /* 48 */
+
+ /* Round 4 */
+ II (a, b, c, d, x[ 0], S41, 0xf4292244); /* 49 */
+ II (d, a, b, c, x[ 7], S42, 0x432aff97); /* 50 */
+ II (c, d, a, b, x[14], S43, 0xab9423a7); /* 51 */
+ II (b, c, d, a, x[ 5], S44, 0xfc93a039); /* 52 */
+ II (a, b, c, d, x[12], S41, 0x655b59c3); /* 53 */
+ II (d, a, b, c, x[ 3], S42, 0x8f0ccc92); /* 54 */
+ II (c, d, a, b, x[10], S43, 0xffeff47d); /* 55 */
+ II (b, c, d, a, x[ 1], S44, 0x85845dd1); /* 56 */
+ II (a, b, c, d, x[ 8], S41, 0x6fa87e4f); /* 57 */
+ II (d, a, b, c, x[15], S42, 0xfe2ce6e0); /* 58 */
+ II (c, d, a, b, x[ 6], S43, 0xa3014314); /* 59 */
+ II (b, c, d, a, x[13], S44, 0x4e0811a1); /* 60 */
+ II (a, b, c, d, x[ 4], S41, 0xf7537e82); /* 61 */
+ II (d, a, b, c, x[11], S42, 0xbd3af235); /* 62 */
+ II (c, d, a, b, x[ 2], S43, 0x2ad7d2bb); /* 63 */
+ II (b, c, d, a, x[ 9], S44, 0xeb86d391); /* 64 */
+
+ state[0] += a;
+ state[1] += b;
+ state[2] += c;
+ state[3] += d;
+
+ /*
+ * Zeroize sensitive information.
+ */
+ sys_memset ((POINTER)x, 0, sizeof (x));
+}
+
+/*
+ * Encodes input (Uint32) into output (unsigned char). Assumes len is
+ * a multiple of 4.
+ */
+static void Encode(unsigned char *output, Uint32 *input, unsigned int len)
+{
+ unsigned int i, j;
+
+ for (i = 0, j = 0; j < len; i++, j += 4) {
+ output[j] = (unsigned char)(input[i] & 0xff);
+ output[j+1] = (unsigned char)((input[i] >> 8) & 0xff);
+ output[j+2] = (unsigned char)((input[i] >> 16) & 0xff);
+ output[j+3] = (unsigned char)((input[i] >> 24) & 0xff);
+ }
+}
+
+/*
+ * Decodes input (unsigned char) into output (Uint32). Assumes len is
+ * a multiple of 4.
+ */
+static void Decode(Uint32 *output, unsigned char *input, unsigned int len)
+{
+ unsigned int i, j;
+
+ for (i = 0, j = 0; j < len; i++, j += 4)
+ output[i] = ((Uint32)input[j]) | (((Uint32)input[j+1]) << 8) |
+ (((Uint32)input[j+2]) << 16) | (((Uint32)input[j+3]) << 24);
+}
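
The routines above follow the usual init/update/final digest pattern. A hedged usage sketch for computing a digest over a short buffer, assuming the MD5_CTX definition and prototypes from the emulator's global headers are in scope:

    #include <stdio.h>
    #include <string.h>
    #include "global.h" /* assumed to provide MD5_CTX and the prototypes */

    void md5_digest_example(void)
    {
        MD5_CTX ctx;
        unsigned char digest[16];
        const char *msg = "abc";
        int i;

        MD5Init(&ctx);
        MD5Update(&ctx, (unsigned char *)msg, (unsigned int)strlen(msg));
        MD5Final(digest, &ctx);

        for (i = 0; i < 16; i++)
            printf("%02x", digest[i]); /* 900150983cd24fb0d6963f7d28e17f72 */
        printf("\n");
    }
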
diff --git a/erts/emulator/beam/erl_message.c b/erts/emulator/beam/erl_message.c
new file mode 100644
index 0000000000..81fbdfbd5a
--- /dev/null
+++ b/erts/emulator/beam/erl_message.c
@@ -0,0 +1,1070 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 1997-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+/*
+ * Message passing primitives.
+ */
+
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+
+#include "sys.h"
+#include "erl_vm.h"
+#include "global.h"
+#include "erl_message.h"
+#include "erl_process.h"
+#include "erl_nmgc.h"
+
+ERTS_SCHED_PREF_QUICK_ALLOC_IMPL(message,
+ ErlMessage,
+ ERL_MESSAGE_BUF_SZ,
+ ERTS_ALC_T_MSG_REF)
+
+#if defined(DEBUG) && 0
+#define HARD_DEBUG
+#else
+#undef HARD_DEBUG
+#endif
+
+void
+init_message(void)
+{
+ init_message_alloc();
+}
+
+void
+free_message(ErlMessage* mp)
+{
+ message_free(mp);
+}
+
+/* Allocate message buffer (size in words) */
+ErlHeapFragment*
+new_message_buffer(Uint size)
+{
+ ErlHeapFragment* bp;
+ bp = (ErlHeapFragment*) ERTS_HEAP_ALLOC(ERTS_ALC_T_HEAP_FRAG,
+ ERTS_HEAP_FRAG_SIZE(size));
+ ERTS_INIT_HEAP_FRAG(bp, size);
+ return bp;
+}
+
+ErlHeapFragment*
+erts_resize_message_buffer(ErlHeapFragment *bp, Uint size,
+ Eterm *brefs, Uint brefs_size)
+{
+#ifdef DEBUG
+ int i;
+#endif
+#ifdef HARD_DEBUG
+ ErlHeapFragment *dbg_bp;
+ Eterm *dbg_brefs;
+ Uint dbg_size;
+ Uint dbg_tot_size;
+ Eterm *dbg_hp;
+#endif
+ ErlHeapFragment* nbp;
+
+#ifdef DEBUG
+ {
+ Uint off_sz = size < bp->size ? size : bp->size;
+ for (i = 0; i < brefs_size; i++) {
+ Eterm *ptr;
+ if (is_immed(brefs[i]))
+ continue;
+ ptr = ptr_val(brefs[i]);
+ ASSERT(&bp->mem[0] <= ptr && ptr < &bp->mem[0] + off_sz);
+
+ }
+ }
+#endif
+
+ if (size == bp->size)
+ return bp;
+
+#ifdef HARD_DEBUG
+ dbg_brefs = erts_alloc(ERTS_ALC_T_UNDEF, sizeof(Eterm *)*brefs_size);
+ dbg_bp = new_message_buffer(bp->size);
+ dbg_hp = dbg_bp->mem;
+ dbg_tot_size = 0;
+ for (i = 0; i < brefs_size; i++) {
+ dbg_size = size_object(brefs[i]);
+ dbg_tot_size += dbg_size;
+ dbg_brefs[i] = copy_struct(brefs[i], dbg_size, &dbg_hp,
+ &dbg_bp->off_heap);
+ }
+ ASSERT(dbg_tot_size == (size < bp->size ? size : bp->size));
+#endif
+
+ nbp = (ErlHeapFragment*) ERTS_HEAP_REALLOC(ERTS_ALC_T_HEAP_FRAG,
+ (void *) bp,
+ (sizeof(ErlHeapFragment)
+ - sizeof(Eterm)
+ + bp->size*sizeof(Eterm)),
+ (sizeof(ErlHeapFragment)
+ - sizeof(Eterm)
+ + size*sizeof(Eterm)));
+ if (bp != nbp) {
+ Uint off_sz = size < nbp->size ? size : nbp->size;
+ Eterm *sp = &bp->mem[0];
+ Eterm *ep = sp + off_sz;
+ Sint offs = &nbp->mem[0] - sp;
+ erts_offset_off_heap(&nbp->off_heap, offs, sp, ep);
+ erts_offset_heap(&nbp->mem[0], off_sz, offs, sp, ep);
+ if (brefs && brefs_size)
+ erts_offset_heap_ptr(brefs, brefs_size, offs, sp, ep);
+#ifdef DEBUG
+ for (i = 0; i < brefs_size; i++) {
+ Eterm *ptr;
+ if (is_immed(brefs[i]))
+ continue;
+ ptr = ptr_val(brefs[i]);
+ ASSERT(&nbp->mem[0] <= ptr && ptr < &nbp->mem[0] + off_sz);
+ }
+#endif
+ }
+ nbp->size = size;
+
+
+#ifdef HARD_DEBUG
+ for (i = 0; i < brefs_size; i++)
+ ASSERT(eq(dbg_brefs[i], brefs[i]));
+ free_message_buffer(dbg_bp);
+ erts_free(ERTS_ALC_T_UNDEF, dbg_brefs);
+#endif
+
+ return nbp;
+}
+
+
+void
+erts_cleanup_offheap(ErlOffHeap *offheap)
+{
+ if (offheap->mso) {
+ erts_cleanup_mso(offheap->mso);
+ }
+#ifndef HYBRID /* FIND ME! */
+ if (offheap->funs) {
+ erts_cleanup_funs(offheap->funs);
+ }
+#endif
+ if (offheap->externals) {
+ erts_cleanup_externals(offheap->externals);
+ }
+}
+
+void
+free_message_buffer(ErlHeapFragment* bp)
+{
+ erts_cleanup_offheap(&bp->off_heap);
+ ERTS_HEAP_FREE(ERTS_ALC_T_HEAP_FRAG,
+ (void *) bp,
+ (sizeof(ErlHeapFragment)
+ - sizeof(Eterm)
+ + bp->size*sizeof(Eterm)));
+}
+
+static ERTS_INLINE void
+link_mbuf_to_proc(Process *proc, ErlHeapFragment *bp)
+{
+ if (bp) {
+ /* Link the message buffer */
+ bp->next = MBUF(proc);
+ MBUF(proc) = bp;
+ MBUF_SIZE(proc) += bp->size;
+ FLAGS(proc) |= F_FORCE_GC;
+
+ /* Move any binaries into the process */
+ if (bp->off_heap.mso != NULL) {
+ ProcBin** next_p = &bp->off_heap.mso;
+ while (*next_p != NULL) {
+ next_p = &((*next_p)->next);
+ }
+ *next_p = MSO(proc).mso;
+ MSO(proc).mso = bp->off_heap.mso;
+ bp->off_heap.mso = NULL;
+ MSO(proc).overhead += bp->off_heap.overhead;
+ }
+
+ /* Move any funs into the process */
+#ifndef HYBRID
+ if (bp->off_heap.funs != NULL) {
+ ErlFunThing** next_p = &bp->off_heap.funs;
+ while (*next_p != NULL) {
+ next_p = &((*next_p)->next);
+ }
+ *next_p = MSO(proc).funs;
+ MSO(proc).funs = bp->off_heap.funs;
+ bp->off_heap.funs = NULL;
+ }
+#endif
+
+ /* Move any external things into the process */
+ if (bp->off_heap.externals != NULL) {
+ ExternalThing** next_p = &bp->off_heap.externals;
+ while (*next_p != NULL) {
+ next_p = &((*next_p)->next);
+ }
+ *next_p = MSO(proc).externals;
+ MSO(proc).externals = bp->off_heap.externals;
+ bp->off_heap.externals = NULL;
+ }
+ }
+}
+
+Eterm
+erts_msg_distext2heap(Process *pp,
+ ErtsProcLocks *plcksp,
+ ErlHeapFragment **bpp,
+ Eterm *tokenp,
+ ErtsDistExternal *dist_extp)
+{
+ Eterm msg;
+ Uint tok_sz = 0;
+ Eterm *hp = NULL;
+ Eterm *hp_end = NULL;
+ ErlOffHeap *ohp;
+ Sint sz;
+
+ *bpp = NULL;
+ sz = erts_decode_dist_ext_size(dist_extp, 0);
+ if (sz < 0)
+ goto decode_error;
+ if (is_not_nil(*tokenp)) {
+ ErlHeapFragment *heap_frag = erts_dist_ext_trailer(dist_extp);
+ tok_sz = heap_frag->size;
+ sz += tok_sz;
+ }
+ if (pp)
+ hp = erts_alloc_message_heap(sz, bpp, &ohp, pp, plcksp);
+ else {
+ *bpp = new_message_buffer(sz);
+ hp = (*bpp)->mem;
+ ohp = &(*bpp)->off_heap;
+ }
+ hp_end = hp + sz;
+ msg = erts_decode_dist_ext(&hp, ohp, dist_extp);
+ if (is_non_value(msg))
+ goto decode_error;
+ if (is_not_nil(*tokenp)) {
+ ErlHeapFragment *heap_frag = erts_dist_ext_trailer(dist_extp);
+ *tokenp = copy_struct(*tokenp, tok_sz, &hp, ohp);
+ erts_cleanup_offheap(&heap_frag->off_heap);
+ }
+ erts_free_dist_ext_copy(dist_extp);
+ if (hp_end != hp) {
+ if (!(*bpp)) {
+ HRelease(pp, hp_end, hp);
+ }
+ else {
+ Uint final_size = hp - &(*bpp)->mem[0];
+ Eterm brefs[2] = {msg, *tokenp};
+ ASSERT(sz - (hp_end - hp) == final_size);
+ *bpp = erts_resize_message_buffer(*bpp, final_size, &brefs[0], 2);
+ msg = brefs[0];
+ *tokenp = brefs[1];
+ }
+ }
+ return msg;
+
+ decode_error:
+ if (is_not_nil(*tokenp)) {
+ ErlHeapFragment *heap_frag = erts_dist_ext_trailer(dist_extp);
+ erts_cleanup_offheap(&heap_frag->off_heap);
+ }
+ erts_free_dist_ext_copy(dist_extp);
+ if (*bpp)
+ free_message_buffer(*bpp);
+ else if (hp) {
+ HRelease(pp, hp_end, hp);
+ }
+ *bpp = NULL;
+ return THE_NON_VALUE;
+}
+
+static ERTS_INLINE void
+notify_new_message(Process *receiver)
+{
+ ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_STATUS
+ & erts_proc_lc_my_proc_locks(receiver));
+
+ ACTIVATE(receiver);
+
+ switch (receiver->status) {
+ case P_GARBING:
+ switch (receiver->gcstatus) {
+ case P_SUSPENDED:
+ goto suspended;
+ case P_WAITING:
+ goto waiting;
+ default:
+ break;
+ }
+ break;
+ case P_SUSPENDED:
+ suspended:
+ receiver->rstatus = P_RUNABLE;
+ break;
+ case P_WAITING:
+ waiting:
+ erts_add_to_runq(receiver);
+ break;
+ default:
+ break;
+ }
+}
+
+void
+erts_queue_dist_message(Process *rcvr,
+ ErtsProcLocks *rcvr_locks,
+ ErtsDistExternal *dist_ext,
+ Eterm token)
+{
+ ErlMessage* mp;
+#ifdef ERTS_SMP
+ ErtsProcLocks need_locks;
+#endif
+
+ ERTS_SMP_LC_ASSERT(*rcvr_locks == erts_proc_lc_my_proc_locks(rcvr));
+
+ mp = message_alloc();
+
+#ifdef ERTS_SMP
+ need_locks = ~(*rcvr_locks) & (ERTS_PROC_LOCK_MSGQ|ERTS_PROC_LOCK_STATUS);
+ if (need_locks) {
+ *rcvr_locks |= need_locks;
+ if (erts_smp_proc_trylock(rcvr, need_locks) == EBUSY) {
+ if (need_locks == ERTS_PROC_LOCK_MSGQ) {
+ erts_smp_proc_unlock(rcvr, ERTS_PROC_LOCK_STATUS);
+ need_locks = (ERTS_PROC_LOCK_MSGQ
+ | ERTS_PROC_LOCK_STATUS);
+ }
+ erts_smp_proc_lock(rcvr, need_locks);
+ }
+ }
+
+ if (rcvr->is_exiting || ERTS_PROC_PENDING_EXIT(rcvr)) {
+ /* Drop message if receiver is exiting or has a pending exit ... */
+ if (is_not_nil(token)) {
+ ErlHeapFragment *heap_frag;
+ heap_frag = erts_dist_ext_trailer(mp->data.dist_ext);
+ erts_cleanup_offheap(&heap_frag->off_heap);
+ }
+ erts_free_dist_ext_copy(dist_ext);
+ message_free(mp);
+ }
+ else
+#endif
+ if (IS_TRACED_FL(rcvr, F_TRACE_RECEIVE)) {
+ /* Ahh... need to decode it in order to trace it... */
+ ErlHeapFragment *mbuf;
+ Eterm msg;
+ message_free(mp);
+ msg = erts_msg_distext2heap(rcvr, rcvr_locks, &mbuf, &token, dist_ext);
+ if (is_value(msg))
+ erts_queue_message(rcvr, rcvr_locks, mbuf, msg, token);
+ }
+ else {
+ /* Enqueue message on external format */
+
+ ERL_MESSAGE_TERM(mp) = THE_NON_VALUE;
+ ERL_MESSAGE_TOKEN(mp) = token;
+ mp->next = NULL;
+
+ mp->data.dist_ext = dist_ext;
+ LINK_MESSAGE(rcvr, mp);
+
+ notify_new_message(rcvr);
+ }
+}
+
+/* Add a message last in message queue */
+void
+erts_queue_message(Process* receiver,
+ ErtsProcLocks *receiver_locks,
+ ErlHeapFragment* bp,
+ Eterm message,
+ Eterm seq_trace_token)
+{
+ ErlMessage* mp;
+#ifdef ERTS_SMP
+ ErtsProcLocks need_locks;
+#else
+ ASSERT(bp != NULL || receiver->mbuf == NULL);
+#endif
+
+ ERTS_SMP_LC_ASSERT(*receiver_locks == erts_proc_lc_my_proc_locks(receiver));
+
+ mp = message_alloc();
+
+#ifdef ERTS_SMP
+ need_locks = ~(*receiver_locks) & (ERTS_PROC_LOCK_MSGQ
+ | ERTS_PROC_LOCK_STATUS);
+ if (need_locks) {
+ *receiver_locks |= need_locks;
+ if (erts_smp_proc_trylock(receiver, need_locks) == EBUSY) {
+ if (need_locks == ERTS_PROC_LOCK_MSGQ) {
+ erts_smp_proc_unlock(receiver, ERTS_PROC_LOCK_STATUS);
+ need_locks = (ERTS_PROC_LOCK_MSGQ
+ | ERTS_PROC_LOCK_STATUS);
+ }
+ erts_smp_proc_lock(receiver, need_locks);
+ }
+ }
+
+ if (receiver->is_exiting || ERTS_PROC_PENDING_EXIT(receiver)) {
+ /* Drop message if receiver is exiting or has a pending
+ * exit ...
+ */
+ if (bp)
+ free_message_buffer(bp);
+ message_free(mp);
+ return;
+ }
+#endif
+
+ ERL_MESSAGE_TERM(mp) = message;
+ ERL_MESSAGE_TOKEN(mp) = seq_trace_token;
+ mp->next = NULL;
+
+#ifdef ERTS_SMP
+ if (*receiver_locks & ERTS_PROC_LOCK_MAIN) {
+ mp->data.heap_frag = bp;
+
+ /*
+ * We move 'in queue' to 'private queue' and place
+ * message at the end of 'private queue' in order
+ * to ensure that the 'in queue' doesn't contain
+ * references into the heap. By ensuring this,
+ * we don't need to include the 'in queue' in
+ * the root set when garbage collecting.
+ */
+ ERTS_SMP_MSGQ_MV_INQ2PRIVQ(receiver);
+ LINK_MESSAGE_PRIVQ(receiver, mp);
+ }
+ else {
+ mp->data.heap_frag = bp;
+ LINK_MESSAGE(receiver, mp);
+ }
+#else
+ mp->data.heap_frag = bp;
+ LINK_MESSAGE(receiver, mp);
+#endif
+
+ notify_new_message(receiver);
+
+ if (IS_TRACED_FL(receiver, F_TRACE_RECEIVE)) {
+ trace_receive(receiver, message);
+ }
+
+#ifndef ERTS_SMP
+ ERTS_HOLE_CHECK(receiver);
+#endif
+}
+
+void
+erts_link_mbuf_to_proc(struct process *proc, ErlHeapFragment *bp)
+{
+ Eterm* htop = HEAP_TOP(proc);
+
+ link_mbuf_to_proc(proc, bp);
+ if (htop < HEAP_LIMIT(proc)) {
+ *htop = make_pos_bignum_header(HEAP_LIMIT(proc)-htop-1);
+ HEAP_TOP(proc) = HEAP_LIMIT(proc);
+ }
+}
+
+/*
+ * Moves content of message buffer attached to a message into a heap.
+ * The message buffer is deallocated.
+ */
+void
+erts_move_msg_mbuf_to_heap(Eterm** hpp, ErlOffHeap* off_heap, ErlMessage *msg)
+{
+ /* Unions for typecasts avoid warnings about type-punned pointers and aliasing */
+ union {
+ Uint** upp;
+ ProcBin **pbpp;
+ ErlFunThing **efpp;
+ ExternalThing **etpp;
+ } oh_list_pp, oh_el_next_pp;
+ union {
+ Uint *up;
+ ProcBin *pbp;
+ ErlFunThing *efp;
+ ExternalThing *etp;
+ } oh_el_p;
+ Eterm term, token, *fhp, *hp;
+ Sint offs;
+ Uint sz;
+ ErlHeapFragment *bp;
+
+#ifdef HARD_DEBUG
+ ProcBin *dbg_mso_start = off_heap->mso;
+ ErlFunThing *dbg_fun_start = off_heap->funs;
+ ExternalThing *dbg_external_start = off_heap->externals;
+ Eterm dbg_term, dbg_token;
+ ErlHeapFragment *dbg_bp;
+ Uint *dbg_hp, *dbg_thp_start;
+ Uint dbg_term_sz, dbg_token_sz;
+#endif
+
+ bp = msg->data.heap_frag;
+ term = ERL_MESSAGE_TERM(msg);
+ token = ERL_MESSAGE_TOKEN(msg);
+ if (!bp) {
+ ASSERT(is_immed(term) && is_immed(token));
+ return;
+ }
+
+#ifdef HARD_DEBUG
+ dbg_term_sz = size_object(term);
+ dbg_token_sz = size_object(token);
+ ASSERT(bp->size == dbg_term_sz + dbg_token_sz);
+
+ dbg_bp = new_message_buffer(bp->size);
+ dbg_hp = dbg_bp->mem;
+ dbg_term = copy_struct(term, dbg_term_sz, &dbg_hp, &dbg_bp->off_heap);
+ dbg_token = copy_struct(token, dbg_token_sz, &dbg_hp, &dbg_bp->off_heap);
+ dbg_thp_start = *hpp;
+#endif
+
+ ASSERT(bp);
+ msg->data.attached = NULL;
+
+ off_heap->overhead += bp->off_heap.overhead;
+ sz = bp->size;
+
+#ifdef DEBUG
+ if (is_not_immed(term)) {
+ ASSERT(bp->mem <= ptr_val(term));
+ ASSERT(bp->mem + bp->size > ptr_val(term));
+ }
+
+ if (is_not_immed(token)) {
+ ASSERT(bp->mem <= ptr_val(token));
+ ASSERT(bp->mem + bp->size > ptr_val(token));
+ }
+#endif
+
+ fhp = bp->mem;
+ hp = *hpp;
+ offs = hp - fhp;
+
+ oh_list_pp.upp = NULL;
+ oh_el_next_pp.upp = NULL; /* Shut up compiler warning */
+ oh_el_p.up = NULL; /* Shut up compiler warning */
+ while (sz--) {
+ Uint cpy_sz;
+ Eterm val = *fhp++;
+
+ switch (primary_tag(val)) {
+ case TAG_PRIMARY_IMMED1:
+ *hp++ = val;
+ break;
+ case TAG_PRIMARY_LIST:
+ case TAG_PRIMARY_BOXED:
+ ASSERT(bp->mem <= ptr_val(val));
+ ASSERT(bp->mem + bp->size > ptr_val(val));
+ *hp++ = offset_ptr(val, offs);
+ break;
+ case TAG_PRIMARY_HEADER:
+ *hp++ = val;
+ switch (val & _HEADER_SUBTAG_MASK) {
+ case ARITYVAL_SUBTAG:
+ break;
+ case REFC_BINARY_SUBTAG:
+ oh_list_pp.pbpp = &off_heap->mso;
+ oh_el_p.up = (hp-1);
+ oh_el_next_pp.pbpp = &(oh_el_p.pbp)->next;
+ cpy_sz = thing_arityval(val);
+ goto cpy_words;
+ case FUN_SUBTAG:
+#ifndef HYBRID
+ oh_list_pp.efpp = &off_heap->funs;
+ oh_el_p.up = (hp-1);
+ oh_el_next_pp.efpp = &(oh_el_p.efp)->next;
+#endif
+ cpy_sz = thing_arityval(val);
+ goto cpy_words;
+ case EXTERNAL_PID_SUBTAG:
+ case EXTERNAL_PORT_SUBTAG:
+ case EXTERNAL_REF_SUBTAG:
+ oh_list_pp.etpp = &off_heap->externals;
+ oh_el_p.up = (hp-1);
+ oh_el_next_pp.etpp = &(oh_el_p.etp)->next;
+ cpy_sz = thing_arityval(val);
+ goto cpy_words;
+ default:
+ cpy_sz = header_arity(val);
+
+ cpy_words:
+ sz -= cpy_sz;
+ while (cpy_sz >= 8) {
+ cpy_sz -= 8;
+ *hp++ = *fhp++;
+ *hp++ = *fhp++;
+ *hp++ = *fhp++;
+ *hp++ = *fhp++;
+ *hp++ = *fhp++;
+ *hp++ = *fhp++;
+ *hp++ = *fhp++;
+ *hp++ = *fhp++;
+ }
+ switch (cpy_sz) {
+ case 7: *hp++ = *fhp++;
+ case 6: *hp++ = *fhp++;
+ case 5: *hp++ = *fhp++;
+ case 4: *hp++ = *fhp++;
+ case 3: *hp++ = *fhp++;
+ case 2: *hp++ = *fhp++;
+ case 1: *hp++ = *fhp++;
+ default: break;
+ }
+ if (oh_list_pp.upp) {
+#ifdef HARD_DEBUG
+ Uint *dbg_old_oh_list_p = *oh_list_pp.upp;
+#endif
+ /* Add to offheap list */
+ *oh_el_next_pp.upp = *oh_list_pp.upp;
+ *oh_list_pp.upp = oh_el_p.up;
+ ASSERT(*hpp <= oh_el_p.up);
+ ASSERT(hp > oh_el_p.up);
+#ifdef HARD_DEBUG
+ switch (val & _HEADER_SUBTAG_MASK) {
+ case REFC_BINARY_SUBTAG:
+ ASSERT(off_heap->mso == *oh_list_pp.pbpp);
+ ASSERT(off_heap->mso->next
+ == (ProcBin *) dbg_old_oh_list_p);
+ break;
+#ifndef HYBRID
+ case FUN_SUBTAG:
+ ASSERT(off_heap->funs == *oh_list_pp.efpp);
+ ASSERT(off_heap->funs->next
+ == (ErlFunThing *) dbg_old_oh_list_p);
+ break;
+#endif
+ case EXTERNAL_PID_SUBTAG:
+ case EXTERNAL_PORT_SUBTAG:
+ case EXTERNAL_REF_SUBTAG:
+ ASSERT(off_heap->externals
+ == *oh_list_pp.etpp);
+ ASSERT(off_heap->externals->next
+ == (ExternalThing *) dbg_old_oh_list_p);
+ break;
+ default:
+ ASSERT(0);
+ }
+#endif
+ oh_list_pp.upp = NULL;
+ }
+ break;
+ }
+ break;
+ }
+ }
+
+ ASSERT(bp->size == hp - *hpp);
+ *hpp = hp;
+
+ if (is_not_immed(token)) {
+ ASSERT(bp->mem <= ptr_val(token));
+ ASSERT(bp->mem + bp->size > ptr_val(token));
+ ERL_MESSAGE_TOKEN(msg) = offset_ptr(token, offs);
+#ifdef HARD_DEBUG
+ ASSERT(dbg_thp_start <= ptr_val(ERL_MESSAGE_TOKEN(msg)));
+ ASSERT(hp > ptr_val(ERL_MESSAGE_TOKEN(msg)));
+#endif
+ }
+
+ if (is_not_immed(term)) {
+ ASSERT(bp->mem <= ptr_val(term));
+ ASSERT(bp->mem + bp->size > ptr_val(term));
+ ERL_MESSAGE_TERM(msg) = offset_ptr(term, offs);
+#ifdef HARD_DEBUG
+ ASSERT(dbg_thp_start <= ptr_val(ERL_MESSAGE_TERM(msg)));
+ ASSERT(hp > ptr_val(ERL_MESSAGE_TERM(msg)));
+#endif
+ }
+
+#ifdef HARD_DEBUG
+ {
+ int i, j;
+ {
+ ProcBin *mso = off_heap->mso;
+ i = j = 0;
+ while (mso != dbg_mso_start) {
+ mso = mso->next;
+ i++;
+ }
+ mso = bp->off_heap.mso;
+ while (mso) {
+ mso = mso->next;
+ j++;
+ }
+ ASSERT(i == j);
+ }
+ {
+ ErlFunThing *fun = off_heap->funs;
+ i = j = 0;
+ while (fun != dbg_fun_start) {
+ fun = fun->next;
+ i++;
+ }
+ fun = bp->off_heap.funs;
+ while (fun) {
+ fun = fun->next;
+ j++;
+ }
+ ASSERT(i == j);
+ }
+ {
+ ExternalThing *external = off_heap->externals;
+ i = j = 0;
+ while (external != dbg_external_start) {
+ external = external->next;
+ i++;
+ }
+ external = bp->off_heap.externals;
+ while (external) {
+ external = external->next;
+ j++;
+ }
+ ASSERT(i == j);
+ }
+ }
+#endif
+
+ bp->off_heap.mso = NULL;
+#ifndef HYBRID
+ bp->off_heap.funs = NULL;
+#endif
+ bp->off_heap.externals = NULL;
+ free_message_buffer(bp);
+
+#ifdef HARD_DEBUG
+ ASSERT(eq(ERL_MESSAGE_TERM(msg), dbg_term));
+ ASSERT(eq(ERL_MESSAGE_TOKEN(msg), dbg_token));
+ free_message_buffer(dbg_bp);
+#endif
+
+}
+
+Uint
+erts_msg_attached_data_size_aux(ErlMessage *msg)
+{
+ Sint sz;
+ ASSERT(is_non_value(ERL_MESSAGE_TERM(msg)));
+ ASSERT(msg->data.dist_ext);
+ ASSERT(msg->data.dist_ext->heap_size < 0);
+
+ sz = erts_decode_dist_ext_size(msg->data.dist_ext, 0);
+ if (sz < 0) {
+ /* Bad external; remove it */
+ if (is_not_nil(ERL_MESSAGE_TOKEN(msg))) {
+ ErlHeapFragment *heap_frag;
+ heap_frag = erts_dist_ext_trailer(msg->data.dist_ext);
+ erts_cleanup_offheap(&heap_frag->off_heap);
+ }
+ erts_free_dist_ext_copy(msg->data.dist_ext);
+ msg->data.dist_ext = NULL;
+ return 0;
+ }
+
+ msg->data.dist_ext->heap_size = sz;
+ if (is_not_nil(ERL_MESSAGE_TOKEN(msg))) {
+ ErlHeapFragment *heap_frag;
+ heap_frag = erts_dist_ext_trailer(msg->data.dist_ext);
+ sz += heap_frag->size;
+ }
+ return sz;
+}
+
+void
+erts_move_msg_attached_data_to_heap(Eterm **hpp, ErlOffHeap *ohp, ErlMessage *msg)
+{
+ if (is_value(ERL_MESSAGE_TERM(msg)))
+ erts_move_msg_mbuf_to_heap(hpp, ohp, msg);
+ else if (msg->data.dist_ext) {
+ ASSERT(msg->data.dist_ext->heap_size >= 0);
+ if (is_not_nil(ERL_MESSAGE_TOKEN(msg))) {
+ ErlHeapFragment *heap_frag;
+ heap_frag = erts_dist_ext_trailer(msg->data.dist_ext);
+ ERL_MESSAGE_TOKEN(msg) = copy_struct(ERL_MESSAGE_TOKEN(msg),
+ heap_frag->size,
+ hpp,
+ ohp);
+ erts_cleanup_offheap(&heap_frag->off_heap);
+ }
+ ERL_MESSAGE_TERM(msg) = erts_decode_dist_ext(hpp,
+ ohp,
+ msg->data.dist_ext);
+ erts_free_dist_ext_copy(msg->data.dist_ext);
+ msg->data.dist_ext = NULL;
+ }
+ /* else: bad external detected when calculating size */
+}
+
+/*
+ * Send a local message when sender & receiver processes are known.
+ */
+
+void
+erts_send_message(Process* sender,
+ Process* receiver,
+ ErtsProcLocks *receiver_locks,
+ Eterm message,
+ unsigned flags)
+{
+ Uint msize;
+ ErlHeapFragment* bp = NULL;
+ Eterm token = NIL;
+
+ BM_STOP_TIMER(system);
+ BM_MESSAGE(message,sender,receiver);
+ BM_START_TIMER(send);
+
+ if (SEQ_TRACE_TOKEN(sender) != NIL && !(flags & ERTS_SND_FLG_NO_SEQ_TRACE)) {
+ Eterm* hp;
+
+ BM_SWAP_TIMER(send,size);
+ msize = size_object(message);
+ BM_SWAP_TIMER(size,send);
+
+ seq_trace_update_send(sender);
+ seq_trace_output(SEQ_TRACE_TOKEN(sender), message, SEQ_TRACE_SEND,
+ receiver->id, sender);
+ bp = new_message_buffer(msize + 6 /* TUPLE5 */);
+ hp = bp->mem;
+
+ BM_SWAP_TIMER(send,copy);
+ token = copy_struct(SEQ_TRACE_TOKEN(sender),
+ 6 /* TUPLE5 */,
+ &hp,
+ &bp->off_heap);
+
+ message = copy_struct(message, msize, &hp, &bp->off_heap);
+ BM_MESSAGE_COPIED(msize);
+ BM_SWAP_TIMER(copy,send);
+
+ erts_queue_message(receiver,
+ receiver_locks,
+ bp,
+ message,
+ token);
+ BM_SWAP_TIMER(send,system);
+#ifdef HYBRID
+ } else {
+ ErlMessage* mp = message_alloc();
+ BM_SWAP_TIMER(send,copy);
+#ifdef INCREMENTAL
+ /* TODO: During GC, activate processes if the message resides in
+ * the fromspace and the sender is active. During major
+ * collections, add the message to the gray stack if it resides
+ * in the old generation, the sender is active, and the
+ * receiver is inactive.
+
+ if (!IS_CONST(message) && (ma_gc_flags & GC_CYCLE) &&
+ (ptr_val(message) >= inc_fromspc &&
+ ptr_val(message) < inc_fromend) && INC_IS_ACTIVE(sender))
+ INC_ACTIVATE(receiver);
+ else if (!IS_CONST(message) && (ma_gc_flags & GC_CYCLE) &&
+ (ptr_val(message) >= global_old_heap &&
+ ptr_val(message) < global_old_hend) &&
+ INC_IS_ACTIVE(sender) && !INC_IS_ACTIVE(receiver))
+ Mark message in blackmap and add it to the gray stack
+ */
+
+ if (!IS_CONST(message))
+ INC_ACTIVATE(receiver);
+#endif
+ LAZY_COPY(sender,message);
+ BM_SWAP_TIMER(copy,send);
+ ERL_MESSAGE_TERM(mp) = message;
+ ERL_MESSAGE_TOKEN(mp) = NIL;
+ mp->next = NULL;
+ LINK_MESSAGE(receiver, mp);
+ ACTIVATE(receiver);
+
+ if (receiver->status == P_WAITING) {
+ erts_add_to_runq(receiver);
+ } else if (receiver->status == P_SUSPENDED) {
+ receiver->rstatus = P_RUNABLE;
+ }
+ if (IS_TRACED_FL(receiver, F_TRACE_RECEIVE)) {
+ trace_receive(receiver, message);
+ }
+
+ BM_SWAP_TIMER(send,system);
+ return;
+#else
+ } else if (sender == receiver) {
+ /* Drop message if receiver has a pending exit ... */
+#ifdef ERTS_SMP
+ ErtsProcLocks need_locks = (~(*receiver_locks)
+ & (ERTS_PROC_LOCK_MSGQ
+ | ERTS_PROC_LOCK_STATUS));
+ if (need_locks) {
+ *receiver_locks |= need_locks;
+ if (erts_smp_proc_trylock(receiver, need_locks) == EBUSY) {
+ if (need_locks == ERTS_PROC_LOCK_MSGQ) {
+ erts_smp_proc_unlock(receiver, ERTS_PROC_LOCK_STATUS);
+ need_locks = ERTS_PROC_LOCK_MSGQ|ERTS_PROC_LOCK_STATUS;
+ }
+ erts_smp_proc_lock(receiver, need_locks);
+ }
+ }
+ if (!ERTS_PROC_PENDING_EXIT(receiver))
+#endif
+ {
+ ErlMessage* mp = message_alloc();
+
+ mp->data.attached = NULL;
+ ERL_MESSAGE_TERM(mp) = message;
+ ERL_MESSAGE_TOKEN(mp) = NIL;
+ mp->next = NULL;
+ /*
+ * We move 'in queue' to 'private queue' and place
+ * message at the end of 'private queue' in order
+ * to ensure that the 'in queue' doesn't contain
+ * references into the heap. By ensuring this,
+ * we don't need to include the 'in queue' in
+ * the root set when garbage collecting.
+ */
+
+ ERTS_SMP_MSGQ_MV_INQ2PRIVQ(receiver);
+ LINK_MESSAGE_PRIVQ(receiver, mp);
+
+ if (IS_TRACED_FL(receiver, F_TRACE_RECEIVE)) {
+ trace_receive(receiver, message);
+ }
+ }
+ BM_SWAP_TIMER(send,system);
+ return;
+ } else {
+#ifdef ERTS_SMP
+ ErlOffHeap *ohp;
+ Eterm *hp;
+ BM_SWAP_TIMER(send,size);
+ msize = size_object(message);
+ BM_SWAP_TIMER(size,send);
+ hp = erts_alloc_message_heap(msize,&bp,&ohp,receiver,receiver_locks);
+ BM_SWAP_TIMER(send,copy);
+ message = copy_struct(message, msize, &hp, ohp);
+ BM_MESSAGE_COPIED(msize);
+ BM_SWAP_TIMER(copy,send);
+ erts_queue_message(receiver, receiver_locks, bp, message, token);
+ BM_SWAP_TIMER(send,system);
+#else
+ ErlMessage* mp = message_alloc();
+ Eterm *hp;
+ BM_SWAP_TIMER(send,size);
+ msize = size_object(message);
+ BM_SWAP_TIMER(size,send);
+
+ if (receiver->stop - receiver->htop <= msize) {
+ BM_SWAP_TIMER(send,system);
+ erts_garbage_collect(receiver, msize, receiver->arg_reg, receiver->arity);
+ BM_SWAP_TIMER(system,send);
+ }
+ hp = receiver->htop;
+ receiver->htop = hp + msize;
+ BM_SWAP_TIMER(send,copy);
+ message = copy_struct(message, msize, &hp, &receiver->off_heap);
+ BM_MESSAGE_COPIED(msize);
+ BM_SWAP_TIMER(copy,send);
+ ERL_MESSAGE_TERM(mp) = message;
+ ERL_MESSAGE_TOKEN(mp) = NIL;
+ mp->next = NULL;
+ mp->data.attached = NULL;
+ LINK_MESSAGE(receiver, mp);
+
+ if (receiver->status == P_WAITING) {
+ erts_add_to_runq(receiver);
+ } else if (receiver->status == P_SUSPENDED) {
+ receiver->rstatus = P_RUNABLE;
+ }
+ if (IS_TRACED_FL(receiver, F_TRACE_RECEIVE)) {
+ trace_receive(receiver, message);
+ }
+ BM_SWAP_TIMER(send,system);
+#endif /* #ifndef ERTS_SMP */
+ return;
+#endif /* HYBRID */
+ }
+}
+
+/*
+ * This function delivers an EXIT message to a process
+ * which is trapping EXITs.
+ */
+
+void
+erts_deliver_exit_message(Eterm from, Process *to, ErtsProcLocks *to_locksp,
+ Eterm reason, Eterm token)
+{
+ Eterm mess;
+ Eterm save;
+ Eterm from_copy;
+ Uint sz_reason;
+ Uint sz_token;
+ Uint sz_from;
+ Eterm* hp;
+ Eterm temptoken;
+ ErlHeapFragment* bp = NULL;
+
+ if (token != NIL) {
+
+ ASSERT(is_tuple(token));
+ sz_reason = size_object(reason);
+ sz_token = size_object(token);
+ sz_from = size_object(from);
+ bp = new_message_buffer(sz_reason + sz_from + sz_token + 4);
+ hp = bp->mem;
+ mess = copy_struct(reason, sz_reason, &hp, &bp->off_heap);
+ from_copy = copy_struct(from, sz_from, &hp, &bp->off_heap);
+ save = TUPLE3(hp, am_EXIT, from_copy, mess);
+ hp += 4;
+ /* in this case, the trace token must be updated by the caller */
+ seq_trace_output(token, save, SEQ_TRACE_SEND, to->id, NULL);
+ temptoken = copy_struct(token, sz_token, &hp, &bp->off_heap);
+ erts_queue_message(to, to_locksp, bp, save, temptoken);
+ } else {
+ ErlOffHeap *ohp;
+ sz_reason = size_object(reason);
+ sz_from = IS_CONST(from) ? 0 : size_object(from);
+
+ hp = erts_alloc_message_heap(sz_reason+sz_from+4,
+ &bp,
+ &ohp,
+ to,
+ to_locksp);
+
+ mess = copy_struct(reason, sz_reason, &hp, ohp);
+ from_copy = (IS_CONST(from)
+ ? from
+ : copy_struct(from, sz_from, &hp, ohp));
+ save = TUPLE3(hp, am_EXIT, from_copy, mess);
+ erts_queue_message(to, to_locksp, bp, save, NIL);
+ }
+}
diff --git a/erts/emulator/beam/erl_message.h b/erts/emulator/beam/erl_message.h
new file mode 100644
index 0000000000..f14f14a586
--- /dev/null
+++ b/erts/emulator/beam/erl_message.h
@@ -0,0 +1,251 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 1997-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+#ifndef __ERL_MESSAGE_H__
+#define __ERL_MESSAGE_H__
+
+struct proc_bin;
+struct external_thing_;
+
+/*
+ * This struct represents data that must be updated by structure copy,
+ * but is stored outside of any heap.
+ */
+
+typedef struct erl_off_heap {
+ struct proc_bin* mso; /* List of associated binaries. */
+#ifndef HYBRID /* FIND ME! */
+ struct erl_fun_thing* funs; /* List of funs. */
+#endif
+ struct external_thing_* externals; /* List of external things. */
+ int overhead; /* Administrative overhead (used to force GC). */
+} ErlOffHeap;
+
+#include "external.h"
+#include "erl_process.h"
+
+/*
+ * This struct represents a heap fragment, which is used when there
+ * isn't sufficient room in the process heap and we can't do a GC.
+ */
+
+typedef struct erl_heap_fragment ErlHeapFragment;
+struct erl_heap_fragment {
+ ErlHeapFragment* next; /* Next heap fragment */
+ ErlOffHeap off_heap; /* Off-heap data. */
+ unsigned size; /* Size in words of mem */
+ Eterm mem[1]; /* Data */
+};
+
+#define ERTS_SET_MBUF_HEAP_END(BP, HENDP) \
+do { \
+ unsigned real_size__ = (BP)->size; \
+ ASSERT((BP)->mem <= (HENDP) && (HENDP) <= (BP)->mem + real_size__); \
+ (BP)->size = (HENDP) - (BP)->mem; \
+ /* We do not reallocate since buffer *might* be moved. */ \
+ /* FIXME: Memory count is wrong, but at least it's almost */ \
+ /* right... */ \
+} while (0)
+
+typedef struct erl_mesg {
+ struct erl_mesg* next; /* Next message */
+ union {
+ ErtsDistExternal *dist_ext;
+ ErlHeapFragment *heap_frag;
+ void *attached;
+ } data;
+ Eterm m[2]; /* m[0] = message, m[1] = seq trace token */
+} ErlMessage;
+
+#define ERL_MESSAGE_TERM(mp) ((mp)->m[0])
+#define ERL_MESSAGE_TOKEN(mp) ((mp)->m[1])
+
+/* Size of default message buffer (erl_message.c) */
+#define ERL_MESSAGE_BUF_SZ 500
+
+typedef struct {
+ ErlMessage* first;
+ ErlMessage** last; /* point to the last next pointer */
+ ErlMessage** save;
+ int len; /* queue length */
+} ErlMessageQueue;
+
+#ifdef ERTS_SMP
+
+typedef struct {
+ ErlMessage* first;
+ ErlMessage** last; /* point to the last next pointer */
+ int len; /* queue length */
+} ErlMessageInQueue;
+
+#endif
+
+/* Get "current" message */
+#define PEEK_MESSAGE(p) (*(p)->msg.save)
+
+
+/* Add message last in private message queue */
+#define LINK_MESSAGE_PRIVQ(p, mp) do { \
+ *(p)->msg.last = (mp); \
+ (p)->msg.last = &(mp)->next; \
+ (p)->msg.len++; \
+} while(0)
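+
+/*
+ * Note that 'last' always points at the location of the terminating
+ * 'next' pointer (initially &first), so appending is O(1) and needs no
+ * empty-queue special case. A minimal sketch of the same idiom follows
+ * (not compiled; standalone hypothetical names, plain C):
+ */
+#if 0
+struct node { struct node *next; };
+struct queue { struct node *first; struct node **last; };
+
+static void queue_init(struct queue *q)
+{
+    q->first = NULL;
+    q->last = &q->first;    /* empty queue: 'last' points at 'first' */
+}
+
+static void enqueue(struct queue *q, struct node *n)
+{
+    n->next = NULL;
+    *q->last = n;           /* patch previous tail's next (or 'first') */
+    q->last = &n->next;     /* remember the new tail slot */
+}
+#endif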
+
+
+#ifdef ERTS_SMP
+
+/* Move in message queue to end of private message queue */
+#define ERTS_SMP_MSGQ_MV_INQ2PRIVQ(P) \
+do { \
+ if ((P)->msg_inq.first) { \
+ *(P)->msg.last = (P)->msg_inq.first; \
+ (P)->msg.last = (P)->msg_inq.last; \
+ (P)->msg.len += (P)->msg_inq.len; \
+ (P)->msg_inq.first = NULL; \
+ (P)->msg_inq.last = &(P)->msg_inq.first; \
+ (P)->msg_inq.len = 0; \
+ } \
+} while (0)
+
+/* Add message last in message queue */
+#define LINK_MESSAGE(p, mp) do { \
+ *(p)->msg_inq.last = (mp); \
+ (p)->msg_inq.last = &(mp)->next; \
+ (p)->msg_inq.len++; \
+} while(0)
+
+#else
+
+#define ERTS_SMP_MSGQ_MV_INQ2PRIVQ(P)
+
+/* Add message last in message queue */
+#define LINK_MESSAGE(p, mp) LINK_MESSAGE_PRIVQ((p), (mp))
+
+#endif
+
+/* Unlink current message */
+#define UNLINK_MESSAGE(p,msgp) do { \
+ ErlMessage* __mp = (msgp)->next; \
+ *(p)->msg.save = __mp; \
+ (p)->msg.len--; \
+ if (__mp == NULL) \
+ (p)->msg.last = (p)->msg.save; \
+} while(0)
+
+/* Reset message save point (after receive match) */
+#define JOIN_MESSAGE(p) \
+ (p)->msg.save = &(p)->msg.first
+
+/* Save current message */
+#define SAVE_MESSAGE(p) \
+ (p)->msg.save = &(*(p)->msg.save)->next
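+
+/*
+ * Rough sketch of how the save pointer is used during a selective
+ * receive (not compiled; match_ok() is a hypothetical predicate, the
+ * real scan lives in the emulator loop): skip non-matching messages
+ * with SAVE_MESSAGE, and reset the scan with JOIN_MESSAGE once a
+ * matching message has been unlinked.
+ */
+#if 0
+    ErlMessage *msgp;
+    while ((msgp = PEEK_MESSAGE(p)) != NULL) {
+        if (match_ok(ERL_MESSAGE_TERM(msgp))) {
+            UNLINK_MESSAGE(p, msgp);  /* take it out of the queue */
+            JOIN_MESSAGE(p);          /* next receive scans from the start */
+            break;
+        }
+        SAVE_MESSAGE(p);              /* leave it; advance the scan */
+    }
+#endif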
+
+/*
+ * ErtsMoveMsgAttachmentIntoProc() moves data attached to a message
+ * onto the heap of a process. The attached data is the content of
+ * the message, either on the internal format or on the external
+ * format, and also possibly a seq trace token on the internal format.
+ * If the message content is on the external format, the decode might
+ * fail. If the decoding fails, ERL_MESSAGE_TERM(M) will contain
+ * THE_NON_VALUE. That is, ERL_MESSAGE_TERM(M) *has* to be checked
+ * afterwards and taken care of appropriately.
+ *
+ * ErtsMoveMsgAttachmentIntoProc() will shallow copy to heap if
+ * possible; otherwise, move to heap via garbage collection.
+ *
+ * ErtsMoveMsgAttachmentIntoProc() is used when receiving messages
+ * in process_main() and in hipe_check_get_msg().
+ */
+
+#define ErtsMoveMsgAttachmentIntoProc(M, P, ST, HT, FC, SWPO, SWPI) \
+do { \
+ if ((M)->data.attached) { \
+ Uint need__ = erts_msg_attached_data_size((M)); \
+ if ((ST) - (HT) >= need__) { \
+ Uint *htop__ = (HT); \
+ erts_move_msg_attached_data_to_heap(&htop__, &MSO((P)), (M));\
+ ASSERT(htop__ - (HT) <= need__); \
+ (HT) = htop__; \
+ } \
+ else { \
+ { SWPO ; } \
+ (FC) -= erts_garbage_collect((P), 0, NULL, 0); \
+ { SWPI ; } \
+ } \
+ ASSERT(!(M)->data.attached); \
+ } \
+} while (0)
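+
+/*
+ * Hedged usage sketch (not compiled): a receive loop is expected to
+ * invoke the macro roughly as below and then check for a failed decode.
+ * SWAPOUT/SWAPIN stand for whatever register swapping the caller needs;
+ * the exact fields passed for ST/HT/FC here are illustrative.
+ */
+#if 0
+    ErlMessage *msgp = PEEK_MESSAGE(c_p);
+    ErtsMoveMsgAttachmentIntoProc(msgp, c_p, c_p->stop, HEAP_TOP(c_p),
+                                  c_p->fcalls, SWAPOUT, SWAPIN);
+    if (is_non_value(ERL_MESSAGE_TERM(msgp))) {
+        /* decode of external format failed: remove the bad message */
+        UNLINK_MESSAGE(c_p, msgp);
+        free_message(msgp);
+    }
+#endif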
+
+#define ERTS_SND_FLG_NO_SEQ_TRACE (((unsigned) 1) << 0)
+
+#define ERTS_HEAP_FRAG_SIZE(DATA_WORDS) \
+ (sizeof(ErlHeapFragment) - sizeof(Eterm) + (DATA_WORDS)*sizeof(Eterm))
+#define ERTS_INIT_HEAP_FRAG(HEAP_FRAG_P, DATA_WORDS) \
+do { \
+ (HEAP_FRAG_P)->next = NULL; \
+ (HEAP_FRAG_P)->size = (DATA_WORDS); \
+ (HEAP_FRAG_P)->off_heap.mso = NULL; \
+ (HEAP_FRAG_P)->off_heap.funs = NULL; \
+ (HEAP_FRAG_P)->off_heap.externals = NULL; \
+ (HEAP_FRAG_P)->off_heap.overhead = 0; \
+} while (0)
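+
+/*
+ * ErlHeapFragment uses the classic "one-element trailing array" trick:
+ * an allocation subtracts the placeholder Eterm and adds the real
+ * payload, which is exactly what ERTS_HEAP_FRAG_SIZE computes. A tiny
+ * sketch (not compiled; plain malloc stands in for ERTS_HEAP_ALLOC):
+ */
+#if 0
+    ErlHeapFragment *frag =
+        (ErlHeapFragment *) malloc(ERTS_HEAP_FRAG_SIZE(3));
+    ERTS_INIT_HEAP_FRAG(frag, 3);    /* frag->mem[0..2] now usable */
+#endif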
+
+void init_message(void);
+void free_message(ErlMessage *);
+ErlHeapFragment* new_message_buffer(Uint);
+ErlHeapFragment* erts_resize_message_buffer(ErlHeapFragment *, Uint,
+ Eterm *, Uint);
+void free_message_buffer(ErlHeapFragment *);
+void erts_queue_dist_message(Process*, ErtsProcLocks*, ErtsDistExternal *, Eterm);
+void erts_queue_message(Process*, ErtsProcLocks*, ErlHeapFragment*, Eterm, Eterm);
+void erts_deliver_exit_message(Eterm, Process*, ErtsProcLocks *, Eterm, Eterm);
+void erts_send_message(Process*, Process*, ErtsProcLocks*, Eterm, unsigned);
+void erts_link_mbuf_to_proc(Process *proc, ErlHeapFragment *bp);
+
+void erts_move_msg_mbuf_to_heap(Eterm**, ErlOffHeap*, ErlMessage *);
+
+Uint erts_msg_attached_data_size_aux(ErlMessage *msg);
+void erts_move_msg_attached_data_to_heap(Eterm **, ErlOffHeap *, ErlMessage *);
+
+Eterm erts_msg_distext2heap(Process *, ErtsProcLocks *, ErlHeapFragment **,
+ Eterm *, ErtsDistExternal *);
+
+ERTS_GLB_INLINE Uint erts_msg_attached_data_size(ErlMessage *msg);
+
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+ERTS_GLB_INLINE Uint erts_msg_attached_data_size(ErlMessage *msg)
+{
+ ASSERT(msg->data.attached);
+ if (is_value(ERL_MESSAGE_TERM(msg)))
+ return msg->data.heap_frag->size;
+ else if (msg->data.dist_ext->heap_size < 0)
+ return erts_msg_attached_data_size_aux(msg);
+ else {
+ Uint sz = msg->data.dist_ext->heap_size;
+ if (is_not_nil(ERL_MESSAGE_TOKEN(msg))) {
+ ErlHeapFragment *heap_frag;
+ heap_frag = erts_dist_ext_trailer(msg->data.dist_ext);
+ sz += heap_frag->size;
+ }
+ return sz;
+ }
+}
+#endif
+
+#endif
diff --git a/erts/emulator/beam/erl_monitors.c b/erts/emulator/beam/erl_monitors.c
new file mode 100644
index 0000000000..d873c7a701
--- /dev/null
+++ b/erts/emulator/beam/erl_monitors.c
@@ -0,0 +1,1019 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2004-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/**************************************************************************
+ * Monitors and links data structure manipulation.
+ * Monitors and links are organized as AVL trees with the reference as
+ * key in the monitor case and the pid of the linked process as key in the
+ * link case. In lookups, the ordering of references is somewhat special. Local
+ * references are strictly smaller than remote references and are sorted
+ * by inlined comparison functionality. Remote references are handled by the
+ * usual cmp function.
+ * Each Monitor is tagged with different tags depending on which end of the
+ * monitor it is.
+ * A monitor is removed either explicitly by reference or all monitors are
+ * removed when the process exits. No need to access the monitor by pid.
+ **************************************************************************/
+
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+
+#include "sys.h"
+#include "erl_vm.h"
+#include "global.h"
+#include "erl_process.h"
+#include "error.h"
+#include "erl_db.h"
+#include "bif.h"
+#include "big.h"
+#include "erl_monitors.h"
+
+#define STACK_NEED 50
+#define MAX_MONITORS 0xFFFFFFFFUL
+
+#define DIR_LEFT 0
+#define DIR_RIGHT 1
+#define DIR_END 2
+
+static erts_smp_atomic_t tot_link_lh_size;
+
+/* Implements the sort order in monitor trees, which is different from
+ the ordinary term order.
+ No short local refs should ever exist (the refs are created by the BIFs
+ at runtime), therefore:
+ All local refs are less than external refs,
+ local refs are inline-compared,
+ external refs are compared by cmp */
+
+#if 0
+#define CMP_MON_REF(Ref1,Ref2) \
+cmp((Ref1),(Ref2)) /* XXX, the inline comparison yet to be done */
+#else
+#define CMP_MON_REF(Ref1,Ref2) cmp_mon_ref((Ref1),(Ref2))
+#endif
+
+static ERTS_INLINE int cmp_mon_ref(Eterm ref1, Eterm ref2)
+{
+ Eterm *b1, *b2;
+
+
+ b1 = boxed_val(ref1);
+ b2 = boxed_val(ref2);
+ if (is_ref_thing_header(*b1)) {
+ if (is_ref_thing_header(*b2)) {
+ return memcmp(b1+1,b2+1,ERTS_REF_WORDS*sizeof(Uint));
+ }
+ return -1;
+ }
+ if (is_ref_thing_header(*b2)) {
+ return 1;
+ }
+ return cmp(ref1,ref2);
+}
+
+#define CP_LINK_VAL(To, Hp, From) \
+do { \
+ if (IS_CONST(From)) \
+ (To) = (From); \
+ else { \
+ Uint i__; \
+ Uint len__; \
+ ASSERT((Hp)); \
+ ASSERT(is_internal_ref((From)) || is_external((From))); \
+ (To) = make_boxed((Hp)); \
+ len__ = thing_arityval(*boxed_val((From))) + 1; \
+ for(i__ = 0; i__ < len__; i__++) \
+ (*((Hp)++)) = boxed_val((From))[i__]; \
+ if (is_external((To))) { \
+ external_thing_ptr((To))->next = NULL; \
+ erts_refc_inc(&(external_thing_ptr((To))->node->refc), 2);\
+ } \
+ } \
+} while (0)
+
+static ErtsMonitor *create_monitor(Uint type, Eterm ref, Eterm pid, Eterm name)
+{
+ Uint mon_size = ERTS_MONITOR_SIZE;
+ ErtsMonitor *n;
+ Eterm *hp;
+
+ mon_size += NC_HEAP_SIZE(ref);
+ if (!IS_CONST(pid)) {
+ mon_size += NC_HEAP_SIZE(pid);
+ }
+
+ if (mon_size <= ERTS_MONITOR_SH_SIZE) {
+ n = (ErtsMonitor *) erts_alloc(ERTS_ALC_T_MONITOR_SH,
+ mon_size*sizeof(Uint));
+ } else {
+ n = (ErtsMonitor *) erts_alloc(ERTS_ALC_T_MONITOR_LH,
+ mon_size*sizeof(Uint));
+ erts_smp_atomic_add(&tot_link_lh_size, mon_size*sizeof(Uint));
+ }
+ hp = n->heap;
+
+ n->left = n->right = NULL; /* Always the same initial value*/
+ n->type = (Uint16) type;
+ n->balance = 0; /* Always the same initial value */
+ n->name = name; /* atom() or [] */
+ CP_LINK_VAL(n->ref, hp, ref); /* XXX Unnecessary check, never immediate */
+ CP_LINK_VAL(n->pid, hp, pid);
+
+ return n;
+}
+
+static ErtsLink *create_link(Uint type, Eterm pid)
+{
+ Uint lnk_size = ERTS_LINK_SIZE;
+ ErtsLink *n;
+ Eterm *hp;
+
+ if (!IS_CONST(pid)) {
+ lnk_size += NC_HEAP_SIZE(pid);
+ }
+
+ if (lnk_size <= ERTS_LINK_SH_SIZE) {
+ n = (ErtsLink *) erts_alloc(ERTS_ALC_T_NLINK_SH,
+ lnk_size*sizeof(Uint));
+ } else {
+ n = (ErtsLink *) erts_alloc(ERTS_ALC_T_NLINK_LH,
+ lnk_size*sizeof(Uint));
+ erts_smp_atomic_add(&tot_link_lh_size, lnk_size*sizeof(Uint));
+ }
+ hp = n->heap;
+
+ n->left = n->right = NULL; /* Always the same initial value*/
+ n->type = (Uint16) type;
+ n->balance = 0; /* Always the same initial value */
+ if (n->type == LINK_NODE) {
+ ERTS_LINK_REFC(n) = 0;
+ } else {
+ ERTS_LINK_ROOT(n) = NULL;
+ }
+ CP_LINK_VAL(n->pid, hp, pid);
+
+ return n;
+}
+
+#undef CP_LINK_VAL
+
+static ErtsSuspendMonitor *create_suspend_monitor(Eterm pid)
+{
+ ErtsSuspendMonitor *smon = erts_alloc(ERTS_ALC_T_SUSPEND_MON,
+ sizeof(ErtsSuspendMonitor));
+ smon->left = smon->right = NULL; /* Always the same initial value */
+ smon->balance = 0; /* Always the same initial value */
+ smon->pending = 0;
+ smon->active = 0;
+ smon->pid = pid;
+ return smon;
+}
+
+void
+erts_init_monitors(void)
+{
+ erts_smp_atomic_init(&tot_link_lh_size, 0);
+}
+
+Uint
+erts_tot_link_lh_size(void)
+{
+ return (Uint) erts_smp_atomic_read(&tot_link_lh_size);
+}
+
+void erts_destroy_monitor(ErtsMonitor *mon)
+{
+ Uint mon_size = ERTS_MONITOR_SIZE;
+ ErlNode *node;
+
+ ASSERT(!IS_CONST(mon->ref));
+ mon_size += NC_HEAP_SIZE(mon->ref);
+ if (is_external(mon->ref)) {
+ node = external_thing_ptr(mon->ref)->node;
+ erts_deref_node_entry(node);
+ }
+ if (!IS_CONST(mon->pid)) {
+ mon_size += NC_HEAP_SIZE(mon->pid);
+ if (is_external(mon->pid)) {
+ node = external_thing_ptr(mon->pid)->node;
+ erts_deref_node_entry(node);
+ }
+ }
+ if (mon_size <= ERTS_MONITOR_SH_SIZE) {
+ erts_free(ERTS_ALC_T_MONITOR_SH, (void *) mon);
+ } else {
+ erts_free(ERTS_ALC_T_MONITOR_LH, (void *) mon);
+ erts_smp_atomic_add(&tot_link_lh_size, -1*mon_size*sizeof(Uint));
+ }
+}
+
+void erts_destroy_link(ErtsLink *lnk)
+{
+ Uint lnk_size = ERTS_LINK_SIZE;
+ ErlNode *node;
+
+ ASSERT(lnk->type == LINK_NODE || ERTS_LINK_ROOT(lnk) == NULL);
+
+ if (!IS_CONST(lnk->pid)) {
+ lnk_size += NC_HEAP_SIZE(lnk->pid);
+ if (is_external(lnk->pid)) {
+ node = external_thing_ptr(lnk->pid)->node;
+ erts_deref_node_entry(node);
+ }
+ }
+ if (lnk_size <= ERTS_LINK_SH_SIZE) {
+ erts_free(ERTS_ALC_T_NLINK_SH, (void *) lnk);
+ } else {
+ erts_free(ERTS_ALC_T_NLINK_LH, (void *) lnk);
+ erts_smp_atomic_add(&tot_link_lh_size, -1*lnk_size*sizeof(Uint));
+ }
+}
+
+void erts_destroy_suspend_monitor(ErtsSuspendMonitor *smon)
+{
+ erts_free(ERTS_ALC_T_SUSPEND_MON, smon);
+}
+
+static void insertion_rotation(int dstack[], int dpos,
+ void *tstack[], int tpos,
+ int state) {
+
+ ErtsMonitorOrLink **this;
+ ErtsMonitorOrLink *p1, *p2, *p;
+ int dir;
+
+ while (state && ( dir = dstack[--dpos] ) != DIR_END) {
+ this = tstack[--tpos];
+ p = *this;
+ if (dir == DIR_LEFT) {
+ switch (p->balance) {
+ case 1:
+ p->balance = 0;
+ state = 0;
+ break;
+ case 0:
+ p->balance = -1;
+ break;
+ case -1: /* The icky case */
+ p1 = p->left;
+ if (p1->balance == -1) { /* Single LL rotation */
+ p->left = p1->right;
+ p1->right = p;
+ p->balance = 0;
+ (*this) = p1;
+ } else { /* Double RR rotation */
+ p2 = p1->right;
+ p1->right = p2->left;
+ p2->left = p1;
+ p->left = p2->right;
+ p2->right = p;
+ p->balance = (p2->balance == -1) ? +1 : 0;
+ p1->balance = (p2->balance == 1) ? -1 : 0;
+ (*this) = p2;
+ }
+ (*this)->balance = 0;
+ state = 0;
+ break;
+ }
+ } else { /* dir == DIR_RIGHT */
+ switch (p->balance) {
+ case -1:
+ p->balance = 0;
+ state = 0;
+ break;
+ case 0:
+ p->balance = 1;
+ break;
+ case 1:
+ p1 = p->right;
+ if (p1->balance == 1) { /* Single RR rotation */
+ p->right = p1->left;
+ p1->left = p;
+ p->balance = 0;
+ (*this) = p1;
+ } else { /* Double RL rotation */
+ p2 = p1->left;
+ p1->left = p2->right;
+ p2->right = p1;
+ p->right = p2->left;
+ p2->left = p;
+ p->balance = (p2->balance == 1) ? -1 : 0;
+ p1->balance = (p2->balance == -1) ? 1 : 0;
+ (*this) = p2;
+ }
+ (*this)->balance = 0;
+ state = 0;
+ break;
+ }
+ }
+ }
+}
+
+void erts_add_monitor(ErtsMonitor **root, Uint type, Eterm ref, Eterm pid,
+ Eterm name)
+{
+ void *tstack[STACK_NEED];
+ int tpos = 0;
+ int dstack[STACK_NEED+1];
+ int dpos = 1;
+ int state = 0;
+ ErtsMonitor **this = root;
+ Sint c;
+
+ dstack[0] = DIR_END;
+ for (;;) {
+ if (!*this) { /* Found our place */
+ state = 1;
+ *this = create_monitor(type,ref,pid,name);
+ break;
+ } else if ((c = CMP_MON_REF(ref,(*this)->ref)) < 0) {
+ /* go left */
+ dstack[dpos++] = DIR_LEFT;
+ tstack[tpos++] = this;
+ this = &((*this)->left);
+ } else if (c > 0) { /* go right */
+ dstack[dpos++] = DIR_RIGHT;
+ tstack[tpos++] = this;
+ this = &((*this)->right);
+ } else { /* Equal key is an error for monitors */
+ erl_exit(1,"Insertion of already present monitor!");
+ break;
+ }
+ }
+ insertion_rotation(dstack, dpos, tstack, tpos, state);
+}
+
+
+/* Returns 0 if OK, < 0 if already present */
+int erts_add_link(ErtsLink **root, Uint type, Eterm pid)
+{
+ void *tstack[STACK_NEED];
+ int tpos = 0;
+ int dstack[STACK_NEED+1];
+ int dpos = 1;
+ int state = 0;
+ ErtsLink **this = root;
+ Sint c;
+
+ dstack[0] = DIR_END;
+ for (;;) {
+ if (!*this) { /* Found our place */
+ state = 1;
+ *this = create_link(type,pid);
+ break;
+ } else if ((c = cmp(pid,(*this)->pid)) < 0) {
+ /* go left */
+ dstack[dpos++] = DIR_LEFT;
+ tstack[tpos++] = this;
+ this = &((*this)->left);
+ } else if (c > 0) { /* go right */
+ dstack[dpos++] = DIR_RIGHT;
+ tstack[tpos++] = this;
+ this = &((*this)->right);
+ } else { /* Already present: report failure */
+ return -1;
+ }
+ }
+ insertion_rotation(dstack, dpos, tstack, tpos, state);
+ return 0;
+}
+
+ErtsSuspendMonitor *
+erts_add_or_lookup_suspend_monitor(ErtsSuspendMonitor **root, Eterm pid)
+{
+ void *tstack[STACK_NEED];
+ int tpos = 0;
+ int dstack[STACK_NEED+1];
+ int dpos = 1;
+ int state = 0;
+ ErtsSuspendMonitor **this = root;
+ ErtsSuspendMonitor *res;
+ Sint c;
+
+ dstack[0] = DIR_END;
+ for (;;) {
+ if (!*this) { /* Found our place */
+ state = 1;
+ res = *this = create_suspend_monitor(pid);
+ break;
+ } else if ((c = cmp(pid,(*this)->pid)) < 0) {
+ /* go left */
+ dstack[dpos++] = DIR_LEFT;
+ tstack[tpos++] = this;
+ this = &((*this)->left);
+ } else if (c > 0) { /* go right */
+ dstack[dpos++] = DIR_RIGHT;
+ tstack[tpos++] = this;
+ this = &((*this)->right);
+ } else { /* Already here... */
+ ASSERT((*this)->pid == pid);
+ return *this;
+ }
+ }
+ insertion_rotation(dstack, dpos, tstack, tpos, state);
+ return res;
+}
+
+
+/* Returns the new or old link structure */
+ErtsLink *erts_add_or_lookup_link(ErtsLink **root, Uint type, Eterm pid)
+{
+ void *tstack[STACK_NEED];
+ int tpos = 0;
+ int dstack[STACK_NEED+1];
+ int dpos = 1;
+ int state = 0;
+ ErtsLink **this = root;
+ Sint c;
+ ErtsLink *ret = NULL;
+
+ dstack[0] = DIR_END;
+ for (;;) {
+ if (!*this) { /* Found our place */
+ state = 1;
+ *this = create_link(type,pid);
+ ret = *this;
+ break;
+ } else if ((c = cmp(pid,(*this)->pid)) < 0) {
+ /* go left */
+ dstack[dpos++] = DIR_LEFT;
+ tstack[tpos++] = this;
+ this = &((*this)->left);
+ } else if (c > 0) { /* go right */
+ dstack[dpos++] = DIR_RIGHT;
+ tstack[tpos++] = this;
+ this = &((*this)->right);
+ } else { /* Already present: return the existing link */
+ return *this;
+ }
+ }
+ insertion_rotation(dstack, dpos, tstack, tpos, state);
+ return ret;
+}
+
+
+/*
+ * Deletion helpers
+ */
+static int balance_left(ErtsMonitorOrLink **this)
+{
+ ErtsMonitorOrLink *p, *p1, *p2;
+ int b1, b2, h = 1;
+
+ p = *this;
+ switch (p->balance) {
+ case -1:
+ p->balance = 0;
+ break;
+ case 0:
+ p->balance = 1;
+ h = 0;
+ break;
+ case 1:
+ p1 = p->right;
+ b1 = p1->balance;
+ if (b1 >= 0) { /* Single RR rotation */
+ p->right = p1->left;
+ p1->left = p;
+ if (b1 == 0) {
+ p->balance = 1;
+ p1->balance = -1;
+ h = 0;
+ } else {
+ p->balance = p1->balance = 0;
+ }
+ (*this) = p1;
+ } else { /* Double RL rotation */
+ p2 = p1->left;
+ b2 = p2->balance;
+ p1->left = p2->right;
+ p2->right = p1;
+ p->right = p2->left;
+ p2->left = p;
+ p->balance = (b2 == 1) ? -1 : 0;
+ p1->balance = (b2 == -1) ? 1 : 0;
+ p2->balance = 0;
+ (*this) = p2;
+ }
+ break;
+ }
+ return h;
+}
+
+static int balance_right(ErtsMonitorOrLink **this)
+{
+ ErtsMonitorOrLink *p, *p1, *p2;
+ int b1, b2, h = 1;
+
+ p = *this;
+ switch (p->balance) {
+ case 1:
+ p->balance = 0;
+ break;
+ case 0:
+ p->balance = -1;
+ h = 0;
+ break;
+ case -1:
+ p1 = p->left;
+ b1 = p1->balance;
+ if (b1 <= 0) { /* Single LL rotation */
+ p->left = p1->right;
+ p1->right = p;
+ if (b1 == 0) {
+ p->balance = -1;
+ p1->balance = 1;
+ h = 0;
+ } else {
+ p->balance = p1->balance = 0;
+ }
+ (*this) = p1;
+ } else { /* Double LR rotation */
+ p2 = p1->right;
+ b2 = p2->balance;
+ p1->right = p2->left;
+ p2->left = p1;
+ p->left = p2->right;
+ p2->right = p;
+ p->balance = (b2 == -1) ? 1 : 0;
+ p1->balance = (b2 == 1) ? -1 : 0;
+ p2->balance = 0;
+ (*this) = p2;
+ }
+ }
+ return h;
+}
+
+static int delsub(ErtsMonitorOrLink **this)
+{
+ ErtsMonitorOrLink **tstack[STACK_NEED];
+ int tpos = 0;
+ ErtsMonitorOrLink *q = (*this);
+ ErtsMonitorOrLink **r = &(q->left);
+ int h;
+
+ /*
+ * Walk down the tree to the right and search
+ * for a void right child, pick that child out
+ * and return it to be put in the deleted
+ * object's place.
+ */
+
+ while ((*r)->right != NULL) {
+ tstack[tpos++] = r;
+ r = &((*r)->right);
+ }
+ *this = *r;
+ *r = (*r)->left;
+ (*this)->left = q->left;
+ (*this)->right = q->right;
+ (*this)->balance = q->balance;
+ tstack[0] = &((*this)->left);
+ h = 1;
+ while (tpos && h) {
+ r = tstack[--tpos];
+ h = balance_right(r);
+ }
+ return h;
+}
+
+ErtsMonitor *erts_remove_monitor(ErtsMonitor **root, Eterm ref)
+{
+ ErtsMonitor **tstack[STACK_NEED];
+ int tpos = 0;
+ int dstack[STACK_NEED+1];
+ int dpos = 1;
+ int state = 0;
+ ErtsMonitor **this = root;
+ Sint c;
+ int dir;
+ ErtsMonitor *q = NULL;
+
+ dstack[0] = DIR_END;
+ for (;;) {
+ if (!*this) { /* Failure */
+ return NULL;
+ } else if ((c = CMP_MON_REF(ref,(*this)->ref)) < 0) {
+ dstack[dpos++] = DIR_LEFT;
+ tstack[tpos++] = this;
+ this = &((*this)->left);
+ } else if (c > 0) { /* go right */
+ dstack[dpos++] = DIR_RIGHT;
+ tstack[tpos++] = this;
+ this = &((*this)->right);
+ } else { /* Equal key, found the one to delete */
+ q = (*this);
+ if (q->right == NULL) {
+ (*this) = q->left;
+ state = 1;
+ } else if (q->left == NULL) {
+ (*this) = q->right;
+ state = 1;
+ } else {
+ dstack[dpos++] = DIR_LEFT;
+ tstack[tpos++] = this;
+ state = delsub((ErtsMonitorOrLink **) this);
+ }
+ break;
+ }
+ }
+ while (state && ( dir = dstack[--dpos] ) != DIR_END) {
+ this = tstack[--tpos];
+ if (dir == DIR_LEFT) {
+ state = balance_left((ErtsMonitorOrLink **) this);
+ } else {
+ state = balance_right((ErtsMonitorOrLink **) this);
+ }
+ }
+ return q;
+}
+
+ErtsLink *erts_remove_link(ErtsLink **root, Eterm pid)
+{
+ ErtsLink **tstack[STACK_NEED];
+ int tpos = 0;
+ int dstack[STACK_NEED+1];
+ int dpos = 1;
+ int state = 0;
+ ErtsLink **this = root;
+ Sint c;
+ int dir;
+ ErtsLink *q = NULL;
+
+ dstack[0] = DIR_END;
+ for (;;) {
+ if (!*this) { /* Failure */
+ return NULL;
+ } else if ((c = cmp(pid,(*this)->pid)) < 0) {
+ dstack[dpos++] = DIR_LEFT;
+ tstack[tpos++] = this;
+ this = &((*this)->left);
+ } else if (c > 0) { /* go right */
+ dstack[dpos++] = DIR_RIGHT;
+ tstack[tpos++] = this;
+ this = &((*this)->right);
+ } else { /* Equal key, found the one to delete */
+ q = (*this);
+ if (q->right == NULL) {
+ (*this) = q->left;
+ state = 1;
+ } else if (q->left == NULL) {
+ (*this) = q->right;
+ state = 1;
+ } else {
+ dstack[dpos++] = DIR_LEFT;
+ tstack[tpos++] = this;
+ state = delsub((ErtsMonitorOrLink **) this);
+ }
+ break;
+ }
+ }
+ while (state && ( dir = dstack[--dpos] ) != DIR_END) {
+ this = tstack[--tpos];
+ if (dir == DIR_LEFT) {
+ state = balance_left((ErtsMonitorOrLink **) this);
+ } else {
+ state = balance_right((ErtsMonitorOrLink **) this);
+ }
+ }
+ return q;
+}
+
+void
+erts_delete_suspend_monitor(ErtsSuspendMonitor **root, Eterm pid)
+{
+ ErtsSuspendMonitor **tstack[STACK_NEED];
+ int tpos = 0;
+ int dstack[STACK_NEED+1];
+ int dpos = 1;
+ int state = 0;
+ ErtsSuspendMonitor **this = root;
+ Sint c;
+ int dir;
+ ErtsSuspendMonitor *q = NULL;
+
+ dstack[0] = DIR_END;
+ for (;;) {
+ if (!*this) { /* Nothing found */
+ return;
+ } else if ((c = cmp(pid,(*this)->pid)) < 0) {
+ dstack[dpos++] = DIR_LEFT;
+ tstack[tpos++] = this;
+ this = &((*this)->left);
+ } else if (c > 0) { /* go right */
+ dstack[dpos++] = DIR_RIGHT;
+ tstack[tpos++] = this;
+ this = &((*this)->right);
+ } else { /* Equal key, found the one to delete */
+ q = (*this);
+ ASSERT(q->pid == pid);
+ if (q->right == NULL) {
+ (*this) = q->left;
+ state = 1;
+ } else if (q->left == NULL) {
+ (*this) = q->right;
+ state = 1;
+ } else {
+ dstack[dpos++] = DIR_LEFT;
+ tstack[tpos++] = this;
+ state = delsub((ErtsMonitorOrLink **) this);
+ }
+ erts_destroy_suspend_monitor(q);
+ break;
+ }
+ }
+ while (state && ( dir = dstack[--dpos] ) != DIR_END) {
+ this = tstack[--tpos];
+ if (dir == DIR_LEFT) {
+ state = balance_left((ErtsMonitorOrLink **) this);
+ } else {
+ state = balance_right((ErtsMonitorOrLink **) this);
+ }
+ }
+}
+
+ErtsMonitor *erts_lookup_monitor(ErtsMonitor *root, Eterm ref)
+{
+ Sint c;
+
+ for (;;) {
+ if (root == NULL || (c = CMP_MON_REF(ref,root->ref)) == 0) {
+ return root;
+ } else if (c < 0) {
+ root = root->left;
+ } else { /* c > 0 */
+ root = root->right;
+ }
+ }
+}
+
+ErtsLink *erts_lookup_link(ErtsLink *root, Eterm pid)
+{
+ Sint c;
+
+ for (;;) {
+ if (root == NULL || (c = cmp(pid,root->pid)) == 0) {
+ return root;
+ } else if (c < 0) {
+ root = root->left;
+ } else { /* c > 0 */
+ root = root->right;
+ }
+ }
+}
+
+ErtsSuspendMonitor *
+erts_lookup_suspend_monitor(ErtsSuspendMonitor *root, Eterm pid)
+{
+ Sint c;
+
+ for (;;) {
+ if (root == NULL || (c = cmp(pid,root->pid)) == 0) {
+ return root;
+ } else if (c < 0) {
+ root = root->left;
+ } else { /* c > 0 */
+ root = root->right;
+ }
+ }
+}
+
+void erts_sweep_monitors(ErtsMonitor *root,
+ void (*doit)(ErtsMonitor *, void *),
+ void *context)
+{
+ ErtsMonitor *tstack[STACK_NEED];
+ int tpos = 0;
+ int dstack[STACK_NEED+1];
+ int dpos = 1;
+ int dir;
+
+ dstack[0] = DIR_END;
+
+ for (;;) {
+ if (root == NULL) {
+ if ((dir = dstack[dpos-1]) == DIR_END) {
+ return;
+ }
+ if (dir == DIR_LEFT) {
+ /* Still has DIR_RIGHT to do */
+ dstack[dpos-1] = DIR_RIGHT;
+ root = (tstack[tpos-1])->right;
+ } else {
+ /* stacktop is an object to be deleted */
+ (*doit)(tstack[--tpos],context); /* expected to do the
+ deletion */
+ --dpos;
+ root = NULL;
+ }
+ } else {
+ dstack[dpos++] = DIR_LEFT;
+ tstack[tpos++] = root;
+ root = root->left;
+ }
+ }
+}
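+
+/*
+ * Hedged usage sketch (not compiled): the sweep visits every node and
+ * the callback is expected to dispose of it, e.g. when a process
+ * exits. The cleanup details below are hypothetical.
+ */
+#if 0
+static void cleanup_one(ErtsMonitor *mon, void *vproc)
+{
+    /* ... notify the other end of the monitor, then free the node ... */
+    erts_destroy_monitor(mon);
+}
+
+    erts_sweep_monitors(p->monitors, cleanup_one, (void *) p);
+    p->monitors = NULL;    /* all tree nodes are gone after the sweep */
+#endif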
+
+void erts_sweep_links(ErtsLink *root,
+ void (*doit)(ErtsLink *, void *),
+ void *context)
+{
+ ErtsLink *tstack[STACK_NEED];
+ int tpos = 0;
+ int dstack[STACK_NEED+1];
+ int dpos = 1;
+ int dir;
+
+ dstack[0] = DIR_END;
+
+ for (;;) {
+ if (root == NULL) {
+ if ((dir = dstack[dpos-1]) == DIR_END) {
+ return;
+ }
+ if (dir == DIR_LEFT) {
+ /* Still has DIR_RIGHT to do */
+ dstack[dpos-1] = DIR_RIGHT;
+ root = (tstack[tpos-1])->right;
+ } else {
+ /* stacktop is an object to be deleted */
+ (*doit)(tstack[--tpos],context); /* expected to do the
+ deletion */
+ --dpos;
+ root = NULL;
+ }
+ } else {
+ dstack[dpos++] = DIR_LEFT;
+ tstack[tpos++] = root;
+ root = root->left;
+ }
+ }
+}
+
+void erts_sweep_suspend_monitors(ErtsSuspendMonitor *root,
+ void (*doit)(ErtsSuspendMonitor *, void *),
+ void *context)
+{
+ ErtsSuspendMonitor *tstack[STACK_NEED];
+ int tpos = 0;
+ int dstack[STACK_NEED+1];
+ int dpos = 1;
+ int dir;
+
+ dstack[0] = DIR_END;
+
+ for (;;) {
+ if (root == NULL) {
+ if ((dir = dstack[dpos-1]) == DIR_END) {
+ return;
+ }
+ if (dir == DIR_LEFT) {
+ /* Still has DIR_RIGHT to do */
+ dstack[dpos-1] = DIR_RIGHT;
+ root = (tstack[tpos-1])->right;
+ } else {
+ /* stacktop is an object to be deleted */
+ (*doit)(tstack[--tpos],context); /* expected to do the
+ deletion */
+ --dpos;
+ root = NULL;
+ }
+ } else {
+ dstack[dpos++] = DIR_LEFT;
+ tstack[tpos++] = root;
+ root = root->left;
+ }
+ }
+}
+
+
+/* Debug BIF, always present, but undocumented... */
+
+static void erts_dump_monitors(ErtsMonitor *root, int indent)
+{
+ if (root == NULL)
+ return;
+ erts_dump_monitors(root->right,indent+2);
+ erts_printf("%*s[%b16d:%b16u:%T:%T:%T]\n", indent, "", root->balance,
+ root->type, root->ref, root->pid, root->name);
+ erts_dump_monitors(root->left,indent+2);
+}
+
+static void erts_dump_links_aux(ErtsLink *root, int indent,
+ erts_dsprintf_buf_t *dsbufp)
+{
+ if (root == NULL)
+ return;
+ erts_dump_links_aux(root->right, indent+2, dsbufp);
+ dsbufp->str_len = 0;
+ erts_dsprintf(dsbufp, "%*s[%b16d:%b16u:%T:%p]", indent, "",
+ root->balance, root->type, root->pid, ERTS_LINK_ROOT(root));
+ if (ERTS_LINK_ROOT(root) != NULL) {
+ ErtsLink *sub = ERTS_LINK_ROOT(root);
+ int len = dsbufp->str_len;
+ erts_dump_links_aux(sub->right, indent+len+5, dsbufp);
+ erts_dsprintf(dsbufp, "-> %*s[%b16d:%b16u:%T:%p]", indent, "",
+ sub->balance, sub->type, sub->pid, ERTS_LINK_ROOT(sub));
+ erts_printf("%s\n", dsbufp->str);
+ erts_dump_links_aux(sub->left, indent+len+5, dsbufp);
+ } else {
+ erts_printf("%s\n", dsbufp->str);
+ }
+ erts_dump_links_aux(root->left, indent+2, dsbufp);
+}
+
+static void erts_dump_links(ErtsLink *root, int indent)
+{
+ erts_dsprintf_buf_t *dsbufp = erts_create_tmp_dsbuf(0);
+ erts_dump_links_aux(root, indent, dsbufp);
+ erts_destroy_tmp_dsbuf(dsbufp);
+}
+
+Eterm erts_debug_dump_monitors_1(Process *p, Eterm pid)
+{
+ Process *rp;
+ DistEntry *dep;
+ rp = erts_pid2proc(p, ERTS_PROC_LOCK_MAIN, pid, ERTS_PROC_LOCK_LINK);
+ if (!rp) {
+ ERTS_SMP_ASSERT_IS_NOT_EXITING(p);
+ if (is_atom(pid) && is_node_name_atom(pid) &&
+ (dep = erts_find_dist_entry(pid)) != NULL) {
+ erts_printf("Dumping dist monitors-------------------\n");
+ erts_smp_de_links_lock(dep);
+ erts_dump_monitors(dep->monitors,0);
+ erts_smp_de_links_unlock(dep);
+ erts_printf("Monitors dumped-------------------------\n");
+ erts_deref_dist_entry(dep);
+ BIF_RET(am_true);
+ } else {
+ BIF_ERROR(p,BADARG);
+ }
+ } else {
+ erts_printf("Dumping pid monitors--------------------\n");
+ erts_dump_monitors(rp->monitors,0);
+ erts_printf("Monitors dumped-------------------------\n");
+ erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK);
+ BIF_RET(am_true);
+ }
+}
+
+Eterm erts_debug_dump_links_1(Process *p, Eterm pid)
+{
+ Process *rp;
+ DistEntry *dep;
+ if (is_internal_port(pid)) {
+ Port *rport = erts_id2port(pid, p, ERTS_PROC_LOCK_MAIN);
+ if (rport) {
+ erts_printf("Dumping port links----------------------\n");
+ erts_dump_links(rport->nlinks,0);
+ erts_printf("Links dumped----------------------------\n");
+ erts_smp_port_unlock(rport);
+ BIF_RET(am_true);
+ } else {
+ BIF_ERROR(p,BADARG);
+ }
+ } else {
+ rp = erts_pid2proc(p, ERTS_PROC_LOCK_MAIN, pid, ERTS_PROC_LOCK_LINK);
+ if (!rp) {
+ ERTS_SMP_ASSERT_IS_NOT_EXITING(p);
+ if (is_atom(pid) && is_node_name_atom(pid) &&
+ (dep = erts_find_dist_entry(pid)) != NULL) {
+ erts_printf("Dumping dist links----------------------\n");
+ erts_smp_de_links_lock(dep);
+ erts_dump_links(dep->nlinks,0);
+ erts_smp_de_links_unlock(dep);
+ erts_printf("Links dumped----------------------------\n");
+ erts_deref_dist_entry(dep);
+ BIF_RET(am_true);
+ } else {
+ BIF_ERROR(p,BADARG);
+ }
+
+ } else {
+ erts_printf("Dumping pid links-----------------------\n");
+ erts_dump_links(rp->nlinks,0);
+ erts_printf("Links dumped----------------------------\n");
+ erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK);
+ BIF_RET(am_true);
+ }
+ }
+}
diff --git a/erts/emulator/beam/erl_monitors.h b/erts/emulator/beam/erl_monitors.h
new file mode 100644
index 0000000000..d3f6d410dd
--- /dev/null
+++ b/erts/emulator/beam/erl_monitors.h
@@ -0,0 +1,180 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2004-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/**********************************************************************
+ * Header for monitors and links data structures.
+ * Monitors are kept in an AVL tree and the data structures for
+ * the four different types of monitors are like this:
+ **********************************************************************
+ * Local monitor by pid/port:
+ * (Ref is always same in all involved data structures)
+ **********************************************************************
+ * Process/Port X Process Y
+ * +-------------+ +-------------+
+ * Type: | MON_ORIGIN | | MON_TARGET |
+ * +-------------+ +-------------+
+ * Pid: | Pid(Y) | | Pid/Port(X) |
+ * +-------------+ +-------------+
+ * Name: | [] | | [] |
+ * +-------------+ +-------------+
+ **********************************************************************
+ * Local monitor by name: (Ref is always same in all involved data structures)
+ **********************************************************************
+ * Process X Process Y (name foo)
+ * +-------------+ +-------------+
+ * Type: | MON_ORIGIN | | MON_TARGET |
+ * +-------------+ +-------------+
+ * Pid: | Pid(Y) | | Pid(X) |
+ * +-------------+ +-------------+
+ * Name: | Atom(foo) | | Atom(foo) |
+ * +-------------+ +-------------+
+ **********************************************************************
+ * Remote monitor by pid: (Ref is always same in all involved data structures)
+ **********************************************************************
+ * Node A | Node B
+ * ---------------------------------+----------------------------------
+ * Process X (@A) Distentry @A Distentry @B Process Y (@B)
+ * for node B for node A
+ * +-------------+ +-------------+ +-------------+ +-------------+
+ * Type: | MON_ORIGIN | | MON_TARGET | | MON_ORIGIN | | MON_TARGET |
+ * +-------------+ +-------------+ +-------------+ +-------------+
+ * Pid: | Pid(Y) | | Pid(X) | | Pid(Y) | | Pid(X) |
+ * +-------------+ +-------------+ +-------------+ +-------------+
+ * Name: | [] | | [] | | [] | | [] |
+ * +-------------+ +-------------+ +-------------+ +-------------+
+ **********************************************************************
+ * Remote monitor by name: (Ref is always same in all involved data structures)
+ **********************************************************************
+ * Node A | Node B
+ * ---------------------------------+----------------------------------
+ * Process X (@A) Distentry @A Distentry @B Process Y (@B)
+ * for node B for node A (name foo)
+ * +-------------+ +-------------+ +-------------+ +-------------+
+ * Type: | MON_ORIGIN | | MON_TARGET | | MON_ORIGIN | | MON_TARGET |
+ * +-------------+ +-------------+ +-------------+ +-------------+
+ * Pid: | Atom(node B)| | Pid(X) | | Pid(Y) | | Pid(X) |
+ * +-------------+ +-------------+ +-------------+ +-------------+
+ * Name: | Atom(foo) | | Atom(foo) | | Atom(foo) | | Atom(foo) |
+ * +-------------+ +-------------+ +-------------+ +-------------+
+ * The reason for the node atom in X->pid is that we don't know the actual
+ * pid of the monitored process on the other node when setting the monitor
+ * (which is done asynchronously).
+ **********************************************************************/
+#ifndef _ERL_MONITORS_H
+#define _ERL_MONITORS_H
+
+/* Type tags for monitors */
+#define MON_ORIGIN 1
+#define MON_TARGET 3
+
+/* Type tags for links */
+#define LINK_PID 1 /* ...Or port */
+#define LINK_NODE 3 /* "Node monitor" */
+
+/* Size of a monitor without heap, in words (fixalloc) */
+#define ERTS_MONITOR_SIZE ((sizeof(ErtsMonitor) - sizeof(Uint))/sizeof(Uint))
+#define ERTS_MONITOR_SH_SIZE (ERTS_MONITOR_SIZE + REF_THING_SIZE)
+#define ERTS_LINK_SIZE ((sizeof(ErtsLink) - sizeof(Uint))/sizeof(Uint))
+#define ERTS_LINK_SH_SIZE ERTS_LINK_SIZE /* Size of fix-alloced links */
+
+/* ErtsMonitor and ErtsLink *need* to begin in a similar way as
+ ErtsMonitorOrLink */
+typedef struct erts_monitor_or_link {
+ struct erts_monitor_or_link *left, *right;
+ Sint16 balance;
+} ErtsMonitorOrLink;
+
+typedef struct erts_monitor {
+ struct erts_monitor *left, *right;
+ Sint16 balance;
+ Uint16 type; /* MON_ORIGIN | MON_TARGET */
+ Eterm ref;
+    Eterm pid;   /* For a distributed named monitor this is the nodename
+		    atom in the MON_ORIGIN process; otherwise a pid or,
+		    in the case of a MON_TARGET, possibly a port */
+ Eterm name; /* When monitoring a named process: atom() else [] */
+ Uint heap[1]; /* Larger in reality */
+} ErtsMonitor;
+
+typedef struct erts_link {
+ struct erts_link *left, *right;
+ Sint16 balance;
+ Uint16 type; /* LINK_PID | LINK_NODE */
+ Eterm pid; /* When node monitor,
+ the node atom is here instead */
+ union {
+ struct erts_link *root; /* Used only in dist entries */
+ Uint refc;
+ } shared;
+ Uint heap[1]; /* Larger in reality */
+} ErtsLink;
+
+typedef struct erts_suspend_monitor {
+ struct erts_suspend_monitor *left, *right;
+ Sint16 balance;
+
+ int pending;
+ int active;
+ Eterm pid;
+} ErtsSuspendMonitor;
+
+#define ERTS_LINK_ROOT(Linkp) ((Linkp)->shared.root)
+#define ERTS_LINK_REFC(Linkp) ((Linkp)->shared.refc)
+
+#define ERTS_LINK_ROOT_AS_UINT(Linkp) (*((Uint *) &((Linkp)->shared.root)))
+
+Uint erts_tot_link_lh_size(void);
+
+
+/* Prototypes */
+void erts_destroy_monitor(ErtsMonitor *mon);
+void erts_add_monitor(ErtsMonitor **root, Uint type, Eterm ref, Eterm pid,
+ Eterm name);
+ErtsMonitor *erts_remove_monitor(ErtsMonitor **root, Eterm ref);
+ErtsMonitor *erts_lookup_monitor(ErtsMonitor *root, Eterm ref);
+void erts_sweep_monitors(ErtsMonitor *root,
+ void (*doit)(ErtsMonitor *, void *),
+ void *context);
+
+void erts_destroy_link(ErtsLink *lnk);
+/* Returns 0 if OK, < 0 if already present */
+int erts_add_link(ErtsLink **root, Uint type, Eterm pid);
+ErtsLink *erts_add_or_lookup_link(ErtsLink **root, Uint type, Eterm pid);
+ErtsLink *erts_remove_link(ErtsLink **root, Eterm pid);
+ErtsLink *erts_lookup_link(ErtsLink *root, Eterm pid);
+void erts_sweep_links(ErtsLink *root,
+ void (*doit)(ErtsLink *, void *),
+ void *context);
+
+void erts_destroy_suspend_monitor(ErtsSuspendMonitor *sproc);
+void erts_sweep_suspend_monitors(ErtsSuspendMonitor *root,
+ void (*doit)(ErtsSuspendMonitor *, void *),
+ void *context);
+ErtsSuspendMonitor *erts_add_or_lookup_suspend_monitor(ErtsSuspendMonitor **root,
+ Eterm pid);
+ErtsSuspendMonitor *erts_lookup_suspend_monitor(ErtsSuspendMonitor *root,
+ Eterm pid);
+void erts_delete_suspend_monitor(ErtsSuspendMonitor **root, Eterm pid);
+void erts_init_monitors(void);
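+
+/* Illustrative usage sketch (hypothetical helper, for exposition only;
+ * the Process field names are assumed): establishing the local monitor
+ * pictured at the top of this file. The same ref keys a MON_ORIGIN
+ * entry in the watcher's tree and a MON_TARGET entry in the watched
+ * process's tree; NIL ("[]") is the name of an unnamed monitor.
+ *
+ *   void demo_local_monitor(Process *watcher, Process *watched, Eterm ref)
+ *   {
+ *       erts_add_monitor(&watcher->monitors, MON_ORIGIN,
+ *                        ref, watched->id, NIL);
+ *       erts_add_monitor(&watched->monitors, MON_TARGET,
+ *                        ref, watcher->id, NIL);
+ *   }
+ */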
+
+#define erts_doforall_monitors erts_sweep_monitors
+#define erts_doforall_links erts_sweep_links
+#define erts_doforall_suspend_monitors erts_sweep_suspend_monitors
+
+#endif /* _ERL_MONITORS_H */
diff --git a/erts/emulator/beam/erl_mtrace.c b/erts/emulator/beam/erl_mtrace.c
new file mode 100644
index 0000000000..8b8ac2ec80
--- /dev/null
+++ b/erts/emulator/beam/erl_mtrace.c
@@ -0,0 +1,1240 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2003-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Description:	Memory allocation trace. The trace is sent over a
+ *              TCP/IP connection.
+ *
+ *              The trace format is not intended to be documented.
+ *              Instead, a library for parsing the trace will be
+ *              distributed, which makes it easier to change the
+ *              trace format. That library is currently not included
+ *              in the OTP distribution, but will be in the future.
+ *
+ * Author: Rickard Green
+ */
+
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+
+#include "sys.h"
+#include "global.h"
+#include "erl_sock.h"
+#include "erl_threads.h"
+#include "erl_memory_trace_protocol.h"
+#include "erl_mtrace.h"
+
+#if defined(MAXHOSTNAMELEN) && MAXHOSTNAMELEN > 255
+# undef MAXHOSTNAMELEN
+#endif
+
+#ifndef MAXHOSTNAMELEN
+# define MAXHOSTNAMELEN 255
+#endif
+
+#define TRACE_PRINTOUTS 0
+#if TRACE_PRINTOUTS
+#define MSB2BITS(X) ((((unsigned)(X))+1)*8)
+#endif
+
+static erts_mtx_t mtrace_op_mutex;
+static erts_mtx_t mtrace_buf_mutex;
+
+#define TRACE_BUF_SZ (16*1024)
+
+
+#define UI8_MSB_EHF_SZ ERTS_MT_UI8_MSB_EHDR_FLD_SZ
+#define UI16_MSB_EHF_SZ ERTS_MT_UI16_MSB_EHDR_FLD_SZ
+#define UI32_MSB_EHF_SZ ERTS_MT_UI32_MSB_EHDR_FLD_SZ
+#define UI64_MSB_EHF_SZ ERTS_MT_UI64_MSB_EHDR_FLD_SZ
+#define UI_MSB_EHF_SZ ERTS_MT_UI64_MSB_EHDR_FLD_SZ
+#define TAG_EHF_SZ ERTS_MT_TAG_EHDR_FLD_SZ
+
+#define UI8_MSB_EHF_MSK ERTS_MT_UI8_MSB_EHDR_FLD_MSK
+#define UI16_MSB_EHF_MSK ERTS_MT_UI16_MSB_EHDR_FLD_MSK
+#define UI32_MSB_EHF_MSK ERTS_MT_UI32_MSB_EHDR_FLD_MSK
+#define UI_MSB_EHF_MSK ERTS_MT_UI64_MSB_EHDR_FLD_MSK
+#define UI64_MSB_EHF_MSK ERTS_MT_UI64_MSB_EHDR_FLD_MSK
+#define TAG_EHF_MSK ERTS_MT_TAG_EHDR_FLD_MSK
+
+#define UI8_SZ (1)
+#define UI16_SZ (2)
+#define UI32_SZ (4)
+#define UI64_SZ (8)
+#ifdef ARCH_64
+# define UI_SZ UI64_SZ
+#else
+# define UI_SZ UI32_SZ
+#endif
+
+#define WRITE_UI8(P, V) (*(P) = (byte) ((V) & 0xff))
+
+#define WRITE_UI16(P, V) \
+ ((P)[0] = (byte) (((V) >> 8) & 0xff), \
+ (P)[1] = (byte) ( (V) & 0xff))
+
+#define WRITE_UI32(P, V) \
+ ((P)[0] = (byte) (((V) >> 24) & 0xff), \
+ (P)[1] = (byte) (((V) >> 16) & 0xff), \
+ (P)[2] = (byte) (((V) >> 8) & 0xff), \
+ (P)[3] = (byte) ( (V) & 0xff))
+
+#define WRITE_UI64(P, V) \
+ ((P)[0] = (byte) (((V) >> 56) & 0xff), \
+ (P)[1] = (byte) (((V) >> 48) & 0xff), \
+ (P)[2] = (byte) (((V) >> 40) & 0xff), \
+ (P)[3] = (byte) (((V) >> 32) & 0xff), \
+ (P)[4] = (byte) (((V) >> 24) & 0xff), \
+ (P)[5] = (byte) (((V) >> 16) & 0xff), \
+ (P)[6] = (byte) (((V) >> 8) & 0xff), \
+ (P)[7] = (byte) ( (V) & 0xff))
+
+#define PUT_UI8(P, V) (WRITE_UI8((P), (V)), (P) += UI8_SZ)
+#define PUT_UI16(P, V) (WRITE_UI16((P), (V)), (P) += UI16_SZ)
+#define PUT_UI32(P, V) (WRITE_UI32((P), (V)), (P) += UI32_SZ)
+#define PUT_UI64(P, V) (WRITE_UI64((P), (V)), (P) += UI64_SZ)
+
+#define PUT_VSZ_UI16(P, M, V) \
+do { \
+ Uint16 v__ = (Uint16) (V); \
+ if (v__ >= (((Uint16) 1) << 8)) (M) = 1; else (M) = 0; \
+ switch ((M)) { \
+ case 1: *((P)++) = (byte) ((v__ >> 8) & 0xff); \
+ case 0: *((P)++) = (byte) ( v__ & 0xff); \
+ } \
+} while (0)
+
+#define PUT_VSZ_UI32(P, M, V) \
+do { \
+ Uint32 v__ = (Uint32) (V); \
+ if (v__ >= (((Uint32) 1) << 16)) { \
+ if (v__ >= (((Uint32) 1) << 24)) (M) = 3; else (M) = 2; \
+ } else { \
+ if (v__ >= (((Uint32) 1) << 8)) (M) = 1; else (M) = 0; \
+ } \
+ switch ((M)) { \
+ case 3: *((P)++) = (byte) ((v__ >> 24) & 0xff); \
+ case 2: *((P)++) = (byte) ((v__ >> 16) & 0xff); \
+ case 1: *((P)++) = (byte) ((v__ >> 8) & 0xff); \
+ case 0: *((P)++) = (byte) ( v__ & 0xff); \
+ } \
+} while (0)
+
+#ifdef ARCH_64
+
+#define PUT_VSZ_UI64(P, M, V) \
+do { \
+ Uint64 v__ = (Uint64) (V); \
+ if (v__ >= (((Uint64) 1) << 32)) { \
+ if (v__ >= (((Uint64) 1) << 48)) { \
+ if (v__ >= (((Uint64) 1) << 56)) (M) = 7; else (M) = 6; \
+ } else { \
+ if (v__ >= (((Uint64) 1) << 40)) (M) = 5; else (M) = 4; \
+ } \
+ } else { \
+ if (v__ >= (((Uint64) 1) << 16)) { \
+ if (v__ >= (((Uint64) 1) << 24)) (M) = 3; else (M) = 2; \
+ } else { \
+ if (v__ >= (((Uint64) 1) << 8)) (M) = 1; else (M) = 0; \
+ } \
+ } \
+ switch ((M)) { \
+ case 7: *((P)++) = (byte) ((v__ >> 56) & 0xff); \
+ case 6: *((P)++) = (byte) ((v__ >> 48) & 0xff); \
+ case 5: *((P)++) = (byte) ((v__ >> 40) & 0xff); \
+ case 4: *((P)++) = (byte) ((v__ >> 32) & 0xff); \
+ case 3: *((P)++) = (byte) ((v__ >> 24) & 0xff); \
+ case 2: *((P)++) = (byte) ((v__ >> 16) & 0xff); \
+ case 1: *((P)++) = (byte) ((v__ >> 8) & 0xff); \
+ case 0: *((P)++) = (byte) ( v__ & 0xff); \
+ } \
+} while (0)
+
+#define PUT_VSZ_UI PUT_VSZ_UI64
+#else /* #ifdef ARCH_64 */
+#define PUT_VSZ_UI PUT_VSZ_UI32
+#endif /* #ifdef ARCH_64 */
+
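+/*
+ * Illustrative note (added for exposition, not in the original source):
+ * the PUT_VSZ_* macros emit a value with leading zero bytes stripped;
+ * the switch cases above fall through deliberately, and M is set to one
+ * less than the number of bytes written. For example
+ *
+ *   PUT_VSZ_UI32(p, m, 0x1234);
+ *
+ * sets m = 1 and writes the two bytes 0x12, 0x34. The M values are
+ * later packed into the Uint16 header of each entry so the reader
+ * knows how many bytes every field occupies (cf. check_ui() below).
+ */
+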
+#define MAKE_TBUF_SZ(SZ) \
+ (TRACE_BUF_SZ < (SZ) \
+ ? (disable_trace(1, "Internal buffer overflow", 0), 0) \
+ : (endp - tracep < (SZ) ? send_trace_buffer() : 1))
+
+
+static void disable_trace(int error, char *reason, int eno);
+static int send_trace_buffer(void);
+
+#ifdef DEBUG
+void
+check_alloc_entry(byte *sp, byte *ep,
+ byte tag,
+ Uint16 ct_no, int ct_no_n,
+ Uint16 type, int type_n,
+ Uint res, int res_n,
+ Uint size, int size_n,
+ Uint32 ti,int ti_n);
+void
+check_realloc_entry(byte *sp, byte *ep,
+ byte tag,
+ Uint16 ct_no, int ct_no_n,
+ Uint16 type, int type_n,
+ Uint res, int res_n,
+ Uint ptr, int ptr_n,
+ Uint size, int size_n,
+ Uint32 ti,int ti_n);
+void
+check_free_entry(byte *sp, byte *ep,
+ byte tag,
+ Uint16 ct_no, int ct_no_n,
+ Uint16 t_no, int t_no_n,
+ Uint ptr, int ptr_n,
+ Uint32 ti,int ti_n);
+void
+check_time_inc_entry(byte *sp, byte *ep,
+ Uint32 secs, int secs_n,
+ Uint32 usecs, int usecs_n);
+#endif
+
+
+
+int erts_mtrace_enabled;
+static erts_sock_t socket_desc;
+static byte trace_buffer[TRACE_BUF_SZ];
+static byte *tracep;
+static byte *endp;
+static SysTimeval last_tv;
+
+#if ERTS_MTRACE_SEGMENT_ID >= ERTS_ALC_A_MIN || ERTS_MTRACE_SEGMENT_ID < 0
+#error ERTS_MTRACE_SEGMENT_ID >= ERTS_ALC_A_MIN || ERTS_MTRACE_SEGMENT_ID < 0
+#endif
+
+char* erl_errno_id(int error);
+
+#define INVALID_TIME_INC (0xffffffff)
+
+static ERTS_INLINE Uint32
+get_time_inc(void)
+{
+ Sint32 secs;
+ Sint32 usecs;
+ Uint32 res;
+ SysTimeval tv;
+ sys_gettimeofday(&tv);
+
+ secs = tv.tv_sec - last_tv.tv_sec;
+ if (tv.tv_usec >= last_tv.tv_usec)
+ usecs = tv.tv_usec - last_tv.tv_usec;
+ else {
+ secs--;
+ usecs = 1000000 + tv.tv_usec - last_tv.tv_usec;
+ }
+
+ ASSERT(0 <= usecs);
+ ASSERT(usecs < 1000000);
+
+ if (secs < 0) {
+	/* Clock stepped backwards; we pretend that no time has passed. */
+ res = 0;
+ }
+ else if (secs < ERTS_MT_TIME_INC_SECS_MASK) {
+ res = ((((Uint32) secs) << ERTS_MT_TIME_INC_SECS_SHIFT)
+ | (((Uint32) usecs) << ERTS_MT_TIME_INC_USECS_SHIFT));
+ }
+ else {
+ /* Increment too large to fit in a 32-bit integer;
+ put a time inc entry in trace ... */
+ if (MAKE_TBUF_SZ(UI8_SZ + UI16_SZ + 2*UI32_SZ)) {
+ byte *hdrp;
+ Uint16 hdr;
+ int secs_n, usecs_n;
+
+ *(tracep++) = ERTS_MT_TIME_INC_BDY_TAG;
+
+ hdrp = tracep;
+ tracep += 2;
+
+ PUT_VSZ_UI32(tracep, secs_n, secs);
+ PUT_VSZ_UI32(tracep, usecs_n, usecs);
+
+ hdr = usecs_n;
+
+ hdr <<= UI32_MSB_EHF_SZ;
+ hdr |= secs_n;
+
+ WRITE_UI16(hdrp, hdr);
+#ifdef DEBUG
+ check_time_inc_entry(hdrp-1, tracep,
+ (Uint32) secs, secs_n,
+ (Uint32) usecs, usecs_n);
+#endif
+ res = 0;
+ }
+ else {
+ res = INVALID_TIME_INC;
+ }
+ }
+
+ last_tv = tv;
+ return res;
+}
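+
+/* Illustrative note (added for exposition): a normal time increment is
+ * returned packed into one Uint32 as
+ *
+ *   (secs << ERTS_MT_TIME_INC_SECS_SHIFT) | (usecs << ERTS_MT_TIME_INC_USECS_SHIFT)
+ *
+ * Only when the seconds delta no longer fits in the secs field is an
+ * explicit ERTS_MT_TIME_INC_BDY_TAG entry written and 0 returned. */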
+
+
+static void
+disable_trace(int error, char *reason, int eno)
+{
+ char *mt_dis = "Memory trace disabled";
+ char *eno_str;
+
+ erts_mtrace_enabled = 0;
+ erts_sock_close(socket_desc);
+ socket_desc = ERTS_SOCK_INVALID_SOCKET;
+
+ if (eno == 0)
+ erts_fprintf(stderr, "%s: %s\n", mt_dis, reason);
+ else {
+ eno_str = erl_errno_id(eno);
+ if (strcmp(eno_str, "unknown") == 0)
+ erts_fprintf(stderr, "%s: %s: %d\n", mt_dis, reason, eno);
+ else
+ erts_fprintf(stderr, "%s: %s: %s\n", mt_dis, reason, eno_str);
+ }
+}
+
+static int
+send_trace_buffer(void)
+{
+ ssize_t ssz;
+ size_t sz;
+
+ sz = tracep - trace_buffer;
+ tracep = trace_buffer;
+
+ do {
+ ssz = erts_sock_send(socket_desc, (void *) tracep, sz);
+ if (ssz < 0) {
+ int socket_errno = erts_sock_errno();
+
+#ifdef EINTR
+ if (socket_errno == EINTR)
+ continue;
+#endif
+ disable_trace(0, "Connection lost", socket_errno);
+ return 0;
+ }
+ if (ssz > sz) {
+ disable_trace(1, "Unexpected error", 0);
+ return 0;
+ }
+ tracep += ssz;
+ sz -= ssz;
+ } while (sz);
+
+ tracep = trace_buffer;
+ return 1;
+}
+
+#if ERTS_ALC_N_MAX >= (1 << 16)
+#error "Excessively large type numbers"
+#endif
+
+
+static int
+write_trace_header(char *nodename, char *pid, char *hostname)
+{
+#ifdef DEBUG
+ byte *startp;
+#endif
+ Uint16 entry_sz;
+ Uint32 flags, n_len, h_len, p_len, hdr_prolog_len;
+ int i, no, str_len;
+ const char *str;
+ struct {
+ Uint32 gsec;
+ Uint32 sec;
+ Uint32 usec;
+ } start_time;
+
+ sys_gettimeofday(&last_tv);
+
+ start_time.gsec = (Uint32) (last_tv.tv_sec / 1000000000);
+ start_time.sec = (Uint32) (last_tv.tv_sec % 1000000000);
+ start_time.usec = (Uint32) last_tv.tv_usec;
+
+ if (!MAKE_TBUF_SZ(3*UI32_SZ))
+ return 0;
+
+ flags = 0;
+#ifdef ARCH_64
+ flags |= ERTS_MT_64_BIT_FLAG;
+#endif
+ flags |= ERTS_MT_CRR_INFO;
+#ifdef ERTS_CAN_TRACK_MALLOC
+ flags |= ERTS_MT_SEG_CRR_INFO;
+#endif
+
+ /*
+ * The following 3 ui32 words *always* have to come
+ * first in the trace.
+ */
+ PUT_UI32(tracep, ERTS_MT_START_WORD);
+ PUT_UI32(tracep, ERTS_MT_MAJOR_VSN);
+ PUT_UI32(tracep, ERTS_MT_MINOR_VSN);
+
+ n_len = strlen(nodename);
+ h_len = strlen(hostname);
+ p_len = strlen(pid);
+ hdr_prolog_len = (2*UI32_SZ
+ + 3*UI16_SZ
+ + 3*UI32_SZ
+ + 3*UI8_SZ
+ + n_len
+ + h_len
+ + p_len);
+
+ if (!MAKE_TBUF_SZ(hdr_prolog_len))
+ return 0;
+
+ /*
+     * New stuff can be added at the end of the header prolog
+     * (EOHP). The reader should skip anything at the end that it
+     * doesn't understand.
+ */
+
+#ifdef DEBUG
+ startp = tracep;
+#endif
+
+ PUT_UI32(tracep, hdr_prolog_len);
+ PUT_UI32(tracep, flags);
+ PUT_UI16(tracep, ERTS_MTRACE_SEGMENT_ID);
+ PUT_UI16(tracep, ERTS_ALC_A_MAX);
+ PUT_UI16(tracep, ERTS_ALC_N_MAX);
+
+ PUT_UI32(tracep, start_time.gsec);
+ PUT_UI32(tracep, start_time.sec);
+ PUT_UI32(tracep, start_time.usec);
+
+ PUT_UI8(tracep, (byte) n_len);
+ memcpy((void *) tracep, (void *) nodename, n_len);
+ tracep += n_len;
+
+ PUT_UI8(tracep, (byte) h_len);
+ memcpy((void *) tracep, (void *) hostname, h_len);
+ tracep += h_len;
+
+ PUT_UI8(tracep, (byte) p_len);
+ memcpy((void *) tracep, (void *) pid, p_len);
+ tracep += p_len;
+
+ ASSERT(startp + hdr_prolog_len == tracep);
+
+ /*
+ * EOHP
+ */
+
+ /*
+     * All tags from here on should be followed by a Uint16 size
+     * field containing the total size of the entry.
+     *
+     * New stuff can either be added at the end of an entry, or
+     * as a new tagged entry. The reader should skip anything at
+     * the end that it doesn't understand.
+ */
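+
+    /*
+     * Illustrative note (added for exposition): every tagged entry
+     * below is therefore laid out as
+     *
+     *   UI8 tag | UI16 total entry size | entry-specific fields ...
+     *
+     * so a reader can always skip an unknown entry by advancing past
+     * its size field.
+     */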
+
+ for (i = ERTS_ALC_A_MIN; i <= ERTS_ALC_A_MAX; i++) {
+ Uint16 aflags = 0;
+
+#ifndef ERTS_CAN_TRACK_MALLOC
+ if (i != ERTS_ALC_A_SYSTEM)
+#endif
+ aflags |= ERTS_MT_ALLCTR_USD_CRR_INFO;
+
+ str = ERTS_ALC_A2AD(i);
+ ASSERT(str);
+ str_len = strlen(str);
+ if (str_len >= (1 << 8)) {
+ disable_trace(1, "Excessively large allocator string", 0);
+ return 0;
+ }
+
+ entry_sz = UI8_SZ + 3*UI16_SZ + UI8_SZ;
+ entry_sz += (erts_allctrs_info[i].alloc_util ? 2 : 1)*UI16_SZ;
+ entry_sz += UI8_SZ + str_len;
+
+ if (!MAKE_TBUF_SZ(entry_sz))
+ return 0;
+
+#ifdef DEBUG
+ startp = tracep;
+#endif
+ PUT_UI8(tracep, ERTS_MT_ALLOCATOR_HDR_TAG);
+ PUT_UI16(tracep, entry_sz);
+ PUT_UI16(tracep, aflags);
+ PUT_UI16(tracep, (Uint16) i);
+ PUT_UI8( tracep, (byte) str_len);
+ memcpy((void *) tracep, (void *) str, str_len);
+ tracep += str_len;
+ if (erts_allctrs_info[i].alloc_util) {
+ PUT_UI8(tracep, 2);
+ PUT_UI16(tracep, ERTS_MTRACE_SEGMENT_ID);
+ PUT_UI16(tracep, ERTS_ALC_A_SYSTEM);
+ }
+ else {
+ PUT_UI8(tracep, 1);
+ switch (i) {
+ case ERTS_ALC_A_SYSTEM:
+ PUT_UI16(tracep, ERTS_MTRACE_SEGMENT_ID);
+ break;
+ case ERTS_ALC_A_FIXED_SIZE:
+ if (erts_allctrs_info[ERTS_FIX_CORE_ALLOCATOR].enabled)
+ PUT_UI16(tracep, ERTS_FIX_CORE_ALLOCATOR);
+ else
+ PUT_UI16(tracep, ERTS_ALC_A_SYSTEM);
+ break;
+ default:
+ PUT_UI16(tracep, ERTS_MTRACE_SEGMENT_ID);
+ break;
+ }
+ }
+ ASSERT(startp + entry_sz == tracep);
+ }
+
+ for (i = ERTS_ALC_N_MIN; i <= ERTS_ALC_N_MAX; i++) {
+ Uint16 nflags = 0;
+ str = ERTS_ALC_N2TD(i);
+ ASSERT(str);
+
+ str_len = strlen(str);
+ if (str_len >= (1 << 8)) {
+ disable_trace(1, "Excessively large type string", 0);
+ return 0;
+ }
+
+ no = ERTS_ALC_T2A(ERTS_ALC_N2T(i));
+ if (!erts_allctrs_info[no].enabled)
+ no = ERTS_ALC_A_SYSTEM;
+ ASSERT(ERTS_ALC_A_MIN <= no && no <= ERTS_ALC_A_MAX);
+
+ entry_sz = UI8_SZ + 3*UI16_SZ + UI8_SZ + str_len + UI16_SZ;
+
+ if (!MAKE_TBUF_SZ(entry_sz))
+ return 0;
+
+#ifdef DEBUG
+ startp = tracep;
+#endif
+ PUT_UI8(tracep, ERTS_MT_BLOCK_TYPE_HDR_TAG);
+ PUT_UI16(tracep, entry_sz);
+ PUT_UI16(tracep, nflags);
+ PUT_UI16(tracep, (Uint16) i);
+ PUT_UI8(tracep, (byte) str_len);
+ memcpy((void *) tracep, (void *) str, str_len);
+ tracep += str_len;
+ PUT_UI16(tracep, no);
+ ASSERT(startp + entry_sz == tracep);
+ }
+
+ entry_sz = UI8_SZ + UI16_SZ;
+ if (!MAKE_TBUF_SZ(entry_sz))
+ return 0;
+ PUT_UI8(tracep, ERTS_MT_END_OF_HDR_TAG);
+ PUT_UI16(tracep, entry_sz);
+
+ return 1;
+}
+
+static void *mtrace_alloc(ErtsAlcType_t, void *, Uint);
+static void *mtrace_realloc(ErtsAlcType_t, void *, void *, Uint);
+static void mtrace_free(ErtsAlcType_t, void *, void *);
+
+static ErtsAllocatorFunctions_t real_allctrs[ERTS_ALC_A_MAX+1];
+
+void erts_mtrace_pre_init(void)
+{
+}
+
+void erts_mtrace_init(char *receiver, char *nodename)
+{
+ char hostname[MAXHOSTNAMELEN];
+ char pid[21]; /* enough for a 64 bit number */
+
+ socket_desc = ERTS_SOCK_INVALID_SOCKET;
+ erts_mtrace_enabled = receiver != NULL;
+
+ if (erts_mtrace_enabled) {
+ unsigned a, b, c, d, p;
+ byte ip_addr[4];
+ Uint16 port;
+
+ erts_mtx_init(&mtrace_buf_mutex, "mtrace_buf");
+ erts_mtx_set_forksafe(&mtrace_buf_mutex);
+ erts_mtx_init(&mtrace_op_mutex, "mtrace_op");
+ erts_mtx_set_forksafe(&mtrace_op_mutex);
+
+ socket_desc = erts_sock_open();
+ if (socket_desc == ERTS_SOCK_INVALID_SOCKET) {
+ disable_trace(1, "Failed to open socket", erts_sock_errno());
+ return;
+ }
+
+ if (5 != sscanf(receiver, "%u.%u.%u.%u:%u", &a, &b, &c, &d, &p)
+ || a >= (1 << 8) || b >= (1 << 8)|| c >= (1 << 8) || d >= (1 << 8)
+ || p >= (1 << 16)) {
+ disable_trace(1, "Invalid receiver address", 0);
+ return;
+ }
+
+ ip_addr[0] = (byte) a;
+ ip_addr[1] = (byte) b;
+ ip_addr[2] = (byte) c;
+ ip_addr[3] = (byte) d;
+
+ port = (Uint16) p;
+
+ if (!erts_sock_connect(socket_desc, ip_addr, 4, port)) {
+ disable_trace(1, "Failed to connect to receiver",
+ erts_sock_errno());
+ return;
+ }
+ tracep = trace_buffer;
+ endp = trace_buffer + TRACE_BUF_SZ;
+ if (erts_sock_gethostname(hostname, MAXHOSTNAMELEN) != 0)
+ hostname[0] = '\0';
+ hostname[MAXHOSTNAMELEN-1] = '\0';
+ sys_get_pid(pid);
+ write_trace_header(nodename ? nodename : "", pid, hostname);
+ erts_mtrace_update_heap_size();
+ }
+}
+
+void
+erts_mtrace_install_wrapper_functions(void)
+{
+ if (erts_mtrace_enabled) {
+ int i;
+ /* Install trace functions */
+ ASSERT(sizeof(erts_allctrs) == sizeof(real_allctrs));
+
+ sys_memcpy((void *) real_allctrs,
+ (void *) erts_allctrs,
+ sizeof(erts_allctrs));
+
+ for (i = ERTS_ALC_A_MIN; i <= ERTS_ALC_A_MAX; i++) {
+ erts_allctrs[i].alloc = mtrace_alloc;
+ erts_allctrs[i].realloc = mtrace_realloc;
+ erts_allctrs[i].free = mtrace_free;
+ erts_allctrs[i].extra = (void *) &real_allctrs[i];
+ }
+ }
+}
+
+void
+erts_mtrace_stop(void)
+{
+ erts_mtx_lock(&mtrace_op_mutex);
+ erts_mtx_lock(&mtrace_buf_mutex);
+ if (erts_mtrace_enabled) {
+ Uint32 ti = get_time_inc();
+
+ if (ti != INVALID_TIME_INC
+ && MAKE_TBUF_SZ(UI8_SZ + UI16_SZ + UI32_SZ)) {
+ byte *hdrp;
+ Uint16 hdr;
+ int ti_n;
+
+ *(tracep++) = ERTS_MT_STOP_BDY_TAG;
+
+ hdrp = tracep;
+ tracep += 2;
+
+ PUT_VSZ_UI32(tracep, ti_n, ti);
+
+ hdr = ti_n;
+
+ WRITE_UI16(hdrp, hdr);
+
+ if(send_trace_buffer()) {
+ erts_mtrace_enabled = 0;
+ erts_sock_close(socket_desc);
+ socket_desc = ERTS_SOCK_INVALID_SOCKET;
+ }
+ }
+ }
+ erts_mtx_unlock(&mtrace_buf_mutex);
+ erts_mtx_unlock(&mtrace_op_mutex);
+}
+
+void
+erts_mtrace_exit(Uint32 exit_value)
+{
+ erts_mtx_lock(&mtrace_op_mutex);
+ erts_mtx_lock(&mtrace_buf_mutex);
+ if (erts_mtrace_enabled) {
+ Uint32 ti = get_time_inc();
+
+ if (ti != INVALID_TIME_INC
+ && MAKE_TBUF_SZ(UI8_SZ + UI16_SZ + 2*UI32_SZ)) {
+ byte *hdrp;
+ Uint16 hdr;
+ int ti_n, exit_value_n;
+
+ *(tracep++) = ERTS_MT_EXIT_BDY_TAG;
+
+ hdrp = tracep;
+ tracep += 2;
+
+ PUT_VSZ_UI32(tracep, exit_value_n, exit_value);
+ PUT_VSZ_UI32(tracep, ti_n, ti);
+
+ hdr = ti_n;
+
+ hdr <<= UI32_MSB_EHF_SZ;
+ hdr |= exit_value_n;
+
+ WRITE_UI16(hdrp, hdr);
+
+ if(send_trace_buffer()) {
+ erts_mtrace_enabled = 0;
+ erts_sock_close(socket_desc);
+ socket_desc = ERTS_SOCK_INVALID_SOCKET;
+ }
+ }
+ }
+ erts_mtx_unlock(&mtrace_buf_mutex);
+ erts_mtx_unlock(&mtrace_op_mutex);
+}
+
+static ERTS_INLINE void
+write_alloc_entry(byte tag,
+ void *res,
+ ErtsAlcType_t x,
+ ErtsAlcType_t y,
+ Uint size)
+{
+ erts_mtx_lock(&mtrace_buf_mutex);
+ if (erts_mtrace_enabled) {
+ Uint32 ti = get_time_inc();
+
+ if (ti != INVALID_TIME_INC
+ && MAKE_TBUF_SZ(UI8_SZ + 2*UI16_SZ + 2*UI_SZ + UI32_SZ)) {
+ Uint16 hdr, t_no = (Uint16) x, ct_no = (Uint16) y;
+ byte *hdrp;
+ int t_no_n, ct_no_n = 0, res_n, size_n, ti_n;
+
+ *(tracep++) = tag;
+
+ hdrp = tracep;
+ tracep += 2;
+
+ if (tag == ERTS_MT_CRR_ALLOC_BDY_TAG) {
+ PUT_VSZ_UI16(tracep, ct_no_n, ct_no);
+ }
+ PUT_VSZ_UI16(tracep, t_no_n, t_no);
+ PUT_VSZ_UI( tracep, res_n, res);
+ PUT_VSZ_UI( tracep, size_n, size);
+ PUT_VSZ_UI32(tracep, ti_n, ti);
+
+ hdr = ti_n;
+
+ hdr <<= UI_MSB_EHF_SZ;
+ hdr |= size_n;
+
+ hdr <<= UI_MSB_EHF_SZ;
+ hdr |= res_n;
+
+ hdr <<= UI16_MSB_EHF_SZ;
+ hdr |= t_no_n;
+
+ if (tag == ERTS_MT_CRR_ALLOC_BDY_TAG) {
+ hdr <<= UI16_MSB_EHF_SZ;
+ hdr |= ct_no_n;
+ }
+
+ WRITE_UI16(hdrp, hdr);
+
+#if TRACE_PRINTOUTS
+ print_trace_entry(tag,
+ ct_no, ct_no_n,
+ t_no, t_no_n,
+ (Uint) res, res_n,
+ 0, 0,
+ size, size_n,
+ ti, ti_n);
+#endif
+
+#ifdef DEBUG
+ check_alloc_entry(hdrp-1, tracep,
+ tag,
+ ct_no, ct_no_n,
+ t_no, t_no_n,
+ (Uint) res, res_n,
+ size, size_n,
+ ti, ti_n);
+#endif
+
+ }
+
+ }
+ erts_mtx_unlock(&mtrace_buf_mutex);
+
+}
+
+static ERTS_INLINE void
+write_realloc_entry(byte tag,
+ void *res,
+ ErtsAlcType_t x,
+ ErtsAlcType_t y,
+ void *ptr,
+ Uint size)
+{
+ erts_mtx_lock(&mtrace_buf_mutex);
+ if (erts_mtrace_enabled) {
+ Uint32 ti = get_time_inc();
+
+ if (ti != INVALID_TIME_INC
+ && MAKE_TBUF_SZ(UI8_SZ + 2*UI16_SZ + 3*UI_SZ + UI32_SZ)) {
+ Uint16 hdr, t_no = (Uint16) x, ct_no = (Uint16) y;
+ byte *hdrp;
+ int t_no_n, ct_no_n = 0, res_n, ptr_n, size_n, ti_n;
+
+ *(tracep++) = tag;
+
+ hdrp = tracep;
+ tracep += 2;
+
+ if (tag == ERTS_MT_CRR_REALLOC_BDY_TAG) {
+ PUT_VSZ_UI16(tracep, ct_no_n, ct_no);
+ }
+ PUT_VSZ_UI16(tracep, t_no_n, t_no);
+ PUT_VSZ_UI( tracep, res_n, res);
+ PUT_VSZ_UI( tracep, ptr_n, ptr);
+ PUT_VSZ_UI( tracep, size_n, size);
+ PUT_VSZ_UI32(tracep, ti_n, ti);
+
+ hdr = ti_n;
+
+ hdr <<= UI_MSB_EHF_SZ;
+ hdr |= size_n;
+
+ hdr <<= UI_MSB_EHF_SZ;
+ hdr |= ptr_n;
+
+ hdr <<= UI_MSB_EHF_SZ;
+ hdr |= res_n;
+
+ hdr <<= UI16_MSB_EHF_SZ;
+ hdr |= t_no_n;
+
+ if (tag == ERTS_MT_CRR_REALLOC_BDY_TAG) {
+ hdr <<= UI16_MSB_EHF_SZ;
+ hdr |= ct_no_n;
+ }
+
+ WRITE_UI16(hdrp, hdr);
+
+#if TRACE_PRINTOUTS
+ print_trace_entry(tag,
+ ct_no, ct_no_n,
+ t_no, t_no_n,
+ (Uint) res, res_n,
+ (Uint) ptr, ptr_n,
+ size, size_n,
+ ti, ti_n);
+#endif
+
+#ifdef DEBUG
+ check_realloc_entry(hdrp-1, tracep,
+ tag,
+ ct_no, ct_no_n,
+ t_no, t_no_n,
+ (Uint) res, res_n,
+ (Uint) ptr, ptr_n,
+ size, size_n,
+ ti, ti_n);
+#endif
+
+ }
+ }
+ erts_mtx_unlock(&mtrace_buf_mutex);
+}
+
+static ERTS_INLINE void
+write_free_entry(byte tag,
+ ErtsAlcType_t x,
+ ErtsAlcType_t y,
+ void *ptr)
+{
+ erts_mtx_lock(&mtrace_buf_mutex);
+ if (erts_mtrace_enabled) {
+ Uint32 ti = get_time_inc();
+
+ if (ti != INVALID_TIME_INC
+ && MAKE_TBUF_SZ(UI8_SZ + 2*UI16_SZ + UI_SZ + UI32_SZ)) {
+ Uint16 hdr, t_no = (Uint16) x, ct_no = (Uint16) y;
+ byte *hdrp;
+ int t_no_n, ct_no_n = 0, ptr_n, ti_n;
+
+ *(tracep++) = tag;
+
+ hdrp = tracep;
+ tracep += 2;
+
+ if (tag == ERTS_MT_CRR_FREE_BDY_TAG) {
+ PUT_VSZ_UI16(tracep, ct_no_n, ct_no);
+ }
+ PUT_VSZ_UI16(tracep, t_no_n, t_no);
+ PUT_VSZ_UI( tracep, ptr_n, ptr);
+ PUT_VSZ_UI32(tracep, ti_n, ti);
+
+ hdr = ti_n;
+
+ hdr <<= UI_MSB_EHF_SZ;
+ hdr |= ptr_n;
+
+ hdr <<= UI16_MSB_EHF_SZ;
+ hdr |= t_no_n;
+
+ if (tag == ERTS_MT_CRR_FREE_BDY_TAG) {
+ hdr <<= UI16_MSB_EHF_SZ;
+ hdr |= ct_no_n;
+ }
+
+ WRITE_UI16(hdrp, hdr);
+
+#if TRACE_PRINTOUTS
+ print_trace_entry(tag,
+ ct_no, ct_no_n,
+ t_no, t_no_n,
+ (Uint) 0, 0,
+ (Uint) ptr, ptr_n,
+ 0, 0,
+ ti, ti_n);
+#endif
+
+#ifdef DEBUG
+ check_free_entry(hdrp-1, tracep,
+ tag,
+ ct_no, ct_no_n,
+ t_no, t_no_n,
+ (Uint) ptr, ptr_n,
+ ti, ti_n);
+#endif
+ }
+
+ }
+ erts_mtx_unlock(&mtrace_buf_mutex);
+}
+
+static void *
+mtrace_alloc(ErtsAlcType_t n, void *extra, Uint size)
+{
+ ErtsAllocatorFunctions_t *real_af = (ErtsAllocatorFunctions_t *) extra;
+ void *res;
+
+ erts_mtx_lock(&mtrace_op_mutex);
+
+ res = (*real_af->alloc)(n, real_af->extra, size);
+ write_alloc_entry(ERTS_MT_ALLOC_BDY_TAG, res, n, 0, size);
+
+ erts_mtx_unlock(&mtrace_op_mutex);
+
+ return res;
+}
+
+static void *
+mtrace_realloc(ErtsAlcType_t n, void *extra, void *ptr, Uint size)
+{
+ ErtsAllocatorFunctions_t *real_af = (ErtsAllocatorFunctions_t *) extra;
+ void *res;
+
+ erts_mtx_lock(&mtrace_op_mutex);
+
+ res = (*real_af->realloc)(n, real_af->extra, ptr, size);
+ write_realloc_entry(ERTS_MT_REALLOC_BDY_TAG, res, n, 0, ptr, size);
+
+ erts_mtx_unlock(&mtrace_op_mutex);
+
+ return res;
+
+}
+
+static void
+mtrace_free(ErtsAlcType_t n, void *extra, void *ptr)
+{
+ ErtsAllocatorFunctions_t *real_af = (ErtsAllocatorFunctions_t *) extra;
+
+ erts_mtx_lock(&mtrace_op_mutex);
+
+ (*real_af->free)(n, real_af->extra, ptr);
+ write_free_entry(ERTS_MT_FREE_BDY_TAG, n, 0, ptr);
+
+ erts_mtx_unlock(&mtrace_op_mutex);
+}
+
+
+void
+erts_mtrace_crr_alloc(void *res, ErtsAlcType_t n, ErtsAlcType_t m, Uint size)
+{
+ write_alloc_entry(ERTS_MT_CRR_ALLOC_BDY_TAG, res, n, m, size);
+}
+
+void
+erts_mtrace_crr_realloc(void *res, ErtsAlcType_t n, ErtsAlcType_t m, void *ptr,
+ Uint size)
+{
+ write_realloc_entry(ERTS_MT_CRR_REALLOC_BDY_TAG, res, n, m, ptr, size);
+}
+
+void
+erts_mtrace_crr_free(ErtsAlcType_t n, ErtsAlcType_t m, void *ptr)
+{
+ write_free_entry(ERTS_MT_CRR_FREE_BDY_TAG, n, m, ptr);
+}
+
+
+#if TRACE_PRINTOUTS
+static void
+print_trace_entry(byte tag,
+		  Uint16 ct_no, int ct_no_n,
+		  Uint16 t_no, int t_no_n,
+ Uint res, int res_n,
+ Uint ptr, int ptr_n,
+ Uint size, int size_n,
+ Uint32 ti,int ti_n)
+{
+ switch (tag) {
+ case ERTS_MT_ALLOC_BDY_TAG:
+ fprintf(stderr,
+ "{alloc, {%lu, %lu, %lu}, {%u, %u, %u, %u}}\n\r",
+
+ (unsigned long) t_no, (unsigned long) res,
+ (unsigned long) size,
+
+ MSB2BITS(t_no_n), MSB2BITS(res_n),
+ MSB2BITS(size_n), MSB2BITS(ti_n));
+ break;
+ case ERTS_MT_REALLOC_BDY_TAG:
+ fprintf(stderr,
+ "{realloc, {%lu, %lu, %lu, %lu}, {%u, %u, %u, %u, %u}}\n\r",
+
+ (unsigned long) t_no, (unsigned long) res,
+ (unsigned long) ptr, (unsigned long) size,
+
+ MSB2BITS(t_no_n), MSB2BITS(res_n),
+ MSB2BITS(ptr_n), MSB2BITS(size_n), MSB2BITS(ti_n));
+ break;
+ case ERTS_MT_FREE_BDY_TAG:
+ fprintf(stderr,
+		"{free, {%lu, %lu}, {%u, %u, %u}}\n\r",
+
+ (unsigned long) t_no, (unsigned long) ptr,
+
+ MSB2BITS(t_no_n), MSB2BITS(ptr_n), MSB2BITS(ti_n));
+ break;
+ case ERTS_MT_CRR_ALLOC_BDY_TAG:
+ fprintf(stderr,
+ "{crr_alloc, {%lu, %lu, %lu, %lu}, {%u, %u, %u, %u, %u}}\n\r",
+
+ (unsigned long) ct_no, (unsigned long) t_no,
+ (unsigned long) res, (unsigned long) size,
+
+ MSB2BITS(ct_no_n), MSB2BITS(t_no_n),
+ MSB2BITS(res_n), MSB2BITS(size_n),
+ MSB2BITS(ti_n));
+ break;
+ case ERTS_MT_CRR_REALLOC_BDY_TAG:
+ fprintf(stderr,
+ "{crr_realloc, {%lu, %lu, %lu, %lu, %lu}, "
+ "{%u, %u, %u, %u, %u, %u}}\n\r",
+
+ (unsigned long) ct_no, (unsigned long) t_no,
+ (unsigned long) res, (unsigned long) ptr,
+ (unsigned long) size,
+
+ MSB2BITS(ct_no_n), MSB2BITS(t_no_n),
+ MSB2BITS(res_n), MSB2BITS(ptr_n),
+ MSB2BITS(size_n), MSB2BITS(ti_n));
+ break;
+ case ERTS_MT_CRR_FREE_BDY_TAG:
+ fprintf(stderr,
+ "{crr_free, {%lu, %lu, %lu}, {%u, %u, %u, %u}}\n\r",
+
+ (unsigned long) ct_no, (unsigned long) t_no,
+ (unsigned long) ptr,
+
+ MSB2BITS(ct_no_n), MSB2BITS(t_no_n),
+ MSB2BITS(ptr_n), MSB2BITS(ti_n));
+ break;
+ default:
+ fprintf(stderr, "{'\?\?\?'}\n\r");
+ break;
+ }
+}
+
+#endif /* #if TRACE_PRINTOUTS */
+
+#ifdef DEBUG
+
+#define GET_UI16(P) ((P) += UI16_SZ, \
+ (((Uint16) (*((P) - 2) << 8)) | ((Uint16) (*((P) - 1)))))
+
+static void
+check_ui(Uint16 *hdrp, byte **pp, Uint ui, int msb,
+ Uint16 f_mask, Uint16 f_size)
+{
+ Uint x;
+ int n;
+
+ ASSERT((msb & ~f_mask) == 0);
+
+ n = (int) (*hdrp & f_mask);
+
+ ASSERT(n == msb);
+
+ *hdrp >>= f_size;
+
+ x = 0;
+ switch (n) {
+#ifdef ARCH_64
+ case 7: x |= *((*pp)++); x <<= 8;
+ case 6: x |= *((*pp)++); x <<= 8;
+ case 5: x |= *((*pp)++); x <<= 8;
+ case 4: x |= *((*pp)++); x <<= 8;
+#endif
+ case 3: x |= *((*pp)++); x <<= 8;
+ case 2: x |= *((*pp)++); x <<= 8;
+ case 1: x |= *((*pp)++); x <<= 8;
+ case 0: x |= *((*pp)++); break;
+ default: ASSERT(0);
+ }
+
+ ASSERT(x == ui);
+}
+
+
+void
+check_alloc_entry(byte *sp, byte *ep,
+ byte tag,
+ Uint16 ct_no, int ct_no_n,
+ Uint16 t_no, int t_no_n,
+ Uint res, int res_n,
+ Uint size, int size_n,
+ Uint32 ti,int ti_n)
+{
+ byte *p = sp;
+ Uint16 hdr;
+
+ ASSERT(*p == tag);
+ p++;
+
+ hdr = GET_UI16(p);
+
+ if (tag == ERTS_MT_CRR_ALLOC_BDY_TAG)
+ check_ui(&hdr, &p, ct_no, ct_no_n, UI16_MSB_EHF_MSK, UI16_MSB_EHF_SZ);
+ check_ui(&hdr, &p, t_no, t_no_n, UI16_MSB_EHF_MSK, UI16_MSB_EHF_SZ);
+ check_ui(&hdr, &p, res, res_n, UI_MSB_EHF_MSK, UI_MSB_EHF_SZ);
+ check_ui(&hdr, &p, size, size_n, UI_MSB_EHF_MSK, UI_MSB_EHF_SZ);
+ check_ui(&hdr, &p, ti, ti_n, UI32_MSB_EHF_MSK, UI32_MSB_EHF_SZ);
+
+ ASSERT(hdr == 0);
+ ASSERT(p == ep);
+}
+
+void
+check_realloc_entry(byte *sp, byte *ep,
+ byte tag,
+ Uint16 ct_no, int ct_no_n,
+ Uint16 t_no, int t_no_n,
+ Uint res, int res_n,
+ Uint ptr, int ptr_n,
+ Uint size, int size_n,
+ Uint32 ti,int ti_n)
+{
+ byte *p = sp;
+ Uint16 hdr;
+
+ ASSERT(*p == tag);
+ p++;
+
+ hdr = GET_UI16(p);
+
+ if (tag == ERTS_MT_CRR_REALLOC_BDY_TAG)
+ check_ui(&hdr, &p, ct_no, ct_no_n, UI16_MSB_EHF_MSK, UI16_MSB_EHF_SZ);
+ check_ui(&hdr, &p, t_no, t_no_n, UI16_MSB_EHF_MSK, UI16_MSB_EHF_SZ);
+ check_ui(&hdr, &p, res, res_n, UI_MSB_EHF_MSK, UI_MSB_EHF_SZ);
+ check_ui(&hdr, &p, ptr, ptr_n, UI_MSB_EHF_MSK, UI_MSB_EHF_SZ);
+ check_ui(&hdr, &p, size, size_n, UI_MSB_EHF_MSK, UI_MSB_EHF_SZ);
+ check_ui(&hdr, &p, ti, ti_n, UI32_MSB_EHF_MSK, UI32_MSB_EHF_SZ);
+
+ ASSERT(hdr == 0);
+ ASSERT(p == ep);
+}
+
+void
+check_free_entry(byte *sp, byte *ep,
+ byte tag,
+ Uint16 ct_no, int ct_no_n,
+ Uint16 t_no, int t_no_n,
+ Uint ptr, int ptr_n,
+ Uint32 ti,int ti_n)
+{
+ byte *p = sp;
+ Uint16 hdr;
+
+ ASSERT(*p == tag);
+ p++;
+
+ hdr = GET_UI16(p);
+
+ if (tag == ERTS_MT_CRR_FREE_BDY_TAG)
+ check_ui(&hdr, &p, ct_no, ct_no_n, UI16_MSB_EHF_MSK, UI16_MSB_EHF_SZ);
+ check_ui(&hdr, &p, t_no, t_no_n, UI16_MSB_EHF_MSK, UI16_MSB_EHF_SZ);
+ check_ui(&hdr, &p, ptr, ptr_n, UI_MSB_EHF_MSK, UI_MSB_EHF_SZ);
+ check_ui(&hdr, &p, ti, ti_n, UI32_MSB_EHF_MSK, UI32_MSB_EHF_SZ);
+
+ ASSERT(hdr == 0);
+ ASSERT(p == ep);
+
+}
+
+void
+check_time_inc_entry(byte *sp, byte *ep,
+ Uint32 secs, int secs_n,
+ Uint32 usecs, int usecs_n)
+{
+ byte *p = sp;
+ Uint16 hdr;
+
+ ASSERT(*p == ERTS_MT_TIME_INC_BDY_TAG);
+ p++;
+
+ hdr = GET_UI16(p);
+
+ check_ui(&hdr, &p, secs, secs_n, UI32_MSB_EHF_MSK, UI32_MSB_EHF_SZ);
+ check_ui(&hdr, &p, usecs, usecs_n, UI32_MSB_EHF_MSK, UI32_MSB_EHF_SZ);
+
+ ASSERT(hdr == 0);
+ ASSERT(p == ep);
+
+}
+
+#endif /* #ifdef DEBUG */
+
diff --git a/erts/emulator/beam/erl_mtrace.h b/erts/emulator/beam/erl_mtrace.h
new file mode 100644
index 0000000000..204543ddb0
--- /dev/null
+++ b/erts/emulator/beam/erl_mtrace.h
@@ -0,0 +1,51 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2003-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+#ifndef ERL_MTRACE_H__
+#define ERL_MTRACE_H__
+
+#include "erl_alloc_types.h"
+
+#if (defined(ERTS___AFTER_MORECORE_HOOK_CAN_TRACK_MALLOC) \
+ || defined(ERTS_BRK_WRAPPERS_CAN_TRACK_MALLOC))
+#undef ERTS_CAN_TRACK_MALLOC
+#define ERTS_CAN_TRACK_MALLOC
+#endif
+
+#define ERTS_MTRACE_SEGMENT_ID ERTS_ALC_A_INVALID
+
+extern int erts_mtrace_enabled;
+
+void erts_mtrace_pre_init(void);
+void erts_mtrace_init(char *receiver, char *nodename);
+void erts_mtrace_install_wrapper_functions(void);
+void erts_mtrace_stop(void);
+void erts_mtrace_exit(Uint32 exit_value);
+
+void erts_mtrace_crr_alloc(void*, ErtsAlcType_t, ErtsAlcType_t, Uint);
+void erts_mtrace_crr_realloc(void*, ErtsAlcType_t, ErtsAlcType_t, void*, Uint);
+void erts_mtrace_crr_free(ErtsAlcType_t, ErtsAlcType_t, void*);
+
+
+void erts_mtrace_update_heap_size(void); /* Implemented in
+ * ../sys/common/erl_mtrace_sys_wrap.c
+ */
+
+#endif /* #ifndef ERL_MTRACE_H__ */
+
diff --git a/erts/emulator/beam/erl_nif.c b/erts/emulator/beam/erl_nif.c
new file mode 100644
index 0000000000..fa4454a3f3
--- /dev/null
+++ b/erts/emulator/beam/erl_nif.c
@@ -0,0 +1,641 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+/* Erlang Native InterFace
+ */
+
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+
+#include "erl_nif.h"
+
+#include "sys.h"
+#include "global.h"
+#include "erl_binary.h"
+#include "bif.h"
+#include "error.h"
+#include "big.h"
+#include "beam_bp.h"
+
+#include <limits.h>
+
+/*
+static ERTS_INLINE Eterm* alloc_heap(ErlNifEnv* env, unsigned need)
+{
+ return HAlloc(env->proc, need);
+}
+*/
+
+#define MIN_HEAP_FRAG_SZ 200
+static Eterm* alloc_heap_heavy(ErlNifEnv* env, unsigned need);
+
+static ERTS_INLINE Eterm* alloc_heap(ErlNifEnv* env, unsigned need)
+{
+ Eterm* hp = env->hp;
+ env->hp += need;
+ if (env->hp <= env->hp_end) {
+ return hp;
+ }
+ env->hp = hp;
+ return alloc_heap_heavy(env,need);
+}
+
+static Eterm* alloc_heap_heavy(ErlNifEnv* env, unsigned need)
+{
+ Eterm* hp;
+
+ if (env->heap_frag_sz == 0) {
+ ASSERT(HEAP_LIMIT(env->proc) == env->hp_end);
+ HEAP_TOP(env->proc) = env->hp;
+ env->heap_frag_sz = need + MIN_HEAP_FRAG_SZ;
+ }
+ else {
+ HRelease(env->proc, env->hp_end, env->hp);
+ env->heap_frag_sz *= 2;
+ }
+ hp = erts_heap_alloc(env->proc, env->heap_frag_sz);
+ env->hp = hp + need;
+ env->hp_end = hp + env->heap_frag_sz;
+ return hp;
+}
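+
+/* Illustrative note (added for exposition): alloc_heap() bump-allocates
+ * from the process heap until it is exhausted; alloc_heap_heavy() then
+ * switches the environment over to heap fragments and doubles the
+ * fragment size on every subsequent overflow, so building a term of n
+ * words costs O(log n) fragment allocations. */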
+
+void erts_pre_nif(ErlNifEnv* env, Process* p, void* nif_data)
+{
+ env->nif_data = nif_data;
+ env->proc = p;
+ env->hp = HEAP_TOP(p);
+ env->hp_end = HEAP_LIMIT(p);
+ env->heap_frag_sz = 0;
+ env->fpe_was_unmasked = erts_block_fpe();
+}
+
+void erts_post_nif(ErlNifEnv* env)
+{
+ erts_unblock_fpe(env->fpe_was_unmasked);
+ if (env->heap_frag_sz == 0) {
+ ASSERT(env->hp_end == HEAP_LIMIT(env->proc));
+ ASSERT(env->hp >= HEAP_TOP(env->proc));
+ ASSERT(env->hp <= HEAP_LIMIT(env->proc));
+ HEAP_TOP(env->proc) = env->hp;
+ }
+ else {
+ ASSERT(env->hp_end != HEAP_LIMIT(env->proc));
+ ASSERT(env->hp_end - env->hp <= env->heap_frag_sz);
+ HRelease(env->proc, env->hp_end, env->hp);
+ }
+}
+
+void* enif_get_data(ErlNifEnv* env)
+{
+ return env->nif_data;
+}
+
+void* enif_alloc(ErlNifEnv* env, size_t size)
+{
+ return erts_alloc_fnf(ERTS_ALC_T_NIF, (Uint) size);
+}
+
+void enif_free(ErlNifEnv* env, void* ptr)
+{
+ erts_free(ERTS_ALC_T_NIF, ptr);
+}
+
+
+int enif_is_binary(ErlNifEnv* env, ERL_NIF_TERM term)
+{
+ return is_binary(term) && (binary_bitsize(term) % 8 == 0);
+}
+
+
+int enif_inspect_binary(ErlNifEnv* env, Eterm bin_term, ErlNifBinary* bin)
+{
+ bin->tmp_alloc = NULL;
+ bin->data = erts_get_aligned_binary_bytes(bin_term, &bin->tmp_alloc);
+ if (bin->data == NULL) {
+ return 0;
+ }
+ bin->bin_term = bin_term;
+ bin->size = binary_size(bin_term);
+ bin->ref_bin = NULL;
+ return 1;
+}
+
+
+int enif_alloc_binary(ErlNifEnv* env, unsigned size, ErlNifBinary* bin)
+{
+ Binary* refbin;
+
+ refbin = erts_bin_drv_alloc_fnf(size); /* BUGBUG: alloc type? */
+ if (refbin == NULL) {
+ return 0; /* The NIF must take action */
+ }
+ refbin->flags = BIN_FLAG_DRV; /* BUGBUG: Flag? */
+ erts_refc_init(&refbin->refc, 1);
+ refbin->orig_size = (long) size;
+
+ bin->size = size;
+ bin->data = (unsigned char*) refbin->orig_bytes;
+ bin->bin_term = THE_NON_VALUE;
+ bin->tmp_alloc = NULL;
+ bin->ref_bin = refbin;
+ return 1;
+}
+
+void enif_release_binary(ErlNifEnv* env, ErlNifBinary* bin)
+{
+ if (bin->ref_bin == NULL) {
+ erts_free_aligned_binary_bytes(bin->tmp_alloc);
+ }
+ else {
+ Binary* refbin = bin->ref_bin;
+ ASSERT(bin->tmp_alloc == NULL);
+ ASSERT(bin->bin_term == THE_NON_VALUE);
+ if (erts_refc_dectest(&refbin->refc, 0) == 0) {
+ erts_bin_free(refbin);
+ }
+ }
+#ifdef DEBUG
+ bin->bin_term = THE_NON_VALUE;
+ bin->tmp_alloc = NULL;
+ bin->ref_bin = NULL;
+#endif
+}
+
+Eterm enif_make_binary(ErlNifEnv* env, ErlNifBinary* bin)
+{
+ if (bin->ref_bin == NULL) {
+ erts_free_aligned_binary_bytes(bin->tmp_alloc);
+ return bin->bin_term;
+ }
+ else {
+ Binary* bptr = bin->ref_bin;
+ ProcBin* pb;
+ ASSERT(bin->tmp_alloc == NULL);
+
+ /* !! Copy-paste from new_binary() !! */
+ pb = (ProcBin *) alloc_heap(env, PROC_BIN_SIZE);
+ pb->thing_word = HEADER_PROC_BIN;
+ pb->size = bptr->orig_size;
+ pb->next = MSO(env->proc).mso;
+ MSO(env->proc).mso = pb;
+ pb->val = bptr;
+ pb->bytes = (byte*) bptr->orig_bytes;
+ pb->flags = 0;
+
+ MSO(env->proc).overhead += pb->size / sizeof(Eterm);
+ return make_binary(pb);
+ }
+}
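+
+/* Illustrative usage sketch (hypothetical NIF, for exposition only):
+ * the typical inspect/alloc/make cycle of the binary API above.
+ *
+ *   static ERL_NIF_TERM copy_bin(ErlNifEnv* env, ERL_NIF_TERM arg)
+ *   {
+ *       ErlNifBinary in, out;
+ *       if (!enif_inspect_binary(env, arg, &in))
+ *           return enif_make_badarg(env);
+ *       if (!enif_alloc_binary(env, in.size, &out)) {
+ *           enif_release_binary(env, &in);
+ *           return enif_make_badarg(env);
+ *       }
+ *       memcpy(out.data, in.data, in.size);
+ *       enif_release_binary(env, &in);
+ *       return enif_make_binary(env, &out);
+ *   }
+ */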
+
+ERL_NIF_TERM enif_make_badarg(ErlNifEnv* env)
+{
+ BIF_ERROR(env->proc, BADARG);
+}
+
+
+int enif_get_int(ErlNifEnv* env, Eterm term, int* ip)
+{
+#if SIZEOF_INT == SIZEOF_VOID_P
+ return term_to_Sint(term, ip);
+#elif SIZEOF_LONG == SIZEOF_VOID_P
+ Sint i;
+ if (!term_to_Sint(term, &i) || i < INT_MIN || i > INT_MAX) {
+ return 0;
+ }
+ *ip = (int) i;
+ return 1;
+#else
+# error Unknown word size
+#endif
+}
+
+int enif_get_ulong(ErlNifEnv* env, Eterm term, unsigned long* ip)
+{
+#if SIZEOF_LONG == SIZEOF_VOID_P
+ return term_to_Uint(term, ip);
+#else
+# error Unknown long word size
+#endif
+}
+
+int enif_get_list_cell(ErlNifEnv* env, Eterm term, Eterm* head, Eterm* tail)
+{
+ Eterm* val;
+ if (is_not_list(term)) return 0;
+ val = list_val(term);
+ *head = CAR(val);
+ *tail = CDR(val);
+ return 1;
+}
+
+ERL_NIF_TERM enif_make_int(ErlNifEnv* env, int i)
+{
+#if SIZEOF_INT == SIZEOF_VOID_P
+ return IS_SSMALL(i) ? make_small(i) : small_to_big(i,alloc_heap(env,2));
+#elif SIZEOF_LONG == SIZEOF_VOID_P
+ return make_small(i);
+#endif
+}
+
+ERL_NIF_TERM enif_make_ulong(ErlNifEnv* env, unsigned long i)
+{
+#if SIZEOF_LONG == SIZEOF_VOID_P
+ Eterm* hp;
+ Uint sz = 0;
+ erts_bld_uint(NULL, &sz, i);
+ hp = alloc_heap(env,sz);
+ return erts_bld_uint(&hp, NULL, i);
+#else
+# error Unknown long word size
+#endif
+
+}
+
+
+ERL_NIF_TERM enif_make_atom(ErlNifEnv* env, const char* name)
+{
+ return am_atom_put(name, sys_strlen(name));
+}
+
+
+ERL_NIF_TERM enif_make_tuple(ErlNifEnv* env, unsigned cnt, ...)
+{
+ Eterm* hp = alloc_heap(env,cnt+1);
+ Eterm ret = make_tuple(hp);
+ va_list ap;
+
+ *hp++ = make_arityval(cnt);
+ va_start(ap,cnt);
+ while (cnt--) {
+ *hp++ = va_arg(ap,Eterm);
+ }
+ va_end(ap);
+ return ret;
+}
+
+ERL_NIF_TERM enif_make_list_cell(ErlNifEnv* env, Eterm car, Eterm cdr)
+{
+ Eterm* hp = alloc_heap(env,2);
+ Eterm ret = make_list(hp);
+
+ CAR(hp) = car;
+ CDR(hp) = cdr;
+ return ret;
+}
+
+ERL_NIF_TERM enif_make_list(ErlNifEnv* env, unsigned cnt, ...)
+{
+ Eterm* hp = alloc_heap(env,cnt*2);
+ Eterm ret = make_list(hp);
+ Eterm* last = &ret;
+ va_list ap;
+
+ va_start(ap,cnt);
+ while (cnt--) {
+ *last = make_list(hp);
+ *hp = va_arg(ap,Eterm);
+ last = ++hp;
+ ++hp;
+ }
+ va_end(ap);
+ *last = NIL;
+ return ret;
+}
+
+ERL_NIF_TERM enif_make_string(ErlNifEnv* env, const char* string)
+{
+ Sint n = strlen(string);
+ Eterm* hp = alloc_heap(env,n*2);
+ return erts_bld_string_n(&hp,NULL,string,n);
+}
+
+
+
+
+/***************************************************************************
+ ** load_nif/2 **
+ ***************************************************************************/
+
+
+static Uint** get_func_pp(Eterm* mod_code, Eterm f_atom, unsigned arity)
+{
+ int n = (int) mod_code[MI_NUM_FUNCTIONS];
+ int j;
+ for (j = 0; j < n; ++j) {
+ Uint* code_ptr = (Uint*) mod_code[MI_FUNCTIONS+j];
+ ASSERT(code_ptr[0] == (Uint) BeamOp(op_i_func_info_IaaI));
+ if (f_atom == ((Eterm) code_ptr[3])
+ && arity == ((unsigned) code_ptr[4])) {
+
+ return (Uint**) &mod_code[MI_FUNCTIONS+j];
+ }
+ }
+ return NULL;
+}
+
+#define in_area(ptr,start,nbytes) \
+ ((unsigned long)((char*)(ptr) - (char*)(start)) < (nbytes))
+
+static void refresh_cached_nif_data(Eterm* mod_code,
+ struct erl_module_nif* mod_nif)
+{
+ int i;
+ for (i=0; i < mod_nif->entry->num_of_funcs; i++) {
+ Eterm f_atom;
+ ErlNifFunc* func = &mod_nif->entry->funcs[i];
+ Uint* code_ptr;
+
+ erts_atom_get(func->name, strlen(func->name), &f_atom);
+ code_ptr = *get_func_pp(mod_code, f_atom, func->arity);
+ code_ptr[5+2] = (Uint) mod_nif->data;
+ }
+}
+
+static Eterm mkatom(const char *str)
+{
+ return am_atom_put(str, sys_strlen(str));
+}
+
+static struct tainted_module_t
+{
+ struct tainted_module_t* next;
+ Eterm module_atom;
+}*first_tainted_module = NULL;
+
+static void add_taint(Eterm mod_atom)
+{
+ struct tainted_module_t* t;
+ for (t=first_tainted_module ; t!=NULL; t=t->next) {
+ if (t->module_atom == mod_atom) {
+ return;
+ }
+ }
+ t = erts_alloc_fnf(ERTS_ALC_T_TAINT, sizeof(*t));
+ if (t != NULL) {
+ t->module_atom = mod_atom;
+ t->next = first_tainted_module;
+ first_tainted_module = t;
+ }
+}
+
+Eterm erts_nif_taints(Process* p)
+{
+ struct tainted_module_t* t;
+ unsigned cnt = 0;
+ Eterm list = NIL;
+ Eterm* hp;
+ for (t=first_tainted_module ; t!=NULL; t=t->next) {
+ cnt++;
+ }
+ hp = HAlloc(p,cnt*2);
+ for (t=first_tainted_module ; t!=NULL; t=t->next) {
+ list = CONS(hp, t->module_atom, list);
+ hp += 2;
+ }
+ return list;
+}
+
+
+static Eterm load_nif_error(Process* p, const char* atom, const char* format, ...)
+{
+ erts_dsprintf_buf_t* dsbufp = erts_create_tmp_dsbuf(0);
+ Eterm ret;
+ Eterm* hp;
+ Eterm** hpp = NULL;
+ Uint sz = 0;
+ Uint* szp = &sz;
+ va_list arglist;
+
+ va_start(arglist, format);
+ erts_vdsprintf(dsbufp, format, arglist);
+ va_end(arglist);
+
+ for (;;) {
+ Eterm txt = erts_bld_string_n(hpp, &sz, dsbufp->str, dsbufp->str_len);
+ ret = erts_bld_tuple(hpp, szp, 3, am_error, mkatom(atom), txt);
+ if (hpp != NULL) {
+ break;
+ }
+ hp = HAlloc(p,sz);
+ hpp = &hp;
+ szp = NULL;
+ }
+ erts_destroy_tmp_dsbuf(dsbufp);
+ return ret;
+}
+
+BIF_RETTYPE load_nif_2(BIF_ALIST_2)
+{
+ static const char bad_lib[] = "bad_lib";
+ static const char reload[] = "reload";
+ static const char upgrade[] = "upgrade";
+ char lib_name[256]; /* BUGBUG: Max-length? */
+ void* handle = NULL;
+ void* init_func;
+ ErlNifEntry* entry = NULL;
+ ErlNifEnv env;
+ int len, i, err;
+ Module* mod;
+ Eterm mod_atom;
+ Eterm f_atom;
+ Eterm* caller;
+ ErtsSysDdllError errdesc = ERTS_SYS_DDLL_ERROR_INIT;
+ Eterm ret = am_ok;
+ int veto;
+
+ len = intlist_to_buf(BIF_ARG_1, lib_name, sizeof(lib_name)-1);
+ if (len < 1) {
+ /*erts_fprintf(stderr, "Invalid library path name '%T'\r\n", BIF_ARG_1);*/
+ BIF_ERROR(BIF_P, BADARG);
+ }
+ lib_name[len] = '\0';
+
+ /* Block system (is this the right place to do it?) */
+ erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ erts_smp_block_system(0);
+
+ /* Find calling module */
+ ASSERT(BIF_P->current != NULL);
+ ASSERT(BIF_P->current[0] == am_erlang
+ && BIF_P->current[1] == am_load_nif
+ && BIF_P->current[2] == 2);
+ caller = find_function_from_pc(BIF_P->cp);
+ ASSERT(caller != NULL);
+ mod_atom = caller[0];
+ ASSERT(is_atom(mod_atom));
+ mod=erts_get_module(mod_atom);
+ ASSERT(mod != NULL);
+
+ if (!in_area(caller, mod->code, mod->code_length)) {
+ ASSERT(in_area(caller, mod->old_code, mod->old_code_length));
+
+ ret = load_nif_error(BIF_P, "old_code", "Calling load_nif from old "
+ "module '%T' not allowed", mod_atom);
+ }
+ else if ((err=erts_sys_ddll_open2(lib_name, &handle, &errdesc)) != ERL_DE_NO_ERROR) {
+ ret = load_nif_error(BIF_P, "load_failed", "Failed to load NIF library"
+ " %s: '%s'", lib_name, errdesc.str);
+ }
+ else if (erts_sys_ddll_load_nif_init(handle, &init_func, &errdesc) != ERL_DE_NO_ERROR) {
+ ret = load_nif_error(BIF_P, bad_lib, "Failed to find library init"
+ " function: '%s'", errdesc.str);
+
+ }
+ else if ((add_taint(mod_atom),
+ (entry = erts_sys_ddll_call_nif_init(init_func)) == NULL)) {
+ ret = load_nif_error(BIF_P, bad_lib, "Library init-call unsuccessful");
+ }
+ else if (entry->major != ERL_NIF_MAJOR_VERSION
+ || entry->minor > ERL_NIF_MINOR_VERSION) {
+
+ ret = load_nif_error(BIF_P, bad_lib, "Library version (%d.%d) not compatible (with %d.%d).",
+ entry->major, entry->minor, ERL_NIF_MAJOR_VERSION, ERL_NIF_MINOR_VERSION);
+ }
+ else if (!erts_is_atom_str((char*)entry->name, mod_atom)) {
+ ret = load_nif_error(BIF_P, bad_lib, "Library module name '%s' does not"
+ " match calling module '%T'", entry->name, mod_atom);
+ }
+ else {
+ /*erts_fprintf(stderr, "Found module %T\r\n", mod_atom);*/
+
+ for (i=0; i < entry->num_of_funcs && ret==am_ok; i++) {
+ Uint** code_pp;
+ ErlNifFunc* f = &entry->funcs[i];
+ if (f->arity > 3) {
+ ret = load_nif_error(BIF_P,bad_lib,"Function arity too high for NIF %s/%u",
+ f->name, f->arity);
+ }
+ else if (!erts_atom_get(f->name, strlen(f->name), &f_atom)
+ || (code_pp = get_func_pp(mod->code, f_atom, f->arity))==NULL) {
+ ret = load_nif_error(BIF_P,bad_lib,"Function not found %T:%s/%u",
+ mod_atom, f->name, f->arity);
+ }
+ else if (code_pp[1] - code_pp[0] < (5+3)) {
+ ret = load_nif_error(BIF_P,bad_lib,"No explicit call to load_nif"
+				 " in module (%T:%s/%u too small)",
+ mod_atom, entry->funcs[i].name, entry->funcs[i].arity);
+ }
+ /*erts_fprintf(stderr, "Found NIF %T:%s/%u\r\n",
+ mod_atom, entry->funcs[i].name, entry->funcs[i].arity);*/
+ }
+ }
+
+ if (ret != am_ok) {
+ goto error;
+ }
+
+ /* Call load, reload or upgrade:
+ */
+ if (mod->nif.handle != NULL) { /* Reload */
+ int k;
+ ASSERT(mod->nif.entry != NULL);
+ if (entry->reload == NULL) {
+ ret = load_nif_error(BIF_P,reload,"Reload not supported by this NIF library.");
+ goto error;
+ }
+ /* Check that no NIF is removed */
+ for (k=0; k < mod->nif.entry->num_of_funcs; k++) {
+ ErlNifFunc* old_func = &mod->nif.entry->funcs[k];
+ for (i=0; i < entry->num_of_funcs; i++) {
+ if (old_func->arity == entry->funcs[i].arity
+ && sys_strcmp(old_func->name, entry->funcs[i].name) == 0) {
+ break;
+ }
+ }
+ if (i == entry->num_of_funcs) {
+ ret = load_nif_error(BIF_P,reload,"Reloaded library missing "
+ "function %T:%s/%u\r\n", mod_atom,
+ old_func->name, old_func->arity);
+ goto error;
+ }
+ }
+ erts_pre_nif(&env, BIF_P, mod->nif.data);
+ veto = entry->reload(&env, &env.nif_data, BIF_ARG_2);
+ erts_post_nif(&env);
+ if (veto) {
+ ret = load_nif_error(BIF_P, reload, "Library reload-call unsuccessful.");
+ }
+ else {
+ erts_sys_ddll_close(mod->nif.handle);
+ }
+ }
+ else {
+ if (mod->old_nif.handle != NULL) { /* Upgrade */
+ void* prev_old_data = mod->old_nif.data;
+ if (entry->upgrade == NULL) {
+ ret = load_nif_error(BIF_P, upgrade, "Upgrade not supported by this NIF library.");
+ goto error;
+ }
+ erts_pre_nif(&env, BIF_P, NULL);
+ veto = entry->upgrade(&env, &env.nif_data, &mod->old_nif.data, BIF_ARG_2);
+ erts_post_nif(&env);
+ if (veto) {
+ mod->old_nif.data = prev_old_data;
+ ret = load_nif_error(BIF_P, upgrade, "Library upgrade-call unsuccessful.");
+ }
+ else if (mod->old_nif.data != prev_old_data) {
+ refresh_cached_nif_data(mod->old_code, &mod->old_nif);
+ }
+ }
+ else if (entry->load != NULL) { /* Initial load */
+ erts_pre_nif(&env, BIF_P, NULL);
+ veto = entry->load(&env, &env.nif_data, BIF_ARG_2);
+ erts_post_nif(&env);
+ if (veto) {
+ ret = load_nif_error(BIF_P, "load", "Library load-call unsuccessful.");
+ }
+ }
+ }
+ if (ret == am_ok) {
+ /*
+ ** Everything ok, patch the beam code with op_call_nif
+ */
+ mod->nif.data = env.nif_data;
+ mod->nif.handle = handle;
+ mod->nif.entry = entry;
+ for (i=0; i < entry->num_of_funcs; i++)
+ {
+ Uint* code_ptr;
+ erts_atom_get(entry->funcs[i].name, strlen(entry->funcs[i].name), &f_atom);
+ code_ptr = *get_func_pp(mod->code, f_atom, entry->funcs[i].arity);
+
+ if (code_ptr[1] == 0) {
+ code_ptr[5+0] = (Uint) BeamOp(op_call_nif);
+ } else { /* Function traced, patch the original instruction word */
+ BpData* bp = (BpData*) code_ptr[1];
+ bp->orig_instr = (Uint) BeamOp(op_call_nif);
+ }
+ code_ptr[5+1] = (Uint) entry->funcs[i].fptr;
+ code_ptr[5+2] = (Uint) mod->nif.data;
+ }
+ }
+ else {
+ error:
+ ASSERT(ret != am_ok);
+ if (handle != NULL) {
+ erts_sys_ddll_close(handle);
+ }
+ erts_sys_ddll_free_error(&errdesc);
+ }
+
+ erts_smp_release_system();
+ erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ BIF_RET(ret);
+}
+
diff --git a/erts/emulator/beam/erl_nif.h b/erts/emulator/beam/erl_nif.h
new file mode 100644
index 0000000000..8650b7ce47
--- /dev/null
+++ b/erts/emulator/beam/erl_nif.h
@@ -0,0 +1,122 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/* Include file for writers of Native Implemented Functions.
+*/
+
+#define ERL_NIF_MAJOR_VERSION 0
+#define ERL_NIF_MINOR_VERSION 1
+
+#include <stdlib.h>
+
+typedef unsigned long ERL_NIF_TERM;
+
+typedef struct
+{
+ const char* name;
+ unsigned arity;
+    void* fptr; /* ERL_NIF_TERM (*fptr)(void*, ...); */
+}ErlNifFunc;
+
+struct enif_environment_t;
+typedef struct enif_environment_t ErlNifEnv;
+
+typedef struct enif_entry_t
+{
+ int major;
+ int minor;
+ const char* name;
+ int num_of_funcs;
+ ErlNifFunc* funcs;
+ int (*load) (ErlNifEnv*, void** priv_data, ERL_NIF_TERM load_info);
+ int (*reload) (ErlNifEnv*, void** priv_data, ERL_NIF_TERM load_info);
+ int (*upgrade)(ErlNifEnv*, void** priv_data, void** old_priv_data, ERL_NIF_TERM load_info);
+ void (*unload) (ErlNifEnv*, void* priv_data);
+}ErlNifEntry;
+
+
+
+typedef struct
+{
+ unsigned size;
+ unsigned char* data;
+
+ /* Internals (avert your eyes) */
+ ERL_NIF_TERM bin_term;
+ unsigned char* tmp_alloc;
+ void* ref_bin;
+
+}ErlNifBinary;
+
+#if (defined(__WIN32__) || defined(_WIN32) || defined(_WIN32_))
+# define ERL_NIF_API_FUNC_DECL(RET_TYPE, NAME, ARGS) RET_TYPE (*NAME) ARGS
+typedef struct {
+# include "erl_nif_api_funcs.h"
+} TWinDynNifCallbacks;
+extern TWinDynNifCallbacks WinDynNifCallbacks;
+# undef ERL_NIF_API_FUNC_DECL
+#endif
+
+#if (defined(__WIN32__) || defined(_WIN32) || defined(_WIN32_)) && !defined(STATIC_ERLANG_DRIVER)
+# define ERL_NIF_API_FUNC_MACRO(NAME) (WinDynNifCallbacks.NAME)
+# include "erl_nif_api_funcs.h"
+/* note that we have to keep ERL_NIF_API_FUNC_MACRO defined */
+
+#else /* non windows or included from emulator itself */
+
+# define ERL_NIF_API_FUNC_DECL(RET_TYPE, NAME, ARGS) extern RET_TYPE NAME ARGS
+# include "erl_nif_api_funcs.h"
+# undef ERL_NIF_API_FUNC_DECL
+#endif
+
+
+
+
+#if (defined(__WIN32__) || defined(_WIN32) || defined(_WIN32_))
+# define ERL_NIF_INIT_GLOB TWinDynNifCallbacks WinDynNifCallbacks;
+# define ERL_NIF_INIT_DECL(MODNAME) __declspec(dllexport) ErlNifEntry* nif_init(TWinDynNifCallbacks* callbacks)
+# define ERL_NIF_INIT_BODY memcpy(&WinDynNifCallbacks,callbacks,sizeof(TWinDynNifCallbacks))
+#else
+# define ERL_NIF_INIT_GLOB
+# define ERL_NIF_INIT_BODY
+# if defined(VXWORKS)
+# define ERL_NIF_INIT_DECL(MODNAME) ErlNifEntry* MODNAME ## _init(void)
+# else
+# define ERL_NIF_INIT_DECL(MODNAME) ErlNifEntry* nif_init(void)
+# endif
+#endif
+
+
+#define ERL_NIF_INIT(NAME, FUNCS, LOAD, RELOAD, UPGRADE, UNLOAD) \
+ERL_NIF_INIT_GLOB \
+ERL_NIF_INIT_DECL(NAME) \
+{ \
+ static ErlNifEntry entry = \
+ { \
+ ERL_NIF_MAJOR_VERSION, \
+ ERL_NIF_MINOR_VERSION, \
+ #NAME, \
+ sizeof(FUNCS) / sizeof(*FUNCS), \
+ FUNCS, \
+ LOAD, RELOAD, UPGRADE, UNLOAD \
+ }; \
+ ERL_NIF_INIT_BODY; \
+ return &entry; \
+}
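+
+/* Illustrative usage sketch (hypothetical module, for exposition only):
+ * a minimal NIF library using the macro above. The funcs table pairs
+ * each Erlang name/arity with its C implementation, and ERL_NIF_INIT
+ * emits the nif_init() entry point that load_nif/2 resolves.
+ *
+ *   static ERL_NIF_TERM hello(ErlNifEnv* env)
+ *   {
+ *       return enif_make_atom(env, "world");
+ *   }
+ *   static ErlNifFunc nif_funcs[] = { {"hello", 0, hello} };
+ *   ERL_NIF_INIT(my_mod, nif_funcs, NULL, NULL, NULL, NULL)
+ */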
+
diff --git a/erts/emulator/beam/erl_nif_api_funcs.h b/erts/emulator/beam/erl_nif_api_funcs.h
new file mode 100644
index 0000000000..400c1822cc
--- /dev/null
+++ b/erts/emulator/beam/erl_nif_api_funcs.h
@@ -0,0 +1,68 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+#if !defined(ERL_NIF_API_FUNC_DECL) && !defined(ERL_NIF_API_FUNC_MACRO)
+# error This file should not be included directly
+#endif
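+
+/*
+ * Illustrative note (added for exposition): this is an X-macro style
+ * header. The includer first defines ERL_NIF_API_FUNC_DECL or
+ * ERL_NIF_API_FUNC_MACRO and then includes this file. For example
+ *
+ *   #define ERL_NIF_API_FUNC_DECL(RET, NAME, ARGS) extern RET NAME ARGS
+ *   #include "erl_nif_api_funcs.h"
+ *
+ * expands each line below into an ordinary extern declaration, while
+ * the Windows build instead maps every name to a slot in the
+ * WinDynNifCallbacks struct through ERL_NIF_API_FUNC_MACRO.
+ */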
+
+#ifdef ERL_NIF_API_FUNC_DECL
+ERL_NIF_API_FUNC_DECL(void*,enif_get_data,(ErlNifEnv*));
+ERL_NIF_API_FUNC_DECL(void*,enif_alloc,(ErlNifEnv*, size_t size));
+ERL_NIF_API_FUNC_DECL(void,enif_free,(ErlNifEnv*, void* ptr));
+ERL_NIF_API_FUNC_DECL(int,enif_is_binary,(ErlNifEnv*, ERL_NIF_TERM term));
+ERL_NIF_API_FUNC_DECL(int,enif_inspect_binary,(ErlNifEnv*, ERL_NIF_TERM bin_term, ErlNifBinary* bin));
+ERL_NIF_API_FUNC_DECL(int,enif_alloc_binary,(ErlNifEnv*, unsigned size, ErlNifBinary* bin));
+ERL_NIF_API_FUNC_DECL(void,enif_release_binary,(ErlNifEnv*, ErlNifBinary* bin));
+ERL_NIF_API_FUNC_DECL(int,enif_get_int,(ErlNifEnv*, ERL_NIF_TERM term, int* ip));
+ERL_NIF_API_FUNC_DECL(int,enif_get_ulong,(ErlNifEnv*, ERL_NIF_TERM term, unsigned long* ip));
+ERL_NIF_API_FUNC_DECL(int,enif_get_list_cell,(ErlNifEnv* env, ERL_NIF_TERM term, ERL_NIF_TERM* head, ERL_NIF_TERM* tail));
+ERL_NIF_API_FUNC_DECL(ERL_NIF_TERM,enif_make_binary,(ErlNifEnv* env, ErlNifBinary* bin));
+ERL_NIF_API_FUNC_DECL(ERL_NIF_TERM,enif_make_badarg,(ErlNifEnv* env));
+ERL_NIF_API_FUNC_DECL(ERL_NIF_TERM,enif_make_int,(ErlNifEnv* env, int i));
+ERL_NIF_API_FUNC_DECL(ERL_NIF_TERM,enif_make_ulong,(ErlNifEnv* env, unsigned long i));
+ERL_NIF_API_FUNC_DECL(ERL_NIF_TERM,enif_make_atom,(ErlNifEnv* env, const char* name));
+ERL_NIF_API_FUNC_DECL(ERL_NIF_TERM,enif_make_tuple,(ErlNifEnv* env, unsigned cnt, ...));
+ERL_NIF_API_FUNC_DECL(ERL_NIF_TERM,enif_make_list,(ErlNifEnv* env, unsigned cnt, ...));
+ERL_NIF_API_FUNC_DECL(ERL_NIF_TERM,enif_make_list_cell,(ErlNifEnv* env, ERL_NIF_TERM car, ERL_NIF_TERM cdr));
+ERL_NIF_API_FUNC_DECL(ERL_NIF_TERM,enif_make_string,(ErlNifEnv* env, const char* string));
+#endif
+
+#ifdef ERL_NIF_API_FUNC_MACRO
+# define enif_get_data ERL_NIF_API_FUNC_MACRO(enif_get_data)
+# define enif_alloc ERL_NIF_API_FUNC_MACRO(enif_alloc)
+# define enif_free ERL_NIF_API_FUNC_MACRO(enif_free)
+# define enif_is_binary ERL_NIF_API_FUNC_MACRO(enif_is_binary)
+# define enif_inspect_binary ERL_NIF_API_FUNC_MACRO(enif_inspect_binary)
+# define enif_alloc_binary ERL_NIF_API_FUNC_MACRO(enif_alloc_binary)
+# define enif_release_binary ERL_NIF_API_FUNC_MACRO(enif_release_binary)
+# define enif_get_int ERL_NIF_API_FUNC_MACRO(enif_get_int)
+# define enif_get_ulong ERL_NIF_API_FUNC_MACRO(enif_get_ulong)
+# define enif_get_list_cell ERL_NIF_API_FUNC_MACRO(enif_get_list_cell)
+
+# define enif_make_binary ERL_NIF_API_FUNC_MACRO(enif_make_binary)
+# define enif_make_badarg ERL_NIF_API_FUNC_MACRO(enif_make_badarg)
+# define enif_make_int ERL_NIF_API_FUNC_MACRO(enif_make_int)
+# define enif_make_ulong ERL_NIF_API_FUNC_MACRO(enif_make_ulong)
+# define enif_make_atom ERL_NIF_API_FUNC_MACRO(enif_make_atom)
+# define enif_make_tuple ERL_NIF_API_FUNC_MACRO(enif_make_tuple)
+# define enif_make_list ERL_NIF_API_FUNC_MACRO(enif_make_list)
+# define enif_make_list_cell ERL_NIF_API_FUNC_MACRO(enif_make_list_cell)
+# define enif_make_string ERL_NIF_API_FUNC_MACRO(enif_make_string)
+#endif
+
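+/* A short sketch of how these functions are meant to compose inside a
+ * NIF (the function "bin_size" is made up; the sketch assumes this
+ * version of the API passes each argument as a separate ERL_NIF_TERM
+ * parameter and that ErlNifBinary, from erl_nif.h, has a size field):
+ *
+ *   static ERL_NIF_TERM bin_size(ErlNifEnv* env, ERL_NIF_TERM bin_term)
+ *   {
+ *       ErlNifBinary bin;
+ *       if (!enif_inspect_binary(env, bin_term, &bin))
+ *           return enif_make_badarg(env);
+ *       return enif_make_ulong(env, bin.size);
+ *   }
+ */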
diff --git a/erts/emulator/beam/erl_nmgc.c b/erts/emulator/beam/erl_nmgc.c
new file mode 100644
index 0000000000..626d4e295a
--- /dev/null
+++ b/erts/emulator/beam/erl_nmgc.c
@@ -0,0 +1,1402 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2004-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+#include "global.h"
+#include "erl_gc.h"
+#include "erl_binary.h"
+#include "erl_nmgc.h"
+#include "erl_debug.h"
+#if HIPE
+#include "hipe_bif0.h" /* for hipe_constants_{start,next} */
+#include "hipe_stack.h"
+#endif
+
+
+#ifdef INCREMENTAL
+/***************************************************************************
+ * *
+ * Incremental Garbage Collector for the Message Area *
+ * *
+ ***************************************************************************/
+
+/*
+ * The heap pointers are declared in erl_init.c
+ * global_heap is the nursery
+ * global_old_heap is the old generation
+ */
+unsigned char *blackmap = NULL;
+INC_Page *inc_used_mem = NULL;
+INC_MemBlock *inc_free_list = NULL;
+Eterm *inc_fromspc;
+Eterm *inc_fromend;
+Eterm *inc_nursery_scn_ptr;
+Eterm **fwdptrs;
+Eterm *inc_alloc_limit;
+Process *inc_active_proc;
+Process *inc_active_last;
+int inc_words_to_go;
+
+static Eterm *inc_last_nursery;
+static int inc_pages = INC_NoPAGES;
+static INC_Page *inc_bibop = NULL;
+static int inc_used_pages;
+
+/* Used when growing the old generation */
+/*
+#define INC_ROOTSAVE 16384
+static Eterm *root_save[INC_ROOTSAVE];
+static int roots_saved = 0;
+*/
+
+INC_STORAGE_DECLARATION(,gray);
+
+static void inc_minor_gc(Process *p, int need, Eterm* objv, int nobj);
+static void inc_major_gc(Process *p, int need, Eterm* objv, int nobj);
+
+#ifdef INC_TIME_BASED
+#if USE_PERFCTR
+
+/*
+ * This uses the Linux perfctr extension to virtualise the
+ * time-stamp counter.
+ */
+#include "libperfctr.h"
+static struct vperfctr *vperfctr;
+static double cpu_khz;
+static double tsc_to_cpu_mult;
+
+static void inc_start_hrvtime(void)
+{
+ struct perfctr_info info;
+ struct vperfctr_control control;
+
+ if( vperfctr != NULL )
+ return;
+ vperfctr = vperfctr_open();
+ if( vperfctr == NULL )
+ return;
+ if( vperfctr_info(vperfctr, &info) >= 0 ) {
+ cpu_khz = (double)info.cpu_khz;
+ tsc_to_cpu_mult = (double)(info.tsc_to_cpu_mult ? : 1);
+ if( info.cpu_features & PERFCTR_FEATURE_RDTSC ) {
+ memset(&control, 0, sizeof control);
+ control.cpu_control.tsc_on = 1;
+ if( vperfctr_control(vperfctr, &control) >= 0 )
+ return;
+ }
+ }
+ vperfctr_close(vperfctr);
+ vperfctr = NULL;
+}
+
+#define inc_get_hrvtime() (((double)vperfctr_read_tsc(vperfctr) * tsc_to_cpu_mult) / cpu_khz)
+
+#endif /* USE_PERFCTR */
+#endif /* INC_TIME_BASED */
+
+#ifdef INC_TIME_BASED
+# define timeslice 1 /* milliseconds */
+# define WORK_MORE (inc_get_hrvtime() < start_time + timeslice)
+#else
+//# define inc_min_work 100 /* words */
+# define inc_min_work (global_heap_sz + inc_pages * INC_FULLPAGE) /* words */
+# define WORK_MORE (inc_words_to_go > 0)
+#endif
+
+void erts_init_incgc(void)
+{
+ int i;
+ int size = inc_pages * INC_FULLPAGE;
+
+ /* Young generation */
+ global_heap = (Eterm *)erts_alloc(ERTS_ALC_T_MESSAGE_AREA,
+ sizeof(Eterm) * global_heap_sz);
+ global_hend = global_heap + global_heap_sz;
+ global_htop = global_heap;
+ inc_alloc_limit = global_hend;
+
+ /* Fromspace */
+ inc_last_nursery = (Eterm *) erts_alloc(ERTS_ALC_T_MESSAGE_AREA,
+ global_heap_sz * sizeof(Eterm));
+ inc_fromspc = inc_fromend = NULL;
+
+ /* Forward-pointers */
+ fwdptrs = erts_alloc(ERTS_ALC_T_MESSAGE_AREA,
+ global_heap_sz * sizeof(Eterm*));
+ /* Old generation */
+ global_old_heap = (Eterm *)erts_alloc(ERTS_ALC_T_MESSAGE_AREA,
+ size * sizeof(Eterm));
+ global_old_hend = global_old_heap + size;
+
+    /* Pages in BiBOP */
+ for (i = 0; i < inc_pages; i++)
+ {
+ INC_Page *this = (INC_Page*)(global_old_heap + i * INC_FULLPAGE);
+ this->next = (INC_Page*)((Eterm*)this + INC_FULLPAGE);
+ }
+
+ inc_bibop = (INC_Page*)global_old_heap;
+ ((INC_Page*)(global_old_heap + (inc_pages - 1) * INC_FULLPAGE))->next =
+ NULL;
+
+ inc_used_mem = inc_bibop;
+ inc_bibop = inc_bibop->next;
+ inc_used_mem->next = NULL;
+ inc_used_pages = 1;
+
+ /* Free-list */
+ inc_free_list = (INC_MemBlock*)inc_used_mem->start;
+ inc_free_list->size = INC_PAGESIZE;
+ inc_free_list->prev = NULL;
+ inc_free_list->next = NULL;
+
+ /* Blackmap */
+ blackmap = (unsigned char*)erts_alloc(ERTS_ALC_T_MESSAGE_AREA,
+ INC_FULLPAGE * inc_pages);
+ /* Gray stack */
+ INC_STORAGE_INIT(gray);
+
+ inc_active_proc = NULL;
+ inc_active_last = NULL;
+
+#ifdef INC_TIME_BASED
+ inc_start_hrvtime();
+#endif
+}
+
+void erts_cleanup_incgc(void)
+{
+ INC_STORAGE_ERASE(gray);
+
+ if (inc_fromspc)
+ inc_last_nursery = inc_fromspc;
+
+ erts_free(ERTS_ALC_T_MESSAGE_AREA,(void*)global_heap);
+ erts_free(ERTS_ALC_T_MESSAGE_AREA,(void*)inc_last_nursery);
+ erts_free(ERTS_ALC_T_MESSAGE_AREA,(void*)global_old_heap);
+ erts_free(ERTS_ALC_T_MESSAGE_AREA,(void*)blackmap);
+ erts_free(ERTS_ALC_T_MESSAGE_AREA,(void*)fwdptrs);
+}
+
+void erts_incremental_gc(Process* p, int need, Eterm* objv, int nobj)
+{
+ int repeat_minor;
+#ifdef INC_TIME_BASED
+ double start_time = inc_get_hrvtime();
+ int work_left_before = inc_words_to_go;
+#endif
+ /* Used when growing the fromspace */
+ static char inc_growing_nurs = 0;
+
+ BM_STOP_TIMER(system);
+ //BM_MMU_READ();
+ BM_RESET_TIMER(gc);
+ BM_START_TIMER(gc);
+
+ VERBOSE(DEBUG_HYBRID_GC,
+ ("INCGC: Incremental GC START Caused by: %T Need: %d\n",
+ p->id,need));
+
+ ma_gc_flags |= GC_GLOBAL;
+ ma_gc_flags &= ~GC_CYCLE_START;
+
+#ifndef INC_TIME_BASED
+    /* Decide how much work to do in this GC stage. The work is measured
+     * as the number of words copied from the young generation to the old
+     * one, plus the number of words marked in the old generation.
+     */
+ if (ma_gc_flags & GC_MAJOR) {
+ int wm = (need > inc_min_work) ? need : inc_min_work;
+ inc_words_to_go = (int)((wm * (((inc_used_pages * INC_PAGESIZE) /
+ (double)global_heap_sz) + 1)) + 0.5);
+ }
+ else
+ inc_words_to_go = (need > inc_min_work) ? need : inc_min_work;
+#endif
+
+ do {
+ if (ma_gc_flags & GC_MAJOR) {
+ /* This is a major collection cycle. */
+ inc_major_gc(p,need,objv,nobj);
+ } else if (ma_gc_flags & GC_CYCLE) {
+ /* This is a minor collection cycle. */
+ inc_minor_gc(p,need,objv,nobj);
+ } else {
+ VERBOSE(DEBUG_HYBRID_GC,("INCGC: Collection cycle START\n"));
+ ma_gc_flags |= (GC_CYCLE | GC_CYCLE_START);
+ inc_fromspc = global_heap;
+ inc_fromend = global_htop;
+ global_heap = global_htop = inc_last_nursery;
+ global_hend = global_heap + global_heap_sz;
+ inc_nursery_scn_ptr = global_heap;
+#ifdef INC_TIME_BASED
+ work_left_before = inc_words_to_go = global_heap_sz;
+#endif
+#ifdef DEBUG
+ inc_last_nursery = NULL;
+#endif
+            memset(fwdptrs,0,global_heap_sz * sizeof(Eterm*));
+
+ {
+            /* TODO: All processes should probably not be activated here... */
+ int i;
+ for (i = 0; i < erts_num_active_procs; i++) {
+ Process *cp = erts_active_procs[i];
+ INC_ACTIVATE(cp);
+ cp->scan_top = cp->high_water;
+ }
+ }
+
+ if (ma_gc_flags & GC_NEED_MAJOR) {
+ /* The previous collection cycle caused the old generation to
+ * overflow. This collection cycle will therefore be a major
+ * one.
+ */
+ BM_COUNT(major_gc_cycles);
+ VERBOSE(DEBUG_HYBRID_GC,("INCGC: MAJOR cycle\n"));
+ inc_major_gc(p,need,objv,nobj);
+ } else {
+ BM_COUNT(minor_gc_cycles);
+ VERBOSE(DEBUG_HYBRID_GC,("INCGC: MINOR cycle\n"));
+ inc_minor_gc(p,need,objv,nobj);
+ }
+ }
+
+ repeat_minor = 0;
+ if (!(ma_gc_flags & GC_CYCLE)) {
+ inc_alloc_limit = global_hend;
+ inc_last_nursery = inc_fromspc;
+ inc_fromspc = inc_fromend = NULL;
+ ASSERT(INC_STORAGE_EMPTY(gray));
+
+ if (inc_growing_nurs) {
+ /*
+ * The previous collection cycle caused the nursery to
+ * grow, now we have to grow the from-space as well.
+ */
+ inc_last_nursery =
+ (Eterm*) erts_realloc(ERTS_ALC_T_MESSAGE_AREA,
+ (void*)inc_last_nursery,
+ sizeof(Eterm) * global_heap_sz);
+ inc_growing_nurs = 0;
+ }
+
+ if (global_hend - global_htop <= need) {
+ /*
+ * Initiate a new GC cycle immediately and, if necessary,
+ * enlarge the nursery.
+ */
+ if (global_heap_sz <= need) {
+ VERBOSE(DEBUG_HYBRID_GC,
+ ("INCGC: Allocating a larger nursery\n"));
+ global_heap_sz = erts_next_heap_size(need * 1.5,0);
+ inc_last_nursery =
+ (Eterm*) erts_realloc(ERTS_ALC_T_MESSAGE_AREA,
+ (void*)inc_last_nursery,
+ sizeof(Eterm) * global_heap_sz);
+ fwdptrs = erts_realloc(ERTS_ALC_T_MESSAGE_AREA,fwdptrs,
+ global_heap_sz * sizeof(Eterm*));
+ inc_growing_nurs = 1;
+ }
+ repeat_minor = 1;
+ }
+
+#ifdef DEBUG
+ /* Fill the from-space with bad things */
+ memset(inc_last_nursery,DEBUG_BAD_BYTE,
+ global_heap_sz * sizeof(Eterm));
+#endif
+ }
+ } while (repeat_minor);
+
+
+ /* Clean up after garbage collection ********************************/
+
+ if (inc_alloc_limit != global_hend) {
+
+#ifdef INC_TIME_BASED
+ if ((work_left_before - inc_words_to_go) == 0) {
+ inc_alloc_limit = global_htop + need;
+ } else {
+ inc_alloc_limit = (global_hend - global_htop) /
+ (inc_words_to_go / (work_left_before - inc_words_to_go)) +
+ global_htop;
+ if (inc_alloc_limit > global_hend)
+ inc_alloc_limit = global_hend;
+ }
+#else
+	inc_alloc_limit = global_htop + ((need > inc_min_work) ?
+					 need : inc_min_work);
+ if (inc_alloc_limit > global_hend)
+ inc_alloc_limit = global_hend;
+#endif
+ }
+
+ ma_gc_flags &= ~GC_GLOBAL;
+
+ /* INC_TIME_BASED: If this fails we have to increase the timeslice! */
+ ASSERT(inc_alloc_limit - global_htop > need);
+
+ BM_STOP_TIMER(gc);
+#ifdef BM_TIMERS
+ minor_global_gc_time += gc_time;
+ if (gc_time > max_global_minor_time)
+ max_global_minor_time = gc_time;
+
+ pause_times[(((gc_time * 1000) < MAX_PAUSE_TIME) ?
+ (int)(gc_time * 1000) :
+ MAX_PAUSE_TIME - 1)]++;
+#endif
+ //BM_MMU_INIT();
+ { static long long verif = 0;
+      //erts_printf("before verify: %d\n",++verif);
+ if (verif==168) print_memory(NULL);
+ verify_everything();
+      //erts_printf("after verify: %d\n",verif);
+ }
+ BM_START_TIMER(system);
+ VERBOSE(DEBUG_HYBRID_GC,("INCGC: Incremental GC END\n"));
+}
+
+
+/***************************************************************************
+ * *
+ * Minor collection - Copy live data from young generation to old *
+ * *
+ ***************************************************************************/
+
+#define MINOR_SCAN(PTR,END) do { \
+ ASSERT(PTR <= END); \
+ while (WORK_MORE && PTR < END) { \
+ Eterm val = *PTR; \
+ Eterm *obj_ptr = ptr_val(val); \
+ switch (primary_tag(val)) { \
+ case TAG_PRIMARY_LIST: \
+ if (ptr_within(obj_ptr,inc_fromspc,inc_fromend)) { \
+ if (INC_IS_FORWARDED(obj_ptr)) { \
+ *PTR = make_list(INC_FORWARD_VALUE(obj_ptr)); \
+ } \
+ else { \
+ Eterm *hp = erts_inc_alloc(2); \
+ INC_STORE(gray,hp,2); \
+ INC_COPY_CONS(obj_ptr,hp,PTR); \
+ } \
+ } \
+ break; \
+ case TAG_PRIMARY_BOXED: \
+ if (ptr_within(obj_ptr,inc_fromspc,inc_fromend)) { \
+ if (INC_IS_FORWARDED(obj_ptr)) { \
+ *PTR = make_boxed(INC_FORWARD_VALUE(obj_ptr)); \
+ } \
+ else { \
+ Eterm *hp = erts_inc_alloc(BOXED_NEED(obj_ptr,*obj_ptr)); \
+ INC_STORE(gray,hp,BOXED_NEED(obj_ptr,*obj_ptr)); \
+ INC_COPY_BOXED(obj_ptr,hp,PTR); \
+ } \
+ } \
+ break; \
+ case TAG_PRIMARY_HEADER: \
+ switch (val & _TAG_HEADER_MASK) { \
+ case ARITYVAL_SUBTAG: break; \
+ default: PTR += thing_arityval(val); break; \
+ } \
+ break; \
+ } \
+ PTR++; \
+ } \
+} while(0)
+
+
+/* Perform one stage of the minor collection: rescue gray objects found
+ * in earlier stages and copy data that is live in the young generation
+ * (reachable from the process root sets, private heaps and heap
+ * fragments) to the old generation. The amount of work done in one
+ * stage is bounded by WORK_MORE.
+ */
+static void inc_minor_gc(Process* p, int need, Eterm* objv, int nobj)
+{
+ BM_COUNT(minor_gc_stages);
+
+ /* Start with looking at gray objects found in earlier collection
+ * stages.
+ */
+ VERBOSE(DEBUG_HYBRID_GC,("INCGC: Rescue gray found from nursery\n"));
+ {
+ INC_Object *obj = NULL;
+ Eterm *ptr;
+
+ while (WORK_MORE && !INC_STORAGE_EMPTY(gray)) {
+ obj = INC_STORAGE_GET(gray);
+ if ((*obj->this & _TAG_HEADER_MASK) == FUN_SUBTAG) {
+ ptr = obj->this + thing_arityval(*obj->this) + 1;
+ } else {
+ ptr = obj->this;
+ }
+ MINOR_SCAN(ptr,obj->this + obj->size);
+ }
+	/* TODO: See the previous update of gray objects */
+ if (!WORK_MORE && obj != NULL)
+ INC_STORE(gray,obj->this,obj->size);
+ }
+
+ VERBOSE(DEBUG_HYBRID_GC,("INCGC: Scan root-set\n"));
+ while (WORK_MORE && inc_active_proc) {
+ Rootset rootset;
+ Process *cp = inc_active_proc;
+
+ ASSERT(INC_IS_ACTIVE(cp));
+
+	/* TODO: How expensive is it to build a new rootset every time? */
+
+	/* TODO: Think about the order! Rootset, heap, old heap... */
+
+	/* TODO: Scan the stack from p->send to p->stop! [Brooks84] */
+	/* Note: We do not GC the youngest objects - those allocated
+	   during the GC cycle. This gives the young ones a chance to die
+	   before the GC starts copying them. [StefanovicMcKinleyMoss@OOPSLA99] */
+
+	/* TODO: Once the rootset has been scanned, the process should no
+	   longer be active. It should be activated in schedule; only if a
+	   process has run do we need to scan its rootset again. */
+
+ /* MT: In a multithreaded system the process cp needs to be
+ * locked here.
+ */
+
+ if (cp == p)
+ rootset.n = setup_rootset(cp, objv, nobj, &rootset);
+ else
+ rootset.n = setup_rootset(cp, cp->arg_reg, cp->arity, &rootset);
+
+ //MA_GENSWEEP_NSTACK(cp, old_htop, n_htop, objv, nobj);
+
+ while (WORK_MORE && rootset.n--) {
+ Eterm *g_ptr = rootset.v[rootset.n];
+ Uint g_sz = rootset.sz[rootset.n];
+
+ while (WORK_MORE && g_sz--) {
+ Eterm gval = *g_ptr;
+ switch (primary_tag(gval)) {
+ case TAG_PRIMARY_LIST: {
+ Eterm *ptr = list_val(gval);
+ if (ptr_within(ptr,inc_fromspc,inc_fromend)) {
+ if (INC_IS_FORWARDED(ptr)) {
+ *g_ptr++ = make_list(INC_FORWARD_VALUE(ptr));
+ }
+ else {
+ Eterm *hp = erts_inc_alloc(2);
+ INC_STORE(gray,hp,2);
+ INC_COPY_CONS(ptr,hp,g_ptr++);
+ }
+ }
+ else
+ ++g_ptr;
+ continue;
+ }
+
+ case TAG_PRIMARY_BOXED: {
+ Eterm *ptr = boxed_val(gval);
+ if (ptr_within(ptr,inc_fromspc,inc_fromend)) {
+ if (INC_IS_FORWARDED(ptr)) {
+ *g_ptr++ = make_boxed(INC_FORWARD_VALUE(ptr));
+ }
+ else {
+ Eterm *hp = erts_inc_alloc(BOXED_NEED(ptr,*ptr));
+ INC_STORE(gray,hp,BOXED_NEED(ptr,*ptr));
+ INC_COPY_BOXED(ptr,hp,g_ptr++);
+ }
+ }
+ else
+ ++g_ptr;
+ continue;
+ }
+
+ default:
+ g_ptr++;
+ continue;
+ }
+ }
+ }
+
+ restore_one_rootset(cp, &rootset);
+
+ /* MT: cp can be unlocked now. */
+
+ /* VERBOSE(DEBUG_HYBRID_GC,("INCGC: Scan private nursery\n")); */
+ if (cp->scan_top != HEAP_TOP(cp)) {
+ Eterm *ptr = cp->scan_top;
+ MINOR_SCAN(ptr,HEAP_TOP(cp));
+	    /* TODO: To save scan_top here, all message area pointers
+	     * that are found must be added to cp->rrma.
+	     */
+ //cp->scan_top = ptr;
+ }
+
+ /* VERBOSE(DEBUG_HYBRID_GC,("INCGC: Scan heap fragments\n")); */
+ {
+ ErlHeapFragment* bp = MBUF(cp);
+
+ while (WORK_MORE && bp) {
+ Eterm *ptr = bp->mem;
+ if ((ARITH_HEAP(cp) >= bp->mem) &&
+ (ARITH_HEAP(cp) < bp->mem + bp->size)) {
+ MINOR_SCAN(ptr,ARITH_HEAP(cp));
+ } else {
+ MINOR_SCAN(ptr,bp->mem + bp->size);
+ }
+ bp = bp->next;
+ }
+ }
+
+ /* VERBOSE(DEBUG_HYBRID_GC,("INCGC: Scan gray\n")); */
+ {
+ INC_Object *obj = NULL;
+ Eterm *ptr;
+ while (WORK_MORE && !INC_STORAGE_EMPTY(gray)) {
+ obj = INC_STORAGE_GET(gray);
+ if ((*obj->this & _TAG_HEADER_MASK) == FUN_SUBTAG) {
+ ptr = obj->this + thing_arityval(*obj->this) + 1;
+ } else {
+ ptr = obj->this;
+ }
+ MINOR_SCAN(ptr,obj->this + obj->size);
+ }
+	/* TODO: INC_STORE(gray,ptr,obj->size-(ptr-obj->this)); Something like that.. */
+ if (!WORK_MORE && obj != NULL)
+ INC_STORE(gray,obj->this,obj->size);
+ }
+
+ if (WORK_MORE) {
+ //printf("Rootset after:\r\n");
+ //print_one_rootset(&rootset);
+ INC_DEACTIVATE(cp);
+ }
+ }
+
+ /* Update new pointers in the nursery to new copies in old generation. */
+ VERBOSE(DEBUG_HYBRID_GC,("INCGC: Update nursery\n"));
+ {
+ Eterm *ptr = inc_nursery_scn_ptr;
+ MINOR_SCAN(ptr,global_htop);
+ inc_nursery_scn_ptr = ptr;
+ }
+
+ VERBOSE(DEBUG_HYBRID_GC,("INCGC: Rescue gray found from nursery\n"));
+ {
+ INC_Object *obj = NULL;
+ Eterm *ptr;
+
+ while (WORK_MORE && !INC_STORAGE_EMPTY(gray)) {
+ obj = INC_STORAGE_GET(gray);
+ if ((*obj->this & _TAG_HEADER_MASK) == FUN_SUBTAG) {
+ ptr = obj->this + thing_arityval(*obj->this) + 1;
+ } else {
+ ptr = obj->this;
+ }
+ MINOR_SCAN(ptr,obj->this + obj->size);
+ }
+	/* TODO: See the previous update of gray objects */
+ if (!WORK_MORE && obj != NULL)
+ INC_STORE(gray,obj->this,obj->size);
+ }
+
+ /* Atomic phase */
+ VERBOSE(DEBUG_HYBRID_GC,("INCGC: Update copy stack\n"));
+ {
+ Uint i;
+ for (i = 0; i < ma_dst_top; i++) {
+ if (ptr_within(ma_dst_stack[i],inc_fromspc,inc_fromend)) {
+ if (INC_IS_FORWARDED(ma_dst_stack[i]))
+ ma_dst_stack[i] = INC_FORWARD_VALUE(ma_dst_stack[i]);
+ }
+ }
+ }
+
+ if (WORK_MORE) {
+ VERBOSE(DEBUG_HYBRID_GC,("INCGC: Update offheap-lists\n"));
+ {
+ ExternalThing **prev = &erts_global_offheap.externals;
+ ExternalThing *ptr = erts_global_offheap.externals;
+
+ /* Atomic phase */
+ VERBOSE(DEBUG_HYBRID_GC,("INCGC: Sweep proc externals\n"));
+ while (ptr) {
+ Eterm *ppt = (Eterm*) ptr;
+
+ if (ptr_within(ppt,global_old_heap,global_old_hend)) {
+ prev = &ptr->next;
+ ptr = ptr->next;
+ } else if (ptr_within(ppt, inc_fromspc, inc_fromend) &&
+ INC_IS_FORWARDED(ppt)) {
+ ExternalThing *ro = (ExternalThing*)INC_FORWARD_VALUE(ppt);
+ *prev = ro; /* Patch to moved pos */
+ prev = &ro->next;
+ ptr = ro->next;
+ } else {
+ erts_deref_node_entry(ptr->node);
+ *prev = ptr = ptr->next;
+ }
+ }
+ ASSERT(*prev == NULL);
+ }
+
+ {
+ ProcBin **prev = &erts_global_offheap.mso;
+ ProcBin *ptr = erts_global_offheap.mso;
+
+ /* Atomic phase */
+ VERBOSE(DEBUG_HYBRID_GC,("INCGC: Sweep proc bins\n"));
+ while (ptr) {
+ Eterm *ppt = (Eterm*)ptr;
+
+ if (ptr_within(ppt,global_old_heap,global_old_hend)) {
+ prev = &ptr->next;
+ ptr = ptr->next;
+ } else if (ptr_within(ppt, inc_fromspc, inc_fromend) &&
+ INC_IS_FORWARDED(ppt)) {
+ ProcBin *ro = (ProcBin*)INC_FORWARD_VALUE(ppt);
+ *prev = ro; /* Patch to moved pos */
+ prev = &ro->next;
+ ptr = ro->next;
+ } else {
+ Binary *bptr;
+ *prev = ptr->next;
+ bptr = ptr->val;
+ if (erts_refc_dectest(&bptr->refc, 0) == 0)
+ erts_bin_free(bptr);
+ ptr = *prev;
+ }
+ }
+ ASSERT(*prev == NULL);
+ }
+
+ VERBOSE(DEBUG_HYBRID_GC,("INCGC: Minor collection cycle END\n"));
+ ma_gc_flags &= ~GC_CYCLE;
+ }
+}
+
+
+
+
+/***************************************************************************
+ * *
+ * Major collection - CopyMark - Copy young to old, Mark-Sweep old *
+ * *
+ ***************************************************************************/
+
+#define COPYMARK(PTR,END) do { \
+ ASSERT(PTR <= END); \
+ while (WORK_MORE && PTR < END) { \
+ Eterm val = *PTR; \
+ Eterm *obj_ptr = ptr_val(val); \
+ switch (primary_tag(val)) { \
+ case TAG_PRIMARY_LIST: \
+ COPYMARK_CONS(obj_ptr,aging_htop,PTR,aging_end); break; \
+ case TAG_PRIMARY_BOXED: \
+ COPYMARK_BOXED(obj_ptr,aging_htop,PTR,aging_end); break; \
+ case TAG_PRIMARY_HEADER: \
+ switch (val & _TAG_HEADER_MASK) { \
+ case ARITYVAL_SUBTAG: break; \
+ default: \
+ PTR += thing_arityval(val); \
+ break; \
+ } \
+ break; \
+ default: break; \
+ } \
+ PTR++; \
+ } \
+} while(0)
+/* TODO:
+ if (aging_htop + 10 > aging + INC_FULLPAGE) {
+ aging->next = inc_used_mem;
+ inc_used_mem = aging;
+ }
+*/
+
+static void inc_major_gc(Process *p, int need, Eterm* objv, int nobj)
+{
+ Eterm *free_start = NULL;
+ Uint live = 0;
+ Uint old_gen_sz = 0;
+ static INC_Page *aging;
+ static Eterm *aging_htop;
+ static Eterm *aging_end;
+ BM_NEW_TIMER(old_gc);
+
+ BM_SWAP_TIMER(gc,old_gc);
+ BM_COUNT(major_gc_stages);
+
+ VERBOSE(DEBUG_HYBRID_GC,("INCGC: Major collection START\n"));
+
+ ma_gc_flags |= GC_INCLUDE_ALL;
+
+ if (ma_gc_flags & GC_NEED_MAJOR)
+ {
+ INC_Page *page = inc_used_mem;
+
+ ma_gc_flags |= GC_MAJOR;
+ ma_gc_flags &= ~GC_NEED_MAJOR;
+
+ while (page)
+ {
+ memset(blackmap +
+ ((void*)page - (void*)global_old_heap) / sizeof(void*),
+ 0, INC_FULLPAGE);
+ page = page->next;
+ }
+
+ if (inc_bibop) {
+ aging = inc_bibop;
+ inc_bibop = inc_bibop->next;
+ aging->next = NULL;
+ memset(blackmap +
+ ((void*)aging - (void*)global_old_heap) / sizeof(void*),
+ 1, INC_FULLPAGE);
+ aging_htop = aging->start;
+ aging_end = aging->start + INC_PAGESIZE;
+ }
+ else {
+ /* There are no free pages.. Either fragmentation is a
+ * problem or we are simply out of memory. Allocation in
+ * the old generation will be done through the free-list
+ * this GC cycle.
+ */
+ aging = NULL;
+ aging_htop = aging_end = NULL;
+ }
+ }
+
+ /* Start with looking at gray objects found in earlier collection
+ * stages.
+ */
+ VERBOSE(DEBUG_HYBRID_GC,("INCGC: Copy-Mark gray\n"));
+ {
+ INC_Object *obj = NULL;
+
+ while (WORK_MORE && !INC_STORAGE_EMPTY(gray)) {
+ Eterm *ptr;
+
+ obj = INC_STORAGE_GET(gray);
+ if ((*obj->this & _TAG_HEADER_MASK) == FUN_SUBTAG) {
+ ptr = obj->this + thing_arityval(*obj->this) + 1;
+ } else {
+ ptr = obj->this;
+ }
+ COPYMARK(ptr,obj->this + obj->size);
+ }
+	/* TODO: Look at the corresponding code in minor. */
+ if (!WORK_MORE && obj != NULL)
+ INC_STORE(gray,obj->this,obj->size);
+ }
+
+ VERBOSE(DEBUG_HYBRID_GC,("INCGC: Copy-Mark roots\n"));
+ while (WORK_MORE && inc_active_proc)
+ {
+ /* For each process: Scan all areas containing pointers to the
+	 * message area. When a process is done here, all of its
+	 * message pointers should point into the old generation.
+ */
+ Rootset rootset;
+ Process *cp = inc_active_proc;
+
+ ASSERT(INC_IS_ACTIVE(cp));
+
+ /* MT: In a multithreaded system the process cp needs to be
+ * locked here.
+ */
+ if (cp == p)
+ rootset.n = setup_rootset(cp, objv, nobj, &rootset);
+ else
+ rootset.n = setup_rootset(cp, cp->arg_reg, cp->arity, &rootset);
+
+ while (WORK_MORE && rootset.n--)
+ {
+ Eterm *ptr = rootset.v[rootset.n];
+ Eterm *end = ptr + rootset.sz[rootset.n];
+
+ while (WORK_MORE && ptr < end) {
+ Eterm val = *ptr;
+ Eterm *obj_ptr = ptr_val(val);
+
+ switch (primary_tag(val)) {
+ case TAG_PRIMARY_LIST:
+ {
+ COPYMARK_CONS(obj_ptr,aging_htop,ptr,aging_end);
+ break;
+ }
+
+ case TAG_PRIMARY_BOXED:
+ {
+ COPYMARK_BOXED(obj_ptr,aging_htop,ptr,aging_end);
+ break;
+ }
+ }
+ ptr++;
+ }
+ }
+
+#ifdef HIPE
+ /* Atomic phase */
+ VERBOSE(DEBUG_HYBRID_GC,("INCGC: Native stack scan: %T\n",cp->id));
+ aging_htop = ma_fullsweep_nstack(cp,aging_htop,aging_end);
+#endif
+ restore_one_rootset(cp, &rootset);
+
+ /* MT: cp can be unlocked now. But beware!! The message queue
+ * might be updated with new pointers to the fromspace while
+ * we work below. The send operation can not assume that all
+ * active processes will look through their message queue
+ * before deactivating as is the case in non-MT incremental
+ * collection.
+ */
+
+ VERBOSE(DEBUG_HYBRID_GC,("INCGC: Copy-Mark process heap\n"));
+ {
+ Eterm *ptr = cp->scan_top;
+ COPYMARK(ptr,cp->htop);
+ //cp->scan_top = ptr;
+ }
+
+ VERBOSE(DEBUG_HYBRID_GC,("INCGC: Copy-Mark heap fragments\n"));
+ {
+ ErlHeapFragment* bp = MBUF(cp);
+
+ while (WORK_MORE && bp) {
+ Eterm *ptr = bp->mem;
+ Eterm *end;
+
+ if ((ARITH_HEAP(cp) >= bp->mem) &&
+ (ARITH_HEAP(cp) < bp->mem + bp->size)) {
+ end = ARITH_HEAP(cp);
+ } else {
+ end = bp->mem + bp->size;
+ }
+
+ COPYMARK(ptr,end);
+ bp = bp->next;
+ }
+ }
+
+ VERBOSE(DEBUG_HYBRID_GC,("INCGC: Copy-Mark gray stack\n"));
+ {
+ INC_Object *obj = NULL;
+
+ while (WORK_MORE && !INC_STORAGE_EMPTY(gray)) {
+ Eterm *ptr;
+
+ obj = INC_STORAGE_GET(gray);
+ if ((*obj->this & _TAG_HEADER_MASK) == FUN_SUBTAG) {
+ ptr = obj->this + thing_arityval(*obj->this) + 1;
+ } else {
+ ptr = obj->this;
+ }
+ COPYMARK(ptr,obj->this + obj->size);
+ }
+	    /* TODO: Look at the corresponding code in minor. */
+ if (!WORK_MORE && obj != NULL)
+ INC_STORE(gray,obj->this,obj->size);
+ }
+
+ if (WORK_MORE) {
+ INC_DEACTIVATE(cp);
+ }
+ }
+
+ VERBOSE(DEBUG_HYBRID_GC,("INCGC: Copy-Mark nursery\n"));
+ {
+ Eterm *ptr = inc_nursery_scn_ptr;
+ COPYMARK(ptr,global_htop);
+ inc_nursery_scn_ptr = ptr;
+ }
+
+ VERBOSE(DEBUG_HYBRID_GC,("INCGC: Copy-Mark gray found in nursery\n"));
+ {
+ INC_Object *obj = NULL;
+
+ while (WORK_MORE && !INC_STORAGE_EMPTY(gray)) {
+ Eterm *ptr;
+
+ obj = INC_STORAGE_GET(gray);
+ if ((*obj->this & _TAG_HEADER_MASK) == FUN_SUBTAG) {
+ ptr = obj->this + thing_arityval(*obj->this) + 1;
+ } else {
+ ptr = obj->this;
+ }
+ COPYMARK(ptr,obj->this + obj->size);
+ }
+	/* TODO: Look at the corresponding code in minor. */
+ if (!WORK_MORE && obj != NULL)
+ INC_STORE(gray,obj->this,obj->size);
+ }
+
+
+ /**********************************************************************/
+ if (WORK_MORE) {
+ VERBOSE(DEBUG_HYBRID_GC,("INCGC: Sweep phase\n"));
+
+ /* Atomic phase */
+ VERBOSE(DEBUG_HYBRID_GC,("INCGC: Sweep externals in old generation\n"));
+ {
+ ExternalThing** prev = &erts_global_offheap.externals;
+ ExternalThing* ptr = erts_global_offheap.externals;
+
+ while (ptr) {
+ Eterm* ppt = (Eterm *) ptr;
+
+ if ((ptr_within(ppt, global_old_heap, global_old_hend) &&
+ blackmap[ppt - global_old_heap] == 0) ||
+ (ptr_within(ppt, inc_fromspc, inc_fromend) &&
+ !INC_IS_FORWARDED(ppt)))
+ {
+ erts_deref_node_entry(ptr->node);
+ *prev = ptr = ptr->next;
+ } else if (ptr_within(ppt, inc_fromspc, inc_fromend)) {
+ ExternalThing* ro = (ExternalThing*)INC_FORWARD_VALUE(ppt);
+ *prev = ro; /* Patch to moved pos */
+ prev = &ro->next;
+ ptr = ro->next;
+ } else {
+ prev = &ptr->next;
+ ptr = ptr->next;
+ }
+ }
+ ASSERT(*prev == NULL);
+ }
+
+ /* Atomic phase */
+ VERBOSE(DEBUG_HYBRID_GC,("INCGC: Sweep refc bins in old generation\n"));
+ {
+ ProcBin** prev = &erts_global_offheap.mso;
+ ProcBin* ptr = erts_global_offheap.mso;
+
+ while (ptr) {
+ Eterm *ppt = (Eterm*)ptr;
+
+ if ((ptr_within(ppt, global_old_heap, global_old_hend) &&
+ blackmap[ppt - global_old_heap] == 0) ||
+ (ptr_within(ppt, inc_fromspc, inc_fromend) &&
+ !INC_IS_FORWARDED(ppt)))
+ {
+ Binary* bptr;
+ *prev = ptr->next;
+ bptr = ptr->val;
+ if (erts_refc_dectest(&bptr->refc, 0) == 0)
+ erts_bin_free(bptr);
+ ptr = *prev;
+ } else if (ptr_within(ppt, inc_fromspc, inc_fromend)) {
+ ProcBin* ro = (ProcBin*)INC_FORWARD_VALUE(ppt);
+ *prev = ro; /* Patch to moved pos */
+ prev = &ro->next;
+ ptr = ro->next;
+ } else {
+ prev = &ptr->next;
+ ptr = ptr->next;
+ }
+ }
+ ASSERT(*prev == NULL);
+ }
+
+	/* TODO: Currently an atomic phase - of course it cannot stay that way. */
+ VERBOSE(DEBUG_HYBRID_GC,("INCGC: Sweep old generation\n"));
+ {
+ INC_Page *page = inc_used_mem;
+ INC_Page *prev = NULL;
+ inc_free_list = NULL;
+
+ while (page) {
+ int scavenging = 0;
+ int n = page->start - global_old_heap;
+ int stop = n + INC_PAGESIZE;
+
+ old_gen_sz += INC_PAGESIZE;
+ while (n < stop) {
+ if (blackmap[n] != 0) {
+ if (scavenging) {
+ Eterm *ptr = global_old_heap + n;
+ scavenging = 0;
+ if ((ptr - free_start) * sizeof(Eterm) >=
+ sizeof(INC_MemBlock))
+ {
+ INC_MemBlock *new = (INC_MemBlock*)free_start;
+ new->size = ptr - free_start;
+ new->prev = NULL;
+ new->next = inc_free_list;
+ if (inc_free_list)
+ inc_free_list->prev = new;
+ inc_free_list = new;
+ }
+ }
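+                    /* A blackmap entry holds an object's size in words.
+                     * Sizes that do not fit in one byte are stored as
+                     * the marker 255 followed by the real size in an
+                     * int at the next 4-aligned address in the map
+                     * (see erts_inc_alloc below and COPYMARK_BOXED in
+                     * erl_nmgc.h).
+                     */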
+ if (blackmap[n] == 255) {
+ unsigned int size =
+ *(unsigned int*)(((long)&blackmap[n]+4) & ~3);
+ live += size;
+ n += size;
+ }
+ else {
+ live += blackmap[n];
+ n += blackmap[n];
+ }
+ }
+ else if (!scavenging) {
+ free_start = global_old_heap + n;
+ scavenging = 1;
+ n++;
+ }
+ else {
+ n++;
+ }
+ }
+
+ if (scavenging) {
+ if ((global_old_heap + n - free_start) * sizeof(Eterm) >
+ sizeof(INC_MemBlock))
+ {
+ INC_MemBlock *new = (INC_MemBlock*)free_start;
+ new->size = global_old_heap + n - free_start;
+ new->prev = NULL;
+ new->next = inc_free_list;
+ if (inc_free_list)
+ inc_free_list->prev = new;
+ inc_free_list = new;
+ }
+ else if (free_start == page->start) {
+ INC_Page *next = page->next;
+
+ if (prev)
+ prev->next = page->next;
+ else
+ inc_used_mem = page->next;
+
+ page->next = inc_bibop;
+ inc_bibop = page;
+ inc_used_pages--;
+ page = next;
+ continue;
+ }
+ }
+ prev = page;
+ page = page->next;
+ }
+ }
+ }
+
+ ASSERT(inc_bibop);
+ /*
+ This code is not expected to work right now.
+ if (!inc_bibop) {
+ int i;
+ int new_pages = inc_pages * 2;
+ int size = sizeof(Eterm) * new_pages * INC_FULLPAGE;
+ Eterm *new_heap = erts_alloc(ERTS_ALC_T_MESSAGE_AREA,size);
+ Eterm *new_hend = new_heap + size;
+ Eterm *new_htop;
+ Eterm *last_page_end;
+ INC_Page *new_used_mem;
+ INC_Page *page;
+
+ erts_printf("The last page has been allocated..\n");
+ erts_printf("We need to copy things!\n");
+
+ / * Create new, bigger bag of pages * /
+ for (i = 0; i < new_pages; i++)
+ {
+ INC_Page *this =
+ (INC_Page*)(new_heap + i * INC_FULLPAGE);
+ this->next = (INC_Page*)((Eterm*)this + INC_FULLPAGE);
+ }
+ inc_bibop = (INC_Page*)new_heap;
+ ((INC_Page*)(new_heap + (new_pages - 1) *
+ INC_FULLPAGE))->next = NULL;
+
+ new_used_mem = inc_bibop;
+ inc_bibop = inc_bibop->next;
+ new_used_mem->next = NULL;
+
+ / * Move stuff from old bag to new * /
+ inc_free_list = NULL;
+ new_htop = new_used_mem->start;
+ last_page_end = new_htop + INC_PAGESIZE;
+ page = inc_used_mem;
+ while (page)
+ {
+ Eterm *ptr = page->start;
+ Eterm *page_end = ptr + INC_PAGESIZE;
+ int n = offsetof(INC_Page,start) / sizeof(void*) +
+ ((Eterm*)page - global_old_heap);
+ while (ptr < page_end)
+ {
+ if (blackmap[n] > 0)
+ {
+ if (last_page_end - new_htop < blackmap[n])
+ {
+ INC_Page *new_page = inc_bibop;
+ inc_bibop = inc_bibop->next;
+ new_page->next = new_used_mem;
+ new_used_mem = new_page;
+ new_htop = new_page->start;
+ last_page_end = new_htop + INC_PAGESIZE;
+ }
+
+ memcpy(new_htop,ptr,blackmap[n] * sizeof(Eterm));
+ for (i = 0; i < blackmap[n]; i++)
+ {
+ *ptr++ = (Eterm)new_htop++;
+ }
+ //new_htop += blackmap[n];
+ //ptr += blackmap[n];
+ / *
+ if (blackmap[n] == 255) Do the right thing...
+ * /
+ n += blackmap[n];
+ }
+ else
+ {
+ n++; ptr++;
+ }
+ }
+ page = page->next;
+ }
+
+ page = inc_used_mem;
+ while (page)
+ {
+ Eterm *ptr = page->start;
+ Eterm *page_end = ptr + INC_PAGESIZE;
+
+ / * TODO: If inc_used_mem is sorted in address order, this
+ * pass can be done at the same time as copying. * /
+ while (ptr < page_end)
+ {
+ if (ptr_within(ptr_val(*ptr),global_old_heap,global_old_hend))
+ {
+ *ptr = *((Eterm*)ptr_val(*ptr));
+ }
+ ptr++;
+ }
+ page = page->next;
+ }
+
+ printf("Restore rootset after heap move. Roots: %d\r\n",roots_saved);
+ while (roots_saved--)
+ {
+ Eterm *ptr = root_save[roots_saved];
+ *ptr = *((Eterm*)ptr_val(*ptr));
+ }
+
+ erts_free(ERTS_ALC_T_MESSAGE_AREA,(void*)global_old_heap);
+
+ global_old_heap = new_heap;
+ global_old_hend = new_hend;
+ inc_used_mem = new_used_mem;
+ inc_pages = new_pages;
+
+ if ((last_page_end - new_htop) * sizeof(Eterm) >=
+ sizeof(INC_MemBlock))
+ {
+ inc_free_list = (INC_MemBlock*)(new_htop);
+ inc_free_list->size = last_page_end - new_htop;
+ inc_free_list->prev = NULL;
+ inc_free_list->next = NULL;
+ }
+ }
+ */
+
+    /* In which situations might we want to throw in an extra page.. ( < 25% left?)
+ if ()
+ {
+ INC_Page *new_page = inc_bibop;
+ INC_MemBlock *new_free =
+ (INC_MemBlock*)new_page->start;
+
+ VERBOSE(DEBUG_HYBRID_GC,("INCGC: Fetching new page\n"));
+ inc_bibop = inc_bibop->next;
+
+ new_page->next = inc_used_mem;
+ if (inc_used_mem)
+ inc_used_mem->prev = new_page;
+ inc_used_mem = new_page;
+
+	// check this with normal page size! old_gen_sz += INC_PAGESIZE;
+ //BM_SWAP_TIMER(gc,misc1);
+ memset(blackmap +
+ ((void*)new_page - (void*)global_old_heap) / sizeof(void*),
+ 0, INC_FULLPAGE);
+ //BM_SWAP_TIMER(misc1,gc);
+
+ new_free->prev = NULL;
+ new_free->next = inc_free_list;
+ new_free->size = INC_PAGESIZE;
+ if (inc_free_list)
+ inc_free_list->prev = new_free;
+ inc_free_list = new_free;
+ //printf("Snatched a new page @ 0x%08x\r\n",(int)new_page);
+ //print_free_list();
+ found = new_free;
+ }
+ */
+
+ VERBOSE(DEBUG_HYBRID_GC,("INCGC: Update copy stack\n"));
+ {
+ Uint i;
+ for (i = 0; i < ma_dst_top; i++) {
+ if (ptr_within(ma_dst_stack[i],inc_fromspc,inc_fromend)) {
+ if (INC_IS_FORWARDED(ma_dst_stack[i]))
+ ma_dst_stack[i] = INC_FORWARD_VALUE(ma_dst_stack[i]);
+ }
+ }
+ }
+
+ if (WORK_MORE)
+ {
+ int size_left = INC_PAGESIZE - (aging_htop - aging->start);
+
+ if (size_left > sizeof(INC_MemBlock))
+ {
+ ((INC_MemBlock*)aging_htop)->size = size_left;
+ ((INC_MemBlock*)aging_htop)->prev = NULL;
+ ((INC_MemBlock*)aging_htop)->next = inc_free_list;
+ if (inc_free_list)
+ inc_free_list->prev = (INC_MemBlock*)aging_htop;
+ inc_free_list = (INC_MemBlock*)aging_htop;
+ }
+ aging->next = inc_used_mem;
+ inc_used_mem = aging;
+ inc_used_pages++;
+
+ ma_gc_flags &= ~GC_MAJOR;
+ ma_gc_flags &= ~GC_CYCLE;
+
+ VERBOSE(DEBUG_HYBRID_GC,("INCGC: Major collection cycle END\n"));
+ }
+
+ ma_gc_flags &= ~GC_INCLUDE_ALL;
+
+ BM_STOP_TIMER(old_gc);
+#ifdef BM_TIMER
+ major_global_gc_time += old_gc_time;
+ if (old_gc_time > max_global_major_time)
+ max_global_major_time = old_gc_time;
+
+ if ((old_gc_time * 1000) < MAX_PAUSE_TIME)
+ pause_times_old[(int)(old_gc_time * 1000)]++;
+ else
+ pause_times_old[MAX_PAUSE_TIME - 1]++;
+#endif
+ BM_START_TIMER(gc);
+}
+
+
+
+/***************************************************************************
+ * *
+ * Allocation in the old generation. Used in minor collection and when     *
+ * copying the rest of a message after a GC. *
+ * *
+ ***************************************************************************/
+
+
+Eterm *erts_inc_alloc(int need)
+{
+ INC_MemBlock *this = inc_free_list;
+
+ ASSERT(need < INC_PAGESIZE);
+ while (this && (this->size) < need)
+ {
+ this = this->next;
+ }
+
+ if (!this)
+ {
+ /* If a free block large enough is not found, a new page is
+ * allocated. GC_NEED_MAJOR is set so that the next garbage
+ * collection cycle will be a major one, that is, both
+ * generations will be garbage collected.
+ */
+	INC_Page *new_page = inc_bibop;
+	INC_MemBlock *new_free;
+
+	if (new_page)
+	{
+	    new_free = (INC_MemBlock*)new_page->start;
+ VERBOSE(DEBUG_HYBRID_GC,
+ ("INCGC: Allocation grabs a new page\n"));
+ inc_bibop = inc_bibop->next;
+ new_page->next = inc_used_mem;
+ inc_used_mem = new_page;
+ inc_used_pages++;
+
+ new_free->prev = NULL;
+ new_free->next = inc_free_list;
+ new_free->size = INC_PAGESIZE;
+ if (inc_free_list)
+ inc_free_list->prev = new_free;
+ inc_free_list = new_free;
+
+ this = new_free;
+ if (!(ma_gc_flags & GC_MAJOR))
+ ma_gc_flags |= GC_NEED_MAJOR;
+ }
+ else
+ {
+ erl_exit(-1, "inc_alloc ran out of pages!\n");
+ }
+ }
+
+ if (((this->size) - need) * sizeof(Eterm) >= sizeof(INC_MemBlock))
+ {
+ INC_MemBlock *rest = (INC_MemBlock*)((Eterm*)this + need);
+
+ /* The order here IS important! */
+ rest->next = this->next;
+
+ if (rest->next)
+ rest->next->prev = rest;
+
+ rest->prev = this->prev;
+
+ if (rest->prev)
+ rest->prev->next = rest;
+ else
+ inc_free_list = rest;
+
+ rest->size = this->size - need;
+ }
+ else
+ {
+ if (this->prev)
+ this->prev->next = this->next;
+ else
+ inc_free_list = this->next;
+
+ if (this->next)
+ this->next->prev = this->prev;
+ }
+
+ if (ma_gc_flags & GC_MAJOR) {
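+	/* Record the size of the new object in the blackmap so that the
+	 * sweep can step over live objects: sizes up to 254 words are
+	 * stored directly in the byte, larger sizes as the marker 255
+	 * with the real size in a following 4-aligned int.
+	 */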
+ if (need > 254) {
+ blackmap[(Eterm*)this - global_old_heap] = 255;
+ *(int*)((long)(&blackmap[(Eterm*)this - global_old_heap]+4) & ~3) =
+ need;
+ } else
+ blackmap[(Eterm*)this - global_old_heap] = need;
+ }
+ return (Eterm*)this;
+}
+#endif /* INCREMENTAL */
diff --git a/erts/emulator/beam/erl_nmgc.h b/erts/emulator/beam/erl_nmgc.h
new file mode 100644
index 0000000000..b207dd37fa
--- /dev/null
+++ b/erts/emulator/beam/erl_nmgc.h
@@ -0,0 +1,364 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2004-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+#ifndef __ERL_NMGC_H__
+#define __ERL_NMGC_H__
+
+#ifdef INCREMENTAL
+#include <stddef.h> /* offsetof() */
+#include "erl_process.h"
+
+#define INC_FULLPAGE (INC_PAGESIZE + offsetof(INC_Page,start) / sizeof(void*))
+
+#define BOXED_NEED(PTR,HDR) \
+ (((HDR) & _HEADER_SUBTAG_MASK) == SUB_BINARY_SUBTAG ? \
+ header_arity(HDR) + 2 : \
+ ((HDR) & _HEADER_SUBTAG_MASK) == FUN_SUBTAG ? \
+ header_arity(HDR) + ((ErlFunThing*)(PTR))->num_free + 2 : \
+ header_arity(HDR) + 1)
+
+
+#define INC_DECREASE_WORK(n) inc_words_to_go -= (n);
+
+#define INC_COPY_CONS(FROM,TO,PTR) \
+do { \
+ TO[0] = FROM[0]; \
+ TO[1] = FROM[1]; \
+ INC_MARK_FORWARD(FROM,TO); \
+ *(PTR) = make_list(TO); \
+ INC_DECREASE_WORK(2); \
+ (TO) += 2; \
+} while(0)
+
+#define INC_COPY_BOXED(FROM,TO,PTR) \
+do { \
+ Sint nelts; \
+ Eterm hdr = *(FROM); \
+ \
+ ASSERT(is_header(hdr)); \
+ INC_MARK_FORWARD(FROM,TO); \
+ *(PTR) = make_boxed(TO); \
+ *(TO)++ = *(FROM)++; \
+ nelts = header_arity(hdr); \
+ switch ((hdr) & _HEADER_SUBTAG_MASK) { \
+ case SUB_BINARY_SUBTAG: nelts++; break; \
+ case FUN_SUBTAG: nelts+=((ErlFunThing*)(FROM-1))->num_free+1; break;\
+ } \
+ INC_DECREASE_WORK(nelts + 1); \
+ while (nelts--) \
+ *(TO)++ = *(FROM)++; \
+} while(0)
+
+
+/* Things copied to the old generation are not marked in the blackmap.
+ * This is ok since the page they are copied to (aging) is not part of
+ * the sweep.
+ */
+#define COPYMARK_CONS(FROM,TO,PTR,LIMIT) \
+do { \
+ if (ptr_within(FROM,inc_fromspc,inc_fromend)) { \
+ if (INC_IS_FORWARDED(FROM)) { \
+ *PTR = make_list(INC_FORWARD_VALUE(FROM)); \
+ } else if (TO + 2 <= LIMIT) { \
+ INC_STORE(gray,TO,2); \
+ INC_COPY_CONS(FROM,TO,PTR); \
+ } else { \
+ Eterm *hp = erts_inc_alloc(2); \
+ INC_STORE(gray,hp,2); \
+ INC_COPY_CONS(FROM,hp,PTR); \
+ } \
+ } else if (ptr_within(FROM,global_old_heap,global_old_hend) && \
+ (blackmap[FROM - global_old_heap] == 0)) { \
+ blackmap[FROM - global_old_heap] = 2; \
+ INC_DECREASE_WORK(2); \
+ INC_STORE(gray,FROM,2); \
+ } \
+} while(0)
+
+#define COPYMARK_BOXED(FROM,TO,PTR,LIMIT) \
+do { \
+ if (ptr_within(FROM,inc_fromspc,inc_fromend)) { \
+ int size = BOXED_NEED(FROM,*FROM); \
+ if (INC_IS_FORWARDED(FROM)) { \
+ *PTR = make_boxed(INC_FORWARD_VALUE(FROM)); \
+ } else if (TO + size <= LIMIT) { \
+ INC_STORE(gray,TO,size); \
+ INC_COPY_BOXED(FROM,TO,PTR); \
+ } else { \
+ Eterm *hp = erts_inc_alloc(size); \
+ INC_STORE(gray,hp,size); \
+ INC_COPY_BOXED(FROM,hp,PTR); \
+ } \
+ } else if (ptr_within(FROM,global_old_heap,global_old_hend) && \
+ (blackmap[FROM - global_old_heap] == 0)) { \
+ int size = BOXED_NEED(FROM,*FROM); \
+ if (size > 254) { \
+ blackmap[FROM - global_old_heap] = 255; \
+ *(int*)((long)(&blackmap[FROM - \
+ global_old_heap] + 4) & ~3) = size; \
+ } else \
+ blackmap[FROM - global_old_heap] = size; \
+ INC_DECREASE_WORK(size); \
+ INC_STORE(gray,FROM,size); \
+ } \
+} while(0)
+
+#define INC_MARK_FORWARD(ptr,dst) fwdptrs[(ptr) - inc_fromspc] = (dst);
+#define INC_IS_FORWARDED(ptr) (fwdptrs[(ptr) - inc_fromspc] != 0)
+#define INC_FORWARD_VALUE(ptr) fwdptrs[(ptr) - inc_fromspc]
+
+/* Note for BM_TIMER: Active timer should always be 'system' when IncAlloc
+ * is called!
+ */
+#define IncAlloc(p, sz, objv, nobj) \
+ (ASSERT_EXPR((sz) >= 0), \
+ (((inc_alloc_limit - global_htop) <= (sz)) ? \
+ erts_incremental_gc((p),(sz),(objv),(nobj)) : 0), \
+ ASSERT_EXPR(global_hend - global_htop > (sz)), \
+ global_htop += (sz), global_htop - (sz))
+
+
+/************************************************************************
+ * INC_STORAGE, a dynamic circular storage for objects (INC_Object). *
+ * Use INC_STORE to add objects to the storage. The storage can then *
+ * be used either as a queue, using INC_STORAGE_GET to retrieve        *
+ * values, or as a stack, using INC_STORAGE_POP. It is OK to mix calls *
+ * to GET and POP if that is desired. *
+ * An iterator can be declared to traverse the storage without removing *
+ * any elements, and INC_STORAGE_STEP will then return each element in *
+ * turn, oldest first. *
+ ***********************************************************************/
+
+/* Declare a new storage; must be in the beginning of a block. Give
+ * the storage a name that is used in all later calls to the storage.
+ * If this is an external declaration of the storage, pass the keyword
+ * extern as the first argument, otherwise leave it empty.
+ */
+#define INC_STORAGE_DECLARATION(ext,name) \
+ ext INC_Storage *name##head; \
+ ext INC_Storage *name##tail; \
+ ext INC_Object *name##free; \
+ ext INC_Object *name##last_free; \
+ ext int name##size;
+
+
+/* Initialize the storage. Note that memory allocation is involved -
+ * don't forget to erase the storage when you are done.
+ */
+#define INC_STORAGE_INIT(name) do { \
+ name##head = (INC_Storage*)erts_alloc(ERTS_ALC_T_OBJECT_STACK, \
+ sizeof(INC_Storage)); \
+ name##head->next = name##head; \
+ name##head->prev = name##head; \
+ name##tail = name##head; \
+ name##free = name##head->data; \
+ name##last_free = name##free + INC_STORAGE_SIZE - 1; \
+ name##size = 0; \
+} while(0)
+
+
+/*
+#define INC_STORAGE_SWAP(s1,s2) do { \
+ INC_Storage *tmphead = s1##head; \
+ INC_Storage *tmptail = s1##tail; \
+ INC_Object *tmpfree = s1##free; \
+ INC_Object *tmplast = s1##last_free; \
+ int tmpsize = s1##size; \
+ s1##head = s2##head; \
+ s1##tail = s2##tail; \
+ s1##free = s2##free; \
+ s1##last_free = s2##last_free; \
+ s1##size = s2##size; \
+ s2##head = tmphead; \
+ s2##tail = tmptail; \
+ s2##free = tmpfree; \
+ s2##last_free = tmplast; \
+ s2##size = tmpsize; \
+} while(0)
+*/
+
+
+/* Return and remove the youngest element - treat the storage as a
+ * stack. Always check that there are elements in the storage before
+ * using INC_STORAGE_POP!
+ */
+#define INC_STORAGE_POP(name) (ASSERT_EXPR(name##size != 0), \
+ name##size--, \
+ (--name##free != name##head->data - 1) ? \
+ name##free : (name##head = name##head->prev, \
+ name##free = name##head->data + INC_STORAGE_SIZE - 1))
+
+
+/* Return and remove the oldest element - treat the storage as a
+ * queue. Always check that there are elements in the queue before
+ * using INC_STORAGE_GET!
+ */
+#define INC_STORAGE_GET(name) (ASSERT_EXPR(name##size != 0), \
+ name##size--, \
+ (++name##last_free != name##tail->data + INC_STORAGE_SIZE) ? \
+ name##last_free : (name##tail = name##tail->next, \
+ name##last_free = name##tail->data))
+
+
+/* Advance the head to the next free location. If the storage is full,
+ * a new storage is allocated and linked into the list.
+ */
+#define INC_STORAGE_NEXT(name) do { \
+ if (name##free == name##last_free) { \
+ name##tail = (INC_Storage*)erts_alloc(ERTS_ALC_T_OBJECT_STACK, \
+ sizeof(INC_Storage)); \
+ memcpy(name##tail->data,name##head->data, \
+ INC_STORAGE_SIZE * sizeof(INC_Object)); \
+ name##tail->next = name##head->next; \
+ name##head->next = name##tail; \
+ name##tail->prev = name##tail->next->prev; \
+ name##tail->next->prev = name##tail; \
+ name##last_free = ((void*)name##tail + \
+ ((void*)name##last_free - (void*)name##head)); \
+ } \
+ name##free++; \
+ name##size++; \
+ if (name##free == name##head->data + INC_STORAGE_SIZE) { \
+ name##head = name##head->next; \
+ name##free = name##head->data; \
+ } \
+} while(0)
+
+
+/* The head of this storage is the next free location. This is where
+ * the next element will be stored.
+ */
+#define INC_STORAGE_HEAD(name) (name##free)
+
+
+/* Return the top - the youngest element in the storage. */
+/* #define INC_STORAGE_TOP(name) (name##free - 1 with some magic..) */
+
+
+/* True if the storage is empty, false otherwise */
+#define INC_STORAGE_EMPTY(name) (name##size == 0)
+
+
+/* Store a new element in the head of the storage and advance the head
+ * to the next free location.
+ */
+#define INC_STORE(name,ptr,sz) do { \
+ INC_STORAGE_HEAD(name)->this = ptr; \
+ INC_STORAGE_HEAD(name)->size = sz; \
+ INC_STORAGE_NEXT(name); \
+} while(0)
+
+
+/* An iterator. Use it together with INC_STORAGE_STEP to browse through
+ * the storage. Please note that it is not possible to remove an entry
+ * in the middle of the storage; use GET or POP to remove entries.
+ */
+#define INC_STORAGE_ITERATOR(name) \
+ INC_Storage *name##iterator_head = name##tail; \
+ INC_Object *name##iterator_current = name##last_free; \
+ int name##iterator_left = name##size;
+
+
+/* Return the next element in the storage (sorted by age, oldest
+ * first) or NULL if the storage is empty or the last element has been
+ * returned already.
+ */
+#define INC_STORAGE_STEP(name) (name##iterator_left == 0 ? NULL : \
+ (name##iterator_left--, \
+ (++name##iterator_current != name##iterator_head->data + \
+ INC_STORAGE_SIZE) ? name##iterator_current : \
+ (name##iterator_head = name##iterator_head->next, \
+ name##iterator_current = name##iterator_head->data)))
+
+
+/* Erase the storage. */
+#define INC_STORAGE_ERASE(name)do { \
+ name##head->prev->next = NULL; \
+ while (name##head != NULL) { \
+ name##tail = name##head; \
+ name##head = name##head->next; \
+ erts_free(ERTS_ALC_T_OBJECT_STACK,(void*)name##tail); \
+ } \
+ name##tail = NULL; \
+ name##free = NULL; \
+ name##last_free = NULL; \
+ name##size = 0; \
+} while(0)
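+
+/* Taken together, a typical life cycle of a storage looks roughly like
+ * this (a sketch: the storage name "work" is made up, and hp is assumed
+ * to point at a live two-word object):
+ *
+ *   INC_STORAGE_DECLARATION(,work);
+ *
+ *   INC_STORAGE_INIT(work);
+ *   INC_STORE(work, hp, 2);
+ *   while (!INC_STORAGE_EMPTY(work)) {
+ *       INC_Object *obj = INC_STORAGE_GET(work);  // oldest first (queue order)
+ *       ...
+ *   }
+ *   INC_STORAGE_ERASE(work);
+ */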
+
+/*
+ * Structures used by the non-moving memory manager
+ */
+
+typedef struct
+{
+ Eterm *this;
+ unsigned long size;
+} INC_Object;
+
+typedef struct inc_storage {
+ struct inc_storage *next;
+ struct inc_storage *prev;
+ INC_Object data[INC_STORAGE_SIZE];
+} INC_Storage;
+
+typedef struct inc_mem_block
+{
+ unsigned long size;
+ struct inc_mem_block *prev;
+ struct inc_mem_block *next;
+} INC_MemBlock;
+
+typedef struct inc_page
+{
+ struct inc_page *next;
+ Eterm start[1]; /* Has to be last in struct, this is where the data start */
+} INC_Page;
+
+
+/*
+ * Heap pointers for the non-moving memory area.
+ */
+extern INC_Page *inc_used_mem;
+extern INC_MemBlock *inc_free_list;
+extern unsigned char *blackmap;
+
+extern Eterm **fwdptrs;
+extern Eterm *inc_fromspc;
+extern Eterm *inc_fromend;
+extern Process *inc_active_proc;
+extern Process *inc_active_last;
+extern Eterm *inc_alloc_limit;
+extern int inc_words_to_go;
+
+INC_STORAGE_DECLARATION(extern,gray);
+INC_STORAGE_DECLARATION(extern,root);
+
+void erts_init_incgc(void);
+void erts_cleanup_incgc(void);
+void erts_incremental_gc(Process *p, int sz, Eterm* objv, int nobj);
+Eterm *erts_inc_alloc(int need);
+
+#else
+# define INC_STORE(lst,ptr,sz)
+# define INC_MARK_FORWARD(ptr,dst)
+# define INC_IS_FORWARDED(ptr)
+# define INC_FORWARD_VALUE(ptr)
+#endif /* INCREMENTAL */
+
+#endif /* __ERL_NMGC_H__ */
diff --git a/erts/emulator/beam/erl_node_container_utils.h b/erts/emulator/beam/erl_node_container_utils.h
new file mode 100644
index 0000000000..87dbfc2a04
--- /dev/null
+++ b/erts/emulator/beam/erl_node_container_utils.h
@@ -0,0 +1,318 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2001-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+#ifndef ERL_NODE_CONTAINER_UTILS_H__
+#define ERL_NODE_CONTAINER_UTILS_H__
+
+#include "erl_term.h"
+
+/*
+ * Note regarding node containers:
+ *
+ * The term "node container" is used as a group name (internally in
+ * the emulator) for the Erlang data types that contain a reference
+ * to a node, i.e. pids, ports, and references.
+ *
+ * Observe! The layouts of the node container data types have been
+ * changed in R9.
+ *
+ * Node containers are divided into internal and external node containers.
+ * An internal node container refers to the current incarnation of the
+ * node it resides on. An external node container refers to either
+ * a remote node (i.e. a node with a different node name than the
+ * node on which the node container resides) or another incarnation
+ * of the node which the node container resides on (i.e. another node
+ * with the same node name but another creation).
+ *
+ * External node containers are boxed data types. The data of an
+ * external node container is stored on the heap together with a pointer
+ * to an element in the node table (see erl_term.h and erl_node_tables.h).
+ * The elements of the node table are garbage collected by reference
+ * counting (much like refc binaries, and funs in the separate heap case).
+ *
+ * Internal node containers are stored as they previously were (in R8)
+ * with the exception of changed internal layouts (see erl_term.h), i.e.
+ * internal pids and internal ports are immediate data types and internal
+ * references are boxed data types. An internal node container has an
+ * implicit reference to the 'erts_this_node' element in the node table.
+ *
+ * Due to the R9 changes in layouts of node containers there is room to
+ * store more data than previously. Today (R9) this extra space is unused,
+ * but it is planned to be used in the future. For example only 18 bits
+ * are used for data in a pid but there is room for 28 bits of data (on a
+ * 32-bit machine). Some preparations have been made in the emulator for
+ * usage of this extra space.
+ *
+ * OBSERVE! Pids don't use fixed-size 'serial' and 'number' fields any
+ * more. Previously the 15 bit 'number' field of a pid was used as index
+ * into the process table, and the 3 bit 'serial' field was used as a
+ * "wrap counter". The needed number of bits for index into the process
+ * table is now calculated at startup and the rest (of the 18 bits used)
+ * are used as 'serial'. In the "emulator interface" (external format,
+ * list_to_pid, etc) the least significant 15 bits are presented as
+ * 'number' and the most significant 3 bits are presented as 'serial',
+ * though. The macro internal_pid_index() can be used for retrieving
+ * index into the process table. Do *not* use the result from
+ * pid_number() as an index into the process table. The pid_number() and
+ * pid_serial() (and friends) fetch the old fixed size 'number' and
+ * 'serial' fields.
+ */
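+
+/* As an illustration of the presentation described above, the 18 data
+ * bits would be split like this (hypothetical helper macros; use
+ * internal_pid_index() - never pid_number() - to index the process
+ * table):
+ *
+ *   #define EXAMPLE_PID_NUMBER(Data) ((Data) & ((1 << 15) - 1))
+ *   #define EXAMPLE_PID_SERIAL(Data) (((Data) >> 15) & ((1 << 3) - 1))
+ */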
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\
+ * Node containers *
+\* */
+
+#define node_container_node_name(x) (is_external(x) \
+ ? external_node_name((x)) \
+ : internal_node_name((x)))
+#define node_container_creation(x) (is_external(x) \
+ ? external_creation((x)) \
+ : internal_creation((x)))
+#define node_container_dist_entry(x) (is_external(x) \
+ ? external_dist_entry((x)) \
+ : internal_dist_entry((x)))
+#define node_container_channel_no(x) (is_external((x)) \
+ ? external_channel_no((x)) \
+ : internal_channel_no((x)))
+#define is_node_container(x) (is_external((x)) || is_internal((x)))
+#define is_not_node_container(x) (!is_node_container((x)))
+
+#define is_internal(x) (is_internal_pid((x)) \
+ || is_internal_port((x)) \
+ || is_internal_ref((x)))
+#define is_not_internal(x) (!is_internal((x)))
+#define internal_node_name(x) (erts_this_node->sysname)
+#define external_node_name(x) external_node((x))->sysname
+#define internal_creation(x) (erts_this_node->creation)
+#define external_creation(x) (external_node((x))->creation)
+#define internal_dist_entry(x) (erts_this_node->dist_entry)
+#define external_dist_entry(x) (external_node((x))->dist_entry)
+
+extern int erts_use_r9_pids_ports;
+
+/*
+ * For this node (and previous incarnations of this node), 0 is used as
+ * channel no. For other nodes, the atom index of the atom corresponding
+ * to the node name is used as channel no.
+ *
+ * (We used to assert for correct node names, but we removed that assertion
+ * as it is possible to sneak in incorrect node names for instance using
+ * the external format.)
+ */
+#define dist_entry_channel_no(x) \
+ ((x) == erts_this_dist_entry \
+ ? ((Uint) 0) \
+ : (ASSERT_EXPR(is_atom((x)->sysname)), \
+ (Uint) atom_val((x)->sysname)))
+#define internal_channel_no(x) ((Uint) ERST_INTERNAL_CHANNEL_NO)
+#define external_channel_no(x) \
+ (dist_entry_channel_no(external_dist_entry((x))))
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\
+ * Pids *
+\* */
+
+#define internal_pid_index(x) (internal_pid_data((x)) \
+ & erts_process_tab_index_mask)
+
+#define internal_pid_node_name(x) (internal_pid_node((x))->sysname)
+#define external_pid_node_name(x) (external_pid_node((x))->sysname)
+#define internal_pid_creation(x) (internal_pid_node((x))->creation)
+#define external_pid_creation(x) (external_pid_node((x))->creation)
+#define internal_pid_dist_entry(x) (internal_pid_node((x))->dist_entry)
+#define external_pid_dist_entry(x) (external_pid_node((x))->dist_entry)
+
+#define internal_pid_channel_no(x) (internal_channel_no((x)))
+#define external_pid_channel_no(x) (external_channel_no((x)))
+
+#define pid_data_words(x) (is_internal_pid((x)) \
+ ? internal_pid_data_words((x)) \
+ : external_pid_data_words((x)))
+#define pid_number(x) (is_internal_pid((x)) \
+ ? internal_pid_number((x)) \
+ : external_pid_number((x)))
+#define pid_serial(x) (is_internal_pid((x)) \
+ ? internal_pid_serial((x)) \
+ : external_pid_serial((x)))
+#define pid_node(x) (is_internal_pid((x)) \
+ ? internal_pid_node((x)) \
+ : external_pid_node((x)))
+#define pid_node_name(x) (is_internal_pid((x)) \
+ ? internal_pid_node_name((x)) \
+ : external_pid_node_name((x)))
+#define pid_creation(x) (is_internal_pid((x)) \
+ ? internal_pid_creation((x)) \
+ : external_pid_creation((x)))
+#define pid_dist_entry(x) (is_internal_pid((x)) \
+ ? internal_pid_dist_entry((x)) \
+ : external_pid_dist_entry((x)))
+#define pid_channel_no(x) (is_internal_pid((x)) \
+ ? internal_pid_channel_no((x)) \
+ : external_pid_channel_no((x)))
+#define is_pid(x) (is_internal_pid((x)) \
+ || is_external_pid((x)))
+#define is_not_pid(x) (!is_pid(x))
+
+#define ERTS_MAX_R9_PROCESSES (1 << ERTS_R9_PROC_BITS)
+
+/*
+ * Maximum number of processes. We want the number to fit in a SMALL on
+ * a 32-bit CPU.
+ */
+
+#define ERTS_MAX_PROCESSES ((1L << 27)-1)
+#if (ERTS_MAX_PROCESSES > MAX_SMALL)
+# error "The maximum number of processes must fit in a SMALL."
+#endif
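+
+/* Sketch of the arithmetic behind the check above (assuming the usual
+ * 32-bit term layout, where a SMALL carries 28 value bits including the
+ * sign): MAX_SMALL is then (1 << 27) - 1, so ERTS_MAX_PROCESSES is the
+ * largest process count still representable as a SMALL. */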
+
+#define ERTS_MAX_PID_DATA ((1 << _PID_DATA_SIZE) - 1)
+#define ERTS_MAX_PID_NUMBER ((1 << _PID_NUM_SIZE) - 1)
+#define ERTS_MAX_PID_SERIAL ((1 << _PID_SER_SIZE) - 1)
+#define ERTS_MAX_PID_R9_SERIAL ((1 << _PID_R9_SER_SIZE) - 1)
+
+#define ERTS_R9_PROC_BITS (_PID_R9_SER_SIZE + _PID_NUM_SIZE)
+#define ERTS_PROC_BITS (_PID_SER_SIZE + _PID_NUM_SIZE)
+
+#define ERTS_INVALID_PID make_internal_pid(ERTS_MAX_PID_DATA)
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\
+ *                                   Ports                                   *
+\* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+#define internal_port_index(x) (internal_port_data((x)) \
+ & erts_port_tab_index_mask)
+
+#define internal_port_node_name(x) (internal_port_node((x))->sysname)
+#define external_port_node_name(x) (external_port_node((x))->sysname)
+#define internal_port_creation(x) (internal_port_node((x))->creation)
+#define external_port_creation(x) (external_port_node((x))->creation)
+#define internal_port_dist_entry(x) (internal_port_node((x))->dist_entry)
+#define external_port_dist_entry(x) (external_port_node((x))->dist_entry)
+
+#define internal_port_channel_no(x) (internal_channel_no((x)))
+#define external_port_channel_no(x) (external_channel_no((x)))
+
+#define port_data_words(x) (is_internal_port((x)) \
+ ? internal_port_data_words((x))\
+ : external_port_data_words((x)))
+#define port_number(x) (is_internal_port((x)) \
+ ? internal_port_number((x)) \
+ : external_port_number((x)))
+#define port_node(x) (is_internal_port((x)) \
+ ? internal_port_node((x)) \
+ : external_port_node((x)))
+#define port_node_name(x) (is_internal_port((x)) \
+ ? internal_port_node_name((x)) \
+ : external_port_node_name((x)))
+#define port_creation(x) (is_internal_port((x)) \
+ ? internal_port_creation((x)) \
+ : external_port_creation((x)))
+#define port_dist_entry(x) (is_internal_port((x)) \
+ ? internal_port_dist_entry((x))\
+ : external_port_dist_entry((x)))
+#define port_channel_no(x) (is_internal_port((x)) \
+ ? internal_port_channel_no((x))\
+ : external_port_channel_no((x)))
+
+#define is_port(x) (is_internal_port((x)) \
+ || is_external_port((x)))
+#define is_not_port(x) (!is_port(x))
+
+/* Highest port-ID part in a term of type Port.
+ Not necessarily the same as the variable erts_max_ports,
+ which defines the maximum number of simultaneous ports
+ in the Erlang node. ERTS_MAX_PORTS is a hard upper limit.
+*/
+#define ERTS_MAX_R9_PORTS (1 << ERTS_R9_PORTS_BITS)
+#define ERTS_MAX_PORTS (1 << ERTS_PORTS_BITS)
+
+#define ERTS_MAX_PORT_DATA ((1 << _PORT_DATA_SIZE) - 1)
+#define ERTS_MAX_PORT_NUMBER ((1 << _PORT_NUM_SIZE) - 1)
+
+#define ERTS_R9_PORTS_BITS (_PORT_R9_NUM_SIZE)
+#define ERTS_PORTS_BITS (_PORT_NUM_SIZE)
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\
+ *                                    Refs                                   *
+\* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+#ifdef ARCH_64
+
+#define internal_ref_no_of_numbers(x) \
+ (internal_ref_data((x))[0])
+#define internal_ref_numbers(x) \
+ (&internal_ref_data((x))[1])
+#define external_ref_no_of_numbers(x) \
+ (external_ref_data((x))[0])
+#define external_ref_numbers(x) \
+ (&external_ref_data((x))[1])
+
+#else
+
+#define internal_ref_no_of_numbers(x) (internal_ref_data_words((x)))
+#define internal_ref_numbers(x) (internal_ref_data((x)))
+#define external_ref_no_of_numbers(x) (external_ref_data_words((x)))
+#define external_ref_numbers(x) (external_ref_data((x)))
+
+#endif
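+
+/* (Sketch: on 64-bit platforms each data word is 64 bits wide, so the
+ * first data word is reused to hold the count of 32-bit ref numbers and
+ * the numbers are stored from the second word on; on 32-bit platforms
+ * every data word is itself one ref number.) */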
+
+#define internal_ref_node_name(x) (internal_ref_node((x))->sysname)
+#define external_ref_node_name(x) (external_ref_node((x))->sysname)
+#define internal_ref_creation(x) (internal_ref_node((x))->creation)
+#define external_ref_creation(x) (external_ref_node((x))->creation)
+#define internal_ref_dist_entry(x) (internal_ref_node((x))->dist_entry)
+#define external_ref_dist_entry(x) (external_ref_node((x))->dist_entry)
+
+
+#define internal_ref_channel_no(x) (internal_channel_no((x)))
+#define external_ref_channel_no(x) (external_channel_no((x)))
+
+#define ref_data_words(x) (is_internal_ref((x)) \
+ ? internal_ref_data_words((x)) \
+ : external_ref_data_words((x)))
+#define ref_data(x) (is_internal_ref((x)) \
+ ? internal_ref_data((x)) \
+ : external_ref_data((x)))
+#define ref_no_of_numbers(x) (is_internal_ref((x)) \
+ ? internal_ref_no_of_numbers((x))\
+ : external_ref_no_of_numbers((x)))
+#define ref_numbers(x) (is_internal_ref((x)) \
+ ? internal_ref_numbers((x)) \
+ : external_ref_numbers((x)))
+#define ref_node(x) (is_internal_ref((x)) \
+ ? internal_ref_node(x) \
+ : external_ref_node((x)))
+#define ref_node_name(x) (is_internal_ref((x)) \
+ ? internal_ref_node_name((x)) \
+ : external_ref_node_name((x)))
+#define ref_creation(x) (is_internal_ref((x)) \
+ ? internal_ref_creation((x)) \
+ : external_ref_creation((x)))
+#define ref_dist_entry(x) (is_internal_ref((x)) \
+ ? internal_ref_dist_entry((x)) \
+ : external_ref_dist_entry((x)))
+#define ref_channel_no(x) (is_internal_ref((x)) \
+ ? internal_ref_channel_no((x)) \
+ : external_ref_channel_no((x)))
+#define is_ref(x) (is_internal_ref((x)) \
+ || is_external_ref((x)))
+#define is_not_ref(x) (!is_ref(x))
+
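+/*
+ * Minimal usage sketch (illustrative; print_origin is a hypothetical
+ * helper, not part of ERTS): the dispatching macros above let callers
+ * treat internal and external terms uniformly.
+ *
+ *     static void print_origin(Eterm pid)
+ *     {
+ *         if (is_pid(pid))
+ *             erts_printf("node: %T, number: %u\n",
+ *                         pid_node_name(pid),
+ *                         (unsigned) pid_number(pid));
+ *     }
+ */
+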
+#endif
+
+
diff --git a/erts/emulator/beam/erl_node_tables.c b/erts/emulator/beam/erl_node_tables.c
new file mode 100644
index 0000000000..42b28d987c
--- /dev/null
+++ b/erts/emulator/beam/erl_node_tables.c
@@ -0,0 +1,1660 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2001-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+
+#include "global.h"
+#include "erl_node_tables.h"
+#include "dist.h"
+#include "big.h"
+#include "error.h"
+
+Hash erts_dist_table;
+Hash erts_node_table;
+erts_smp_rwmtx_t erts_dist_table_rwmtx;
+erts_smp_rwmtx_t erts_node_table_rwmtx;
+
+DistEntry *erts_hidden_dist_entries;
+DistEntry *erts_visible_dist_entries;
+DistEntry *erts_not_connected_dist_entries;
+Sint erts_no_of_hidden_dist_entries;
+Sint erts_no_of_visible_dist_entries;
+Sint erts_no_of_not_connected_dist_entries;
+
+DistEntry *erts_this_dist_entry;
+ErlNode *erts_this_node;
+
+static Uint node_entries;
+static Uint dist_entries;
+
+static int references_atoms_need_init = 1;
+
+/* -- The distribution table ---------------------------------------------- */
+
+#ifdef DEBUG
+static int
+is_in_de_list(DistEntry *dep, DistEntry *dep_list)
+{
+ DistEntry *tdep;
+ for(tdep = dep_list; tdep; tdep = tdep->next)
+ if(tdep == dep)
+ return 1;
+ return 0;
+}
+#endif
+
+static HashValue
+dist_table_hash(void *dep)
+{
+ return atom_tab(atom_val(((DistEntry *) dep)->sysname))->slot.bucket.hvalue;
+}
+
+static int
+dist_table_cmp(void *dep1, void *dep2)
+{
+ return (((DistEntry *) dep1)->sysname == ((DistEntry *) dep2)->sysname
+ ? 0 : 1);
+}
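+
+/*
+ * Note (illustrative): a dist entry hashes to the hash value already
+ * computed for its node-name atom, and two entries compare equal exactly
+ * when their sysname atoms are identical, since atoms are unique within
+ * the VM.
+ */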
+
+static void*
+dist_table_alloc(void *dep_tmpl)
+{
+ Eterm chnl_nr;
+ Eterm sysname;
+ DistEntry *dep;
+
+ if(((DistEntry *) dep_tmpl) == erts_this_dist_entry)
+ return dep_tmpl;
+
+ sysname = ((DistEntry *) dep_tmpl)->sysname;
+ chnl_nr = make_small((Uint) atom_val(sysname));
+ dep = (DistEntry *) erts_alloc(ERTS_ALC_T_DIST_ENTRY, sizeof(DistEntry));
+
+ dist_entries++;
+
+ dep->prev = NULL;
+ erts_refc_init(&dep->refc, -1);
+ erts_smp_rwmtx_init_x(&dep->rwmtx, "dist_entry", chnl_nr);
+ dep->sysname = sysname;
+ dep->cid = NIL;
+ dep->connection_id = 0;
+ dep->status = 0;
+ dep->flags = 0;
+ dep->version = 0;
+
+ erts_smp_mtx_init_x(&dep->lnk_mtx, "dist_entry_links", chnl_nr);
+ dep->node_links = NULL;
+ dep->nlinks = NULL;
+ dep->monitors = NULL;
+
+ erts_smp_spinlock_init_x(&dep->qlock, "dist_entry_out_queue", chnl_nr);
+ dep->qflgs = 0;
+ dep->qsize = 0;
+ dep->out_queue.first = NULL;
+ dep->out_queue.last = NULL;
+ dep->suspended.first = NULL;
+ dep->suspended.last = NULL;
+
+ dep->finalized_out_queue.first = NULL;
+ dep->finalized_out_queue.last = NULL;
+
+ erts_smp_atomic_init(&dep->dist_cmd_scheduled, 0);
+ erts_port_task_handle_init(&dep->dist_cmd);
+ dep->send = NULL;
+ dep->cache = NULL;
+
+ /* Link in */
+
+ /* All new dist entries are "not connected" */
+ dep->next = erts_not_connected_dist_entries;
+ if(erts_not_connected_dist_entries) {
+ ASSERT(erts_not_connected_dist_entries->prev == NULL);
+ erts_not_connected_dist_entries->prev = dep;
+ }
+ erts_not_connected_dist_entries = dep;
+ erts_no_of_not_connected_dist_entries++;
+
+ return (void *) dep;
+}
+
+static void
+dist_table_free(void *vdep)
+{
+ DistEntry *dep = (DistEntry *) vdep;
+
+ if(dep == erts_this_dist_entry)
+ return;
+
+ ASSERT(is_nil(dep->cid));
+ ASSERT(dep->nlinks == NULL);
+ ASSERT(dep->node_links == NULL);
+ ASSERT(dep->monitors == NULL);
+
+ /* Link out */
+
+ /* All dist entries about to be removed are "not connected" */
+
+ if(dep->prev) {
+ ASSERT(is_in_de_list(dep, erts_not_connected_dist_entries));
+ dep->prev->next = dep->next;
+ }
+ else {
+ ASSERT(erts_not_connected_dist_entries == dep);
+ erts_not_connected_dist_entries = dep->next;
+ }
+
+ if(dep->next)
+ dep->next->prev = dep->prev;
+
+ ASSERT(erts_no_of_not_connected_dist_entries > 0);
+ erts_no_of_not_connected_dist_entries--;
+
+ ASSERT(!dep->cache);
+ erts_smp_rwmtx_destroy(&dep->rwmtx);
+ erts_smp_mtx_destroy(&dep->lnk_mtx);
+ erts_smp_spinlock_destroy(&dep->qlock);
+
+#ifdef DEBUG
+ sys_memset(vdep, 0x77, sizeof(DistEntry));
+#endif
+ erts_free(ERTS_ALC_T_DIST_ENTRY, (void *) dep);
+
+ ASSERT(dist_entries > 1);
+ dist_entries--;
+}
+
+
+void
+erts_dist_table_info(int to, void *to_arg)
+{
+ int lock = !ERTS_IS_CRASH_DUMPING;
+ if (lock)
+ erts_smp_rwmtx_rlock(&erts_dist_table_rwmtx);
+ hash_info(to, to_arg, &erts_dist_table);
+ if (lock)
+ erts_smp_rwmtx_runlock(&erts_dist_table_rwmtx);
+}
+
+DistEntry *
+erts_channel_no_to_dist_entry(Uint cno)
+{
+/*
+ * For this node (and previous incarnations of this node),
+ * ERST_INTERNAL_CHANNEL_NO (always 0) is used as channel no. For other
+ * nodes, the atom index of the atom corresponding to the node name is
+ * used as channel no.
+ */
+ if(cno == ERST_INTERNAL_CHANNEL_NO) {
+ erts_refc_inc(&erts_this_dist_entry->refc, 2);
+ return erts_this_dist_entry;
+ }
+
+ if((cno > MAX_ATOM_INDEX)
+ || (cno >= atom_table_size())
+ || (atom_tab(cno) == NULL))
+ return NULL;
+
+ /* cno is a valid atom index; find corresponding dist entry (if there
+ is one) */
+ return erts_find_dist_entry(make_atom(cno));
+}
+
+
+DistEntry *
+erts_sysname_to_connected_dist_entry(Eterm sysname)
+{
+ DistEntry de;
+ DistEntry *res_dep;
+ de.sysname = sysname;
+
+ if(erts_this_dist_entry->sysname == sysname) {
+ erts_refc_inc(&erts_this_dist_entry->refc, 2);
+ return erts_this_dist_entry;
+ }
+
+ erts_smp_rwmtx_rlock(&erts_dist_table_rwmtx);
+ res_dep = (DistEntry *) hash_get(&erts_dist_table, (void *) &de);
+ if (res_dep) {
+ long refc = erts_refc_inctest(&res_dep->refc, 1);
+ if (refc < 2) /* Pending delete */
+ erts_refc_inc(&res_dep->refc, 1);
+ }
+ erts_smp_rwmtx_runlock(&erts_dist_table_rwmtx);
+ if (res_dep) {
+ int deref;
+ erts_smp_rwmtx_rlock(&res_dep->rwmtx);
+ deref = is_nil(res_dep->cid);
+ erts_smp_rwmtx_runlock(&res_dep->rwmtx);
+ if (deref) {
+ erts_deref_dist_entry(res_dep);
+ res_dep = NULL;
+ }
+ }
+ return res_dep;
+}
+
+DistEntry *erts_find_or_insert_dist_entry(Eterm sysname)
+{
+ DistEntry *res;
+ DistEntry de;
+ long refc;
+ res = erts_find_dist_entry(sysname);
+ if (res)
+ return res;
+ de.sysname = sysname;
+ erts_smp_rwmtx_rwlock(&erts_dist_table_rwmtx);
+ res = hash_put(&erts_dist_table, (void *) &de);
+ refc = erts_refc_inctest(&res->refc, 0);
+ if (refc < 2) /* New or pending delete */
+ erts_refc_inc(&res->refc, 1);
+ erts_smp_rwmtx_rwunlock(&erts_dist_table_rwmtx);
+ return res;
+}
+
+DistEntry *erts_find_dist_entry(Eterm sysname)
+{
+ DistEntry *res;
+ DistEntry de;
+ de.sysname = sysname;
+ erts_smp_rwmtx_rlock(&erts_dist_table_rwmtx);
+ res = hash_get(&erts_dist_table, (void *) &de);
+ if (res) {
+ long refc = erts_refc_inctest(&res->refc, 1);
+ if (refc < 2) /* Pending delete */
+ erts_refc_inc(&res->refc, 1);
+ }
+ erts_smp_rwmtx_runlock(&erts_dist_table_rwmtx);
+ return res;
+}
+
+void erts_delete_dist_entry(DistEntry *dep)
+{
+ ASSERT(dep != erts_this_dist_entry);
+ if(dep != erts_this_dist_entry) {
+ erts_smp_rwmtx_rwlock(&erts_dist_table_rwmtx);
+ /*
+ * Another thread might have looked up this dist entry after
+ * we decided to delete it (refc became zero). If so, the other
+ * thread incremented refc twice. Once for the new reference
+ * and once for this thread. Therefore, delete dist entry if
+ * refc is 0 or -1 after a decrement.
+ */
+ if (erts_refc_dectest(&dep->refc, -1) <= 0)
+ (void) hash_erase(&erts_dist_table, (void *) dep);
+ erts_smp_rwmtx_rwunlock(&erts_dist_table_rwmtx);
+ }
+}
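+
+/*
+ * Sketch of the deletion protocol above (illustrative): a dropped
+ * reference ends up here via erts_deref_dist_entry() when refc reaches
+ * zero. A lookup that raced the zero-crossing has incremented refc by
+ * two (one for its caller, one to compensate for this pending delete),
+ * so after the decrement above the entry is erased only when refc is
+ * 0 or -1.
+ */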
+
+Uint
+erts_dist_table_size(void)
+{
+ Uint res;
+#ifdef DEBUG
+ HashInfo hi;
+ DistEntry *dep;
+ int i;
+#endif
+ int lock = !ERTS_IS_CRASH_DUMPING;
+
+ if (lock)
+ erts_smp_rwmtx_rlock(&erts_dist_table_rwmtx);
+#ifdef DEBUG
+ hash_get_info(&hi, &erts_dist_table);
+ ASSERT(dist_entries == hi.objs);
+
+ i = 0;
+ for(dep = erts_visible_dist_entries; dep; dep = dep->next)
+ i++;
+ ASSERT(i == erts_no_of_visible_dist_entries);
+ i = 0;
+ for(dep = erts_hidden_dist_entries; dep; dep = dep->next)
+ i++;
+ ASSERT(i == erts_no_of_hidden_dist_entries);
+ i = 0;
+ for(dep = erts_not_connected_dist_entries; dep; dep = dep->next)
+ i++;
+ ASSERT(i == erts_no_of_not_connected_dist_entries);
+
+ ASSERT(dist_entries == (erts_no_of_visible_dist_entries
+ + erts_no_of_hidden_dist_entries
+ + erts_no_of_not_connected_dist_entries
+ + 1 /* erts_this_dist_entry */));
+#endif
+
+ res = (hash_table_sz(&erts_dist_table)
+ + dist_entries*sizeof(DistEntry)
+ + erts_dist_cache_size());
+ if (lock)
+ erts_smp_rwmtx_runlock(&erts_dist_table_rwmtx);
+ return res;
+}
+
+void
+erts_set_dist_entry_not_connected(DistEntry *dep)
+{
+ ERTS_SMP_LC_ASSERT(erts_lc_is_de_rwlocked(dep));
+ erts_smp_rwmtx_rwlock(&erts_dist_table_rwmtx);
+
+ ASSERT(dep != erts_this_dist_entry);
+ ASSERT(is_internal_port(dep->cid));
+
+ if(dep->flags & DFLAG_PUBLISHED) {
+ if(dep->prev) {
+ ASSERT(is_in_de_list(dep, erts_visible_dist_entries));
+ dep->prev->next = dep->next;
+ }
+ else {
+ ASSERT(erts_visible_dist_entries == dep);
+ erts_visible_dist_entries = dep->next;
+ }
+
+ ASSERT(erts_no_of_visible_dist_entries > 0);
+ erts_no_of_visible_dist_entries--;
+ }
+ else {
+ if(dep->prev) {
+ ASSERT(is_in_de_list(dep, erts_hidden_dist_entries));
+ dep->prev->next = dep->next;
+ }
+ else {
+ ASSERT(erts_hidden_dist_entries == dep);
+ erts_hidden_dist_entries = dep->next;
+ }
+
+ ASSERT(erts_no_of_hidden_dist_entries > 0);
+ erts_no_of_hidden_dist_entries--;
+ }
+
+ if(dep->next)
+ dep->next->prev = dep->prev;
+
+ dep->status &= ~ERTS_DE_SFLG_CONNECTED;
+ dep->flags = 0;
+ dep->prev = NULL;
+ dep->cid = NIL;
+
+ dep->next = erts_not_connected_dist_entries;
+ if(erts_not_connected_dist_entries) {
+ ASSERT(erts_not_connected_dist_entries->prev == NULL);
+ erts_not_connected_dist_entries->prev = dep;
+ }
+ erts_not_connected_dist_entries = dep;
+ erts_no_of_not_connected_dist_entries++;
+ erts_smp_rwmtx_rwunlock(&erts_dist_table_rwmtx);
+}
+
+void
+erts_set_dist_entry_connected(DistEntry *dep, Eterm cid, Uint flags)
+{
+ ERTS_SMP_LC_ASSERT(erts_lc_is_de_rwlocked(dep));
+ erts_smp_rwmtx_rwlock(&erts_dist_table_rwmtx);
+
+ ASSERT(dep != erts_this_dist_entry);
+ ASSERT(is_nil(dep->cid));
+ ASSERT(is_internal_port(cid));
+
+ if(dep->prev) {
+ ASSERT(is_in_de_list(dep, erts_not_connected_dist_entries));
+ dep->prev->next = dep->next;
+ }
+ else {
+ ASSERT(erts_not_connected_dist_entries == dep);
+ erts_not_connected_dist_entries = dep->next;
+ }
+
+ if(dep->next)
+ dep->next->prev = dep->prev;
+
+ ASSERT(erts_no_of_not_connected_dist_entries > 0);
+ erts_no_of_not_connected_dist_entries--;
+
+ dep->status |= ERTS_DE_SFLG_CONNECTED;
+ dep->flags = flags;
+ dep->cid = cid;
+ dep->connection_id++;
+ dep->connection_id &= ERTS_DIST_EXT_CON_ID_MASK;
+ dep->prev = NULL;
+
+ if(flags & DFLAG_PUBLISHED) {
+ dep->next = erts_visible_dist_entries;
+ if(erts_visible_dist_entries) {
+ ASSERT(erts_visible_dist_entries->prev == NULL);
+ erts_visible_dist_entries->prev = dep;
+ }
+ erts_visible_dist_entries = dep;
+ erts_no_of_visible_dist_entries++;
+ }
+ else {
+ dep->next = erts_hidden_dist_entries;
+ if(erts_hidden_dist_entries) {
+ ASSERT(erts_hidden_dist_entries->prev == NULL);
+ erts_hidden_dist_entries->prev = dep;
+ }
+ erts_hidden_dist_entries = dep;
+ erts_no_of_hidden_dist_entries++;
+ }
+ erts_smp_rwmtx_rwunlock(&erts_dist_table_rwmtx);
+}
+
+/* -- Node table --------------------------------------------------------- */
+
+/* Some large primes */
+#define PRIME0 ((HashValue) 268438039)
+#define PRIME1 ((HashValue) 268440479)
+#define PRIME2 ((HashValue) 268439161)
+#define PRIME3 ((HashValue) 268437017)
+
+static HashValue
+node_table_hash(void *venp)
+{
+ Uint32 cre = ((ErlNode *) venp)->creation;
+ HashValue h = atom_tab(atom_val(((ErlNode *) venp)->sysname))->slot.bucket.hvalue;
+
+ h *= PRIME0;
+ h += cre & 0xff;
+
+#if MAX_CREATION >= (1 << 8)
+ h *= PRIME1;
+ h += (cre >> 8) & 0xff;
+#endif
+
+#if MAX_CREATION >= (1 << 16)
+ h *= PRIME2;
+ h += (cre >> 16) & 0xff;
+#endif
+
+#if MAX_CREATION >= (1 << 24)
+ h *= PRIME3;
+ h += (cre >> 24) & 0xff;
+#endif
+
+#if 0
+/* XXX Problems in older versions of GCC */
+ #if MAX_CREATION >= (1UL << 32)
+ #error "MAX_CREATION larger than size of expected creation storage (Uint32)"
+ #endif
+#endif
+ return h;
+}
+
+static int
+node_table_cmp(void *venp1, void *venp2)
+{
+ return ((((ErlNode *) venp1)->sysname == ((ErlNode *) venp2)->sysname
+ && ((ErlNode *) venp1)->creation == ((ErlNode *) venp2)->creation)
+ ? 0
+ : 1);
+}
+
+static void*
+node_table_alloc(void *venp_tmpl)
+{
+ ErlNode *enp;
+
+ if(((ErlNode *) venp_tmpl) == erts_this_node)
+ return venp_tmpl;
+
+ enp = (ErlNode *) erts_alloc(ERTS_ALC_T_NODE_ENTRY, sizeof(ErlNode));
+
+ node_entries++;
+
+ erts_refc_init(&enp->refc, -1);
+ enp->creation = ((ErlNode *) venp_tmpl)->creation;
+ enp->sysname = ((ErlNode *) venp_tmpl)->sysname;
+ enp->dist_entry = erts_find_or_insert_dist_entry(((ErlNode *) venp_tmpl)->sysname);
+
+ return (void *) enp;
+}
+
+static void
+node_table_free(void *venp)
+{
+ ErlNode *enp = (ErlNode *) venp;
+
+ if(enp == erts_this_node)
+ return;
+
+ erts_deref_dist_entry(enp->dist_entry);
+#ifdef DEBUG
+ sys_memset(venp, 0x55, sizeof(ErlNode));
+#endif
+ erts_free(ERTS_ALC_T_NODE_ENTRY, venp);
+
+ ASSERT(node_entries > 1);
+ node_entries--;
+}
+
+Uint
+erts_node_table_size(void)
+{
+ Uint res;
+#ifdef DEBUG
+ HashInfo hi;
+#endif
+ int lock = !ERTS_IS_CRASH_DUMPING;
+ if (lock)
+ erts_smp_rwmtx_rwlock(&erts_node_table_rwmtx);
+#ifdef DEBUG
+ hash_get_info(&hi, &erts_node_table);
+ ASSERT(node_entries == hi.objs);
+#endif
+ res = hash_table_sz(&erts_node_table) + node_entries*sizeof(ErlNode);
+ if (lock)
+ erts_smp_rwmtx_rwunlock(&erts_node_table_rwmtx);
+ return res;
+}
+
+void
+erts_node_table_info(int to, void *to_arg)
+{
+ int lock = !ERTS_IS_CRASH_DUMPING;
+ if (lock)
+ erts_smp_rwmtx_rwlock(&erts_node_table_rwmtx);
+ hash_info(to, to_arg, &erts_node_table);
+ if (lock)
+ erts_smp_rwmtx_rwunlock(&erts_node_table_rwmtx);
+}
+
+
+ErlNode *erts_find_or_insert_node(Eterm sysname, Uint creation)
+{
+ ErlNode *res;
+ ErlNode ne;
+ ne.sysname = sysname;
+ ne.creation = creation;
+ erts_smp_rwmtx_rwlock(&erts_node_table_rwmtx);
+ res = hash_put(&erts_node_table, (void *) &ne);
+ ASSERT(res);
+ if (res != erts_this_node) {
+ long refc = erts_refc_inctest(&res->refc, 0);
+ if (refc < 2) /* New or pending delete */
+ erts_refc_inc(&res->refc, 1);
+ }
+ erts_smp_rwmtx_rwunlock(&erts_node_table_rwmtx);
+ return res;
+}
+
+void erts_delete_node(ErlNode *enp)
+{
+ ASSERT(enp != erts_this_node);
+ if(enp != erts_this_node) {
+ erts_smp_rwmtx_rwlock(&erts_node_table_rwmtx);
+ /*
+ * Another thread might have looked up this node after we
+ * decided to delete it (refc became zero). If so, the other
+ * thread incremented refc twice. Once for the new reference
+ * and once for this thread. Therefore, delete node if refc
+ * is 0 or -1 after a decrement.
+ */
+ if (erts_refc_dectest(&enp->refc, -1) <= 0)
+ (void) hash_erase(&erts_node_table, (void *) enp);
+ erts_smp_rwmtx_rwunlock(&erts_node_table_rwmtx);
+ }
+}
+
+struct pn_data {
+ int to;
+ void *to_arg;
+ Eterm sysname;
+ int no_sysname;
+ int no_total;
+};
+
+static void print_node(void *venp, void *vpndp)
+{
+ struct pn_data *pndp = ((struct pn_data *) vpndp);
+ ErlNode *enp = ((ErlNode *) venp);
+
+ if(pndp->sysname == NIL
+ || enp->sysname == pndp->sysname) {
+ if (pndp->no_sysname == 0) {
+ erts_print(pndp->to, pndp->to_arg, "Creation:");
+ }
+ if(pndp->sysname == NIL) {
+ erts_print(pndp->to, pndp->to_arg, "Name: %T ", enp->sysname);
+ }
+ erts_print(pndp->to, pndp->to_arg, " %d", enp->creation);
+#ifdef DEBUG
+ erts_print(pndp->to, pndp->to_arg, " (refc=%ld)",
+ erts_refc_read(&enp->refc, 1));
+#endif
+ pndp->no_sysname++;
+ }
+ pndp->no_total++;
+}
+
+void erts_print_node_info(int to,
+ void *to_arg,
+ Eterm sysname,
+ int *no_sysname,
+ int *no_total)
+{
+ int lock = !ERTS_IS_CRASH_DUMPING;
+ struct pn_data pnd;
+
+ pnd.to = to;
+ pnd.to_arg = to_arg;
+ pnd.sysname = sysname;
+ pnd.no_sysname = 0;
+ pnd.no_total = 0;
+
+ if (lock)
+ erts_smp_rwmtx_rwlock(&erts_node_table_rwmtx);
+ hash_foreach(&erts_node_table, print_node, (void *) &pnd);
+ if (pnd.no_sysname != 0) {
+ erts_print(to, to_arg, "\n");
+ }
+ if (lock)
+ erts_smp_rwmtx_rwunlock(&erts_node_table_rwmtx);
+
+ if(no_sysname)
+ *no_sysname = pnd.no_sysname;
+ if(no_total)
+ *no_total = pnd.no_total;
+}
+
+/* ----------------------------------------------------------------------- */
+
+void
+erts_set_this_node(Eterm sysname, Uint creation)
+{
+ erts_smp_rwmtx_rwlock(&erts_node_table_rwmtx);
+ erts_smp_rwmtx_rwlock(&erts_dist_table_rwmtx);
+
+ (void) hash_erase(&erts_dist_table, (void *) erts_this_dist_entry);
+ erts_this_dist_entry->sysname = sysname;
+ erts_this_dist_entry->creation = creation;
+ (void) hash_put(&erts_dist_table, (void *) erts_this_dist_entry);
+
+ (void) hash_erase(&erts_node_table, (void *) erts_this_node);
+ erts_this_node->sysname = sysname;
+ erts_this_node->creation = creation;
+ (void) hash_put(&erts_node_table, (void *) erts_this_node);
+
+ erts_smp_rwmtx_rwunlock(&erts_dist_table_rwmtx);
+ erts_smp_rwmtx_rwunlock(&erts_node_table_rwmtx);
+
+}
+
+void erts_init_node_tables(void)
+{
+ HashFunctions f;
+
+ f.hash = (H_FUN) dist_table_hash;
+ f.cmp = (HCMP_FUN) dist_table_cmp;
+ f.alloc = (HALLOC_FUN) dist_table_alloc;
+ f.free = (HFREE_FUN) dist_table_free;
+
+ erts_this_dist_entry = erts_alloc(ERTS_ALC_T_DIST_ENTRY, sizeof(DistEntry));
+ dist_entries = 1;
+
+ hash_init(ERTS_ALC_T_DIST_TABLE, &erts_dist_table, "dist_table", 11, f);
+
+ erts_hidden_dist_entries = NULL;
+ erts_visible_dist_entries = NULL;
+ erts_not_connected_dist_entries = NULL;
+ erts_no_of_hidden_dist_entries = 0;
+ erts_no_of_visible_dist_entries = 0;
+ erts_no_of_not_connected_dist_entries = 0;
+
+ erts_this_dist_entry->next = NULL;
+ erts_this_dist_entry->prev = NULL;
+ erts_refc_init(&erts_this_dist_entry->refc, 1); /* erts_this_node */
+
+ erts_smp_rwmtx_init_x(&erts_this_dist_entry->rwmtx,
+ "dist_entry",
+ make_small(ERST_INTERNAL_CHANNEL_NO));
+ erts_this_dist_entry->sysname = am_Noname;
+ erts_this_dist_entry->cid = NIL;
+ erts_this_dist_entry->connection_id = 0;
+ erts_this_dist_entry->status = 0;
+ erts_this_dist_entry->flags = 0;
+ erts_this_dist_entry->version = 0;
+
+ erts_smp_mtx_init_x(&erts_this_dist_entry->lnk_mtx,
+ "dist_entry_links",
+ make_small(ERST_INTERNAL_CHANNEL_NO));
+ erts_this_dist_entry->node_links = NULL;
+ erts_this_dist_entry->nlinks = NULL;
+ erts_this_dist_entry->monitors = NULL;
+
+ erts_smp_spinlock_init_x(&erts_this_dist_entry->qlock,
+ "dist_entry_out_queue",
+ make_small(ERST_INTERNAL_CHANNEL_NO));
+ erts_this_dist_entry->qflgs = 0;
+ erts_this_dist_entry->qsize = 0;
+ erts_this_dist_entry->out_queue.first = NULL;
+ erts_this_dist_entry->out_queue.last = NULL;
+ erts_this_dist_entry->suspended.first = NULL;
+ erts_this_dist_entry->suspended.last = NULL;
+
+ erts_this_dist_entry->finalized_out_queue.first = NULL;
+ erts_this_dist_entry->finalized_out_queue.last = NULL;
+ erts_smp_atomic_init(&erts_this_dist_entry->dist_cmd_scheduled, 0);
+ erts_port_task_handle_init(&erts_this_dist_entry->dist_cmd);
+ erts_this_dist_entry->send = NULL;
+ erts_this_dist_entry->cache = NULL;
+
+ (void) hash_put(&erts_dist_table, (void *) erts_this_dist_entry);
+
+ f.hash = (H_FUN) node_table_hash;
+ f.cmp = (HCMP_FUN) node_table_cmp;
+ f.alloc = (HALLOC_FUN) node_table_alloc;
+ f.free = (HFREE_FUN) node_table_free;
+
+ hash_init(ERTS_ALC_T_NODE_TABLE, &erts_node_table, "node_table", 11, f);
+
+ erts_this_node = erts_alloc(ERTS_ALC_T_NODE_ENTRY, sizeof(ErlNode));
+ node_entries = 1;
+
+ erts_refc_init(&erts_this_node->refc, 1); /* The system itself */
+ erts_this_node->sysname = am_Noname;
+ erts_this_node->creation = 0;
+ erts_this_node->dist_entry = erts_this_dist_entry;
+
+ (void) hash_put(&erts_node_table, (void *) erts_this_node);
+
+ erts_smp_rwmtx_init(&erts_node_table_rwmtx, "node_table");
+ erts_smp_rwmtx_init(&erts_dist_table_rwmtx, "dist_table");
+
+ references_atoms_need_init = 1;
+}
+
+#ifdef ERTS_SMP
+#ifdef ERTS_ENABLE_LOCK_CHECK
+int erts_lc_is_de_rwlocked(DistEntry *dep)
+{
+ return erts_smp_lc_rwmtx_is_rwlocked(&dep->rwmtx);
+}
+int erts_lc_is_de_rlocked(DistEntry *dep)
+{
+ return erts_smp_lc_rwmtx_is_rlocked(&dep->rwmtx);
+}
+#endif
+#endif
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\
+ * The following is only supposed to be used for testing and debugging.     *
+ *                                                                          *
+ * erts_get_node_and_dist_references() returns a table of all references to *
+ * all entries in the node and dist tables. The whole system is searched    *
+ * at once. This gives a consistent view of the references, but can damage  *
+ * the real-time properties of the system.                                  *
+\* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+#include "erl_db.h"
+
+#undef INIT_AM
+#define INIT_AM(S) AM_ ## S = am_atom_put(#S, sizeof(#S) - 1)
+
+static Eterm AM_heap;
+static Eterm AM_link;
+static Eterm AM_monitor;
+static Eterm AM_process;
+static Eterm AM_port;
+static Eterm AM_ets;
+static Eterm AM_binary;
+static Eterm AM_match_spec;
+static Eterm AM_control;
+static Eterm AM_dist;
+static Eterm AM_node;
+static Eterm AM_dist_references;
+static Eterm AM_node_references;
+static Eterm AM_system;
+static Eterm AM_timer;
+#ifdef HYBRID
+static Eterm AM_processes;
+#endif
+
+static void setup_reference_table(void);
+static Eterm reference_table_term(Uint **hpp, Uint *szp);
+static void delete_reference_table(void);
+
+#if BIG_UINT_HEAP_SIZE > 3 /* 2-tuple */
+#define ID_HEAP_SIZE BIG_UINT_HEAP_SIZE
+#else
+#define ID_HEAP_SIZE 3 /* 2-tuple */
+#endif
+
+typedef struct node_referrer_ {
+ struct node_referrer_ *next;
+ int heap_ref;
+ int link_ref;
+ int monitor_ref;
+ int ets_ref;
+ int bin_ref;
+ int timer_ref;
+ int system_ref;
+ Eterm id;
+ Uint id_heap[ID_HEAP_SIZE];
+} NodeReferrer;
+
+typedef struct {
+ ErlNode *node;
+ NodeReferrer *referrers;
+} ReferredNode;
+
+typedef struct dist_referrer_ {
+ struct dist_referrer_ *next;
+ int heap_ref;
+ int node_ref;
+ int ctrl_ref;
+ Eterm id;
+ Uint creation;
+} DistReferrer;
+
+typedef struct {
+ DistEntry *dist;
+ DistReferrer *referrers;
+} ReferredDist;
+
+typedef struct inserted_bin_ {
+ struct inserted_bin_ *next;
+ Binary *bin_val;
+} InsertedBin;
+
+static ReferredNode *referred_nodes;
+static int no_referred_nodes;
+static ReferredDist *referred_dists;
+static int no_referred_dists;
+static InsertedBin *inserted_bins;
+
+Eterm
+erts_get_node_and_dist_references(struct process *proc)
+{
+ Uint *hp;
+ Uint size;
+ Eterm res;
+#ifdef DEBUG
+ Uint *endp;
+#endif
+
+ erts_smp_proc_unlock(proc, ERTS_PROC_LOCK_MAIN);
+ erts_smp_block_system(0);
+ /* No need to lock anything since we are alone... */
+
+ if (references_atoms_need_init) {
+ INIT_AM(heap);
+ INIT_AM(link);
+ INIT_AM(monitor);
+ INIT_AM(process);
+ INIT_AM(port);
+ INIT_AM(ets);
+ INIT_AM(binary);
+ INIT_AM(match_spec);
+ INIT_AM(control);
+ INIT_AM(dist);
+ INIT_AM(node);
+ INIT_AM(dist_references);
+ INIT_AM(node_references);
+ INIT_AM(timer);
+ INIT_AM(system);
+#ifdef HYBRID
+ INIT_AM(processes);
+#endif
+ references_atoms_need_init = 0;
+ }
+
+ setup_reference_table();
+
+ /* Get term size */
+ size = 0;
+ (void) reference_table_term(NULL, &size);
+
+ hp = HAlloc(proc, size);
+#ifdef DEBUG
+ ASSERT(size > 0);
+ endp = hp + size;
+#endif
+
+ /* Write term */
+ res = reference_table_term(&hp, NULL);
+
+ ASSERT(endp == hp);
+
+ delete_reference_table();
+
+ erts_smp_release_system();
+ erts_smp_proc_lock(proc, ERTS_PROC_LOCK_MAIN);
+ return res;
+}
+
+#define HEAP_REF 1
+#define LINK_REF 2
+#define ETS_REF 3
+#define BIN_REF 4
+#define NODE_REF 5
+#define CTRL_REF 6
+#define MONITOR_REF 7
+#define TIMER_REF 8
+#define SYSTEM_REF 9
+
+#define INC_TAB_SZ 10
+
+static void
+insert_dist_referrer(ReferredDist *referred_dist,
+ int type,
+ Eterm id,
+ Uint creation)
+{
+ DistReferrer *drp;
+
+ for(drp = referred_dist->referrers; drp; drp = drp->next)
+ if(id == drp->id && (type == CTRL_REF
+ || creation == drp->creation))
+ break;
+
+ if(!drp) {
+ drp = (DistReferrer *) erts_alloc(ERTS_ALC_T_NC_TMP,
+ sizeof(DistReferrer));
+ drp->next = referred_dist->referrers;
+ referred_dist->referrers = drp;
+ drp->id = id;
+ drp->creation = creation;
+ drp->heap_ref = 0;
+ drp->node_ref = 0;
+ drp->ctrl_ref = 0;
+ }
+
+ switch (type) {
+ case NODE_REF: drp->node_ref++; break;
+ case CTRL_REF: drp->ctrl_ref++; break;
+ case HEAP_REF: drp->heap_ref++; break;
+ default: ASSERT(0);
+ }
+}
+
+static void
+insert_dist_entry(DistEntry *dist, int type, Eterm id, Uint creation)
+{
+ ReferredDist *rdp = NULL;
+ int i;
+
+ for(i = 0; i < no_referred_dists; i++) {
+ if(dist == referred_dists[i].dist) {
+ rdp = &referred_dists[i];
+ break;
+ }
+ }
+
+ if(!rdp)
+ erl_exit(1,
+ "Reference to non-existing distribution table entry found!\n");
+
+ insert_dist_referrer(rdp, type, id, creation);
+}
+
+static void
+insert_node_referrer(ReferredNode *referred_node, int type, Eterm id)
+{
+ NodeReferrer *nrp;
+
+ for(nrp = referred_node->referrers; nrp; nrp = nrp->next)
+ if(EQ(id, nrp->id))
+ break;
+
+ if(!nrp) {
+ nrp = (NodeReferrer *) erts_alloc(ERTS_ALC_T_NC_TMP,
+ sizeof(NodeReferrer));
+ nrp->next = referred_node->referrers;
+ referred_node->referrers = nrp;
+ if(IS_CONST(id))
+ nrp->id = id;
+ else {
+ Uint *hp = &nrp->id_heap[0];
+ ASSERT(is_big(id) || is_tuple(id));
+ nrp->id = copy_struct(id, size_object(id), &hp, NULL);
+ }
+ nrp->heap_ref = 0;
+ nrp->link_ref = 0;
+ nrp->monitor_ref = 0;
+ nrp->ets_ref = 0;
+ nrp->bin_ref = 0;
+ nrp->timer_ref = 0;
+ nrp->system_ref = 0;
+ }
+
+ switch (type) {
+ case HEAP_REF: nrp->heap_ref++; break;
+ case LINK_REF: nrp->link_ref++; break;
+ case ETS_REF: nrp->ets_ref++; break;
+ case BIN_REF: nrp->bin_ref++; break;
+ case MONITOR_REF: nrp->monitor_ref++; break;
+ case TIMER_REF: nrp->timer_ref++; break;
+ case SYSTEM_REF: nrp->system_ref++; break;
+ default: ASSERT(0);
+ }
+}
+
+static void
+insert_node(ErlNode *node, int type, Eterm id)
+{
+ int i;
+ ReferredNode *rnp = NULL;
+ for(i = 0; i < no_referred_nodes; i++) {
+ if(node == referred_nodes[i].node) {
+ rnp = &referred_nodes[i];
+ break;
+ }
+ }
+
+ if (!rnp)
+ erl_exit(1, "Reference to non-existing node table entry found!\n");
+
+ insert_node_referrer(rnp, type, id);
+}
+
+static void
+insert_erl_node(void *venp, void *unused)
+{
+ ErlNode *enp = (ErlNode *) venp;
+
+ insert_dist_entry(enp->dist_entry, NODE_REF, enp->sysname, enp->creation);
+}
+
+struct insert_offheap2_arg {
+ int type;
+ Eterm id;
+};
+
+static void insert_offheap(ErlOffHeap *, int, Eterm);
+
+static void
+insert_offheap2(ErlOffHeap *oh, void *arg)
+{
+ struct insert_offheap2_arg *a = (struct insert_offheap2_arg *) arg;
+ insert_offheap(oh, a->type, a->id);
+}
+
+static void
+insert_offheap(ErlOffHeap *oh, int type, Eterm id)
+{
+ if(oh->externals) {
+ ExternalThing *etp = oh->externals;
+ while (etp) {
+ insert_node(etp->node, type, id);
+ etp = etp->next;
+ }
+ }
+
+ if(oh->mso) {
+ ProcBin *pb;
+ struct insert_offheap2_arg a;
+ a.type = BIN_REF;
+ for(pb = oh->mso; pb; pb = pb->next) {
+ if(IsMatchProgBinary(pb->val)) {
+ InsertedBin *ib;
+ int insert_bin = 1;
+ for (ib = inserted_bins; ib; ib = ib->next)
+ if(ib->bin_val == pb->val) {
+ insert_bin = 0;
+ break;
+ }
+ if (insert_bin) {
+ Uint id_heap[BIG_UINT_HEAP_SIZE];
+ Uint *hp = &id_heap[0];
+ InsertedBin *nib;
+ a.id = erts_bld_uint(&hp, NULL, (Uint) pb->val);
+ erts_match_prog_foreach_offheap(pb->val,
+ insert_offheap2,
+ (void *) &a);
+ nib = erts_alloc(ERTS_ALC_T_NC_TMP, sizeof(InsertedBin));
+ nib->bin_val = pb->val;
+ nib->next = inserted_bins;
+ inserted_bins = nib;
+ }
+ }
+ }
+ }
+
+#if 0
+ if(oh->funs) {
+ /* No need to */
+ }
+#endif
+}
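+
+/*
+ * Note (illustrative): off-heap structures contribute two kinds of
+ * references here -- every ExternalThing on the "externals" chain
+ * references an ErlNode directly, while ProcBins on the "mso" chain
+ * matter only when they hold compiled match programs, whose own
+ * off-heap terms are scanned recursively via insert_offheap2().
+ */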
+
+static void doit_insert_monitor(ErtsMonitor *monitor, void *p)
+{
+ Eterm *idp = p;
+ if(is_external(monitor->pid))
+ insert_node(external_thing_ptr(monitor->pid)->node, MONITOR_REF, *idp);
+ if(is_external(monitor->ref))
+ insert_node(external_thing_ptr(monitor->ref)->node, MONITOR_REF, *idp);
+}
+
+static void doit_insert_link(ErtsLink *lnk, void *p)
+{
+ Eterm *idp = p;
+ if(is_external(lnk->pid))
+ insert_node(external_thing_ptr(lnk->pid)->node, LINK_REF,
+ *idp);
+}
+
+
+static void
+insert_monitors(ErtsMonitor *monitors, Eterm id)
+{
+ erts_doforall_monitors(monitors,&doit_insert_monitor,&id);
+}
+
+static void
+insert_links(ErtsLink *lnk, Eterm id)
+{
+ erts_doforall_links(lnk,&doit_insert_link,&id);
+}
+
+static void doit_insert_link2(ErtsLink *lnk, void *p)
+{
+ Eterm *idp = p;
+ if(is_external(lnk->pid))
+ insert_node(external_thing_ptr(lnk->pid)->node, LINK_REF,
+ *idp);
+ insert_links(ERTS_LINK_ROOT(lnk), *idp);
+}
+
+static void
+insert_links2(ErtsLink *lnk, Eterm id)
+{
+ erts_doforall_links(lnk,&doit_insert_link2,&id);
+}
+
+static void
+insert_ets_table(DbTable *tab, void *unused)
+{
+ struct insert_offheap2_arg a;
+ a.type = ETS_REF;
+ a.id = tab->common.id;
+ erts_db_foreach_offheap(tab, insert_offheap2, (void *) &a);
+}
+
+static void
+insert_bif_timer(Eterm receiver, Eterm msg, ErlHeapFragment *bp, void *arg)
+{
+ if (bp) {
+ Eterm heap[3];
+ insert_offheap(&bp->off_heap,
+ TIMER_REF,
+ (is_internal_pid(receiver)
+ ? receiver
+ : TUPLE2(&heap[0], AM_process, receiver)));
+ }
+}
+
+static void
+init_referred_node(void *node, void *unused)
+{
+ referred_nodes[no_referred_nodes].node = (ErlNode *) node;
+ referred_nodes[no_referred_nodes].referrers = NULL;
+ no_referred_nodes++;
+}
+
+static void
+init_referred_dist(void *dist, void *unused)
+{
+ referred_dists[no_referred_dists].dist = (DistEntry *) dist;
+ referred_dists[no_referred_dists].referrers = NULL;
+ no_referred_dists++;
+}
+
+#ifdef ERTS_SMP
+static void
+insert_sys_msg(Eterm from, Eterm to, Eterm msg, ErlHeapFragment *bp)
+{
+ insert_offheap(&bp->off_heap, HEAP_REF, to);
+}
+#endif
+
+static void
+setup_reference_table(void)
+{
+ ErlHeapFragment *hfp;
+ DistEntry *dep;
+ HashInfo hi;
+ int i;
+ Eterm heap[3];
+
+ inserted_bins = NULL;
+
+ hash_get_info(&hi, &erts_node_table);
+ referred_nodes = erts_alloc(ERTS_ALC_T_NC_TMP,
+ hi.objs*sizeof(ReferredNode));
+ no_referred_nodes = 0;
+ hash_foreach(&erts_node_table, init_referred_node, NULL);
+ ASSERT(no_referred_nodes == hi.objs);
+
+ hash_get_info(&hi, &erts_dist_table);
+ referred_dists = erts_alloc(ERTS_ALC_T_NC_TMP,
+ hi.objs*sizeof(ReferredDist));
+ no_referred_dists = 0;
+ hash_foreach(&erts_dist_table, init_referred_dist, NULL);
+ ASSERT(no_referred_dists == hi.objs);
+
+ /* Go through the whole system and build a table of all references
+ to ErlNode and DistEntry structures */
+
+ insert_node(erts_this_node,
+ SYSTEM_REF,
+ TUPLE2(&heap[0], AM_system, am_undefined));
+
+#ifdef HYBRID
+ /* Insert Heap */
+ insert_offheap(&erts_global_offheap,
+ HEAP_REF,
+ TUPLE2(&heap[0], AM_processes, am_undefined));
+#endif
+
+ /* Insert all processes */
+ for (i = 0; i < erts_max_processes; i++)
+ if (process_tab[i]) {
+ ErlMessage *msg;
+ /* Insert Heap */
+ insert_offheap(&(process_tab[i]->off_heap),
+ HEAP_REF,
+ process_tab[i]->id);
+ /* Insert message buffers */
+ for(hfp = process_tab[i]->mbuf; hfp; hfp = hfp->next)
+ insert_offheap(&(hfp->off_heap),
+ HEAP_REF,
+ process_tab[i]->id);
+ /* Insert message queue buffers */
+ for (msg = process_tab[i]->msg.first; msg; msg = msg->next) {
+ ErlHeapFragment *heap_frag = NULL;
+ if (msg->data.attached) {
+ if (is_value(ERL_MESSAGE_TERM(msg)))
+ heap_frag = msg->data.heap_frag;
+ else {
+ if (msg->data.dist_ext->dep)
+ insert_dist_entry(msg->data.dist_ext->dep,
+ HEAP_REF, process_tab[i]->id, 0);
+ if (is_not_nil(ERL_MESSAGE_TOKEN(msg)))
+ heap_frag = erts_dist_ext_trailer(msg->data.dist_ext);
+ }
+ }
+ if (heap_frag)
+ insert_offheap(&(heap_frag->off_heap),
+ HEAP_REF,
+ process_tab[i]->id);
+ }
+#ifdef ERTS_SMP
+ for (msg = process_tab[i]->msg_inq.first; msg; msg = msg->next) {
+ ErlHeapFragment *heap_frag = NULL;
+ if (msg->data.attached) {
+ if (is_value(ERL_MESSAGE_TERM(msg)))
+ heap_frag = msg->data.heap_frag;
+ else {
+ if (msg->data.dist_ext->dep)
+ insert_dist_entry(msg->data.dist_ext->dep,
+ HEAP_REF, process_tab[i]->id, 0);
+ if (is_not_nil(ERL_MESSAGE_TOKEN(msg)))
+ heap_frag = erts_dist_ext_trailer(msg->data.dist_ext);
+ }
+ }
+ if (heap_frag)
+ insert_offheap(&(heap_frag->off_heap),
+ HEAP_REF,
+ process_tab[i]->id);
+ }
+#endif
+ /* Insert links */
+ if(process_tab[i]->nlinks)
+ insert_links(process_tab[i]->nlinks, process_tab[i]->id);
+ if(process_tab[i]->monitors)
+ insert_monitors(process_tab[i]->monitors, process_tab[i]->id);
+ /* Insert controller */
+ {
+ DistEntry *dep = ERTS_PROC_GET_DIST_ENTRY(process_tab[i]);
+ if (dep)
+ insert_dist_entry(dep, CTRL_REF, process_tab[i]->id, 0);
+ }
+ }
+
+#ifdef ERTS_SMP
+ erts_foreach_sys_msg_in_q(insert_sys_msg);
+#endif
+
+ /* Insert all ports */
+ for (i = 0; i < erts_max_ports; i++) {
+ if (erts_port[i].status & ERTS_PORT_SFLGS_DEAD)
+ continue;
+
+ /* Insert links */
+ if(erts_port[i].nlinks)
+ insert_links(erts_port[i].nlinks, erts_port[i].id);
+ /* Insert port data */
+ for(hfp = erts_port[i].bp; hfp; hfp = hfp->next)
+ insert_offheap(&(hfp->off_heap), HEAP_REF, erts_port[i].id);
+ /* Insert controller */
+ if (erts_port[i].dist_entry)
+ insert_dist_entry(erts_port[i].dist_entry,
+ CTRL_REF,
+ erts_port[i].id,
+ 0);
+ }
+
+ { /* Add binaries stored elsewhere ... */
+ ErlOffHeap oh;
+ ProcBin pb[2] = {{0},{0}};
+ ProcBin *mso = NULL;
+ int i = 0;
+ Binary *default_match_spec;
+ Binary *default_meta_match_spec;
+
+ /* Only the ProcBin members val and next will be inspected
+ (by insert_offheap()) */
+#undef ADD_BINARY
+#define ADD_BINARY(Bin) \
+ if ((Bin)) { \
+ pb[i].val = (Bin); \
+ pb[i].next = mso; \
+ mso = &pb[i]; \
+ i++; \
+ }
+
+ erts_get_default_trace_pattern(NULL,
+ &default_match_spec,
+ &default_meta_match_spec,
+ NULL,
+ NULL);
+
+ ADD_BINARY(default_match_spec);
+ ADD_BINARY(default_meta_match_spec);
+
+ oh.mso = mso;
+ oh.externals = NULL;
+#ifndef HYBRID /* FIND ME! */
+ oh.funs = NULL;
+#endif
+ insert_offheap(&oh, BIN_REF, AM_match_spec);
+#undef ADD_BINARY
+ }
+
+ /* Insert all dist links */
+
+ for(dep = erts_visible_dist_entries; dep; dep = dep->next) {
+ if(dep->nlinks)
+ insert_links2(dep->nlinks, dep->sysname);
+ if(dep->node_links)
+ insert_links(dep->node_links, dep->sysname);
+ if(dep->monitors)
+ insert_monitors(dep->monitors, dep->sysname);
+ }
+
+ for(dep = erts_hidden_dist_entries; dep; dep = dep->next) {
+ if(dep->nlinks)
+ insert_links2(dep->nlinks, dep->sysname);
+ if(dep->node_links)
+ insert_links(dep->node_links, dep->sysname);
+ if(dep->monitors)
+ insert_monitors(dep->monitors, dep->sysname);
+ }
+
+ /* Dist entries that are not connected should not have any links,
+ but we inspect them anyway */
+ for(dep = erts_not_connected_dist_entries; dep; dep = dep->next) {
+ if(dep->nlinks)
+ insert_links2(dep->nlinks, dep->sysname);
+ if(dep->node_links)
+ insert_links(dep->node_links, dep->sysname);
+ if(dep->monitors)
+ insert_monitors(dep->monitors, dep->sysname);
+ }
+
+ /* Insert all ets tables */
+ erts_db_foreach_table(insert_ets_table, NULL);
+
+ /* Insert all bif timers */
+ erts_bif_timer_foreach(insert_bif_timer, NULL);
+
+ /* Insert node table (references to dist) */
+ hash_foreach(&erts_node_table, insert_erl_node, NULL);
+}
+
+/*
+ Returns an Erlang term in this format:
+
+ {{node_references,
+ [{{Node, Creation}, Refc,
+ [{{ReferrerType, ID},
+ [{ReferenceType,References},
+ '...']},
+ '...']},
+ '...']},
+ {dist_references,
+ [{Node, Refc,
+ [{{ReferrerType, ID},
+ [{ReferenceType,References},
+ '...']},
+ '...']},
+ '...']}}
+ */
+
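+/*
+ * Illustrative example of such a term (all values invented):
+ *
+ * {{node_references,
+ *   [{{'a@host', 2}, 3,
+ *     [{{process, <0.42.0>},
+ *       [{link, 1}, {heap, 2}]}]}]},
+ *  {dist_references,
+ *   [{'a@host', 3,
+ *     [{{port, #Port<0.5>}, [{control, 1}]},
+ *      {{node, {'a@host', 2}}, [{node, 1}]}]}]}}
+ */
+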
+static Eterm
+reference_table_term(Uint **hpp, Uint *szp)
+{
+#undef MK_2TUP
+#undef MK_3TUP
+#undef MK_CONS
+#undef MK_UINT
+#define MK_2TUP(E1, E2) erts_bld_tuple(hpp, szp, 2, (E1), (E2))
+#define MK_3TUP(E1, E2, E3) erts_bld_tuple(hpp, szp, 3, (E1), (E2), (E3))
+#define MK_CONS(CAR, CDR) erts_bld_cons(hpp, szp, (CAR), (CDR))
+#define MK_UINT(UI) erts_bld_uint(hpp, szp, (UI))
+ int i;
+ Eterm tup;
+ Eterm tup2;
+ Eterm nl = NIL;
+ Eterm dl = NIL;
+ Eterm nrid;
+
+ for(i = 0; i < no_referred_nodes; i++) {
+ NodeReferrer *nrp;
+ Eterm nril = NIL;
+
+ for(nrp = referred_nodes[i].referrers; nrp; nrp = nrp->next) {
+ Eterm nrl = NIL;
+ /* NodeReferenceList = [{ReferenceType,References}] */
+ if(nrp->heap_ref) {
+ tup = MK_2TUP(AM_heap, MK_UINT(nrp->heap_ref));
+ nrl = MK_CONS(tup, nrl);
+ }
+ if(nrp->link_ref) {
+ tup = MK_2TUP(AM_link, MK_UINT(nrp->link_ref));
+ nrl = MK_CONS(tup, nrl);
+ }
+ if(nrp->monitor_ref) {
+ tup = MK_2TUP(AM_monitor, MK_UINT(nrp->monitor_ref));
+ nrl = MK_CONS(tup, nrl);
+ }
+ if(nrp->ets_ref) {
+ tup = MK_2TUP(AM_ets, MK_UINT(nrp->ets_ref));
+ nrl = MK_CONS(tup, nrl);
+ }
+ if(nrp->bin_ref) {
+ tup = MK_2TUP(AM_binary, MK_UINT(nrp->bin_ref));
+ nrl = MK_CONS(tup, nrl);
+ }
+ if(nrp->timer_ref) {
+ tup = MK_2TUP(AM_timer, MK_UINT(nrp->timer_ref));
+ nrl = MK_CONS(tup, nrl);
+ }
+ if(nrp->system_ref) {
+ tup = MK_2TUP(AM_system, MK_UINT(nrp->system_ref));
+ nrl = MK_CONS(tup, nrl);
+ }
+
+ nrid = nrp->id;
+ if (!IS_CONST(nrp->id)) {
+
+ Uint nrid_sz = size_object(nrp->id);
+ if (szp)
+ *szp += nrid_sz;
+ if (hpp)
+ nrid = copy_struct(nrp->id, nrid_sz, hpp, NULL);
+ }
+
+ if (is_internal_pid(nrid) || nrid == am_error_logger) {
+ ASSERT(!nrp->ets_ref && !nrp->bin_ref && !nrp->system_ref);
+ tup = MK_2TUP(AM_process, nrid);
+ }
+ else if (is_tuple(nrid)) {
+ Eterm *t;
+ ASSERT(!nrp->ets_ref && !nrp->bin_ref);
+ t = tuple_val(nrid);
+ ASSERT(2 == arityval(t[0]));
+ tup = MK_2TUP(t[1], t[2]);
+ }
+ else if(is_internal_port(nrid)) {
+ ASSERT(!nrp->heap_ref && !nrp->ets_ref && !nrp->bin_ref
+ && !nrp->timer_ref && !nrp->system_ref);
+ tup = MK_2TUP(AM_port, nrid);
+ }
+ else if(nrp->ets_ref) {
+ ASSERT(!nrp->heap_ref && !nrp->link_ref &&
+ !nrp->monitor_ref && !nrp->bin_ref
+ && !nrp->timer_ref && !nrp->system_ref);
+ tup = MK_2TUP(AM_ets, nrid);
+ }
+ else if(nrp->bin_ref) {
+ ASSERT(is_small(nrid) || is_big(nrid));
+ ASSERT(!nrp->heap_ref && !nrp->ets_ref && !nrp->link_ref &&
+ !nrp->monitor_ref && !nrp->timer_ref
+ && !nrp->system_ref);
+ tup = MK_2TUP(AM_match_spec, nrid);
+ }
+ else {
+ ASSERT(!nrp->heap_ref && !nrp->ets_ref && !nrp->bin_ref);
+ ASSERT(is_atom(nrid));
+ tup = MK_2TUP(AM_dist, nrid);
+ }
+ tup = MK_2TUP(tup, nrl);
+ /* NodeReferenceIdList = [{{ReferrerType, ID}, NodeReferenceList}] */
+ nril = MK_CONS(tup, nril);
+ }
+
+ /* NodeList = [{{Node, Creation}, Refc, NodeReferenceIdList}] */
+
+ tup = MK_2TUP(referred_nodes[i].node->sysname,
+ MK_UINT(referred_nodes[i].node->creation));
+ tup = MK_3TUP(tup, MK_UINT(erts_refc_read(&referred_nodes[i].node->refc, 1)), nril);
+ nl = MK_CONS(tup, nl);
+ }
+
+ for(i = 0; i < no_referred_dists; i++) {
+ DistReferrer *drp;
+ Eterm dril = NIL;
+ for(drp = referred_dists[i].referrers; drp; drp = drp->next) {
+ Eterm drl = NIL;
+
+ /* DistReferenceList = [{ReferenceType,References}] */
+ if(drp->node_ref) {
+ tup = MK_2TUP(AM_node, MK_UINT(drp->node_ref));
+ drl = MK_CONS(tup, drl);
+ }
+ if(drp->ctrl_ref) {
+ tup = MK_2TUP(AM_control, MK_UINT(drp->ctrl_ref));
+ drl = MK_CONS(tup, drl);
+ }
+ if(drp->heap_ref) {
+ tup = MK_2TUP(AM_heap, MK_UINT(drp->heap_ref));
+ drl = MK_CONS(tup, drl);
+ }
+
+ if (is_internal_pid(drp->id)) {
+ ASSERT(!drp->node_ref);
+ tup = MK_2TUP(AM_process, drp->id);
+ }
+ else if(is_internal_port(drp->id)) {
+ ASSERT(drp->ctrl_ref && !drp->node_ref);
+ tup = MK_2TUP(AM_port, drp->id);
+ }
+ else {
+ ASSERT(!drp->ctrl_ref && drp->node_ref);
+ ASSERT(is_atom(drp->id));
+ tup = MK_2TUP(drp->id, MK_UINT(drp->creation));
+ tup = MK_2TUP(AM_node, tup);
+ }
+
+ tup = MK_2TUP(tup, drl);
+
+ /* DistReferenceIdList =
+ [{{ReferrerType, ID}, DistReferenceList}] */
+ dril = MK_CONS(tup, dril);
+
+ }
+
+ /* DistList = [{Dist, Refc, ReferenceIdList}] */
+ tup = MK_3TUP(referred_dists[i].dist->sysname,
+ MK_UINT(erts_refc_read(&referred_dists[i].dist->refc, 1)),
+ dril);
+ dl = MK_CONS(tup, dl);
+ }
+
+ /* {{node_references, NodeList}, {dist_references, DistList}} */
+
+ tup = MK_2TUP(AM_node_references, nl);
+ tup2 = MK_2TUP(AM_dist_references, dl);
+ tup = MK_2TUP(tup, tup2);
+
+ return tup;
+#undef MK_2TUP
+#undef MK_3TUP
+#undef MK_CONS
+#undef MK_UINT
+
+}
+
+static void
+delete_reference_table(void)
+{
+ Uint i;
+ for(i = 0; i < no_referred_nodes; i++) {
+ NodeReferrer *nrp;
+ NodeReferrer *tnrp;
+ nrp = referred_nodes[i].referrers;
+ while(nrp) {
+ tnrp = nrp;
+ nrp = nrp->next;
+ erts_free(ERTS_ALC_T_NC_TMP, (void *) tnrp);
+ }
+ }
+ if (referred_nodes)
+ erts_free(ERTS_ALC_T_NC_TMP, (void *) referred_nodes);
+
+ for(i = 0; i < no_referred_dists; i++) {
+ DistReferrer *drp;
+ DistReferrer *tdrp;
+ drp = referred_dists[i].referrers;
+ while(drp) {
+ tdrp = drp;
+ drp = drp->next;
+ erts_free(ERTS_ALC_T_NC_TMP, (void *) tdrp);
+ }
+ }
+ if (referred_dists)
+ erts_free(ERTS_ALC_T_NC_TMP, (void *) referred_dists);
+ while(inserted_bins) {
+ InsertedBin *ib = inserted_bins;
+ inserted_bins = inserted_bins->next;
+ erts_free(ERTS_ALC_T_NC_TMP, (void *)ib);
+ }
+}
+
diff --git a/erts/emulator/beam/erl_node_tables.h b/erts/emulator/beam/erl_node_tables.h
new file mode 100644
index 0000000000..c48dac6219
--- /dev/null
+++ b/erts/emulator/beam/erl_node_tables.h
@@ -0,0 +1,261 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2001-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+#ifndef ERL_NODE_TABLES_H__
+#define ERL_NODE_TABLES_H__
+
+/*
+ * The "node_tables module" contain two (hash) tables: the node_table
+ * and the dist_table.
+ *
+ * The elements of the node_table represents a specific incarnation of
+ * an Erlang node and has {Nodename, Creation} pairs as keys. Elements
+ * in the node_table are referred to from node containers (see
+ * node_container_utils.h).
+ *
+ * The elements of the dist_table represents a (potential) connection
+ * to an Erlang node and has Nodename as key. Elements in the
+ * dist_table are either referred to from elements in the node_table
+ * or from the process or port structure of the entity controlling
+ * the connection.
+ *
+ * Both tables are garbage collected by reference counting.
+ */
+
+#include "sys.h"
+#include "hash.h"
+#include "erl_process.h"
+#include "erl_monitors.h"
+#include "erl_smp.h"
+#define ERTS_PORT_TASK_ONLY_BASIC_TYPES__
+#include "erl_port_task.h"
+#undef ERTS_PORT_TASK_ONLY_BASIC_TYPES__
+
+#define ERST_INTERNAL_CHANNEL_NO 0
+
+#define ERTS_DE_SFLG_CONNECTED (((Uint32) 1) << 0)
+#define ERTS_DE_SFLG_EXITING (((Uint32) 1) << 1)
+
+#define ERTS_DE_SFLGS_ALL (ERTS_DE_SFLG_CONNECTED \
+ | ERTS_DE_SFLG_EXITING)
+
+#define ERTS_DE_QFLG_BUSY (((Uint32) 1) << 0)
+#define ERTS_DE_QFLG_EXIT (((Uint32) 1) << 1)
+
+#define ERTS_DE_QFLGS_ALL (ERTS_DE_QFLG_BUSY \
+ | ERTS_DE_QFLG_EXIT)
+
+#ifdef ARCH_64
+#define ERTS_DIST_OUTPUT_BUF_DBG_PATTERN ((Uint) 0xf713f713f713f713UL)
+#else
+#define ERTS_DIST_OUTPUT_BUF_DBG_PATTERN ((Uint) 0xf713f713)
+#endif
+
+typedef struct ErtsDistOutputBuf_ ErtsDistOutputBuf;
+struct ErtsDistOutputBuf_ {
+#ifdef DEBUG
+ Uint dbg_pattern;
+#endif
+ ErtsDistOutputBuf *next;
+ byte *extp;
+ byte *ext_endp;
+ byte data[1];
+};
+
+typedef struct {
+ ErtsDistOutputBuf *first;
+ ErtsDistOutputBuf *last;
+} ErtsDistOutputQueue;
+
+struct ErtsProcList_;
+typedef struct {
+ struct ErtsProcList_ *first;
+ struct ErtsProcList_ *last;
+} ErtsDistSuspended;
+
+/*
+ * Lock order:
+ * 1. dist_entry->rwmtx
+ * 2. erts_node_table_rwmtx
+ * 3. erts_dist_table_rwmtx
+ *
+ * Lock mutexes with lower numbers before mutexes with higher numbers and
+ * unlock mutexes with higher numbers before mutexes with lower numbers.
+ */
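+
+/*
+ * Example of a legal nesting under this order (illustrative; cf.
+ * erts_set_dist_entry_connected() in erl_node_tables.c):
+ *
+ *     erts_smp_rwmtx_rwlock(&dep->rwmtx);              -- level 1
+ *     erts_smp_rwmtx_rwlock(&erts_dist_table_rwmtx);   -- level 3
+ *     ...
+ *     erts_smp_rwmtx_rwunlock(&erts_dist_table_rwmtx);
+ *     erts_smp_rwmtx_rwunlock(&dep->rwmtx);
+ */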
+
+struct erl_link;
+struct port;
+
+typedef struct dist_entry_ {
+ HashBucket hash_bucket; /* Hash bucket */
+ struct dist_entry_ *next; /* Next entry in dist_table (not sorted) */
+ struct dist_entry_ *prev; /* Previous entry in dist_table (not sorted) */
+ erts_refc_t refc; /* Reference count */
+
+ erts_smp_rwmtx_t rwmtx; /* Protects all fields below until lnk_mtx. */
+ Eterm sysname; /* name@host atom for efficiency */
+ Uint32 creation; /* creation of connected node */
+ Eterm cid; /* connection handler (pid or port), NIL == free */
+ Uint32 connection_id; /* Connection id incremented on connect */
+ Uint32 status; /* Slot status, like exiting reserved etc */
+ Uint32 flags; /* Distribution flags, like hidden,
+ atom cache etc. */
+ unsigned long version; /* Protocol version */
+
+
+ erts_smp_mtx_t lnk_mtx; /* Protects node_links, nlinks, and
+ monitors. */
+ ErtsLink *node_links; /* In a dist entry, node links are kept
+ in a separate tree, while they are
+ collocated with the ordinary link tree
+ for processes. It's not due to confusion,
+ it's because the link tree for the dist
+ entry is in two levels, see erl_monitors.h
+ */
+ ErtsLink *nlinks; /* Link tree with subtrees */
+ ErtsMonitor *monitors; /* Monitor tree */
+
+ erts_smp_spinlock_t qlock; /* Protects qflgs and out_queue */
+ Uint32 qflgs;
+ Sint qsize;
+ ErtsDistOutputQueue out_queue;
+ ErtsDistSuspended suspended;
+
+ ErtsDistOutputQueue finalized_out_queue;
+ erts_smp_atomic_t dist_cmd_scheduled;
+ ErtsPortTaskHandle dist_cmd;
+
+ Uint (*send)(struct port *prt, ErtsDistOutputBuf *obuf);
+
+ struct cache* cache; /* The atom cache */
+} DistEntry;
+
+typedef struct erl_node_ {
+ HashBucket hash_bucket; /* Hash bucket */
+ erts_refc_t refc; /* Reference count */
+ Eterm sysname; /* name@host atom for efficiency */
+ Uint32 creation; /* Creation */
+ DistEntry *dist_entry; /* Corresponding dist entry */
+} ErlNode;
+
+
+extern Hash erts_dist_table;
+extern Hash erts_node_table;
+extern erts_smp_rwmtx_t erts_dist_table_rwmtx;
+extern erts_smp_rwmtx_t erts_node_table_rwmtx;
+
+extern DistEntry *erts_hidden_dist_entries;
+extern DistEntry *erts_visible_dist_entries;
+extern DistEntry *erts_not_connected_dist_entries;
+extern Sint erts_no_of_hidden_dist_entries;
+extern Sint erts_no_of_visible_dist_entries;
+extern Sint erts_no_of_not_connected_dist_entries;
+
+extern DistEntry *erts_this_dist_entry;
+extern ErlNode *erts_this_node;
+
+DistEntry *erts_channel_no_to_dist_entry(Uint);
+DistEntry *erts_sysname_to_connected_dist_entry(Eterm);
+DistEntry *erts_find_or_insert_dist_entry(Eterm);
+DistEntry *erts_find_dist_entry(Eterm);
+void erts_delete_dist_entry(DistEntry *);
+Uint erts_dist_table_size(void);
+void erts_dist_table_info(int, void *);
+void erts_set_dist_entry_not_connected(DistEntry *);
+void erts_set_dist_entry_connected(DistEntry *, Eterm, Uint);
+ErlNode *erts_find_or_insert_node(Eterm, Uint);
+void erts_delete_node(ErlNode *);
+void erts_set_this_node(Eterm, Uint);
+Uint erts_node_table_size(void);
+void erts_init_node_tables(void);
+void erts_node_table_info(int, void *);
+void erts_print_node_info(int, void *, Eterm, int*, int*);
+Eterm erts_get_node_and_dist_references(struct process *);
+#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK)
+int erts_lc_is_de_rwlocked(DistEntry *);
+int erts_lc_is_de_rlocked(DistEntry *);
+#endif
+
+ERTS_GLB_INLINE void erts_deref_dist_entry(DistEntry *dep);
+ERTS_GLB_INLINE void erts_deref_node_entry(ErlNode *np);
+ERTS_GLB_INLINE void erts_smp_de_rlock(DistEntry *dep);
+ERTS_GLB_INLINE void erts_smp_de_runlock(DistEntry *dep);
+ERTS_GLB_INLINE void erts_smp_de_rwlock(DistEntry *dep);
+ERTS_GLB_INLINE void erts_smp_de_rwunlock(DistEntry *dep);
+ERTS_GLB_INLINE void erts_smp_de_links_lock(DistEntry *dep);
+ERTS_GLB_INLINE void erts_smp_de_links_unlock(DistEntry *dep);
+
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+
+ERTS_GLB_INLINE void
+erts_deref_dist_entry(DistEntry *dep)
+{
+ ASSERT(dep);
+ if (erts_refc_dectest(&dep->refc, 0) == 0)
+ erts_delete_dist_entry(dep);
+}
+
+ERTS_GLB_INLINE void
+erts_deref_node_entry(ErlNode *np)
+{
+ ASSERT(np);
+ if (erts_refc_dectest(&np->refc, 0) == 0)
+ erts_delete_node(np);
+}
+
+ERTS_GLB_INLINE void
+erts_smp_de_rlock(DistEntry *dep)
+{
+ erts_smp_rwmtx_rlock(&dep->rwmtx);
+}
+
+ERTS_GLB_INLINE void
+erts_smp_de_runlock(DistEntry *dep)
+{
+ erts_smp_rwmtx_runlock(&dep->rwmtx);
+}
+
+ERTS_GLB_INLINE void
+erts_smp_de_rwlock(DistEntry *dep)
+{
+ erts_smp_rwmtx_rwlock(&dep->rwmtx);
+}
+
+ERTS_GLB_INLINE void
+erts_smp_de_rwunlock(DistEntry *dep)
+{
+ erts_smp_rwmtx_rwunlock(&dep->rwmtx);
+}
+
+ERTS_GLB_INLINE void
+erts_smp_de_links_lock(DistEntry *dep)
+{
+ erts_smp_mtx_lock(&dep->lnk_mtx);
+}
+
+ERTS_GLB_INLINE void
+erts_smp_de_links_unlock(DistEntry *dep)
+{
+ erts_smp_mtx_unlock(&dep->lnk_mtx);
+}
+
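+/*
+ * Usage sketch (illustrative; "connected" is a hypothetical local):
+ * read-side critical sections pair the rlock/runlock helpers around
+ * reads of the fields guarded by rwmtx:
+ *
+ *     erts_smp_de_rlock(dep);
+ *     connected = (dep->status & ERTS_DE_SFLG_CONNECTED) != 0;
+ *     erts_smp_de_runlock(dep);
+ */
+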
+#endif /* #if ERTS_GLB_INLINE_INCL_FUNC_DEF */
+
+
+#endif
diff --git a/erts/emulator/beam/erl_obsolete.c b/erts/emulator/beam/erl_obsolete.c
new file mode 100644
index 0000000000..9c5a7c7ff9
--- /dev/null
+++ b/erts/emulator/beam/erl_obsolete.c
@@ -0,0 +1,186 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2004-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+
+#include "sys.h"
+#include "erl_driver.h"
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\
+ *                                                                           *
+ * ------------------------- OBSOLETE! DO NOT USE! ------------------------- *
+ *                                                                           *
+\* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/* cut from ../obsolete/driver.h (since it doesn't mix well with other
+ * headers from the emulator).
+ */
+#ifdef __WIN32__
+#ifdef CONST
+# undef CONST
+#endif
+#endif
+
+#if ((defined(__STDC__) || defined(SABER)) && !defined(NO_PROTOTYPE)) || defined(__cplusplus) || defined(USE_PROTOTYPE)
+# define _USING_PROTOTYPES_ 1
+# define _ANSI_ARGS_(x) x
+# define CONST const
+#else
+# define _ANSI_ARGS_(x) ()
+# define CONST
+#endif
+
+typedef void* erl_mutex_t;
+typedef void* erl_cond_t;
+typedef void* erl_thread_t;
+
+EXTERN erl_mutex_t erts_mutex_create _ANSI_ARGS_((void));
+EXTERN int erts_mutex_destroy _ANSI_ARGS_((erl_mutex_t));
+EXTERN int erts_mutex_lock _ANSI_ARGS_((erl_mutex_t));
+EXTERN int erts_mutex_unlock _ANSI_ARGS_((erl_mutex_t));
+
+EXTERN erl_cond_t erts_cond_create _ANSI_ARGS_((void));
+EXTERN int erts_cond_destroy _ANSI_ARGS_((erl_cond_t));
+EXTERN int erts_cond_signal _ANSI_ARGS_((erl_cond_t));
+EXTERN int erts_cond_broadcast _ANSI_ARGS_((erl_cond_t));
+EXTERN int erts_cond_wait _ANSI_ARGS_((erl_cond_t, erl_mutex_t));
+EXTERN int erts_cond_timedwait _ANSI_ARGS_((erl_cond_t, erl_mutex_t, long));
+
+EXTERN int erts_thread_create _ANSI_ARGS_((erl_thread_t*,
+ void* (*func)(void*),
+ void* arg,
+ int detached));
+EXTERN erl_thread_t erts_thread_self _ANSI_ARGS_((void));
+EXTERN void erts_thread_exit _ANSI_ARGS_((void*));
+EXTERN int erts_thread_join _ANSI_ARGS_((erl_thread_t, void**));
+EXTERN int erts_thread_kill _ANSI_ARGS_((erl_thread_t));
+
+/*
+ * These functions implement the thread interface in ../obsolete/driver.h.
+ * Do *not* use this interface! Within the emulator, use the erl_threads.h,
+ * erl_smp.h, or ethread.h interface. From a driver, use the thread
+ * interface in erl_driver.h.
+ */
+
+erl_mutex_t
+erts_mutex_create(void)
+{
+ return (erl_mutex_t) erl_drv_mutex_create(NULL);
+}
+
+int
+erts_mutex_destroy(erl_mutex_t mtx)
+{
+ erl_drv_mutex_destroy((ErlDrvMutex *) mtx);
+ return 0;
+}
+
+int
+erts_mutex_lock(erl_mutex_t mtx)
+{
+ erl_drv_mutex_lock((ErlDrvMutex *) mtx);
+ return 0;
+}
+
+int
+erts_mutex_unlock(erl_mutex_t mtx)
+{
+ erl_drv_mutex_unlock((ErlDrvMutex *) mtx);
+ return 0;
+}
+
+erl_cond_t
+erts_cond_create(void)
+{
+ return (erl_cond_t) erl_drv_cond_create(NULL);
+}
+
+int
+erts_cond_destroy(erl_cond_t cnd)
+{
+ erl_drv_cond_destroy((ErlDrvCond *) cnd);
+ return 0;
+}
+
+
+int
+erts_cond_signal(erl_cond_t cnd)
+{
+ erl_drv_cond_signal((ErlDrvCond *) cnd);
+ return 0;
+}
+
+int
+erts_cond_broadcast(erl_cond_t cnd)
+{
+ erl_drv_cond_broadcast((ErlDrvCond *) cnd);
+ return 0;
+}
+
+
+int
+erts_cond_wait(erl_cond_t cnd, erl_mutex_t mtx)
+{
+ erl_drv_cond_wait((ErlDrvCond *) cnd, (ErlDrvMutex *) mtx);
+ return 0;
+}
+
+int
+erts_cond_timedwait(erl_cond_t cnd, erl_mutex_t mtx, long ms)
+{
+ return ENOTSUP;
+}
+
+int
+erts_thread_create(erl_thread_t *tid,
+ void* (*func)(void*),
+ void* arg,
+ int detached)
+{
+ if (detached)
+ return ENOTSUP;
+ return erl_drv_thread_create(NULL, (ErlDrvTid *) tid, func, arg, NULL);
+}
+
+erl_thread_t
+erts_thread_self(void)
+{
+ return (erl_thread_t) erl_drv_thread_self();
+}
+
+void
+erts_thread_exit(void *res)
+{
+ erl_drv_thread_exit(res);
+}
+
+int
+erts_thread_join(erl_thread_t tid, void **respp)
+{
+ return erl_drv_thread_join((ErlDrvTid) tid, respp);
+}
+
+int
+erts_thread_kill(erl_thread_t tid)
+{
+ return ENOTSUP;
+}
+
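+/* For comparison only (not part of the obsolete interface above): the
+ * same create/lock/join pattern written against the supported
+ * erl_driver.h API. The names my_lock, my_worker, and start_worker are
+ * hypothetical; the #if 0 keeps this sketch out of the build.
+ */
+#if 0
+static ErlDrvMutex *my_lock;
+
+static void *my_worker(void *arg)
+{
+    erl_drv_mutex_lock(my_lock);
+    /* ... touch state shared with the emulator thread ... */
+    erl_drv_mutex_unlock(my_lock);
+    return NULL;
+}
+
+static int start_worker(ErlDrvTid *tid)
+{
+    my_lock = erl_drv_mutex_create("my_lock");
+    return erl_drv_thread_create("my_worker", tid, my_worker, NULL, NULL);
+}
+#endif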
diff --git a/erts/emulator/beam/erl_port_task.c b/erts/emulator/beam/erl_port_task.c
new file mode 100644
index 0000000000..0fb264a53c
--- /dev/null
+++ b/erts/emulator/beam/erl_port_task.c
@@ -0,0 +1,1100 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2006-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Description: Scheduling of port tasks
+ *
+ * Author: Rickard Green
+ */
+
+#define ERL_PORT_TASK_C__
+
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+
+#include "global.h"
+#include "erl_port_task.h"
+#include "dist.h"
+
+#if defined(DEBUG) && 0
+#define HARD_DEBUG
+#endif
+
+/*
+ * Costs in reductions for some port operations.
+ */
+#define ERTS_PORT_REDS_EXECUTE 0
+#define ERTS_PORT_REDS_FREE 50
+#define ERTS_PORT_REDS_TIMEOUT 200
+#define ERTS_PORT_REDS_INPUT 200
+#define ERTS_PORT_REDS_OUTPUT 200
+#define ERTS_PORT_REDS_EVENT 200
+#define ERTS_PORT_REDS_TERMINATE 100
+
+
+#define ERTS_PORT_TASK_INVALID_PORT(P, ID) \
+ ((erts_port_status_get((P)) & ERTS_PORT_SFLGS_DEAD) || (P)->id != (ID))
+
+#define ERTS_PORT_IS_IN_RUNQ(RQ, P) \
+ ((P)->sched.next || (P)->sched.prev || (RQ)->ports.start == (P))
+
+#define ERTS_PORT_NOT_IN_RUNQ(P) \
+do { \
+ (P)->sched.prev = NULL; \
+ (P)->sched.next = NULL; \
+} while (0)
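+/* A port is in a run queue iff it has a 'sched' link neighbour or is the
+ * queue's first element; clearing both links marks it as dequeued. */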
+
+erts_smp_atomic_t erts_port_task_outstanding_io_tasks;
+
+struct ErtsPortTaskQueue_ {
+ ErtsPortTask *first;
+ ErtsPortTask *last;
+ Port *port;
+};
+
+struct ErtsPortTask_ {
+ ErtsPortTask *prev;
+ ErtsPortTask *next;
+ ErtsPortTaskQueue *queue;
+ ErtsPortTaskHandle *handle;
+ ErtsPortTaskType type;
+ ErlDrvEvent event;
+ ErlDrvEventData event_data;
+};
+
+#ifdef HARD_DEBUG
+#define ERTS_PT_CHK_PORTQ(RQ) check_port_queue((RQ), NULL, 0)
+#define ERTS_PT_CHK_PRES_PORTQ(RQ, PP) check_port_queue((RQ), (PP), -1)
+#define ERTS_PT_CHK_IN_PORTQ(RQ, PP) check_port_queue((RQ), (PP), 1)
+#define ERTS_PT_CHK_NOT_IN_PORTQ(RQ, PP) check_port_queue((RQ), (PP), 0)
+#define ERTS_PT_CHK_TASKQ(Q) check_task_queue((Q), NULL, 0)
+#define ERTS_PT_CHK_IN_TASKQ(Q, T) check_task_queue((Q), (T), 1)
+#define ERTS_PT_CHK_NOT_IN_TASKQ(Q, T) check_task_queue((Q), (T), 0)
+static void
+check_port_queue(ErtsRunQueue *runq, Port *chk_pp, int inq);
+static void
+check_task_queue(ErtsPortTaskQueue *ptqp,
+ ErtsPortTask *chk_ptp,
+ int inq);
+#else
+#define ERTS_PT_CHK_PORTQ(RQ)
+#define ERTS_PT_CHK_PRES_PORTQ(RQ, PP)
+#define ERTS_PT_CHK_IN_PORTQ(RQ, PP)
+#define ERTS_PT_CHK_NOT_IN_PORTQ(RQ, PP)
+#define ERTS_PT_CHK_TASKQ(Q)
+#define ERTS_PT_CHK_IN_TASKQ(Q, T)
+#define ERTS_PT_CHK_NOT_IN_TASKQ(Q, T)
+#endif
+
+static void handle_remaining_tasks(ErtsRunQueue *runq, Port *pp);
+
+ERTS_SCHED_PREF_QUICK_ALLOC_IMPL(port_task,
+ ErtsPortTask,
+ 200,
+ ERTS_ALC_T_PORT_TASK)
+ERTS_SCHED_PREF_QUICK_ALLOC_IMPL(port_taskq,
+ ErtsPortTaskQueue,
+ 50,
+ ERTS_ALC_T_PORT_TASKQ)
+
+/*
+ * Task handle manipulation.
+ */
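+/* A task handle is an atomic word holding either a pointer to the
+ * scheduled task or NULL; it lets the task be found and aborted from
+ * threads other than the one currently executing the port. */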
+
+static ERTS_INLINE ErtsPortTask *
+handle2task(ErtsPortTaskHandle *pthp)
+{
+ return (ErtsPortTask *) erts_smp_atomic_read(pthp);
+}
+
+static ERTS_INLINE void
+reset_handle(ErtsPortTask *ptp)
+{
+ if (ptp->handle) {
+ ASSERT(ptp == handle2task(ptp->handle));
+ erts_smp_atomic_set(ptp->handle, (long) NULL);
+ }
+}
+
+static ERTS_INLINE void
+set_handle(ErtsPortTask *ptp, ErtsPortTaskHandle *pthp)
+{
+ ptp->handle = pthp;
+ if (pthp) {
+ erts_smp_atomic_set(pthp, (long) ptp);
+ ASSERT(ptp == handle2task(ptp->handle));
+ }
+}
+
+/*
+ * Port queue operations
+ */
+
+static ERTS_INLINE void
+enqueue_port(ErtsRunQueue *runq, Port *pp)
+{
+ ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(runq));
+ pp->sched.next = NULL;
+ pp->sched.prev = runq->ports.end;
+ if (runq->ports.end) {
+ ASSERT(runq->ports.start);
+ runq->ports.end->sched.next = pp;
+ }
+ else {
+ ASSERT(!runq->ports.start);
+ runq->ports.start = pp;
+ }
+
+ runq->ports.info.len++;
+ if (runq->ports.info.max_len < runq->ports.info.len)
+ runq->ports.info.max_len = runq->ports.info.len;
+ runq->len++;
+ if (runq->max_len < runq->len)
+ runq->max_len = runq->len;
+ runq->ports.end = pp;
+ ASSERT(runq->ports.start && runq->ports.end);
+}
+
+static ERTS_INLINE void
+dequeue_port(ErtsRunQueue *runq, Port *pp)
+{
+ ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(runq));
+ if (pp->sched.next)
+ pp->sched.next->sched.prev = pp->sched.prev;
+ else {
+ ASSERT(runq->ports.end == pp);
+ runq->ports.end = pp->sched.prev;
+ }
+ if (pp->sched.prev)
+ pp->sched.prev->sched.next = pp->sched.next;
+ else {
+ ASSERT(runq->ports.start == pp);
+ runq->ports.start = pp->sched.next;
+ }
+
+ ASSERT(runq->ports.info.len > 0);
+ runq->ports.info.len--;
+ ASSERT(runq->len > 0);
+ runq->len--;
+ ASSERT(runq->ports.start || !runq->ports.end);
+ ASSERT(runq->ports.end || !runq->ports.start);
+}
+
+static ERTS_INLINE Port *
+pop_port(ErtsRunQueue *runq)
+{
+ Port *pp = runq->ports.start;
+ ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(runq));
+ if (!pp) {
+ ASSERT(!runq->ports.end);
+ }
+ else {
+ runq->ports.start = runq->ports.start->sched.next;
+ if (runq->ports.start)
+ runq->ports.start->sched.prev = NULL;
+ else {
+ ASSERT(runq->ports.end == pp);
+ runq->ports.end = NULL;
+ }
+ ASSERT(runq->ports.info.len > 0);
+ runq->ports.info.len--;
+ ASSERT(runq->len > 0);
+ runq->len--;
+ }
+
+ ASSERT(runq->ports.start || !runq->ports.end);
+ ASSERT(runq->ports.end || !runq->ports.start);
+ return pp;
+}
+
+
+#ifdef HARD_DEBUG
+
+static void
+check_port_queue(ErtsRunQueue *runq, Port *chk_pp, int inq)
+{
+ Port *pp;
+ Port *last_pp;
+ Port *first_pp = runq->ports.start;
+ int no_forward = 0, no_backward = 0;
+ int found_forward = 0, found_backward = 0;
+ if (!first_pp) {
+ ASSERT(!runq->ports.end);
+ }
+ else {
+ ASSERT(!first_pp->sched.prev);
+ for (pp = first_pp; pp; pp = pp->sched.next) {
+ ASSERT(pp->sched.taskq);
+ if (pp->sched.taskq->first)
+ no_forward++;
+ if (chk_pp == pp)
+ found_forward = 1;
+ if (!pp->sched.prev) {
+ ASSERT(first_pp == pp);
+ }
+ if (!pp->sched.next) {
+ ASSERT(runq->ports.end == pp);
+ last_pp = pp;
+ }
+ }
+ for (pp = last_pp; pp; pp = pp->sched.prev) {
+ ASSERT(pp->sched.taskq);
+ if (pp->sched.taskq->last)
+ no_backward++;
+ if (chk_pp == pp)
+ found_backward = 1;
+ if (!pp->sched.prev) {
+ ASSERT(first_pp == pp);
+ }
+ if (!pp->sched.next) {
+ ASSERT(runq->ports.end == pp);
+ }
+ check_task_queue(pp->sched.taskq, NULL, 0);
+ }
+ ASSERT(no_forward == no_backward);
+ }
+ ASSERT(no_forward == runq->ports.info.len);
+ if (chk_pp) {
+ if (chk_pp->sched.taskq || chk_pp->sched.exe_taskq) {
+ ASSERT(chk_pp->sched.taskq != chk_pp->sched.exe_taskq);
+ }
+ ASSERT(!chk_pp->sched.taskq || chk_pp->sched.taskq->first);
+ if (inq < 0)
+ inq = chk_pp->sched.taskq && !chk_pp->sched.exe_taskq;
+ if (inq) {
+ ASSERT(found_forward && found_backward);
+ }
+ else {
+ ASSERT(!found_forward && !found_backward);
+ }
+ }
+}
+
+#endif
+
+/*
+ * Task queue operations
+ */
+
+static ERTS_INLINE ErtsPortTaskQueue *
+port_taskq_init(ErtsPortTaskQueue *ptqp, Port *pp)
+{
+ if (ptqp) {
+ ptqp->first = NULL;
+ ptqp->last = NULL;
+ ptqp->port = pp;
+ }
+ return ptqp;
+}
+
+static ERTS_INLINE void
+enqueue_task(ErtsPortTaskQueue *ptqp, ErtsPortTask *ptp)
+{
+ ERTS_PT_CHK_NOT_IN_TASKQ(ptqp, ptp);
+ ptp->next = NULL;
+ ptp->prev = ptqp->last;
+ ptp->queue = ptqp;
+ if (ptqp->last) {
+ ASSERT(ptqp->first);
+ ptqp->last->next = ptp;
+ }
+ else {
+ ASSERT(!ptqp->first);
+ ptqp->first = ptp;
+ }
+ ptqp->last = ptp;
+ ERTS_PT_CHK_IN_TASKQ(ptqp, ptp);
+}
+
+static ERTS_INLINE void
+push_task(ErtsPortTaskQueue *ptqp, ErtsPortTask *ptp)
+{
+ ERTS_PT_CHK_NOT_IN_TASKQ(ptqp, ptp);
+ ptp->next = ptqp->first;
+ ptp->prev = NULL;
+ ptp->queue = ptqp;
+ if (ptqp->first) {
+ ASSERT(ptqp->last);
+ ptqp->first->prev = ptp;
+ }
+ else {
+ ASSERT(!ptqp->last);
+ ptqp->last = ptp;
+ }
+ ptqp->first = ptp;
+ ERTS_PT_CHK_IN_TASKQ(ptqp, ptp);
+}
+
+static ERTS_INLINE void
+dequeue_task(ErtsPortTask *ptp)
+{
+ ASSERT(ptp);
+ ASSERT(ptp->queue);
+ ERTS_PT_CHK_IN_TASKQ(ptp->queue, ptp);
+ if (ptp->next)
+ ptp->next->prev = ptp->prev;
+ else {
+ ASSERT(ptp->queue->last == ptp);
+ ptp->queue->last = ptp->prev;
+ }
+ if (ptp->prev)
+ ptp->prev->next = ptp->next;
+ else {
+ ASSERT(ptp->queue->first == ptp);
+ ptp->queue->first = ptp->next;
+ }
+
+ ASSERT(ptp->queue->first || !ptp->queue->last);
+ ASSERT(ptp->queue->last || !ptp->queue->first);
+ ERTS_PT_CHK_NOT_IN_TASKQ(ptp->queue, ptp);
+}
+
+static ERTS_INLINE ErtsPortTask *
+pop_task(ErtsPortTaskQueue *ptqp)
+{
+ ErtsPortTask *ptp = ptqp->first;
+ if (!ptp) {
+ ASSERT(!ptqp->last);
+ }
+ else {
+ ERTS_PT_CHK_IN_TASKQ(ptqp, ptp);
+ ASSERT(!ptp->prev);
+ ptqp->first = ptp->next;
+ if (ptqp->first)
+ ptqp->first->prev = NULL;
+ else {
+ ASSERT(ptqp->last == ptp);
+ ptqp->last = NULL;
+ }
+ ASSERT(ptp->queue->first || !ptp->queue->last);
+ ASSERT(ptp->queue->last || !ptp->queue->first);
+ }
+ ERTS_PT_CHK_NOT_IN_TASKQ(ptqp, ptp);
+ return ptp;
+}
+
+#ifdef HARD_DEBUG
+
+static void
+check_task_queue(ErtsPortTaskQueue *ptqp,
+ ErtsPortTask *chk_ptp,
+ int inq)
+{
+ ErtsPortTask *ptp;
+ ErtsPortTask *last_ptp;
+ ErtsPortTask *first_ptp = ptqp->first;
+ int found_forward = 0, found_backward = 0;
+ if (!first_ptp) {
+ ASSERT(!ptqp->last);
+ }
+ else {
+ ASSERT(!first_ptp->prev);
+ for (ptp = first_ptp; ptp; ptp = ptp->next) {
+ ASSERT(ptp->queue == ptqp);
+ if (chk_ptp == ptp)
+ found_forward = 1;
+ if (!ptp->prev) {
+ ASSERT(first_ptp == ptp);
+ }
+ if (!ptp->next) {
+ ASSERT(ptqp->last == ptp);
+ last_ptp = ptp;
+ }
+ }
+ for (ptp = last_ptp; ptp; ptp = ptp->prev) {
+ ASSERT(ptp->queue == ptqp);
+ if (chk_ptp == ptp)
+ found_backward = 1;
+ if (!ptp->prev) {
+ ASSERT(first_ptp == ptp);
+ }
+ if (!ptp->next) {
+ ASSERT(ptqp->last == ptp);
+ }
+ }
+ }
+ if (chk_ptp) {
+ if (inq) {
+ ASSERT(found_forward && found_backward);
+ }
+ else {
+ ASSERT(!found_forward && !found_backward);
+ }
+ }
+}
+#endif
+
+/*
+ * Abort a scheduled task. Returns 0 on success, or 1 if the handle no
+ * longer refers to a scheduled task.
+ */
+
+int
+erts_port_task_abort(Eterm id, ErtsPortTaskHandle *pthp)
+{
+ ErtsRunQueue *runq;
+ ErtsPortTaskQueue *ptqp;
+ ErtsPortTask *ptp;
+ Port *pp;
+ int port_is_dequeued = 0;
+
+ pp = &erts_port[internal_port_index(id)];
+ runq = erts_port_runq(pp);
+
+ ptp = handle2task(pthp);
+
+ if (!ptp) {
+ erts_smp_runq_unlock(runq);
+ return 1;
+ }
+
+ ASSERT(ptp->handle == pthp);
+ ptqp = ptp->queue;
+ ASSERT(pp == ptqp->port);
+
+ ERTS_PT_CHK_PRES_PORTQ(runq, pp);
+ ASSERT(ptqp);
+ ASSERT(ptqp->first);
+
+ dequeue_task(ptp);
+ reset_handle(ptp);
+
+ switch (ptp->type) {
+ case ERTS_PORT_TASK_INPUT:
+ case ERTS_PORT_TASK_OUTPUT:
+ case ERTS_PORT_TASK_EVENT:
+ ASSERT(erts_smp_atomic_read(&erts_port_task_outstanding_io_tasks) > 0);
+ erts_smp_atomic_dec(&erts_port_task_outstanding_io_tasks);
+ break;
+ default:
+ break;
+ }
+
+ ASSERT(ptqp == pp->sched.taskq || ptqp == pp->sched.exe_taskq);
+
+ if (ptqp->first || pp->sched.taskq != ptqp)
+ ptqp = NULL;
+ else {
+ pp->sched.taskq = NULL;
+ if (!pp->sched.exe_taskq) {
+ dequeue_port(runq, pp);
+ ERTS_PORT_NOT_IN_RUNQ(pp);
+ port_is_dequeued = 1;
+ }
+ }
+
+ ERTS_PT_CHK_PRES_PORTQ(runq, pp);
+
+ erts_smp_runq_unlock(runq);
+
+ if (erts_system_profile_flags.runnable_ports && port_is_dequeued) {
+ profile_runnable_port(pp, am_inactive);
+ }
+
+ port_task_free(ptp);
+ if (ptqp)
+ port_taskq_free(ptqp);
+
+ return 0;
+}
+
+/*
+ * Schedule a task.
+ */
+
+int
+erts_port_task_schedule(Eterm id,
+ ErtsPortTaskHandle *pthp,
+ ErtsPortTaskType type,
+ ErlDrvEvent event,
+ ErlDrvEventData event_data)
+{
+ ErtsRunQueue *runq;
+ Port *pp;
+ ErtsPortTask *ptp;
+ int enq_port = 0;
+
+ /*
+ * NOTE: We might not have the port lock here. We are only
+ * allowed to access the 'sched', 'tab_status',
+ * and 'id' fields of the port struct while
+ * tasks_lock is held.
+ */
+
+ if (pthp && erts_port_task_is_scheduled(pthp)) {
+ ASSERT(0);
+ erts_port_task_abort(id, pthp);
+ }
+
+ ptp = port_task_alloc();
+
+ ASSERT(is_internal_port(id));
+ pp = &erts_port[internal_port_index(id)];
+ runq = erts_port_runq(pp);
+
+ if (!runq || ERTS_PORT_TASK_INVALID_PORT(pp, id)) {
+ if (runq)
+ erts_smp_runq_unlock(runq);
+	port_task_free(ptp); /* not yet queued; avoid leaking it */
+	return -1;
+ }
+
+ ASSERT(!erts_port_task_is_scheduled(pthp));
+
+ ERTS_PT_CHK_PRES_PORTQ(runq, pp);
+
+ if (!pp->sched.taskq) {
+ pp->sched.taskq = port_taskq_init(port_taskq_alloc(), pp);
+ enq_port = !pp->sched.exe_taskq;
+ }
+
+#ifdef ERTS_SMP
+ if (enq_port) {
+ ErtsRunQueue *xrunq = erts_check_emigration_need(runq, ERTS_PORT_PRIO_LEVEL);
+ if (xrunq) {
+ /* Port emigrated ... */
+ erts_smp_atomic_set(&pp->run_queue, (long) xrunq);
+ erts_smp_runq_unlock(runq);
+ runq = xrunq;
+ }
+ }
+#endif
+
+ ASSERT(!(runq->flags & ERTS_RUNQ_FLG_SUSPENDED));
+
+ ASSERT(pp->sched.taskq);
+ ASSERT(ptp);
+
+ ptp->type = type;
+ ptp->event = event;
+ ptp->event_data = event_data;
+
+ set_handle(ptp, pthp);
+
+ switch (type) {
+ case ERTS_PORT_TASK_FREE:
+ erl_exit(ERTS_ABORT_EXIT,
+ "erts_port_task_schedule(): Cannot schedule free task\n");
+ break;
+ case ERTS_PORT_TASK_INPUT:
+ case ERTS_PORT_TASK_OUTPUT:
+ case ERTS_PORT_TASK_EVENT:
+ erts_smp_atomic_inc(&erts_port_task_outstanding_io_tasks);
+ /* Fall through... */
+ default:
+ enqueue_task(pp->sched.taskq, ptp);
+ break;
+ }
+
+#if defined(HARD_DEBUG)
+ if (pp->sched.exe_taskq || enq_port)
+ ERTS_PT_CHK_NOT_IN_PORTQ(runq, pp);
+ else
+ ERTS_PT_CHK_IN_PORTQ(runq, pp);
+#elif defined(DEBUG)
+ if (!enq_port && !pp->sched.exe_taskq) {
+ /* We should be in port run q */
+ ASSERT(pp->sched.prev || runq->ports.start == pp);
+ }
+#endif
+
+ if (!enq_port) {
+ ERTS_PT_CHK_PRES_PORTQ(runq, pp);
+ }
+ else {
+ enqueue_port(runq, pp);
+ ERTS_PT_CHK_PRES_PORTQ(runq, pp);
+
+ if (erts_system_profile_flags.runnable_ports) {
+ profile_runnable_port(pp, am_active);
+ }
+
+ erts_smp_notify_inc_runq(runq);
+ }
+ erts_smp_runq_unlock(runq);
+ return 0;
+}
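+/* Usage sketch (illustrative; 'prt' and 'dsp' are hypothetical): the
+ * check-I/O layer schedules a ready-for-input task roughly like
+ *
+ *     erts_port_task_schedule(prt->id, &dsp->intask,
+ *                             ERTS_PORT_TASK_INPUT,
+ *                             (ErlDrvEvent) fd, NULL);
+ *
+ * A zero result means the task was queued; -1 means the port was dead
+ * and nothing was scheduled.
+ */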
+
+void
+erts_port_task_free_port(Port *pp)
+{
+ ErtsRunQueue *runq;
+ int port_is_dequeued = 0;
+
+ ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(pp));
+ ASSERT(!(pp->status & ERTS_PORT_SFLGS_DEAD));
+ runq = erts_port_runq(pp);
+ ASSERT(runq);
+ ERTS_PT_CHK_PRES_PORTQ(runq, pp);
+ if (pp->sched.exe_taskq) {
+	/* I (this thread) am currently executing this port; free it
+	   when it is scheduled out... */
+ ErtsPortTask *ptp = port_task_alloc();
+ erts_smp_port_state_lock(pp);
+ ASSERT(erts_smp_atomic_read(&erts_ports_alive) > 0);
+ erts_smp_atomic_dec(&erts_ports_alive);
+ pp->status &= ~ERTS_PORT_SFLG_CLOSING;
+ pp->status |= ERTS_PORT_SFLG_FREE_SCHEDULED;
+ erts_may_save_closed_port(pp);
+ erts_smp_port_state_unlock(pp);
+ ERTS_SMP_LC_ASSERT(erts_smp_atomic_read(&pp->refc) > 1);
+ ptp->type = ERTS_PORT_TASK_FREE;
+ ptp->event = (ErlDrvEvent) -1;
+ ptp->event_data = NULL;
+ set_handle(ptp, NULL);
+ push_task(pp->sched.exe_taskq, ptp);
+ ERTS_PT_CHK_PRES_PORTQ(runq, pp);
+ erts_smp_runq_unlock(runq);
+ }
+ else {
+ ErtsPortTaskQueue *ptqp = pp->sched.taskq;
+ if (ptqp) {
+ dequeue_port(runq, pp);
+ ERTS_PORT_NOT_IN_RUNQ(pp);
+ port_is_dequeued = 1;
+ }
+ erts_smp_port_state_lock(pp);
+ erts_smp_atomic_dec(&erts_ports_alive);
+ pp->status &= ~ERTS_PORT_SFLG_CLOSING;
+ pp->status |= ERTS_PORT_SFLG_FREE_SCHEDULED;
+ erts_may_save_closed_port(pp);
+ erts_smp_port_state_unlock(pp);
+#ifdef ERTS_SMP
+ erts_smp_atomic_dec(&pp->refc); /* Not alive */
+#endif
+ ERTS_SMP_LC_ASSERT(erts_smp_atomic_read(&pp->refc) > 0); /* Lock */
+ handle_remaining_tasks(runq, pp); /* May release runq lock */
+ ASSERT(!pp->sched.exe_taskq && (!ptqp || !ptqp->first));
+ pp->sched.taskq = NULL;
+ ERTS_PT_CHK_PRES_PORTQ(runq, pp);
+#ifndef ERTS_SMP
+ ASSERT(pp->status & ERTS_PORT_SFLG_PORT_DEBUG);
+ erts_port_status_set(pp, ERTS_PORT_SFLG_FREE);
+#endif
+ erts_smp_runq_unlock(runq);
+
+ if (erts_system_profile_flags.runnable_ports && port_is_dequeued) {
+ profile_runnable_port(pp, am_inactive);
+ }
+
+ if (ptqp)
+ port_taskq_free(ptqp);
+ }
+}
+
+typedef struct {
+ ErtsRunQueue *runq;
+ int *resp;
+} ErtsPortTaskExeBlockData;
+
+static void
+prepare_for_block(void *vd)
+{
+ ErtsPortTaskExeBlockData *d = (ErtsPortTaskExeBlockData *) vd;
+ erts_smp_runq_unlock(d->runq);
+}
+
+static void
+resume_after_block(void *vd)
+{
+ ErtsPortTaskExeBlockData *d = (ErtsPortTaskExeBlockData *) vd;
+ erts_smp_runq_lock(d->runq);
+ if (d->resp)
+ *d->resp = erts_smp_atomic_read(&erts_port_task_outstanding_io_tasks) != (long) 0;
+}
+
+/*
+ * Run all scheduled tasks for the first port in the run queue. If
+ * new tasks appear while running, the port is rescheduled (the free
+ * task is an exception; it is always handled immediately).
+ *
+ * erts_port_task_execute() is called by scheduler threads between
+ * scheduling of processes. The run queue lock should be held by the
+ * caller.
+ */
+
+int
+erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp)
+{
+ int port_was_enqueued = 0;
+ Port *pp;
+ ErtsPortTaskQueue *ptqp;
+ ErtsPortTask *ptp;
+ int res = 0;
+ int reds = ERTS_PORT_REDS_EXECUTE;
+ long io_tasks_executed = 0;
+ int fpe_was_unmasked;
+ ErtsPortTaskExeBlockData blk_data = {runq, NULL};
+
+ ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(runq));
+
+ erts_smp_activity_begin(ERTS_ACTIVITY_IO,
+ prepare_for_block,
+ resume_after_block,
+ (void *) &blk_data);
+
+ ERTS_PT_CHK_PORTQ(runq);
+
+ pp = pop_port(runq);
+ if (!pp) {
+ res = 0;
+ goto done;
+ }
+
+ ERTS_PORT_NOT_IN_RUNQ(pp);
+
+ *curr_port_pp = pp;
+
+ ASSERT(pp->sched.taskq);
+ ASSERT(pp->sched.taskq->first);
+ ptqp = pp->sched.taskq;
+ pp->sched.taskq = NULL;
+
+ ASSERT(!pp->sched.exe_taskq);
+ pp->sched.exe_taskq = ptqp;
+
+ if (erts_smp_port_trylock(pp) == EBUSY) {
+ erts_smp_runq_unlock(runq);
+ erts_smp_port_lock(pp);
+ erts_smp_runq_lock(runq);
+ }
+
+ if (erts_sched_stat.enabled) {
+ ErtsSchedulerData *esdp = erts_get_scheduler_data();
+ Uint old = ERTS_PORT_SCHED_ID(pp, esdp->no);
+ int migrated = old && old != esdp->no;
+
+ erts_smp_spin_lock(&erts_sched_stat.lock);
+ erts_sched_stat.prio[ERTS_PORT_PRIO_LEVEL].total_executed++;
+ erts_sched_stat.prio[ERTS_PORT_PRIO_LEVEL].executed++;
+ if (migrated) {
+ erts_sched_stat.prio[ERTS_PORT_PRIO_LEVEL].total_migrated++;
+ erts_sched_stat.prio[ERTS_PORT_PRIO_LEVEL].migrated++;
+ }
+ erts_smp_spin_unlock(&erts_sched_stat.lock);
+ }
+
+ /* trace port scheduling, in */
+ if (IS_TRACED_FL(pp, F_TRACE_SCHED_PORTS)) {
+ trace_sched_ports(pp, am_in);
+ }
+
+ ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(pp));
+
+ ERTS_PT_CHK_PRES_PORTQ(runq, pp);
+ ptp = pop_task(ptqp);
+
+ fpe_was_unmasked = erts_block_fpe();
+
+ while (ptp) {
+ ASSERT(pp->sched.taskq != pp->sched.exe_taskq);
+
+ reset_handle(ptp);
+ erts_smp_runq_unlock(runq);
+
+ ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(pp));
+ ERTS_SMP_CHK_NO_PROC_LOCKS;
+ ASSERT(pp->drv_ptr);
+
+ switch (ptp->type) {
+ case ERTS_PORT_TASK_FREE: /* May be pushed in q at any time */
+ reds += ERTS_PORT_REDS_FREE;
+ erts_smp_runq_lock(runq);
+
+ erts_unblock_fpe(fpe_was_unmasked);
+ ASSERT(pp->status & ERTS_PORT_SFLG_FREE_SCHEDULED);
+ if (ptqp->first || (pp->sched.taskq && pp->sched.taskq->first))
+ handle_remaining_tasks(runq, pp);
+ ASSERT(!ptqp->first
+ && (!pp->sched.taskq || !pp->sched.taskq->first));
+#ifdef ERTS_SMP
+ erts_smp_atomic_dec(&pp->refc); /* Not alive */
+ ERTS_SMP_LC_ASSERT(erts_smp_atomic_read(&pp->refc) > 0); /* Lock */
+#else
+ erts_port_status_bor_set(pp, ERTS_PORT_SFLG_FREE);
+#endif
+
+ port_task_free(ptp);
+ if (pp->sched.taskq)
+ port_taskq_free(pp->sched.taskq);
+ pp->sched.taskq = NULL;
+
+ goto tasks_done;
+ case ERTS_PORT_TASK_TIMEOUT:
+ reds += ERTS_PORT_REDS_TIMEOUT;
+ if (!(pp->status & ERTS_PORT_SFLGS_DEAD))
+ (*pp->drv_ptr->timeout)((ErlDrvData) pp->drv_data);
+ break;
+ case ERTS_PORT_TASK_INPUT:
+ reds += ERTS_PORT_REDS_INPUT;
+ ASSERT((pp->status & ERTS_PORT_SFLGS_DEAD) == 0);
+	    /* NOTE: some Windows drivers use ->ready_input for both input and output */
+ (*pp->drv_ptr->ready_input)((ErlDrvData) pp->drv_data, ptp->event);
+ io_tasks_executed++;
+ break;
+ case ERTS_PORT_TASK_OUTPUT:
+ reds += ERTS_PORT_REDS_OUTPUT;
+ ASSERT((pp->status & ERTS_PORT_SFLGS_DEAD) == 0);
+ (*pp->drv_ptr->ready_output)((ErlDrvData) pp->drv_data, ptp->event);
+ io_tasks_executed++;
+ break;
+ case ERTS_PORT_TASK_EVENT:
+ reds += ERTS_PORT_REDS_EVENT;
+ ASSERT((pp->status & ERTS_PORT_SFLGS_DEAD) == 0);
+ (*pp->drv_ptr->event)((ErlDrvData) pp->drv_data, ptp->event, ptp->event_data);
+ io_tasks_executed++;
+ break;
+ case ERTS_PORT_TASK_DIST_CMD:
+ reds += erts_dist_command(pp, CONTEXT_REDS-reds);
+ break;
+ default:
+ erl_exit(ERTS_ABORT_EXIT,
+ "Invalid port task type: %d\n",
+ (int) ptp->type);
+ break;
+ }
+
+ if ((pp->status & ERTS_PORT_SFLG_CLOSING)
+ && erts_is_port_ioq_empty(pp)) {
+ reds += ERTS_PORT_REDS_TERMINATE;
+ erts_terminate_port(pp);
+ }
+
+ ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(pp));
+
+#ifdef ERTS_SMP
+ if (pp->xports)
+ erts_smp_xports_unlock(pp);
+ ASSERT(!pp->xports);
+#endif
+
+ ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(pp));
+
+ port_task_free(ptp);
+
+ erts_smp_runq_lock(runq);
+
+ ptp = pop_task(ptqp);
+ }
+
+ tasks_done:
+
+ erts_unblock_fpe(fpe_was_unmasked);
+
+ if (io_tasks_executed) {
+ ASSERT(erts_smp_atomic_read(&erts_port_task_outstanding_io_tasks) >= io_tasks_executed);
+ erts_smp_atomic_add(&erts_port_task_outstanding_io_tasks, -1*io_tasks_executed);
+ }
+
+ *curr_port_pp = NULL;
+
+ if (pp->sched.taskq) {
+ ASSERT(!(pp->status & ERTS_PORT_SFLGS_DEAD));
+ ASSERT(pp->sched.taskq->first);
+ enqueue_port(runq, pp);
+ port_was_enqueued = 1;
+
+ /*
+ erts_smp_notify_inc_runq();
+
+ * No need to notify schedulers about the increase in run
+ * queue length since at least this thread, which is a
+ * scheduler, will discover that the port run queue isn't
+ * empty before trying to go to sleep.
+ */
+ }
+
+ ASSERT(pp->sched.exe_taskq);
+ pp->sched.exe_taskq = NULL;
+
+ res = erts_smp_atomic_read(&erts_port_task_outstanding_io_tasks) != (long) 0;
+
+ ERTS_PT_CHK_PRES_PORTQ(runq, pp);
+
+ port_taskq_free(ptqp);
+
+ if (erts_system_profile_flags.runnable_ports && (port_was_enqueued != 1)) {
+ profile_runnable_port(pp, am_inactive);
+ }
+
+ /* trace port scheduling, out */
+ if (IS_TRACED_FL(pp, F_TRACE_SCHED_PORTS)) {
+ trace_sched_ports(pp, am_out);
+ }
+#ifndef ERTS_SMP
+ erts_port_release(pp);
+#else
+ {
+ long refc = erts_smp_atomic_dectest(&pp->refc);
+ ASSERT(refc >= 0);
+ if (refc > 0)
+ erts_smp_mtx_unlock(pp->lock);
+ else {
+ erts_smp_runq_unlock(runq);
+	    erts_port_cleanup(pp); /* Might acquire runq lock */
+ erts_smp_runq_lock(runq);
+ res = erts_smp_atomic_read(&erts_port_task_outstanding_io_tasks) != (long) 0;
+ }
+ }
+#endif
+
+ done:
+ blk_data.resp = &res;
+ erts_smp_activity_end(ERTS_ACTIVITY_IO,
+ prepare_for_block,
+ resume_after_block,
+ (void *) &blk_data);
+
+ ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(runq));
+
+ ERTS_PORT_REDUCTIONS_EXECUTED(runq, reds);
+
+ return res;
+}
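+/* Caller-side sketch (illustrative, assuming the run queue lock is
+ * held): a scheduler thread drains the port queue roughly like
+ *
+ *     while (runq->ports.info.len) {
+ *         Port *curr_port;
+ *         erts_port_task_execute(runq, &curr_port);
+ *     }
+ *
+ * The return value tells the scheduler whether outstanding I/O tasks
+ * remain somewhere in the system.
+ */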
+
+/*
+ * Handle remaining tasks after a free task.
+ */
+
+static void
+handle_remaining_tasks(ErtsRunQueue *runq, Port *pp)
+{
+ int i;
+ ErtsPortTask *ptp;
+ ErtsPortTaskQueue *ptqps[] = {pp->sched.exe_taskq, pp->sched.taskq};
+
+ ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(pp));
+
+ for (i = 0; i < sizeof(ptqps)/sizeof(ErtsPortTaskQueue *); i++) {
+ if (!ptqps[i])
+ continue;
+
+ ptp = pop_task(ptqps[i]);
+ while (ptp) {
+ reset_handle(ptp);
+ erts_smp_runq_unlock(runq);
+
+ switch (ptp->type) {
+ case ERTS_PORT_TASK_FREE:
+ case ERTS_PORT_TASK_TIMEOUT:
+ break;
+ case ERTS_PORT_TASK_INPUT:
+ erts_stale_drv_select(pp->id, ptp->event, DO_READ, 1);
+ break;
+ case ERTS_PORT_TASK_OUTPUT:
+ erts_stale_drv_select(pp->id, ptp->event, DO_WRITE, 1);
+ break;
+ case ERTS_PORT_TASK_EVENT:
+ erts_stale_drv_select(pp->id, ptp->event, 0, 1);
+ break;
+ case ERTS_PORT_TASK_DIST_CMD:
+ break;
+ default:
+ erl_exit(ERTS_ABORT_EXIT,
+ "Invalid port task type: %d\n",
+ (int) ptp->type);
+ }
+
+ port_task_free(ptp);
+
+ erts_smp_runq_lock(runq);
+ ptp = pop_task(ptqps[i]);
+ }
+ }
+
+ ASSERT(!pp->sched.taskq || !pp->sched.taskq->first);
+}
+
+int
+erts_port_is_scheduled(Port *pp)
+{
+ int res;
+ ErtsRunQueue *runq = erts_port_runq(pp);
+ res = pp->sched.taskq || pp->sched.exe_taskq;
+ erts_smp_runq_unlock(runq);
+ return res;
+}
+
+#ifdef ERTS_SMP
+
+ErtsMigrateResult
+erts_port_migrate(Port *prt, int *prt_locked,
+ ErtsRunQueue *from_rq, int *from_locked,
+ ErtsRunQueue *to_rq, int *to_locked)
+{
+ ERTS_SMP_LC_ASSERT(*from_locked);
+ ERTS_SMP_LC_CHK_RUNQ_LOCK(from_rq, *from_locked);
+ ERTS_SMP_LC_CHK_RUNQ_LOCK(to_rq, *to_locked);
+
+ ASSERT(!erts_common_run_queue);
+
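+    /* Make sure both run queues are locked, always taking the queue
+     * with the lower address first; this fixed lock order prevents
+     * deadlock against a concurrent migration in the other direction. */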
+ if (!*from_locked || !*to_locked) {
+ if (from_rq < to_rq) {
+ if (!*to_locked) {
+ if (!*from_locked)
+ erts_smp_runq_lock(from_rq);
+ erts_smp_runq_lock(to_rq);
+ }
+ else if (erts_smp_runq_trylock(from_rq) == EBUSY) {
+ erts_smp_runq_unlock(to_rq);
+ erts_smp_runq_lock(from_rq);
+ erts_smp_runq_lock(to_rq);
+ }
+ }
+ else {
+ if (!*from_locked) {
+ if (!*to_locked)
+ erts_smp_runq_lock(to_rq);
+ erts_smp_runq_lock(from_rq);
+ }
+ else if (erts_smp_runq_trylock(to_rq) == EBUSY) {
+ erts_smp_runq_unlock(from_rq);
+ erts_smp_runq_lock(to_rq);
+ erts_smp_runq_lock(from_rq);
+ }
+ }
+ *to_locked = *from_locked = 1;
+ }
+ ERTS_SMP_LC_CHK_RUNQ_LOCK(from_rq, *from_locked);
+ ERTS_SMP_LC_CHK_RUNQ_LOCK(to_rq, *to_locked);
+
+ /* Refuse to migrate to a suspended run queue */
+ if (to_rq->flags & ERTS_RUNQ_FLG_SUSPENDED)
+ return ERTS_MIGRATE_FAILED_RUNQ_SUSPENDED;
+ if (from_rq != (ErtsRunQueue *) erts_smp_atomic_read(&prt->run_queue))
+ return ERTS_MIGRATE_FAILED_RUNQ_CHANGED;
+ if (!ERTS_PORT_IS_IN_RUNQ(from_rq, prt))
+ return ERTS_MIGRATE_FAILED_NOT_IN_RUNQ;
+ dequeue_port(from_rq, prt);
+ erts_smp_atomic_set(&prt->run_queue, (long) to_rq);
+ enqueue_port(to_rq, prt);
+ erts_smp_notify_inc_runq(to_rq);
+ return ERTS_MIGRATE_SUCCESS;
+}
+
+#endif
+
+/*
+ * Initialize the module.
+ */
+void
+erts_port_task_init(void)
+{
+ erts_smp_atomic_init(&erts_port_task_outstanding_io_tasks, (long) 0);
+ init_port_task_alloc();
+ init_port_taskq_alloc();
+}
diff --git a/erts/emulator/beam/erl_port_task.h b/erts/emulator/beam/erl_port_task.h
new file mode 100644
index 0000000000..f12d02da0c
--- /dev/null
+++ b/erts/emulator/beam/erl_port_task.h
@@ -0,0 +1,135 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2006-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Description: Scheduling of port tasks
+ *
+ * Author: Rickard Green
+ */
+
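+/* This header is deliberately includable twice: with
+ * ERTS_PORT_TASK_ONLY_BASIC_TYPES__ defined only the ErtsPortTaskHandle
+ * type is pulled in; a normal inclusion provides the full interface. */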
+#ifndef ERTS_PORT_TASK_H_BASIC_TYPES__
+#define ERTS_PORT_TASK_H_BASIC_TYPES__
+#include "erl_sys_driver.h"
+#include "erl_smp.h"
+typedef erts_smp_atomic_t ErtsPortTaskHandle;
+#endif
+
+#ifndef ERTS_PORT_TASK_ONLY_BASIC_TYPES__
+#ifndef ERL_PORT_TASK_H__
+#define ERL_PORT_TASK_H__
+
+#undef ERTS_INCLUDE_SCHEDULER_INTERNALS
+#if (defined(ERL_PROCESS_C__) \
+ || defined(ERL_PORT_TASK_C__) \
+ || defined(ERL_IO_C__) \
+ || (ERTS_GLB_INLINE_INCL_FUNC_DEF \
+ && defined(ERTS_DO_INCL_GLB_INLINE_FUNC_DEF)))
+#define ERTS_INCLUDE_SCHEDULER_INTERNALS
+#endif
+
+typedef enum {
+ ERTS_PORT_TASK_FREE,
+ ERTS_PORT_TASK_INPUT,
+ ERTS_PORT_TASK_OUTPUT,
+ ERTS_PORT_TASK_EVENT,
+ ERTS_PORT_TASK_TIMEOUT,
+ ERTS_PORT_TASK_DIST_CMD
+} ErtsPortTaskType;
+
+#ifdef ERTS_INCLUDE_SCHEDULER_INTERNALS
+/* NOTE: Do not access any of the exported variables directly */
+extern erts_smp_atomic_t erts_port_task_outstanding_io_tasks;
+#endif
+
+typedef struct ErtsPortTask_ ErtsPortTask;
+typedef struct ErtsPortTaskQueue_ ErtsPortTaskQueue;
+
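+/* Embedded in each Port: 'next'/'prev' link the port into a run queue,
+ * 'taskq' holds tasks waiting to be executed, and 'exe_taskq' is the
+ * queue currently being executed, if any. */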
+typedef struct {
+ Port *next;
+ Port *prev;
+ ErtsPortTaskQueue *taskq;
+ ErtsPortTaskQueue *exe_taskq;
+} ErtsPortTaskSched;
+
+ERTS_GLB_INLINE void erts_port_task_handle_init(ErtsPortTaskHandle *pthp);
+ERTS_GLB_INLINE int erts_port_task_is_scheduled(ErtsPortTaskHandle *pthp);
+ERTS_GLB_INLINE void erts_port_task_init_sched(ErtsPortTaskSched *ptsp);
+#ifdef ERTS_INCLUDE_SCHEDULER_INTERNALS
+ERTS_GLB_INLINE int erts_port_task_have_outstanding_io_tasks(void);
+#endif
+
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+
+ERTS_GLB_INLINE void
+erts_port_task_handle_init(ErtsPortTaskHandle *pthp)
+{
+ erts_smp_atomic_init(pthp, (long) NULL);
+}
+
+ERTS_GLB_INLINE int
+erts_port_task_is_scheduled(ErtsPortTaskHandle *pthp)
+{
+ return ((void *) erts_smp_atomic_read(pthp)) != NULL;
+}
+
+ERTS_GLB_INLINE void
+erts_port_task_init_sched(ErtsPortTaskSched *ptsp)
+{
+ ptsp->next = NULL;
+ ptsp->prev = NULL;
+ ptsp->taskq = NULL;
+ ptsp->exe_taskq = NULL;
+}
+
+#ifdef ERTS_INCLUDE_SCHEDULER_INTERNALS
+
+ERTS_GLB_INLINE int
+erts_port_task_have_outstanding_io_tasks(void)
+{
+ return erts_smp_atomic_read(&erts_port_task_outstanding_io_tasks) != 0;
+}
+
+#endif /* ERTS_INCLUDE_SCHEDULER_INTERNALS */
+
+#endif
+
+#ifdef ERTS_INCLUDE_SCHEDULER_INTERNALS
+int erts_port_task_execute(ErtsRunQueue *, Port **);
+void erts_port_task_init(void);
+#endif
+
+int erts_port_task_abort(Eterm id, ErtsPortTaskHandle *);
+int erts_port_task_schedule(Eterm,
+ ErtsPortTaskHandle *,
+ ErtsPortTaskType,
+ ErlDrvEvent,
+ ErlDrvEventData);
+void erts_port_task_free_port(Port *);
+int erts_port_is_scheduled(Port *);
+#ifdef ERTS_SMP
+ErtsMigrateResult erts_port_migrate(Port *,
+ int *,
+ ErtsRunQueue *,
+ int *,
+ ErtsRunQueue *,
+ int *);
+#endif
+#undef ERTS_INCLUDE_SCHEDULER_INTERNALS
+#endif /* ERL_PORT_TASK_H__ */
+#endif /* ERTS_PORT_TASK_ONLY_BASIC_TYPES__ */
diff --git a/erts/emulator/beam/erl_posix_str.c b/erts/emulator/beam/erl_posix_str.c
new file mode 100644
index 0000000000..02db10905b
--- /dev/null
+++ b/erts/emulator/beam/erl_posix_str.c
@@ -0,0 +1,641 @@
+/*
+ * Original: tclPosixStr.c --
+ *
+ * This file contains procedures that generate strings
+ * corresponding to various POSIX-related codes, such
+ * as errno and signals.
+ *
+ * Copyright (c) 1991-1994 The Regents of the University of California.
+ * Copyright (c) 1994-1996 Sun Microsystems, Inc.
+ *
+ * See the file "license.terms" for information on usage and redistribution
+ * of this file, and for a DISCLAIMER OF ALL WARRANTIES.
+ *
+ * SCCS: @(#) tclPosixStr.c 1.32 96/10/10 10:09:42
+ */
+
+/* %ExternalCopyright% */
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+
+#ifdef __WIN32__
+#ifndef WINDOWS_H_INCLUDES_WINSOCK2_H
+#include <winsock2.h>
+#endif
+#include <windows.h>
+#endif
+
+#include "erl_errno.h"
+#include "sys.h"
+#include "erl_driver.h"
+
+/*
+ *----------------------------------------------------------------------
+ *
+ * erl_errno_id --
+ *
+ * Return a textual identifier for the given errno value.
+ *
+ * Results:
+ * This procedure returns a machine-readable textual identifier
+ * that corresponds to the current errno value (e.g. "eperm").
+ * The identifier is the same as the #define name in errno.h,
+ * except that it is in lowercase.
+ *
+ *----------------------------------------------------------------------
+ */
+
+char *
+erl_errno_id(int error)		/* Posix error number (as from errno). */
+{
+ switch (error) {
+#ifdef E2BIG
+ case E2BIG: return "e2big";
+#endif
+#ifdef EACCES
+ case EACCES: return "eacces";
+#endif
+#ifdef EADDRINUSE
+ case EADDRINUSE: return "eaddrinuse";
+#endif
+#ifdef EADDRNOTAVAIL
+ case EADDRNOTAVAIL: return "eaddrnotavail";
+#endif
+#ifdef EADV
+ case EADV: return "eadv";
+#endif
+#ifdef EAFNOSUPPORT
+ case EAFNOSUPPORT: return "eafnosupport";
+#endif
+#ifdef EAGAIN
+ case EAGAIN: return "eagain";
+#endif
+#ifdef EALIGN
+ case EALIGN: return "ealign";
+#endif
+#if defined(EALREADY) && (!defined(EBUSY) || (EALREADY != EBUSY ))
+ case EALREADY: return "ealready";
+#endif
+#ifdef EBADE
+ case EBADE: return "ebade";
+#endif
+#ifdef EBADF
+ case EBADF: return "ebadf";
+#endif
+#ifdef EBADFD
+ case EBADFD: return "ebadfd";
+#endif
+#ifdef EBADMSG
+ case EBADMSG: return "ebadmsg";
+#endif
+#ifdef EBADR
+ case EBADR: return "ebadr";
+#endif
+#ifdef EBADRPC
+ case EBADRPC: return "ebadrpc";
+#endif
+#ifdef EBADRQC
+ case EBADRQC: return "ebadrqc";
+#endif
+#ifdef EBADSLT
+ case EBADSLT: return "ebadslt";
+#endif
+#ifdef EBFONT
+ case EBFONT: return "ebfont";
+#endif
+#ifdef EBUSY
+ case EBUSY: return "ebusy";
+#endif
+#ifdef ECHILD
+ case ECHILD: return "echild";
+#endif
+#ifdef ECHRNG
+ case ECHRNG: return "echrng";
+#endif
+#ifdef ECOMM
+ case ECOMM: return "ecomm";
+#endif
+#ifdef ECONNABORTED
+ case ECONNABORTED: return "econnaborted";
+#endif
+#ifdef ECONNREFUSED
+ case ECONNREFUSED: return "econnrefused";
+#endif
+#ifdef ECONNRESET
+ case ECONNRESET: return "econnreset";
+#endif
+#if defined(EDEADLK) && (!defined(EWOULDBLOCK) || (EDEADLK != EWOULDBLOCK))
+ case EDEADLK: return "edeadlk";
+#endif
+#if defined(EDEADLOCK) && (!defined(EDEADLK) || (EDEADLOCK != EDEADLK))
+ case EDEADLOCK: return "edeadlock";
+#endif
+#ifdef EDESTADDRREQ
+ case EDESTADDRREQ: return "edestaddrreq";
+#endif
+#ifdef EDIRTY
+ case EDIRTY: return "edirty";
+#endif
+#ifdef EDOM
+ case EDOM: return "edom";
+#endif
+#ifdef EDOTDOT
+ case EDOTDOT: return "edotdot";
+#endif
+#ifdef EDQUOT
+ case EDQUOT: return "edquot";
+#endif
+#ifdef EDUPPKG
+ case EDUPPKG: return "eduppkg";
+#endif
+#ifdef EEXIST
+ case EEXIST: return "eexist";
+#endif
+#ifdef EFAULT
+ case EFAULT: return "efault";
+#endif
+#ifdef EFBIG
+ case EFBIG: return "efbig";
+#endif
+#ifdef EHOSTDOWN
+ case EHOSTDOWN: return "ehostdown";
+#endif
+#ifdef EHOSTUNREACH
+ case EHOSTUNREACH: return "ehostunreach";
+#endif
+#if defined(EIDRM) && (!defined(EINPROGRESS) || (EIDRM != EINPROGRESS))
+ case EIDRM: return "eidrm";
+#endif
+#ifdef EINIT
+ case EINIT: return "einit";
+#endif
+#ifdef EINPROGRESS
+ case EINPROGRESS: return "einprogress";
+#endif
+#ifdef EINTR
+ case EINTR: return "eintr";
+#endif
+#ifdef EINVAL
+ case EINVAL: return "einval";
+#endif
+#ifdef EIO
+ case EIO: return "eio";
+#endif
+#ifdef EISCONN
+ case EISCONN: return "eisconn";
+#endif
+#ifdef EISDIR
+ case EISDIR: return "eisdir";
+#endif
+#ifdef EISNAM
+ case EISNAM: return "eisnam";
+#endif
+#ifdef ELBIN
+ case ELBIN: return "elbin";
+#endif
+#ifdef EL2HLT
+ case EL2HLT: return "el2hlt";
+#endif
+#ifdef EL2NSYNC
+ case EL2NSYNC: return "el2nsync";
+#endif
+#ifdef EL3HLT
+ case EL3HLT: return "el3hlt";
+#endif
+#ifdef EL3RST
+ case EL3RST: return "el3rst";
+#endif
+#ifdef ELIBACC
+ case ELIBACC: return "elibacc";
+#endif
+#ifdef ELIBBAD
+ case ELIBBAD: return "elibbad";
+#endif
+#ifdef ELIBEXEC
+ case ELIBEXEC: return "elibexec";
+#endif
+#ifdef ELIBMAX
+ case ELIBMAX: return "elibmax";
+#endif
+#ifdef ELIBSCN
+ case ELIBSCN: return "elibscn";
+#endif
+#ifdef ELNRNG
+ case ELNRNG: return "elnrng";
+#endif
+#if defined(ELOOP) && (!defined(ENOENT) || (ELOOP != ENOENT))
+ case ELOOP: return "eloop";
+#endif
+#ifdef EMFILE
+ case EMFILE: return "emfile";
+#endif
+#ifdef EMLINK
+ case EMLINK: return "emlink";
+#endif
+#ifdef EMSGSIZE
+ case EMSGSIZE: return "emsgsize";
+#endif
+#ifdef EMULTIHOP
+ case EMULTIHOP: return "emultihop";
+#endif
+#ifdef ENAMETOOLONG
+ case ENAMETOOLONG: return "enametoolong";
+#endif
+#ifdef ENAVAIL
+ case ENAVAIL: return "enavail";
+#endif
+#ifdef ENET
+ case ENET: return "enet";
+#endif
+#ifdef ENETDOWN
+ case ENETDOWN: return "enetdown";
+#endif
+#ifdef ENETRESET
+ case ENETRESET: return "enetreset";
+#endif
+#ifdef ENETUNREACH
+ case ENETUNREACH: return "enetunreach";
+#endif
+#ifdef ENFILE
+ case ENFILE: return "enfile";
+#endif
+#ifdef ENOANO
+ case ENOANO: return "enoano";
+#endif
+#if defined(ENOBUFS) && (!defined(ENOSR) || (ENOBUFS != ENOSR))
+ case ENOBUFS: return "enobufs";
+#endif
+#ifdef ENOCSI
+ case ENOCSI: return "enocsi";
+#endif
+#if defined(ENODATA) && (!defined(ECONNREFUSED) || (ENODATA != ECONNREFUSED))
+ case ENODATA: return "enodata";
+#endif
+#ifdef ENODEV
+ case ENODEV: return "enodev";
+#endif
+#ifdef ENOENT
+ case ENOENT: return "enoent";
+#endif
+#ifdef ENOEXEC
+ case ENOEXEC: return "enoexec";
+#endif
+#ifdef ENOLCK
+ case ENOLCK: return "enolck";
+#endif
+#ifdef ENOLINK
+ case ENOLINK: return "enolink";
+#endif
+#ifdef ENOMEM
+ case ENOMEM: return "enomem";
+#endif
+#ifdef ENOMSG
+ case ENOMSG: return "enomsg";
+#endif
+#ifdef ENONET
+ case ENONET: return "enonet";
+#endif
+#ifdef ENOPKG
+ case ENOPKG: return "enopkg";
+#endif
+#ifdef ENOPROTOOPT
+ case ENOPROTOOPT: return "enoprotoopt";
+#endif
+#ifdef ENOSPC
+ case ENOSPC: return "enospc";
+#endif
+#if defined(ENOSR) && (!defined(ENAMETOOLONG) || (ENAMETOOLONG != ENOSR))
+ case ENOSR: return "enosr";
+#endif
+#if defined(ENOSTR) && (!defined(ENOTTY) || (ENOTTY != ENOSTR))
+ case ENOSTR: return "enostr";
+#endif
+#ifdef ENOSYM
+ case ENOSYM: return "enosym";
+#endif
+#ifdef ENOSYS
+ case ENOSYS: return "enosys";
+#endif
+#ifdef ENOTBLK
+ case ENOTBLK: return "enotblk";
+#endif
+#ifdef ENOTCONN
+ case ENOTCONN: return "enotconn";
+#endif
+#ifdef ENOTDIR
+ case ENOTDIR: return "enotdir";
+#endif
+#if defined(ENOTEMPTY) && (!defined(EEXIST) || (ENOTEMPTY != EEXIST))
+ case ENOTEMPTY: return "enotempty";
+#endif
+#ifdef ENOTNAM
+ case ENOTNAM: return "enotnam";
+#endif
+#ifdef ENOTSOCK
+ case ENOTSOCK: return "enotsock";
+#endif
+#ifdef ENOTSUP
+ case ENOTSUP: return "enotsup";
+#endif
+#ifdef ENOTTY
+ case ENOTTY: return "enotty";
+#endif
+#ifdef ENOTUNIQ
+ case ENOTUNIQ: return "enotuniq";
+#endif
+#ifdef ENXIO
+ case ENXIO: return "enxio";
+#endif
+#if defined(EOPNOTSUPP) && (!defined(ENOTSUP) || (EOPNOTSUPP != ENOTSUP))
+ case EOPNOTSUPP: return "eopnotsupp";
+#endif
+#ifdef EPERM
+ case EPERM: return "eperm";
+#endif
+#if defined(EPFNOSUPPORT) && (!defined(ENOLCK) || (ENOLCK != EPFNOSUPPORT))
+ case EPFNOSUPPORT: return "epfnosupport";
+#endif
+#ifdef EPIPE
+ case EPIPE: return "epipe";
+#endif
+#ifdef EPROCLIM
+ case EPROCLIM: return "eproclim";
+#endif
+#ifdef EPROCUNAVAIL
+ case EPROCUNAVAIL: return "eprocunavail";
+#endif
+#ifdef EPROGMISMATCH
+ case EPROGMISMATCH: return "eprogmismatch";
+#endif
+#ifdef EPROGUNAVAIL
+ case EPROGUNAVAIL: return "eprogunavail";
+#endif
+#ifdef EPROTO
+ case EPROTO: return "eproto";
+#endif
+#ifdef EPROTONOSUPPORT
+ case EPROTONOSUPPORT: return "eprotonosupport";
+#endif
+#ifdef EPROTOTYPE
+ case EPROTOTYPE: return "eprototype";
+#endif
+#ifdef ERANGE
+ case ERANGE: return "erange";
+#endif
+#if defined(EREFUSED) && (!defined(ECONNREFUSED) || (EREFUSED != ECONNREFUSED))
+ case EREFUSED: return "erefused";
+#endif
+#ifdef EREMCHG
+ case EREMCHG: return "eremchg";
+#endif
+#ifdef EREMDEV
+ case EREMDEV: return "eremdev";
+#endif
+#ifdef EREMOTE
+ case EREMOTE: return "eremote";
+#endif
+#ifdef EREMOTEIO
+ case EREMOTEIO: return "eremoteio";
+#endif
+#ifdef EREMOTERELEASE
+ case EREMOTERELEASE: return "eremoterelease";
+#endif
+#ifdef EROFS
+ case EROFS: return "erofs";
+#endif
+#ifdef ERPCMISMATCH
+ case ERPCMISMATCH: return "erpcmismatch";
+#endif
+#ifdef ERREMOTE
+ case ERREMOTE: return "erremote";
+#endif
+#ifdef ESHUTDOWN
+ case ESHUTDOWN: return "eshutdown";
+#endif
+#ifdef ESOCKTNOSUPPORT
+ case ESOCKTNOSUPPORT: return "esocktnosupport";
+#endif
+#ifdef ESPIPE
+ case ESPIPE: return "espipe";
+#endif
+#ifdef ESRCH
+ case ESRCH: return "esrch";
+#endif
+#ifdef ESRMNT
+ case ESRMNT: return "esrmnt";
+#endif
+#ifdef ESTALE
+ case ESTALE: return "estale";
+#endif
+#ifdef ESUCCESS
+ case ESUCCESS: return "esuccess";
+#endif
+#if defined(ETIME) && (!defined(ELOOP) || (ETIME != ELOOP))
+ case ETIME: return "etime";
+#endif
+#if defined(ETIMEDOUT) && (!defined(ENOSTR) || (ETIMEDOUT != ENOSTR)) && (!defined(EAGAIN) || (ETIMEDOUT != EAGAIN)) && (!defined(WSAETIMEDOUT) || (ETIMEDOUT != WSAETIMEDOUT))
+ case ETIMEDOUT: return "etimedout";
+#endif
+#ifdef ETOOMANYREFS
+ case ETOOMANYREFS: return "etoomanyrefs";
+#endif
+#ifdef ETXTBSY
+ case ETXTBSY: return "etxtbsy";
+#endif
+#ifdef EUCLEAN
+ case EUCLEAN: return "euclean";
+#endif
+#ifdef EUNATCH
+ case EUNATCH: return "eunatch";
+#endif
+#ifdef EUSERS
+ case EUSERS: return "eusers";
+#endif
+#ifdef EVERSION
+ case EVERSION: return "eversion";
+#endif
+#if defined(EWOULDBLOCK) && (!defined(EAGAIN) || (EWOULDBLOCK != EAGAIN)) && (!defined(WSAEWOULDBLOCK) || (EWOULDBLOCK != WSAEWOULDBLOCK))
+ case EWOULDBLOCK: return "ewouldblock";
+#endif
+#ifdef EXDEV
+ case EXDEV: return "exdev";
+#endif
+#ifdef EXFULL
+ case EXFULL: return "exfull";
+#endif
+#ifdef WSAEINTR
+ case WSAEINTR: return "eintr";
+#endif
+#ifdef WSAEBADF
+ case WSAEBADF: return "ebadf";
+#endif
+#ifdef WSAEACCES
+ case WSAEACCES: return "eacces";
+#endif
+#ifdef WSAEFAULT
+ case WSAEFAULT: return "efault";
+#endif
+#ifdef WSAEINVAL
+ case WSAEINVAL: return "einval";
+#endif
+#ifdef WSAEMFILE
+ case WSAEMFILE: return "emfile";
+#endif
+#ifdef WSAEWOULDBLOCK
+ case WSAEWOULDBLOCK: return "ewouldblock";
+#endif
+#ifdef WSAEINPROGRESS
+ case WSAEINPROGRESS: return "einprogress";
+#endif
+#ifdef WSAEALREADY
+ case WSAEALREADY: return "ealready";
+#endif
+#ifdef WSAENOTSOCK
+ case WSAENOTSOCK: return "enotsock";
+#endif
+#ifdef WSAEDESTADDRREQ
+ case WSAEDESTADDRREQ: return "edestaddrreq";
+#endif
+#ifdef WSAEMSGSIZE
+ case WSAEMSGSIZE: return "emsgsize";
+#endif
+#ifdef WSAEPROTOTYPE
+ case WSAEPROTOTYPE: return "eprototype";
+#endif
+#ifdef WSAENOPROTOOPT
+ case WSAENOPROTOOPT: return "enoprotoopt";
+#endif
+#ifdef WSAEPROTONOSUPPORT
+ case WSAEPROTONOSUPPORT: return "eprotonosupport";
+#endif
+#ifdef WSAESOCKTNOSUPPORT
+ case WSAESOCKTNOSUPPORT: return "esocktnosupport";
+#endif
+#ifdef WSAEOPNOTSUPP
+ case WSAEOPNOTSUPP: return "eopnotsupp";
+#endif
+#ifdef WSAEPFNOSUPPORT
+ case WSAEPFNOSUPPORT: return "epfnosupport";
+#endif
+#ifdef WSAEAFNOSUPPORT
+ case WSAEAFNOSUPPORT: return "eafnosupport";
+#endif
+#ifdef WSAEADDRINUSE
+ case WSAEADDRINUSE: return "eaddrinuse";
+#endif
+#ifdef WSAEADDRNOTAVAIL
+ case WSAEADDRNOTAVAIL: return "eaddrnotavail";
+#endif
+#ifdef WSAENETDOWN
+ case WSAENETDOWN: return "enetdown";
+#endif
+#ifdef WSAENETUNREACH
+ case WSAENETUNREACH: return "enetunreach";
+#endif
+#ifdef WSAENETRESET
+ case WSAENETRESET: return "enetreset";
+#endif
+#ifdef WSAECONNABORTED
+ case WSAECONNABORTED: return "econnaborted";
+#endif
+#ifdef WSAECONNRESET
+ case WSAECONNRESET: return "econnreset";
+#endif
+#ifdef WSAENOBUFS
+ case WSAENOBUFS: return "enobufs";
+#endif
+#ifdef WSAEISCONN
+ case WSAEISCONN: return "eisconn";
+#endif
+#ifdef WSAENOTCONN
+ case WSAENOTCONN: return "enotconn";
+#endif
+#ifdef WSAESHUTDOWN
+ case WSAESHUTDOWN: return "eshutdown";
+#endif
+#ifdef WSAETOOMANYREFS
+ case WSAETOOMANYREFS: return "etoomanyrefs";
+#endif
+#ifdef WSAETIMEDOUT
+ case WSAETIMEDOUT: return "etimedout";
+#endif
+#ifdef WSAECONNREFUSED
+ case WSAECONNREFUSED: return "econnrefused";
+#endif
+#ifdef WSAELOOP
+ case WSAELOOP: return "eloop";
+#endif
+#ifdef WSAENAMETOOLONG
+ case WSAENAMETOOLONG: return "enametoolong";
+#endif
+#ifdef WSAEHOSTDOWN
+ case WSAEHOSTDOWN: return "ehostdown";
+#endif
+#ifdef WSAEHOSTUNREACH
+ case WSAEHOSTUNREACH: return "ehostunreach";
+#endif
+#ifdef WSAENOTEMPTY
+ case WSAENOTEMPTY: return "enotempty";
+#endif
+#ifdef WSAEPROCLIM
+ case WSAEPROCLIM: return "eproclim";
+#endif
+#ifdef WSAEUSERS
+ case WSAEUSERS: return "eusers";
+#endif
+#ifdef WSAEDQUOT
+ case WSAEDQUOT: return "edquot";
+#endif
+#ifdef WSAESTALE
+ case WSAESTALE: return "estale";
+#endif
+#ifdef WSAEREMOTE
+ case WSAEREMOTE: return "eremote";
+#endif
+#ifdef WSASYSNOTREADY
+ case WSASYSNOTREADY: return "sysnotready";
+#endif
+#ifdef WSAVERNOTSUPPORTED
+ case WSAVERNOTSUPPORTED: return "vernotsupported";
+#endif
+#ifdef WSANOTINITIALISED
+ case WSANOTINITIALISED: return "notinitialised";
+#endif
+#ifdef WSAEDISCON
+ case WSAEDISCON: return "ediscon";
+#endif
+#ifdef WSAENOMORE
+ case WSAENOMORE: return "enomore";
+#endif
+#ifdef WSAECANCELLED
+ case WSAECANCELLED: return "ecancelled";
+#endif
+#ifdef WSAEINVALIDPROCTABLE
+ case WSAEINVALIDPROCTABLE: return "einvalidproctable";
+#endif
+#ifdef WSAEINVALIDPROVIDER
+ case WSAEINVALIDPROVIDER: return "einvalidprovider";
+#endif
+#ifdef WSAEPROVIDERFAILEDINIT
+ case WSAEPROVIDERFAILEDINIT: return "eproviderfailedinit";
+#endif
+#ifdef WSASYSCALLFAILURE
+ case WSASYSCALLFAILURE: return "syscallfailure";
+#endif
+#ifdef WSASERVICE_NOT_FOUND
+ case WSASERVICE_NOT_FOUND: return "service_not_found";
+#endif
+#ifdef WSATYPE_NOT_FOUND
+ case WSATYPE_NOT_FOUND: return "type_not_found";
+#endif
+#ifdef WSA_E_NO_MORE
+ case WSA_E_NO_MORE: return "e_no_more";
+#endif
+#ifdef WSA_E_CANCELLED
+ case WSA_E_CANCELLED: return "e_cancelled";
+#endif
+ }
+ return "unknown";
+}
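+
+/* Usage sketch (illustrative): the returned string is directly usable
+ * as an Erlang atom name. For example, after a failed connect(),
+ * erl_errno_id(errno) yields "econnrefused" when errno == ECONNREFUSED,
+ * and "unknown" for values with no known identifier.
+ */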
diff --git a/erts/emulator/beam/erl_printf_term.c b/erts/emulator/beam/erl_printf_term.c
new file mode 100644
index 0000000000..7fe3f3bca5
--- /dev/null
+++ b/erts/emulator/beam/erl_printf_term.c
@@ -0,0 +1,458 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2005-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+
+#include "erl_printf_term.h"
+#include "sys.h"
+#include "big.h"
+
+#define PRINT_CHAR(CNT, FN, ARG, C) \
+do { \
+ int res__ = erts_printf_char((FN), (ARG), (C)); \
+ if (res__ < 0) \
+ return res__; \
+ (CNT) += res__; \
+} while (0)
+
+#define PRINT_STRING(CNT, FN, ARG, STR) \
+do { \
+ int res__ = erts_printf_string((FN), (ARG), (STR)); \
+ if (res__ < 0) \
+ return res__; \
+ (CNT) += res__; \
+} while (0)
+
+#define PRINT_BUF(CNT, FN, ARG, BUF, LEN) \
+do { \
+ int res__ = erts_printf_buf((FN), (ARG), (char*)(BUF), (LEN)); \
+ if (res__ < 0) \
+ return res__; \
+ (CNT) += res__; \
+} while (0)
+
+#define PRINT_POINTER(CNT, FN, ARG, PTR) \
+do { \
+ int res__ = erts_printf_pointer((FN), (ARG), (void *) (PTR)); \
+ if (res__ < 0) \
+ return res__; \
+ (CNT) += res__; \
+} while (0)
+
+#define PRINT_ULONG(CNT, FN, ARG, C, P, W, I) \
+do { \
+ int res__ = erts_printf_ulong((FN), (ARG), (C), (P), (W), (I)); \
+ if (res__ < 0) \
+ return res__; \
+ (CNT) += res__; \
+} while (0)
+
+#define PRINT_SLONG(CNT, FN, ARG, C, P, W, I) \
+do { \
+ int res__ = erts_printf_slong((FN), (ARG), (C), (P), (W), (I)); \
+ if (res__ < 0) \
+ return res__; \
+ (CNT) += res__; \
+} while (0)
+
+#define PRINT_DOUBLE(CNT, FN, ARG, C, P, W, I) \
+do { \
+ int res__ = erts_printf_double((FN), (ARG), (C), (P), (W), (I)); \
+ if (res__ < 0) \
+ return res__; \
+ (CNT) += res__; \
+} while (0)
+
+/* CTYPE macros */
+
+#define LATIN1
+
+#define IS_DIGIT(c) ((c) >= '0' && (c) <= '9')
+#ifdef LATIN1
+#define IS_LOWER(c) (((c) >= 'a' && (c) <= 'z') \
+ || ((c) >= 128+95 && (c) <= 255 && (c) != 247))
+#define IS_UPPER(c) (((c) >= 'A' && (c) <= 'Z') \
+ || ((c) >= 128+64 && (c) <= 128+94 && (c) != 247-32))
+#else
+#define IS_LOWER(c) ((c) >= 'a' && (c) <= 'z')
+#define IS_UPPER(c) ((c) >= 'A' && (c) <= 'Z')
+#endif
+
+#define IS_ALNUM(c) (IS_DIGIT(c) || IS_LOWER(c) || IS_UPPER(c))
+
+/* We don't include 160 (non-breaking space). */
+#define IS_SPACE(c) (c == ' ' || c == '\n' || c == '\t' || c == '\r')
+
+#ifdef LATIN1
+#define IS_CNTRL(c) ((c) < ' ' || (c) == 127 \
+ || ((c) >= 128 && (c) < 128+32))
+#else
+/* Treat all non-ASCII as control characters */
+#define IS_CNTRL(c) ((c) < ' ' || (c) >= 127)
+#endif
+
+#define IS_PRINT(c) (!IS_CNTRL(c))
+
+/* Return the length of 'list' if it is a non-empty flat list of
+ * printable characters; otherwise return 0. */
+
+static int
+is_printable_string(Eterm list)
+{
+ int len = 0;
+ int c;
+
+ while(is_list(list)) {
+ Eterm* consp = list_val(list);
+ Eterm hd = CAR(consp);
+
+ if (!is_byte(hd))
+ return 0;
+ c = signed_val(hd);
+ /* IS_PRINT || IS_SPACE would be another way to put it */
+ if (IS_CNTRL(c) && !IS_SPACE(c))
+ return 0;
+ len++;
+ list = CDR(consp);
+ }
+ if (is_nil(list))
+ return len;
+ return 0;
+}
+
+/* Print an atom, adding whatever quoting is necessary. */
+static int print_atom_name(fmtfn_t fn, void* arg, Eterm atom, long *dcount)
+{
+ int n, i;
+ int res;
+ int need_quote;
+ int pos;
+ byte *s;
+ byte *cpos;
+ int c;
+
+ res = 0;
+ i = atom_val(atom);
+
+ if ((i < 0) || (i >= atom_table_size()) || (atom_tab(i) == NULL)) {
+ PRINT_STRING(res, fn, arg, "<bad atom index: ");
+ PRINT_SLONG(res, fn, arg, 'd', 0, 1, (signed long) i);
+ PRINT_CHAR(res, fn, arg, '>');
+ return res;
+ }
+
+ s = atom_tab(i)->name;
+ n = atom_tab(i)->len;
+
+ *dcount -= atom_tab(i)->len;
+
+ if (n == 0) {
+ PRINT_STRING(res, fn, arg, "''");
+ return res;
+ }
+
+
+ need_quote = 0;
+ cpos = s;
+ pos = n - 1;
+
+ c = *cpos++;
+ if (!IS_LOWER(c))
+ need_quote++;
+ else {
+ while (pos--) {
+ c = *cpos++;
+ if (!IS_ALNUM(c) && (c != '_')) {
+ need_quote++;
+ break;
+ }
+ }
+ }
+ cpos = s;
+ pos = n;
+ if (need_quote)
+ PRINT_CHAR(res, fn, arg, '\'');
+ while(pos--) {
+ c = *cpos++;
+ switch(c) {
+ case '\'': PRINT_STRING(res, fn, arg, "\\'"); break;
+ case '\\': PRINT_STRING(res, fn, arg, "\\\\"); break;
+ case '\n': PRINT_STRING(res, fn, arg, "\\n"); break;
+ case '\f': PRINT_STRING(res, fn, arg, "\\f"); break;
+ case '\t': PRINT_STRING(res, fn, arg, "\\t"); break;
+ case '\r': PRINT_STRING(res, fn, arg, "\\r"); break;
+ case '\b': PRINT_STRING(res, fn, arg, "\\b"); break;
+ case '\v': PRINT_STRING(res, fn, arg, "\\v"); break;
+ default:
+ if (IS_CNTRL(c)) {
+ PRINT_CHAR(res, fn, arg, '\\');
+ PRINT_ULONG(res, fn, arg, 'o', 1, 3, (unsigned long) c);
+ }
+ else
+ PRINT_CHAR(res, fn, arg, (char) c);
+ break;
+ }
+ }
+ if (need_quote)
+ PRINT_CHAR(res, fn, arg, '\'');
+ return res;
+}
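+/* Examples (illustrative): ok prints as ok, an atom containing a space
+ * prints quoted as in 'hello world', and a control character is printed
+ * as an octal escape such as '\001'. */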
+
+
+
+static int
+print_term(fmtfn_t fn, void* arg, Eterm obj, long *dcount)
+{
+ int res;
+ int i;
+ Uint32 *ref_num;
+ Eterm* nobj;
+
+ res = 0;
+
+ if ((*dcount)-- <= 0)
+ return res;
+
+#ifdef HYBRID___NOT_ACTIVE
+ /* Color coded output based on memory location */
+ if(ptr_val(obj) >= global_heap && ptr_val(obj) < global_hend)
+ PRINT_STRING(res, fn, arg, "\033[32m");
+#ifdef INCREMENTAL
+ else if(ptr_val(obj) >= inc_fromspc && ptr_val(obj) < inc_fromend)
+ PRINT_STRING(res, fn, arg, "\033[33m");
+#endif
+ else if(IS_CONST(obj))
+ PRINT_STRING(res, fn, arg, "\033[34m");
+ else
+ PRINT_STRING(res, fn, arg, "\033[31m");
+#endif
+
+ if (is_CP(obj)) {
+ PRINT_STRING(res, fn, arg, "<cp/header:");
+ PRINT_POINTER(res, fn, arg, obj);
+ PRINT_CHAR(res, fn, arg, '>');
+ return res;
+ }
+
+ switch (tag_val_def(obj)) {
+ case NIL_DEF:
+ PRINT_STRING(res, fn, arg, "[]");
+ break;
+ case ATOM_DEF: {
+ int tres = print_atom_name(fn, arg, obj, dcount);
+ if (tres < 0)
+ return tres;
+ res += tres;
+ if (*dcount <= 0)
+ return res;
+ break;
+ }
+ case SMALL_DEF:
+ PRINT_SLONG(res, fn, arg, 'd', 0, 1, (signed long) signed_val(obj));
+ break;
+ case BIG_DEF: {
+ int print_res;
+ char def_buf[64];
+ char *buf, *big_str;
+ Uint sz = (Uint) big_decimal_estimate(obj);
+ sz++;
+ if (sz <= 64)
+ buf = &def_buf[0];
+ else
+ buf = erts_alloc(ERTS_ALC_T_TMP, sz);
+ big_str = erts_big_to_string(obj, buf, sz);
+ print_res = erts_printf_string(fn, arg, big_str);
+ if (buf != &def_buf[0])
+ erts_free(ERTS_ALC_T_TMP, (void *) buf);
+ if (print_res < 0)
+ return print_res;
+ res += print_res;
+ break;
+ }
+ case REF_DEF:
+ case EXTERNAL_REF_DEF:
+ PRINT_STRING(res, fn, arg, "#Ref<");
+ PRINT_ULONG(res, fn, arg, 'u', 0, 1,
+ (unsigned long) ref_channel_no(obj));
+ ref_num = ref_numbers(obj);
+ for (i = ref_no_of_numbers(obj)-1; i >= 0; i--) {
+ PRINT_CHAR(res, fn, arg, '.');
+ PRINT_ULONG(res, fn, arg, 'u', 0, 1, (unsigned long) ref_num[i]);
+ }
+ PRINT_CHAR(res, fn, arg, '>');
+ break;
+ case PID_DEF:
+ case EXTERNAL_PID_DEF:
+ PRINT_CHAR(res, fn, arg, '<');
+ PRINT_ULONG(res, fn, arg, 'u', 0, 1,
+ (unsigned long) pid_channel_no(obj));
+ PRINT_CHAR(res, fn, arg, '.');
+ PRINT_ULONG(res, fn, arg, 'u', 0, 1,
+ (unsigned long) pid_number(obj));
+ PRINT_CHAR(res, fn, arg, '.');
+ PRINT_ULONG(res, fn, arg, 'u', 0, 1,
+ (unsigned long) pid_serial(obj));
+ PRINT_CHAR(res, fn, arg, '>');
+ break;
+ case PORT_DEF:
+ case EXTERNAL_PORT_DEF:
+ PRINT_STRING(res, fn, arg, "#Port<");
+ PRINT_ULONG(res, fn, arg, 'u', 0, 1,
+ (unsigned long) port_channel_no(obj));
+ PRINT_CHAR(res, fn, arg, '.');
+ PRINT_ULONG(res, fn, arg, 'u', 0, 1,
+ (unsigned long) port_number(obj));
+ PRINT_CHAR(res, fn, arg, '>');
+ break;
+ case LIST_DEF:
+ if (is_printable_string(obj)) {
+ int c;
+ PRINT_CHAR(res, fn, arg, '"');
+ nobj = list_val(obj);
+ while (1) {
+ if ((*dcount)-- <= 0)
+ return res;
+ c = signed_val(*nobj++);
+ if (c == '\n')
+ PRINT_STRING(res, fn, arg, "\\n");
+ else {
+ if (c == '"')
+ PRINT_CHAR(res, fn, arg, '\\');
+ PRINT_CHAR(res, fn, arg, (char) c);
+ }
+ if (is_not_list(*nobj))
+ break;
+ nobj = list_val(*nobj);
+ }
+ PRINT_CHAR(res, fn, arg, '"');
+ } else {
+ PRINT_CHAR(res, fn, arg, '[');
+ nobj = list_val(obj);
+ while (1) {
+ int tres = print_term(fn, arg, *nobj++, dcount);
+ if (tres < 0)
+ return tres;
+ res += tres;
+ if (*dcount <= 0)
+ return res;
+ if (is_not_list(*nobj))
+ break;
+ PRINT_CHAR(res, fn, arg, ',');
+ nobj = list_val(*nobj);
+ }
+ if (is_not_nil(*nobj)) {
+ int tres;
+ PRINT_CHAR(res, fn, arg, '|');
+ tres = print_term(fn, arg, *nobj, dcount);
+ if (tres < 0)
+ return tres;
+ res += tres;
+ if (*dcount <= 0)
+ return res;
+ }
+ PRINT_CHAR(res, fn, arg, ']');
+ }
+ break;
+ case TUPLE_DEF:
+ nobj = tuple_val(obj); /* pointer to arity */
+ i = arityval(*nobj); /* arity */
+ PRINT_CHAR(res, fn, arg, '{');
+ while (i--) {
+ int tres = print_term(fn, arg, *++nobj, dcount);
+ if (tres < 0)
+ return tres;
+ res += tres;
+ if (*dcount <= 0)
+ return res;
+ if (i >= 1)
+ PRINT_CHAR(res, fn, arg, ',');
+ }
+ PRINT_CHAR(res, fn, arg, '}');
+ break;
+ case FLOAT_DEF: {
+ FloatDef ff;
+ GET_DOUBLE(obj, ff);
+ PRINT_DOUBLE(res, fn, arg, 'e', 6, 0, ff.fd);
+ }
+ break;
+ case BINARY_DEF:
+ {
+ ProcBin* pb = (ProcBin *) binary_val(obj);
+ if (pb->size == 1)
+ PRINT_STRING(res, fn, arg, "<<1 byte>>");
+ else {
+ PRINT_STRING(res, fn, arg, "<<");
+ PRINT_ULONG(res, fn, arg, 'u', 0, 1, (unsigned long) pb->size);
+ PRINT_STRING(res, fn, arg, " bytes>>");
+ }
+ }
+ break;
+ case EXPORT_DEF:
+ {
+ Export* ep = (Export *) (export_val(obj))[1];
+ Atom* module = atom_tab(atom_val(ep->code[0]));
+ Atom* name = atom_tab(atom_val(ep->code[1]));
+
+ PRINT_STRING(res, fn, arg, "#Fun<");
+ PRINT_BUF(res, fn, arg, module->name, module->len);
+ PRINT_CHAR(res, fn, arg, '.');
+ PRINT_BUF(res, fn, arg, name->name, name->len);
+ PRINT_CHAR(res, fn, arg, '.');
+ PRINT_SLONG(res, fn, arg, 'd', 0, 1,
+ (signed long) ep->code[2]);
+ PRINT_CHAR(res, fn, arg, '>');
+ }
+ break;
+ case FUN_DEF:
+ {
+ ErlFunThing *funp = (ErlFunThing *) fun_val(obj);
+ Atom *ap = atom_tab(atom_val(funp->fe->module));
+
+ PRINT_STRING(res, fn, arg, "#Fun<");
+ PRINT_BUF(res, fn, arg, ap->name, ap->len);
+ PRINT_CHAR(res, fn, arg, '.');
+ PRINT_SLONG(res, fn, arg, 'd', 0, 1,
+ (signed long) funp->fe->old_index);
+ PRINT_CHAR(res, fn, arg, '.');
+ PRINT_SLONG(res, fn, arg, 'd', 0, 1,
+ (signed long) funp->fe->old_uniq);
+ PRINT_CHAR(res, fn, arg, '>');
+ }
+ break;
+ default:
+ PRINT_STRING(res, fn, arg, "<unknown:");
+ PRINT_POINTER(res, fn, arg, obj);
+ PRINT_CHAR(res, fn, arg, '>');
+ break;
+ }
+
+ return res;
+}
+
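+/*
+ * Entry point used by the erts_printf() framework for printing Erlang
+ * terms; 'precision' bounds the amount of output generated.
+ */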
+int
+erts_printf_term(fmtfn_t fn, void* arg, unsigned long term, long precision)
+{
+ int res = print_term(fn, arg, (Uint) term, &precision);
+ if (res < 0)
+ return res;
+ if (precision <= 0)
+ PRINT_STRING(res, fn, arg, "... ");
+ return res;
+}
diff --git a/erts/emulator/beam/erl_printf_term.h b/erts/emulator/beam/erl_printf_term.h
new file mode 100644
index 0000000000..4f76028396
--- /dev/null
+++ b/erts/emulator/beam/erl_printf_term.h
@@ -0,0 +1,26 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2005-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+#ifndef ERL_PRINTF_TERM_H__
+#define ERL_PRINTF_TERM_H__
+
+#include "erl_printf_format.h"
+int erts_printf_term(fmtfn_t fn, void* arg, unsigned long term, long precision);
+
+#endif
diff --git a/erts/emulator/beam/erl_process.c b/erts/emulator/beam/erl_process.c
new file mode 100644
index 0000000000..9960172366
--- /dev/null
+++ b/erts/emulator/beam/erl_process.c
@@ -0,0 +1,9469 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 1996-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+#define ERL_PROCESS_C__
+
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+
+#include <stddef.h> /* offsetof() */
+#include <ctype.h>
+#include "sys.h"
+#include "erl_vm.h"
+#include "global.h"
+#include "erl_process.h"
+#include "erl_nmgc.h"
+#include "error.h"
+#include "bif.h"
+#include "erl_db.h"
+#include "dist.h"
+#include "beam_catches.h"
+#include "erl_instrument.h"
+#include "erl_threads.h"
+#include "erl_binary.h"
+
+#define ERTS_RUNQ_CHECK_BALANCE_REDS_PER_SCHED (2000*CONTEXT_REDS)
+#define ERTS_RUNQ_CALL_CHECK_BALANCE_REDS \
+ (ERTS_RUNQ_CHECK_BALANCE_REDS_PER_SCHED/2)
+
+#define ERTS_PROC_MIN_CONTEXT_SWITCH_REDS_COST (CONTEXT_REDS/10)
+
+#define ERTS_SCHED_SLEEP_SPINCOUNT 10000
+
+#define ERTS_WAKEUP_OTHER_LIMIT (100*CONTEXT_REDS/2)
+#define ERTS_WAKEUP_OTHER_DEC 10
+#define ERTS_WAKEUP_OTHER_FIXED_INC (CONTEXT_REDS/10)
+
+#define ERTS_MAX_CPU_TOPOLOGY_ID ((int) 0xffff)
+
+#if 0 || defined(DEBUG)
+#define ERTS_FAKE_SCHED_BIND_PRINT_SORTED_CPU_DATA
+#endif
+
+#if defined(DEBUG) && 0
+#define HARDDEBUG
+#else
+#undef HARDDEBUG
+#endif
+
+#ifdef HARDDEBUG
+#define HARDDEBUG_RUNQS
+#endif
+
+#ifdef HIPE
+#include "hipe_mode_switch.h" /* for hipe_init_process() */
+#include "hipe_signal.h" /* for hipe_thread_signal_init() */
+#endif
+
+#ifdef ERTS_ENABLE_LOCK_COUNT
+#include "erl_lock_count.h"
+#endif
+
+#define MAX_BIT (1 << PRIORITY_MAX)
+#define HIGH_BIT (1 << PRIORITY_HIGH)
+#define NORMAL_BIT (1 << PRIORITY_NORMAL)
+#define LOW_BIT (1 << PRIORITY_LOW)
+
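+/*
+ * Record a terminating process in saved_term_procs, but only while a
+ * traversal of the process table is in progress (i.e., while
+ * saved_term_procs has an end marker).
+ */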
+#define ERTS_MAYBE_SAVE_TERMINATING_PROCESS(P) \
+do { \
+ ERTS_SMP_LC_ASSERT(erts_lc_mtx_is_locked(&proc_tab_mtx)); \
+ if (saved_term_procs.end) \
+ save_terminating_process((P)); \
+} while (0)
+
+#define ERTS_EMPTY_RUNQ(RQ) \
+ ((RQ)->len == 0 && (RQ)->misc.start == NULL)
+
+extern Eterm beam_apply[];
+extern Eterm beam_exit[];
+extern Eterm beam_continue_exit[];
+
+static Sint p_last;
+static Sint p_next;
+static Sint p_serial;
+static Uint p_serial_mask;
+static Uint p_serial_shift;
+
+Uint erts_no_schedulers;
+Uint erts_max_processes = ERTS_DEFAULT_MAX_PROCESSES;
+Uint erts_process_tab_index_mask;
+
+int erts_sched_thread_suggested_stack_size = -1;
+
+#ifdef ERTS_ENABLE_LOCK_CHECK
+ErtsLcPSDLocks erts_psd_required_locks[ERTS_PSD_SIZE];
+#endif
+
+#ifdef ERTS_SMP
+
+int erts_disable_proc_not_running_opt;
+
+#define ERTS_SCHED_CHANGING_ONLINE 1
+#define ERTS_SCHED_CHANGING_MULTI_SCHED 2
+
+static struct {
+ erts_smp_mtx_t mtx;
+ erts_smp_cnd_t cnd;
+ int changing;
+ int online;
+ int curr_online;
+ int wait_curr_online;
+ erts_smp_atomic_t active;
+ struct {
+ erts_smp_atomic_t ongoing;
+ long wait_active;
+ ErtsProcList *procs;
+ } msb; /* Multi Scheduling Block */
+} schdlr_sspnd;
+
+static struct {
+ erts_smp_mtx_t update_mtx;
+ erts_smp_atomic_t active_runqs;
+ int last_active_runqs;
+ erts_smp_atomic_t used_runqs;
+ int forced_check_balance;
+ erts_smp_atomic_t checking_balance;
+ int halftime;
+ int full_reds_history_index;
+ struct {
+ int active_runqs;
+ int reds;
+ int max_len;
+ } prev_rise;
+ Uint n;
+} balance_info;
+
+#define ERTS_BLNCE_SAVE_RISE(ACTIVE, MAX_LEN, REDS) \
+do { \
+ balance_info.prev_rise.active_runqs = (ACTIVE); \
+ balance_info.prev_rise.max_len = (MAX_LEN); \
+ balance_info.prev_rise.reds = (REDS); \
+} while (0)
+
+#endif
+
+/*
+ * Cpu topology hierarchy.
+ */
+#define ERTS_TOPOLOGY_NODE 0
+#define ERTS_TOPOLOGY_PROCESSOR 1
+#define ERTS_TOPOLOGY_PROCESSOR_NODE 2
+#define ERTS_TOPOLOGY_CORE 3
+#define ERTS_TOPOLOGY_THREAD 4
+#define ERTS_TOPOLOGY_LOGICAL 5
+
+#define ERTS_TOPOLOGY_MAX_DEPTH 6
+
+typedef struct {
+ int bind_id;
+ int bound_id;
+} ErtsCpuBindData;
+
+static ErtsCpuBindData *scheduler2cpu_map;
+erts_smp_rwmtx_t erts_cpu_bind_rwmtx;
+
+typedef enum {
+ ERTS_CPU_BIND_SPREAD,
+ ERTS_CPU_BIND_PROCESSOR_SPREAD,
+ ERTS_CPU_BIND_THREAD_SPREAD,
+ ERTS_CPU_BIND_THREAD_NO_NODE_PROCESSOR_SPREAD,
+ ERTS_CPU_BIND_NO_NODE_PROCESSOR_SPREAD,
+ ERTS_CPU_BIND_NO_NODE_THREAD_SPREAD,
+ ERTS_CPU_BIND_NO_SPREAD,
+ ERTS_CPU_BIND_NONE
+} ErtsCpuBindOrder;
+
+ErtsCpuBindOrder cpu_bind_order;
+
+static erts_cpu_topology_t *user_cpudata;
+static int user_cpudata_size;
+static erts_cpu_topology_t *system_cpudata;
+static int system_cpudata_size;
+
+erts_sched_stat_t erts_sched_stat;
+
+ErtsRunQueue *erts_common_run_queue;
+
+#ifdef USE_THREADS
+static erts_tsd_key_t sched_data_key;
+#endif
+
+static erts_smp_mtx_t proc_tab_mtx;
+
+static erts_smp_atomic_t function_calls;
+
+#ifdef ERTS_SMP
+static erts_smp_atomic_t doing_sys_schedule;
+static erts_smp_atomic_t no_empty_run_queues;
+#else /* !ERTS_SMP */
+ErtsSchedulerData *erts_scheduler_data;
+#endif
+
+ErtsAlignedRunQueue *erts_aligned_run_queues;
+Uint erts_no_run_queues;
+
+typedef struct {
+ ErtsSchedulerData esd;
+ char align[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(ErtsSchedulerData))];
+} ErtsAlignedSchedulerData;
+
+ErtsAlignedSchedulerData *erts_aligned_scheduler_data;
+
+#ifndef BM_COUNTERS
+static int processes_busy;
+#endif
+
+Process** process_tab;
+static Uint last_reductions;
+static Uint last_exact_reductions;
+Uint erts_default_process_flags;
+Eterm erts_system_monitor;
+Eterm erts_system_monitor_msg_queue_len;
+Eterm erts_system_monitor_long_gc;
+Eterm erts_system_monitor_large_heap;
+struct erts_system_monitor_flags_t erts_system_monitor_flags;
+
+/* system performance monitor */
+Eterm erts_system_profile;
+struct erts_system_profile_flags_t erts_system_profile_flags;
+
+#ifdef HYBRID
+Uint erts_num_active_procs;
+Process** erts_active_procs;
+#endif
+
+static erts_smp_atomic_t process_count;
+
+typedef struct ErtsTermProcElement_ ErtsTermProcElement;
+struct ErtsTermProcElement_ {
+ ErtsTermProcElement *next;
+ ErtsTermProcElement *prev;
+ int ix;
+ union {
+ struct {
+ Eterm pid;
+ SysTimeval spawned;
+ SysTimeval exited;
+ } process;
+ struct {
+ SysTimeval time;
+ } bif_invocation;
+ } u;
+};
+
+static struct {
+ ErtsTermProcElement *start;
+ ErtsTermProcElement *end;
+} saved_term_procs;
+
+ERTS_SCHED_PREF_QUICK_ALLOC_IMPL(misc_op_list,
+ ErtsMiscOpList,
+ 10,
+ ERTS_ALC_T_MISC_OP_LIST)
+
+ERTS_SCHED_PREF_QUICK_ALLOC_IMPL(proclist,
+ ErtsProcList,
+ 200,
+ ERTS_ALC_T_PROC_LIST)
+
+#define ERTS_RUNQ_IX(IX) (&erts_aligned_run_queues[(IX)].runq)
+#define ERTS_SCHEDULER_IX(IX) (&erts_aligned_scheduler_data[(IX)].esd)
+
+#define ERTS_FOREACH_RUNQ(RQVAR, DO) \
+do { \
+ ErtsRunQueue *RQVAR; \
+ int ix__; \
+ for (ix__ = 0; ix__ < erts_no_run_queues; ix__++) { \
+ RQVAR = ERTS_RUNQ_IX(ix__); \
+ erts_smp_runq_lock(RQVAR); \
+ { DO; } \
+ erts_smp_runq_unlock(RQVAR); \
+ } \
+} while (0)
+
+#define ERTS_FOREACH_OP_RUNQ(RQVAR, DO) \
+do { \
+ ErtsRunQueue *RQVAR; \
+ int ix__; \
+ ERTS_SMP_LC_ASSERT(erts_smp_lc_mtx_is_locked(&schdlr_sspnd.mtx)); \
+ for (ix__ = 0; ix__ < schdlr_sspnd.online; ix__++) { \
+ RQVAR = ERTS_RUNQ_IX(ix__); \
+ erts_smp_runq_lock(RQVAR); \
+ { DO; } \
+ erts_smp_runq_unlock(RQVAR); \
+ } \
+} while (0)
+
+#define ERTS_ATOMIC_FOREACH_RUNQ_X(RQVAR, DO, DOX) \
+do { \
+ ErtsRunQueue *RQVAR; \
+ int ix__; \
+ for (ix__ = 0; ix__ < erts_no_run_queues; ix__++) { \
+ RQVAR = ERTS_RUNQ_IX(ix__); \
+ erts_smp_runq_lock(RQVAR); \
+ { DO; } \
+ } \
+ { DOX; } \
+ for (ix__ = 0; ix__ < erts_no_run_queues; ix__++) \
+ erts_smp_runq_unlock(ERTS_RUNQ_IX(ix__)); \
+} while (0)
+
+#define ERTS_ATOMIC_FOREACH_RUNQ(RQVAR, DO) \
+ ERTS_ATOMIC_FOREACH_RUNQ_X(RQVAR, DO, )
+/*
+ * Local functions.
+ */
+
+static void init_processes_bif(void);
+static void save_terminating_process(Process *p);
+static void exec_misc_ops(ErtsRunQueue *);
+static void print_function_from_pc(int to, void *to_arg, Eterm* x);
+static int stack_element_dump(int to, void *to_arg, Process* p, Eterm* sp,
+ int yreg);
+#ifdef ERTS_SMP
+static void handle_pending_exiters(ErtsProcList *);
+
+static void cpu_bind_order_sort(erts_cpu_topology_t *cpudata,
+ int size,
+ ErtsCpuBindOrder bind_order,
+ int mk_seq);
+static void signal_schedulers_bind_change(erts_cpu_topology_t *cpudata, int size);
+
+#endif
+
+static void early_cpu_bind_init(void);
+static void late_cpu_bind_init(void);
+
+#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK)
+int
+erts_smp_lc_runq_is_locked(ErtsRunQueue *runq)
+{
+ return erts_smp_lc_mtx_is_locked(&runq->mtx);
+}
+#endif
+
+void
+erts_pre_init_process(void)
+{
+#ifdef USE_THREADS
+ erts_tsd_key_create(&sched_data_key);
+#endif
+
+#ifdef ERTS_ENABLE_LOCK_CHECK
+ {
+ int ix;
+
+ erts_psd_required_locks[ERTS_PSD_ERROR_HANDLER].get_locks
+ = ERTS_PSD_ERROR_HANDLER_BUF_GET_LOCKS;
+ erts_psd_required_locks[ERTS_PSD_ERROR_HANDLER].set_locks
+ = ERTS_PSD_ERROR_HANDLER_BUF_SET_LOCKS;
+
+ erts_psd_required_locks[ERTS_PSD_SAVED_CALLS_BUF].get_locks
+ = ERTS_PSD_SAVED_CALLS_BUF_GET_LOCKS;
+ erts_psd_required_locks[ERTS_PSD_SAVED_CALLS_BUF].set_locks
+ = ERTS_PSD_SAVED_CALLS_BUF_SET_LOCKS;
+
+ erts_psd_required_locks[ERTS_PSD_SCHED_ID].get_locks
+ = ERTS_PSD_SCHED_ID_GET_LOCKS;
+ erts_psd_required_locks[ERTS_PSD_SCHED_ID].set_locks
+ = ERTS_PSD_SCHED_ID_SET_LOCKS;
+
+ erts_psd_required_locks[ERTS_PSD_DIST_ENTRY].get_locks
+ = ERTS_PSD_DIST_ENTRY_GET_LOCKS;
+ erts_psd_required_locks[ERTS_PSD_DIST_ENTRY].set_locks
+ = ERTS_PSD_DIST_ENTRY_SET_LOCKS;
+
+ /* Check that we have locks for all entries */
+ for (ix = 0; ix < ERTS_PSD_SIZE; ix++) {
+ ERTS_SMP_LC_ASSERT(erts_psd_required_locks[ix].get_locks);
+ ERTS_SMP_LC_ASSERT(erts_psd_required_locks[ix].set_locks);
+ }
+ }
+#endif
+}
+
+/* initialize the scheduler */
+void
+erts_init_process(void)
+{
+ Uint proc_bits = ERTS_PROC_BITS;
+
+#ifdef ERTS_SMP
+ erts_disable_proc_not_running_opt = 0;
+ erts_init_proc_lock();
+#endif
+
+ init_proclist_alloc();
+
+ erts_smp_atomic_init(&process_count, 0);
+
+ if (erts_use_r9_pids_ports) {
+ proc_bits = ERTS_R9_PROC_BITS;
+ ASSERT(erts_max_processes <= (1 << ERTS_R9_PROC_BITS));
+ }
+
+ process_tab = (Process**) erts_alloc(ERTS_ALC_T_PROC_TABLE,
+ erts_max_processes*sizeof(Process*));
+ sys_memzero(process_tab, erts_max_processes * sizeof(Process*));
+#ifdef HYBRID
+ erts_active_procs = (Process**)
+ erts_alloc(ERTS_ALC_T_ACTIVE_PROCS,
+ erts_max_processes * sizeof(Process*));
+ erts_num_active_procs = 0;
+#endif
+
+ erts_smp_mtx_init(&proc_tab_mtx, "proc_tab");
+ p_last = -1;
+ p_next = 0;
+ p_serial = 0;
+
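+ /*
+ * A pid is composed of an index into process_tab (the low
+ * p_serial_shift bits) and a serial number (the remaining bits up
+ * to proc_bits) that is bumped each time a table slot is reused.
+ */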
+ p_serial_shift = erts_fit_in_bits(erts_max_processes - 1);
+ p_serial_mask = ((~(~((Uint) 0) << proc_bits)) >> p_serial_shift);
+ erts_process_tab_index_mask = ~(~((Uint) 0) << p_serial_shift);
+#ifndef BM_COUNTERS
+ processes_busy = 0;
+#endif
+ last_reductions = 0;
+ last_exact_reductions = 0;
+ erts_default_process_flags = 0;
+}
+
+void
+erts_late_init_process(void)
+{
+ int ix;
+ init_processes_bif();
+
+ erts_smp_spinlock_init(&erts_sched_stat.lock, "sched_stat");
+ for (ix = 0; ix < ERTS_NO_PRIO_LEVELS; ix++) {
+ Eterm atom;
+ char *atom_str;
+ switch (ix) {
+ case PRIORITY_MAX:
+ atom_str = "process_max";
+ break;
+ case PRIORITY_HIGH:
+ atom_str = "process_high";
+ break;
+ case PRIORITY_NORMAL:
+ atom_str = "process_normal";
+ break;
+ case PRIORITY_LOW:
+ atom_str = "process_low";
+ break;
+ case ERTS_PORT_PRIO_LEVEL:
+ atom_str = "port";
+ break;
+ default:
+ atom_str = "bad_prio";
+ ASSERT(!"bad prio");
+ break;
+ }
+ atom = am_atom_put(atom_str, sys_strlen(atom_str));
+ erts_sched_stat.prio[ix].name = atom;
+ erts_sched_stat.prio[ix].total_executed = 0;
+ erts_sched_stat.prio[ix].executed = 0;
+ erts_sched_stat.prio[ix].total_migrated = 0;
+ erts_sched_stat.prio[ix].migrated = 0;
+ }
+
+}
+
+static ERTS_INLINE ErtsProcList *
+proclist_create(Process *p)
+{
+ ErtsProcList *plp = proclist_alloc();
+ plp->pid = p->id;
+ plp->started = p->started;
+ return plp;
+}
+
+static ERTS_INLINE void
+proclist_destroy(ErtsProcList *plp)
+{
+ proclist_free(plp);
+}
+
+static ERTS_INLINE int
+proclist_same(ErtsProcList *plp, Process *p)
+{
+ return (plp->pid == p->id
+ && erts_cmp_timeval(&plp->started, &p->started) == 0);
+}
+
+ErtsProcList *
+erts_proclist_create(Process *p)
+{
+ return proclist_create(p);
+}
+
+void
+erts_proclist_destroy(ErtsProcList *plp)
+{
+ proclist_destroy(plp);
+}
+
+int
+erts_proclist_same(ErtsProcList *plp, Process *p)
+{
+ return proclist_same(plp, p);
+}
+
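+/*
+ * Install the initial process specific data (PSD) array for p and set
+ * entry ix. All process locks must be held while p->psd is assigned;
+ * locks not already held are acquired temporarily, bumping the
+ * reference count if the main lock has to be released on the way.
+ */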
+void *
+erts_psd_set_init(Process *p, ErtsProcLocks plocks, int ix, void *data)
+{
+ void *old;
+ ErtsProcLocks xplocks;
+ int refc = 0;
+ ErtsPSD *psd = erts_alloc(ERTS_ALC_T_PSD, sizeof(ErtsPSD));
+ int i;
+ for (i = 0; i < ERTS_PSD_SIZE; i++)
+ psd->data[i] = NULL;
+
+ ERTS_SMP_LC_ASSERT(plocks);
+ ERTS_SMP_LC_ASSERT(plocks == erts_proc_lc_my_proc_locks(p));
+
+ xplocks = ERTS_PROC_LOCKS_ALL;
+ xplocks &= ~plocks;
+ if (xplocks && erts_smp_proc_trylock(p, xplocks) == EBUSY) {
+ if (xplocks & ERTS_PROC_LOCK_MAIN) {
+ erts_smp_proc_inc_refc(p);
+ erts_smp_proc_unlock(p, plocks);
+ erts_smp_proc_lock(p, ERTS_PROC_LOCKS_ALL);
+ refc = 1;
+ }
+ else {
+ if (plocks & ERTS_PROC_LOCKS_ALL_MINOR)
+ erts_smp_proc_unlock(p, plocks & ERTS_PROC_LOCKS_ALL_MINOR);
+ erts_smp_proc_lock(p, ERTS_PROC_LOCKS_ALL_MINOR);
+ }
+ }
+ if (!p->psd)
+ p->psd = psd;
+ if (xplocks)
+ erts_smp_proc_unlock(p, xplocks);
+ if (refc)
+ erts_smp_proc_dec_refc(p);
+ ASSERT(p->psd);
+ if (p->psd != psd)
+ erts_free(ERTS_ALC_T_PSD, psd);
+ old = p->psd->data[ix];
+ p->psd->data[ix] = data;
+ ERTS_SMP_LC_ASSERT(plocks == erts_proc_lc_my_proc_locks(p));
+ return old;
+}
+
+#ifdef ERTS_SMP
+
+static void
+prepare_for_block(void *vrq)
+{
+ erts_smp_runq_unlock((ErtsRunQueue *) vrq);
+}
+
+static void
+resume_after_block(void *vrq)
+{
+ erts_smp_runq_lock((ErtsRunQueue *) vrq);
+}
+
+#endif
+
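+/*
+ * The sign of rq->waiting encodes how waiting schedulers sleep:
+ * negative means they are blocked inside erl_sys_schedule() and must
+ * be woken via erts_sys_schedule_interrupt(); positive means they
+ * sleep on the run queue condition variable.
+ */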
+static ERTS_INLINE void
+sched_waiting_sys(Uint no, ErtsRunQueue *rq)
+{
+ ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq));
+ ASSERT(rq->waiting >= 0);
+ rq->flags |= (ERTS_RUNQ_FLG_OUT_OF_WORK
+ | ERTS_RUNQ_FLG_HALFTIME_OUT_OF_WORK);
+ rq->waiting++;
+ rq->waiting *= -1;
+ rq->woken = 0;
+ if (erts_system_profile_flags.scheduler)
+ profile_scheduler(make_small(no), am_inactive);
+}
+
+static ERTS_INLINE void
+sched_active_sys(Uint no, ErtsRunQueue *rq)
+{
+ ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq));
+ ASSERT(rq->waiting < 0);
+ rq->waiting *= -1;
+ rq->waiting--;
+ if (erts_system_profile_flags.scheduler)
+ profile_scheduler(make_small(no), am_active);
+}
+
+Uint
+erts_active_schedulers(void)
+{
+ /* Active schedulers = total schedulers minus those waiting on some run queue */
+
+ Uint as = erts_no_schedulers;
+
+ ERTS_ATOMIC_FOREACH_RUNQ(rq, as -= abs(rq->waiting));
+
+ ASSERT(as >= 0);
+ return as;
+}
+
+#ifdef ERTS_SMP
+
+static ERTS_INLINE void
+sched_waiting(Uint no, ErtsRunQueue *rq)
+{
+ ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq));
+ rq->flags |= (ERTS_RUNQ_FLG_OUT_OF_WORK
+ | ERTS_RUNQ_FLG_HALFTIME_OUT_OF_WORK);
+ if (rq->waiting < 0)
+ rq->waiting--;
+ else
+ rq->waiting++;
+ rq->woken = 0;
+ if (erts_system_profile_flags.scheduler)
+ profile_scheduler(make_small(no), am_inactive);
+}
+
+static ERTS_INLINE void
+sched_active(Uint no, ErtsRunQueue *rq)
+{
+ ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq));
+ if (rq->waiting < 0)
+ rq->waiting++;
+ else
+ rq->waiting--;
+ if (erts_system_profile_flags.scheduler)
+ profile_scheduler(make_small(no), am_active);
+}
+
+static int ERTS_INLINE
+ongoing_multi_scheduling_block(void)
+{
+ return erts_smp_atomic_read(&schdlr_sspnd.msb.ongoing) != 0;
+}
+
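+/*
+ * Track the number of empty run queues. The NONEMPTY info flag
+ * transition decides whether the global counter is adjusted, so each
+ * queue is counted at most once regardless of how often these are called.
+ */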
+static ERTS_INLINE void
+empty_runq(ErtsRunQueue *rq)
+{
+ long oifls = erts_smp_atomic_band(&rq->info_flags, ~ERTS_RUNQ_IFLG_NONEMPTY);
+ if (oifls & ERTS_RUNQ_IFLG_NONEMPTY) {
+#ifdef DEBUG
+ long empty = erts_smp_atomic_read(&no_empty_run_queues);
+ ASSERT(0 <= empty && empty < erts_no_run_queues);
+#endif
+ erts_smp_atomic_inc(&no_empty_run_queues);
+ }
+}
+
+static ERTS_INLINE void
+non_empty_runq(ErtsRunQueue *rq)
+{
+ long oifls = erts_smp_atomic_bor(&rq->info_flags, ERTS_RUNQ_IFLG_NONEMPTY);
+ if (!(oifls & ERTS_RUNQ_IFLG_NONEMPTY)) {
+#ifdef DEBUG
+ long empty = erts_smp_atomic_read(&no_empty_run_queues);
+ ASSERT(0 < empty && empty <= erts_no_run_queues);
+#endif
+ erts_smp_atomic_dec(&no_empty_run_queues);
+ }
+}
+
+static ERTS_INLINE int
+sched_spin_wake(ErtsRunQueue *rq)
+{
+#if ERTS_SCHED_SLEEP_SPINCOUNT == 0
+ return 0;
+#else
+ long val;
+ ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq));
+
+ val = erts_smp_atomic_read(&rq->spin_waiter);
+ ASSERT(val >= 0);
+ if (val != 0) {
+ erts_smp_atomic_inc(&rq->spin_wake);
+ return 1;
+ }
+ return 0;
+#endif
+}
+
+static ERTS_INLINE int
+sched_spin_wake_all(ErtsRunQueue *rq)
+{
+#if ERTS_SCHED_SLEEP_SPINCOUNT == 0
+ return 0;
+#else
+ long val;
+ ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq));
+
+ val = erts_smp_atomic_read(&rq->spin_waiter);
+ ASSERT(val >= 0);
+ if (val != 0)
+ erts_smp_atomic_add(&rq->spin_wake, val);
+ return val;
+#endif
+}
+
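+/*
+ * Wait for work: register as a spin waiter and spin on spin_wake for a
+ * while (a waker increments spin_wake to release a spinner without a
+ * syscall); if the spin count expires, fall back to blocking in
+ * erl_sys_schedule().
+ */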
+static void
+sched_sys_wait(Uint no, ErtsRunQueue *rq)
+{
+ long dt;
+#if ERTS_SCHED_SLEEP_SPINCOUNT != 0
+ int val;
+ int spincount = ERTS_SCHED_SLEEP_SPINCOUNT;
+ ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq));
+
+#endif
+
+ sched_waiting_sys(no, rq);
+
+#if ERTS_SCHED_SLEEP_SPINCOUNT != 0
+ erts_smp_atomic_inc(&rq->spin_waiter);
+ erts_smp_runq_unlock(rq);
+
+ erl_sys_schedule(1); /* Might give us something to do */
+
+ dt = do_time_read_and_reset();
+ if (dt) bump_timer(dt);
+
+ while (spincount-- > 0) {
+ val = erts_smp_atomic_read(&rq->spin_wake);
+ ASSERT(val >= 0);
+ if (val != 0) {
+ erts_smp_runq_lock(rq);
+ val = erts_smp_atomic_read(&rq->spin_wake);
+ ASSERT(val >= 0);
+ if (val != 0)
+ goto woken;
+ if (spincount == 0)
+ goto sleep;
+ erts_smp_runq_unlock(rq);
+ }
+ }
+
+ erts_smp_runq_lock(rq);
+ val = erts_smp_atomic_read(&rq->spin_wake);
+ ASSERT(val >= 0);
+ if (val != 0) {
+ woken:
+ erts_smp_atomic_dec(&rq->spin_wake);
+ ASSERT(erts_smp_atomic_read(&rq->spin_wake) >= 0);
+ erts_smp_atomic_dec(&rq->spin_waiter);
+ ASSERT(erts_smp_atomic_read(&rq->spin_waiter) >= 0);
+ }
+ else {
+ sleep:
+ erts_smp_atomic_dec(&rq->spin_waiter);
+ ASSERT(erts_smp_atomic_read(&rq->spin_waiter) >= 0);
+ /*
+ * If we got new I/O tasks we aren't allowed to
+ * sleep in erl_sys_schedule().
+ */
+ if (!erts_port_task_have_outstanding_io_tasks()) {
+#endif
+
+ erts_sys_schedule_interrupt(0);
+ erts_smp_runq_unlock(rq);
+
+ erl_sys_schedule(0);
+
+ dt = do_time_read_and_reset();
+ if (dt) bump_timer(dt);
+
+ erts_smp_runq_lock(rq);
+
+#if ERTS_SCHED_SLEEP_SPINCOUNT != 0
+ }
+ }
+#endif
+
+ sched_active_sys(no, rq);
+}
+
+static void
+sched_cnd_wait(Uint no, ErtsRunQueue *rq)
+{
+#if ERTS_SCHED_SLEEP_SPINCOUNT != 0
+ int val;
+ int spincount = ERTS_SCHED_SLEEP_SPINCOUNT;
+ ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq));
+#endif
+
+ sched_waiting(no, rq);
+ erts_smp_activity_begin(ERTS_ACTIVITY_WAIT,
+ prepare_for_block,
+ resume_after_block,
+ (void *) rq);
+
+#if ERTS_SCHED_SLEEP_SPINCOUNT == 0
+ erts_smp_cnd_wait(&rq->cnd, &rq->mtx);
+#else
+ erts_smp_atomic_inc(&rq->spin_waiter);
+ erts_smp_mtx_unlock(&rq->mtx);
+
+ while (spincount-- > 0) {
+ val = erts_smp_atomic_read(&rq->spin_wake);
+ ASSERT(val >= 0);
+ if (val != 0) {
+ erts_smp_mtx_lock(&rq->mtx);
+ val = erts_smp_atomic_read(&rq->spin_wake);
+ ASSERT(val >= 0);
+ if (val != 0)
+ goto woken;
+ if (spincount == 0)
+ goto sleep;
+ erts_smp_mtx_unlock(&rq->mtx);
+ }
+ }
+
+ erts_smp_mtx_lock(&rq->mtx);
+ val = erts_smp_atomic_read(&rq->spin_wake);
+ ASSERT(val >= 0);
+ if (val == 0) {
+ sleep:
+ erts_smp_atomic_dec(&rq->spin_waiter);
+ ASSERT(erts_smp_atomic_read(&rq->spin_waiter) >= 0);
+ erts_smp_cnd_wait(&rq->cnd, &rq->mtx);
+ }
+ else {
+ woken:
+ erts_smp_atomic_dec(&rq->spin_wake);
+ ASSERT(erts_smp_atomic_read(&rq->spin_wake) >= 0);
+ erts_smp_atomic_dec(&rq->spin_waiter);
+ ASSERT(erts_smp_atomic_read(&rq->spin_waiter) >= 0);
+ }
+#endif
+
+ erts_smp_activity_end(ERTS_ACTIVITY_WAIT,
+ prepare_for_block,
+ resume_after_block,
+ (void *) rq);
+
+ sched_active(no, rq);
+}
+
+static void
+wake_one_scheduler(void)
+{
+ ASSERT(erts_common_run_queue);
+ ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(erts_common_run_queue));
+ if (erts_common_run_queue->waiting) {
+ if (!sched_spin_wake(erts_common_run_queue)) {
+ /* One scheduler waiting, and doing so in sys_schedule */
+ if (erts_common_run_queue->waiting == -1)
+ erts_sys_schedule_interrupt(1);
+ else
+ erts_smp_cnd_signal(&erts_common_run_queue->cnd);
+ }
+ }
+}
+
+static void
+wake_scheduler(ErtsRunQueue *rq, int incq)
+{
+ ASSERT(!erts_common_run_queue);
+ ASSERT(-1 <= rq->waiting && rq->waiting <= 1);
+ ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq));
+ if (rq->waiting && !rq->woken) {
+ if (!sched_spin_wake(rq)) {
+ if (rq->waiting < 0)
+ erts_sys_schedule_interrupt(1);
+ else
+ erts_smp_cnd_signal(&rq->cnd);
+ }
+ rq->woken = 1;
+ if (incq)
+ non_empty_runq(rq);
+ }
+}
+
+static void
+wake_all_schedulers(void)
+{
+ if (erts_common_run_queue) {
+ erts_smp_runq_lock(erts_common_run_queue);
+ if (erts_common_run_queue->waiting) {
+ if (erts_common_run_queue->waiting < 0)
+ erts_sys_schedule_interrupt(1);
+ sched_spin_wake_all(erts_common_run_queue);
+ erts_smp_cnd_broadcast(&erts_common_run_queue->cnd);
+ }
+ erts_smp_runq_unlock(erts_common_run_queue);
+ }
+ else {
+ int ix;
+ for (ix = 0; ix < erts_no_run_queues; ix++) {
+ ErtsRunQueue *rq = ERTS_RUNQ_IX(ix);
+ erts_smp_runq_lock(rq);
+ wake_scheduler(rq, 0);
+ erts_smp_runq_unlock(rq);
+ }
+ }
+}
+
+static ERTS_INLINE int
+chk_wake_sched(ErtsRunQueue *crq, int ix, int activate)
+{
+ long iflgs;
+ ErtsRunQueue *wrq;
+ if (crq->ix == ix)
+ return 0;
+ wrq = ERTS_RUNQ_IX(ix);
+ iflgs = erts_smp_atomic_read(&wrq->info_flags);
+ if (!(iflgs & (ERTS_RUNQ_IFLG_SUSPENDED|ERTS_RUNQ_IFLG_NONEMPTY))) {
+ erts_smp_xrunq_lock(crq, wrq);
+ if (activate) {
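+ /* Try to advance active_runqs from ix to ix+1; cmpxchg returns the previous value */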
+ if (ix == erts_smp_atomic_cmpxchg(&balance_info.active_runqs, ix+1, ix)) {
+ wrq->flags &= ~ERTS_RUNQ_FLG_INACTIVE;
+ }
+ }
+ wake_scheduler(wrq, 0);
+ erts_smp_xrunq_unlock(crq, wrq);
+ return 1;
+ }
+ return 0;
+}
+
+static void
+wake_scheduler_on_empty_runq(ErtsRunQueue *crq)
+{
+ int ix = crq->ix;
+ int stop_ix = ix;
+ int active_ix = erts_smp_atomic_read(&balance_info.active_runqs);
+ int balance_ix = erts_smp_atomic_read(&balance_info.used_runqs);
+
+ if (active_ix > balance_ix)
+ active_ix = balance_ix;
+
+ if (ix >= active_ix)
+ stop_ix = ix = active_ix;
+
+ /* Try to wake a scheduler on an active run queue */
+ while (1) {
+ ix--;
+ if (ix < 0) {
+ if (active_ix == stop_ix)
+ break;
+ ix = active_ix - 1;
+ }
+ if (ix == stop_ix)
+ break;
+ if (chk_wake_sched(crq, ix, 0))
+ return;
+ }
+
+ if (active_ix < balance_ix) {
+ /* Try to activate a new run queue and wake its scheduler */
+ (void) chk_wake_sched(crq, active_ix, 1);
+ }
+}
+
+#endif /* ERTS_SMP */
+
+static ERTS_INLINE void
+smp_notify_inc_runq(ErtsRunQueue *runq)
+{
+#ifdef ERTS_SMP
+ if (erts_common_run_queue)
+ wake_one_scheduler();
+ else
+ wake_scheduler(runq, 1);
+#endif
+}
+
+void
+erts_smp_notify_inc_runq__(ErtsRunQueue *runq)
+{
+ smp_notify_inc_runq(runq);
+}
+
+#ifdef ERTS_SMP
+
+ErtsRunQueue *
+erts_prepare_emigrate(ErtsRunQueue *c_rq, ErtsRunQueueInfo *c_rqi, int prio)
+{
+ ASSERT(ERTS_CHK_RUNQ_FLG_EMIGRATE(c_rq->flags, prio));
+ ASSERT(ERTS_CHK_RUNQ_FLG_EVACUATE(c_rq->flags, prio)
+ || c_rqi->len >= c_rqi->migrate.limit.this);
+
+ while (1) {
+ ErtsRunQueue *n_rq = c_rqi->migrate.runq;
+ ERTS_DBG_VERIFY_VALID_RUNQP(n_rq);
+ erts_smp_xrunq_lock(c_rq, n_rq);
+
+ /*
+ * erts_smp_xrunq_lock() may release lock on c_rq! We have
+ * to check that we still want to emigrate and emigrate
+ * to the same run queue as before.
+ */
+
+ if (ERTS_CHK_RUNQ_FLG_EMIGRATE(c_rq->flags, prio)) {
+ Uint32 force = (ERTS_CHK_RUNQ_FLG_EVACUATE(c_rq->flags, prio)
+ | (c_rq->flags & ERTS_RUNQ_FLG_INACTIVE));
+ if (force || c_rqi->len > c_rqi->migrate.limit.this) {
+ ErtsRunQueueInfo *n_rqi;
+ /* We still want to emigrate */
+
+ if (n_rq != c_rqi->migrate.runq) {
+ /* Ahh... run queue changed; need to do it all over again... */
+ erts_smp_runq_unlock(n_rq);
+ continue;
+ }
+ else {
+
+ if (prio == ERTS_PORT_PRIO_LEVEL)
+ n_rqi = &n_rq->ports.info;
+ else
+ n_rqi = &n_rq->procs.prio_info[prio];
+
+ if (force || (n_rqi->len < c_rqi->migrate.limit.other)) {
+ /* emigrate ... */
+ return n_rq;
+ }
+ }
+ }
+ }
+
+ ASSERT(n_rq != c_rq);
+ erts_smp_runq_unlock(n_rq);
+ if (!(c_rq->flags & ERTS_RUNQ_FLG_INACTIVE)) {
+ /* No more emigrations to this runq */
+ ERTS_UNSET_RUNQ_FLG_EMIGRATE(c_rq->flags, prio);
+ ERTS_DBG_SET_INVALID_RUNQP(c_rqi->migrate.runq, 0x3);
+ }
+
+ return NULL;
+ }
+}
+
+static void
+immigrate(ErtsRunQueue *rq)
+{
+ int prio;
+
+ ASSERT(rq->flags & ERTS_RUNQ_FLGS_IMMIGRATE_QMASK);
+
+ for (prio = 0; prio < ERTS_NO_PRIO_LEVELS; prio++) {
+ if (ERTS_CHK_RUNQ_FLG_IMMIGRATE(rq->flags, prio)) {
+ ErtsRunQueueInfo *rqi = (prio == ERTS_PORT_PRIO_LEVEL
+ ? &rq->ports.info
+ : &rq->procs.prio_info[prio]);
+ ErtsRunQueue *from_rq = rqi->migrate.runq;
+ int rq_locked, from_rq_locked;
+
+ ERTS_DBG_VERIFY_VALID_RUNQP(from_rq);
+
+ rq_locked = 1;
+ from_rq_locked = 1;
+ erts_smp_xrunq_lock(rq, from_rq);
+ /*
+ * erts_smp_xrunq_lock() may release lock on rq! We have
+ * to check that we still want to immigrate from the same
+ * run queue as before.
+ */
+ if (ERTS_CHK_RUNQ_FLG_IMMIGRATE(rq->flags, prio)
+ && from_rq == rqi->migrate.runq) {
+ ErtsRunQueueInfo *from_rqi = (prio == ERTS_PORT_PRIO_LEVEL
+ ? &from_rq->ports.info
+ : &from_rq->procs.prio_info[prio]);
+ if ((ERTS_CHK_RUNQ_FLG_EVACUATE(rq->flags, prio)
+ && ERTS_CHK_RUNQ_FLG_EVACUATE(from_rq->flags, prio)
+ && from_rqi->len)
+ || (from_rqi->len > rqi->migrate.limit.other
+ && rqi->len < rqi->migrate.limit.this)) {
+ if (prio == ERTS_PORT_PRIO_LEVEL) {
+ Port *prt = from_rq->ports.start;
+ if (prt) {
+ int prt_locked = 0;
+ (void) erts_port_migrate(prt, &prt_locked,
+ from_rq, &from_rq_locked,
+ rq, &rq_locked);
+ if (prt_locked)
+ erts_smp_port_unlock(prt);
+ }
+ }
+ else {
+ Process *proc;
+ ErtsRunPrioQueue *from_rpq;
+ from_rpq = (prio == PRIORITY_LOW
+ ? &from_rq->procs.prio[PRIORITY_NORMAL]
+ : &from_rq->procs.prio[prio]);
+ for (proc = from_rpq->first; proc; proc = proc->next)
+ if (proc->prio == prio && !proc->bound_runq)
+ break;
+ if (proc) {
+ ErtsProcLocks proc_locks = 0;
+ (void) erts_proc_migrate(proc, &proc_locks,
+ from_rq, &from_rq_locked,
+ rq, &rq_locked);
+ if (proc_locks)
+ erts_smp_proc_unlock(proc, proc_locks);
+ }
+ }
+ }
+ else {
+ ERTS_UNSET_RUNQ_FLG_IMMIGRATE(rq->flags, prio);
+ ERTS_DBG_SET_INVALID_RUNQP(rqi->migrate.runq, 0x1);
+ }
+ }
+ if (from_rq_locked)
+ erts_smp_runq_unlock(from_rq);
+ if (!rq_locked)
+ erts_smp_runq_lock(rq);
+ }
+ }
+}
+
+static void
+evacuate_run_queue(ErtsRunQueue *evac_rq, ErtsRunQueue *rq)
+{
+ Port *prt;
+ int prio;
+ int prt_locked = 0;
+ int rq_locked = 0;
+ int evac_rq_locked = 1;
+
+ erts_smp_runq_lock(evac_rq);
+
+ evac_rq->flags &= ~ERTS_RUNQ_FLGS_IMMIGRATE_QMASK;
+ evac_rq->flags |= (ERTS_RUNQ_FLGS_EMIGRATE_QMASK
+ | ERTS_RUNQ_FLGS_EVACUATE_QMASK
+ | ERTS_RUNQ_FLG_SUSPENDED);
+
+ erts_smp_atomic_bor(&evac_rq->info_flags, ERTS_RUNQ_IFLG_SUSPENDED);
+
+ /*
+ * Need to set up evacuation paths first since we
+ * may release the run queue lock on evac_rq
+ * when evacuating.
+ */
+ evac_rq->misc.evac_runq = rq;
+ evac_rq->ports.info.migrate.runq = rq;
+ for (prio = 0; prio < ERTS_NO_PROC_PRIO_LEVELS; prio++)
+ evac_rq->procs.prio_info[prio].migrate.runq = rq;
+
+ /* Evacuate scheduled misc ops */
+
+ if (evac_rq->misc.start) {
+ rq_locked = 1;
+ erts_smp_xrunq_lock(evac_rq, rq);
+ if (rq->misc.end)
+ rq->misc.end->next = evac_rq->misc.start;
+ else
+ rq->misc.start = evac_rq->misc.start;
+ rq->misc.end = evac_rq->misc.end;
+ evac_rq->misc.start = NULL;
+ evac_rq->misc.end = NULL;
+ }
+
+ /* Evacuate scheduled ports */
+ prt = evac_rq->ports.start;
+ while (prt) {
+ (void) erts_port_migrate(prt, &prt_locked,
+ evac_rq, &evac_rq_locked,
+ rq, &rq_locked);
+ if (prt_locked)
+ erts_smp_port_unlock(prt);
+ if (!evac_rq_locked) {
+ evac_rq_locked = 1;
+ erts_smp_runq_lock(evac_rq);
+ }
+ prt = evac_rq->ports.start;
+ }
+
+ /* Evacuate scheduled processes */
+ for (prio = 0; prio < ERTS_NO_PROC_PRIO_LEVELS; prio++) {
+ Process *proc;
+
+ switch (prio) {
+ case PRIORITY_MAX:
+ case PRIORITY_HIGH:
+ case PRIORITY_NORMAL:
+ proc = evac_rq->procs.prio[prio].first;
+ while (proc) {
+ ErtsProcLocks proc_locks = 0;
+
+ /* Bound processes are stuck... */
+ while (proc->bound_runq) {
+ proc = proc->next;
+ if (!proc)
+ goto end_of_proc;
+ }
+
+ (void) erts_proc_migrate(proc, &proc_locks,
+ evac_rq, &evac_rq_locked,
+ rq, &rq_locked);
+ if (proc_locks)
+ erts_smp_proc_unlock(proc, proc_locks);
+ if (!evac_rq_locked) {
+ erts_smp_runq_lock(evac_rq);
+ evac_rq_locked = 1;
+ }
+
+ proc = evac_rq->procs.prio[prio].first;
+ }
+
+ end_of_proc:
+
+#ifdef DEBUG
+ for (proc = evac_rq->procs.prio[prio].first;
+ proc;
+ proc = proc->next) {
+ ASSERT(proc->bound_runq);
+ }
+#endif
+ break;
+ case PRIORITY_LOW:
+ break;
+ default:
+ ASSERT(!"Invalid process priority");
+ break;
+ }
+ }
+
+ if (rq_locked)
+ erts_smp_runq_unlock(rq);
+
+ if (!evac_rq_locked)
+ erts_smp_runq_lock(evac_rq);
+ wake_scheduler(evac_rq, 0);
+ erts_smp_runq_unlock(evac_rq);
+}
+
+static int
+try_steal_task_from_victim(ErtsRunQueue *rq, int *rq_lockedp, ErtsRunQueue *vrq)
+{
+ Process *proc;
+ int vrq_locked;
+
+ if (*rq_lockedp)
+ erts_smp_xrunq_lock(rq, vrq);
+ else
+ erts_smp_runq_lock(vrq);
+ vrq_locked = 1;
+
+ ERTS_SMP_LC_CHK_RUNQ_LOCK(rq, *rq_lockedp);
+ ERTS_SMP_LC_CHK_RUNQ_LOCK(vrq, vrq_locked);
+
+ /*
+ * Check for a runnable process to steal...
+ */
+
+ switch (vrq->flags & ERTS_RUNQ_FLGS_PROCS_QMASK) {
+ case MAX_BIT:
+ case MAX_BIT|HIGH_BIT:
+ case MAX_BIT|NORMAL_BIT:
+ case MAX_BIT|LOW_BIT:
+ case MAX_BIT|HIGH_BIT|NORMAL_BIT:
+ case MAX_BIT|HIGH_BIT|LOW_BIT:
+ case MAX_BIT|NORMAL_BIT|LOW_BIT:
+ case MAX_BIT|HIGH_BIT|NORMAL_BIT|LOW_BIT:
+ for (proc = vrq->procs.prio[PRIORITY_MAX].last;
+ proc;
+ proc = proc->prev) {
+ if (!proc->bound_runq)
+ break;
+ }
+ if (proc)
+ break;
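+ /* Fall through: only bound max-priority processes; try high */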
+ case HIGH_BIT:
+ case HIGH_BIT|NORMAL_BIT:
+ case HIGH_BIT|LOW_BIT:
+ case HIGH_BIT|NORMAL_BIT|LOW_BIT:
+ for (proc = vrq->procs.prio[PRIORITY_HIGH].last;
+ proc;
+ proc = proc->prev) {
+ if (!proc->bound_runq)
+ break;
+ }
+ if (proc)
+ break;
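+ /* Fall through: only bound high-priority processes; try normal/low */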
+ case NORMAL_BIT:
+ case LOW_BIT:
+ case NORMAL_BIT|LOW_BIT:
+ for (proc = vrq->procs.prio[PRIORITY_NORMAL].last;
+ proc;
+ proc = proc->prev) {
+ if (!proc->bound_runq)
+ break;
+ }
+ if (proc)
+ break;
+ case 0:
+ proc = NULL;
+ break;
+ default:
+ ASSERT(!"Invalid queue mask");
+ proc = NULL;
+ break;
+ }
+
+ if (proc) {
+ ErtsProcLocks proc_locks = 0;
+ int res;
+ ErtsMigrateResult mres;
+ mres = erts_proc_migrate(proc, &proc_locks,
+ vrq, &vrq_locked,
+ rq, rq_lockedp);
+ if (proc_locks)
+ erts_smp_proc_unlock(proc, proc_locks);
+ res = !0;
+ switch (mres) {
+ case ERTS_MIGRATE_FAILED_RUNQ_SUSPENDED:
+ res = 0;
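+ /* Fall through */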
+ case ERTS_MIGRATE_SUCCESS:
+ if (vrq_locked)
+ erts_smp_runq_unlock(vrq);
+ return res;
+ default: /* Other failures */
+ break;
+ }
+ }
+
+ ERTS_SMP_LC_CHK_RUNQ_LOCK(rq, *rq_lockedp);
+ ERTS_SMP_LC_CHK_RUNQ_LOCK(vrq, vrq_locked);
+
+ if (!vrq_locked) {
+ if (*rq_lockedp)
+ erts_smp_xrunq_lock(rq, vrq);
+ else
+ erts_smp_runq_lock(vrq);
+ vrq_locked = 1;
+ }
+
+ ERTS_SMP_LC_CHK_RUNQ_LOCK(rq, *rq_lockedp);
+ ERTS_SMP_LC_CHK_RUNQ_LOCK(vrq, vrq_locked);
+
+ /*
+ * Check for a runnable port to steal...
+ */
+
+ if (vrq->ports.info.len) {
+ Port *prt = vrq->ports.end;
+ int prt_locked = 0;
+ int res;
+ ErtsMigrateResult mres;
+
+ mres = erts_port_migrate(prt, &prt_locked,
+ vrq, &vrq_locked,
+ rq, rq_lockedp);
+ if (prt_locked)
+ erts_smp_port_unlock(prt);
+ res = !0;
+ switch (mres) {
+ case ERTS_MIGRATE_FAILED_RUNQ_SUSPENDED:
+ res = 0;
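+ /* Fall through */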
+ case ERTS_MIGRATE_SUCCESS:
+ if (vrq_locked)
+ erts_smp_runq_unlock(vrq);
+ return res;
+ default: /* Other failures */
+ break;
+ }
+ }
+
+ if (vrq_locked)
+ erts_smp_runq_unlock(vrq);
+
+ return 0;
+}
+
+
+static ERTS_INLINE int
+check_possible_steal_victim(ErtsRunQueue *rq, int *rq_lockedp, int vix)
+{
+ ErtsRunQueue *vrq = ERTS_RUNQ_IX(vix);
+ long iflgs = erts_smp_atomic_read(&vrq->info_flags);
+ if (iflgs & ERTS_RUNQ_IFLG_NONEMPTY)
+ return try_steal_task_from_victim(rq, rq_lockedp, vrq);
+ else
+ return 0;
+}
+
+
+static int
+try_steal_task(ErtsRunQueue *rq)
+{
+ int res, rq_locked, vix, active_rqs, blnc_rqs;
+
+ if (erts_common_run_queue)
+ return 0;
+
+ /*
+ * We are not allowed to steal jobs into this run queue
+ * while it is suspended. Note that it may become suspended
+ * at any time when we do not hold the run queue lock.
+ */
+ if (rq->flags & ERTS_RUNQ_FLG_SUSPENDED)
+ return 0;
+
+ res = 0;
+ rq_locked = 1;
+
+ ERTS_SMP_LC_CHK_RUNQ_LOCK(rq, rq_locked);
+
+ active_rqs = erts_smp_atomic_read(&balance_info.active_runqs);
+ blnc_rqs = erts_smp_atomic_read(&balance_info.used_runqs);
+
+ if (active_rqs > blnc_rqs)
+ active_rqs = blnc_rqs;
+
+ if (rq->ix < active_rqs) {
+
+ /* First try to steal from an inactive run queue... */
+ if (active_rqs < blnc_rqs) {
+ int no = blnc_rqs - active_rqs;
+ int stop_ix = vix = active_rqs + rq->ix % no;
+ while (erts_smp_atomic_read(&no_empty_run_queues) < blnc_rqs) {
+ res = check_possible_steal_victim(rq, &rq_locked, vix);
+ if (res)
+ goto done;
+ vix++;
+ if (vix >= blnc_rqs)
+ vix = active_rqs;
+ if (vix == stop_ix)
+ break;
+ }
+ }
+
+ vix = rq->ix;
+
+ /* ... then try to steal a job from another active queue... */
+ while (erts_smp_atomic_read(&no_empty_run_queues) < blnc_rqs) {
+ vix++;
+ if (vix >= active_rqs)
+ vix = 0;
+ if (vix == rq->ix)
+ break;
+
+ res = check_possible_steal_victim(rq, &rq_locked, vix);
+ if (res)
+ goto done;
+ }
+
+ }
+
+ done:
+
+ if (!rq_locked)
+ erts_smp_runq_lock(rq);
+
+ if (!res)
+ res = !ERTS_EMPTY_RUNQ(rq);
+
+ return res;
+}
+
+#ifdef ERTS_SMP_SCHEDULERS_NEED_TO_CHECK_CHILDREN
+void
+erts_smp_notify_check_children_needed(void)
+{
+ int i;
+ for (i = 0; i < erts_no_schedulers; i++) {
+ erts_smp_runq_lock(ERTS_SCHEDULER_IX(i)->run_queue);
+ ERTS_SCHEDULER_IX(i)->check_children = 1;
+ if (!erts_common_run_queue)
+ wake_scheduler(ERTS_SCHEDULER_IX(i)->run_queue, 0);
+ erts_smp_runq_unlock(ERTS_SCHEDULER_IX(i)->run_queue);
+ }
+ if (ongoing_multi_scheduling_block()) {
+ /* Also blocked schedulers need to check children */
+ erts_smp_mtx_lock(&schdlr_sspnd.mtx);
+ for (i = 0; i < erts_no_schedulers; i++)
+ ERTS_SCHEDULER_IX(i)->blocked_check_children = 1;
+ erts_smp_cnd_broadcast(&schdlr_sspnd.cnd);
+ erts_smp_mtx_unlock(&schdlr_sspnd.mtx);
+ }
+ if (erts_common_run_queue)
+ wake_all_schedulers();
+}
+#endif
+
+/* Run queue balancing */
+
+typedef struct {
+ Uint32 flags;
+ struct {
+ int max_len;
+ int avail;
+ int reds;
+ int migration_limit;
+ int emigrate_to;
+ int immigrate_from;
+ } prio[ERTS_NO_PRIO_LEVELS];
+ int reds;
+ int full_reds;
+ int full_reds_history_sum;
+ int full_reds_history_change;
+ int oowc;
+ int max_len;
+} ErtsRunQueueBalance;
+static ErtsRunQueueBalance *run_queue_info;
+
+typedef struct {
+ int qix;
+ int len;
+} ErtsRunQueueCompare;
+static ErtsRunQueueCompare *run_queue_compare;
+
+static int
+rqc_len_cmp(const void *x, const void *y)
+{
+ return ((ErtsRunQueueCompare *) x)->len - ((ErtsRunQueueCompare *) y)->len;
+}
+
+#define ERTS_PERCENT(X, Y) \
+ ((Y) == 0 \
+ ? ((X) == 0 ? 100 : INT_MAX) \
+ : ((100*(X))/(Y)))
+
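+/*
+ * Maintain a windowed average of "full" reductions: replace the oldest
+ * history entry with LAST_REDS and compute full_reds as the new history
+ * sum shifted down by ERTS_FULL_REDS_HISTORY_AVG_SHFT.
+ */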
+#define ERTS_UPDATE_FULL_REDS(QIX, LAST_REDS) \
+do { \
+ run_queue_info[(QIX)].full_reds \
+ = run_queue_info[(QIX)].full_reds_history_sum; \
+ run_queue_info[(QIX)].full_reds += (LAST_REDS); \
+ run_queue_info[(QIX)].full_reds \
+ >>= ERTS_FULL_REDS_HISTORY_AVG_SHFT; \
+ run_queue_info[(QIX)].full_reds_history_sum \
+ -= run_queue_info[(QIX)].full_reds_history_change; \
+ run_queue_info[(QIX)].full_reds_history_sum += (LAST_REDS); \
+ run_queue_info[(QIX)].full_reds_history_change = (LAST_REDS); \
+} while (0)
+
+#define ERTS_DBG_CHK_FULL_REDS_HISTORY(RQ) \
+do { \
+ int sum__ = 0; \
+ int rix__; \
+ for (rix__ = 0; rix__ < ERTS_FULL_REDS_HISTORY_SIZE; rix__++) \
+ sum__ += (RQ)->full_reds_history[rix__]; \
+ ASSERT(sum__ == (RQ)->full_reds_history_sum); \
+} while (0);
+
+static void
+check_balance(ErtsRunQueue *c_rq)
+{
+ ErtsRunQueueBalance avg = {0};
+ Sint64 scheds_reds, full_scheds_reds;
+ int forced, active, current_active, oowc, half_full_scheds, full_scheds,
+ mmax_len, blnc_no_rqs, qix, pix, freds_hist_ix;
+
+ if (erts_smp_atomic_xchg(&balance_info.checking_balance, 1)) {
+ c_rq->check_balance_reds = INT_MAX;
+ return;
+ }
+
+ blnc_no_rqs = (int) erts_smp_atomic_read(&balance_info.used_runqs);
+ if (blnc_no_rqs == 1) {
+ c_rq->check_balance_reds = INT_MAX;
+ erts_smp_atomic_set(&balance_info.checking_balance, 0);
+ return;
+ }
+
+ erts_smp_runq_unlock(c_rq);
+
+ if (balance_info.halftime) {
+ balance_info.halftime = 0;
+ erts_smp_atomic_set(&balance_info.checking_balance, 0);
+ ERTS_FOREACH_RUNQ(rq,
+ {
+ if (rq->waiting)
+ rq->flags |= ERTS_RUNQ_FLG_HALFTIME_OUT_OF_WORK;
+ else
+ rq->flags &= ~ERTS_RUNQ_FLG_HALFTIME_OUT_OF_WORK;
+ rq->check_balance_reds = ERTS_RUNQ_CALL_CHECK_BALANCE_REDS;
+ });
+
+ erts_smp_runq_lock(c_rq);
+ return;
+ }
+
+ /*
+ * check_balance() is never executed by more than one thread at a
+ * time, i.e., we will normally never get any contention on
+ * balance_info.update_mtx. However, when multi scheduling is
+ * blocked (which performance critical applications do *not* do),
+ * migration information is manipulated, and such updates might
+ * clash with balancing.
+ */
+ erts_smp_mtx_lock(&balance_info.update_mtx);
+
+ forced = balance_info.forced_check_balance;
+ balance_info.forced_check_balance = 0;
+
+ blnc_no_rqs = (int) erts_smp_atomic_read(&balance_info.used_runqs);
+ if (blnc_no_rqs == 1) {
+ erts_smp_mtx_unlock(&balance_info.update_mtx);
+ erts_smp_runq_lock(c_rq);
+ c_rq->check_balance_reds = INT_MAX;
+ erts_smp_atomic_set(&balance_info.checking_balance, 0);
+ return;
+ }
+
+ freds_hist_ix = balance_info.full_reds_history_index;
+ balance_info.full_reds_history_index++;
+ if (balance_info.full_reds_history_index >= ERTS_FULL_REDS_HISTORY_SIZE)
+ balance_info.full_reds_history_index = 0;
+
+ current_active = erts_smp_atomic_read(&balance_info.active_runqs);
+
+ /* Read balance information for all run queues */
+ for (qix = 0; qix < blnc_no_rqs; qix++) {
+ ErtsRunQueue *rq = ERTS_RUNQ_IX(qix);
+ erts_smp_runq_lock(rq);
+
+ run_queue_info[qix].flags = rq->flags;
+ for (pix = 0; pix < ERTS_NO_PROC_PRIO_LEVELS; pix++) {
+ run_queue_info[qix].prio[pix].max_len
+ = rq->procs.prio_info[pix].max_len;
+ run_queue_info[qix].prio[pix].reds
+ = rq->procs.prio_info[pix].reds;
+ }
+ run_queue_info[qix].prio[ERTS_PORT_PRIO_LEVEL].max_len
+ = rq->ports.info.max_len;
+ run_queue_info[qix].prio[ERTS_PORT_PRIO_LEVEL].reds
+ = rq->ports.info.reds;
+
+ run_queue_info[qix].full_reds_history_sum
+ = rq->full_reds_history_sum;
+ run_queue_info[qix].full_reds_history_change
+ = rq->full_reds_history[freds_hist_ix];
+
+ run_queue_info[qix].oowc = rq->out_of_work_count;
+ run_queue_info[qix].max_len = rq->max_len;
+ rq->check_balance_reds = INT_MAX;
+
+ erts_smp_runq_unlock(rq);
+ }
+
+ full_scheds = 0;
+ half_full_scheds = 0;
+ full_scheds_reds = 0;
+ scheds_reds = 0;
+ oowc = 0;
+ mmax_len = 0;
+
+ /* Calculate availability for each priority in each run queue */
+ for (qix = 0; qix < blnc_no_rqs; qix++) {
+ int treds = 0;
+
+ if (run_queue_info[qix].flags & ERTS_RUNQ_FLG_OUT_OF_WORK) {
+ for (pix = 0; pix < ERTS_NO_PRIO_LEVELS; pix++) {
+ run_queue_info[qix].prio[pix].avail = 100;
+ treds += run_queue_info[qix].prio[pix].reds;
+ }
+ if (!(run_queue_info[qix].flags & ERTS_RUNQ_FLG_HALFTIME_OUT_OF_WORK))
+ half_full_scheds++;
+ ERTS_UPDATE_FULL_REDS(qix, ERTS_RUNQ_CHECK_BALANCE_REDS_PER_SCHED);
+ }
+ else {
+ ASSERT(!(run_queue_info[qix].flags & ERTS_RUNQ_FLG_HALFTIME_OUT_OF_WORK));
+ for (pix = 0; pix < ERTS_NO_PRIO_LEVELS; pix++)
+ treds += run_queue_info[qix].prio[pix].reds;
+ if (treds == 0) {
+ for (pix = 0; pix < ERTS_NO_PRIO_LEVELS; pix++)
+ run_queue_info[qix].prio[pix].avail = 0;
+ }
+ else {
+ int xreds = 0;
+ int procreds = treds;
+ procreds -= run_queue_info[qix].prio[ERTS_PORT_PRIO_LEVEL].reds;
+
+ for (pix = 0; pix < ERTS_NO_PROC_PRIO_LEVELS; pix++) {
+ int av;
+
+ if (xreds == 0)
+ av = 100;
+ else if (procreds == xreds)
+ av = 0;
+ else {
+ av = (100*(procreds - xreds)) / procreds;
+ if (av == 0)
+ av = 1;
+ }
+ run_queue_info[qix].prio[pix].avail = av;
+ if (pix < PRIORITY_NORMAL) /* i.e., max or high */
+ xreds += run_queue_info[qix].prio[pix].reds;
+ }
+ run_queue_info[qix].prio[ERTS_PORT_PRIO_LEVEL].avail = 100;
+ }
+ ERTS_UPDATE_FULL_REDS(qix, treds);
+ full_scheds_reds += run_queue_info[qix].full_reds;
+ full_scheds++;
+ half_full_scheds++;
+ }
+ run_queue_info[qix].reds = treds;
+ scheds_reds += treds;
+ oowc += run_queue_info[qix].oowc;
+ if (mmax_len < run_queue_info[qix].max_len)
+ mmax_len = run_queue_info[qix].max_len;
+ }
+
+ if (!forced && half_full_scheds != blnc_no_rqs) {
+ int min = 1;
+ if (min < half_full_scheds)
+ min = half_full_scheds;
+ if (full_scheds) {
+ active = (scheds_reds - 1)/ERTS_RUNQ_CHECK_BALANCE_REDS_PER_SCHED+1;
+ }
+ else {
+ active = balance_info.last_active_runqs - 1;
+ }
+
+ if (balance_info.last_active_runqs < current_active) {
+ ERTS_BLNCE_SAVE_RISE(current_active, mmax_len, scheds_reds);
+ active = current_active;
+ }
+ else if (active < balance_info.prev_rise.active_runqs) {
+ if (ERTS_PERCENT(mmax_len,
+ balance_info.prev_rise.max_len) >= 90
+ && ERTS_PERCENT(scheds_reds,
+ balance_info.prev_rise.reds) >= 90) {
+ active = balance_info.prev_rise.active_runqs;
+ }
+ }
+
+ if (active < min)
+ active = min;
+ else if (active > blnc_no_rqs)
+ active = blnc_no_rqs;
+
+ if (active == blnc_no_rqs)
+ goto all_active;
+
+ for (qix = 0; qix < active; qix++) {
+ run_queue_info[qix].flags = 0;
+ for (pix = 0; pix < ERTS_NO_PRIO_LEVELS; pix++) {
+ run_queue_info[qix].prio[pix].emigrate_to = -1;
+ run_queue_info[qix].prio[pix].immigrate_from = -1;
+ run_queue_info[qix].prio[pix].migration_limit = 0;
+ }
+ }
+ for (qix = active; qix < blnc_no_rqs; qix++) {
+ run_queue_info[qix].flags = ERTS_RUNQ_FLG_INACTIVE;
+ for (pix = 0; pix < ERTS_NO_PRIO_LEVELS; pix++) {
+ int tix = qix % active;
+ ERTS_SET_RUNQ_FLG_EMIGRATE(run_queue_info[qix].flags, pix);
+ run_queue_info[qix].prio[pix].emigrate_to = tix;
+ run_queue_info[qix].prio[pix].immigrate_from = -1;
+ run_queue_info[qix].prio[pix].migration_limit = 0;
+ }
+ }
+ }
+ else {
+ if (balance_info.last_active_runqs < current_active)
+ ERTS_BLNCE_SAVE_RISE(current_active, mmax_len, scheds_reds);
+ all_active:
+
+ active = blnc_no_rqs;
+
+ for (qix = 0; qix < blnc_no_rqs; qix++) {
+
+ if (full_scheds_reds > 0) {
+ /* Calculate availability compared to other schedulers */
+ if (!(run_queue_info[qix].flags & ERTS_RUNQ_FLG_OUT_OF_WORK)) {
+ Sint64 tmp = ((Sint64) run_queue_info[qix].full_reds
+ * (Sint64) full_scheds);
+ for (pix = 0; pix < ERTS_NO_PRIO_LEVELS; pix++) {
+ Sint64 avail = run_queue_info[qix].prio[pix].avail;
+ avail = (avail*tmp)/full_scheds_reds;
+ ASSERT(avail >= 0);
+ run_queue_info[qix].prio[pix].avail = (int) avail;
+ }
+ }
+ }
+
+ /* Calculate average max length */
+ for (pix = 0; pix < ERTS_NO_PRIO_LEVELS; pix++) {
+ run_queue_info[qix].prio[pix].emigrate_to = -1;
+ run_queue_info[qix].prio[pix].immigrate_from = -1;
+ avg.prio[pix].max_len += run_queue_info[qix].prio[pix].max_len;
+ avg.prio[pix].avail += run_queue_info[qix].prio[pix].avail;
+ }
+
+ }
+
+ for (pix = 0; pix < ERTS_NO_PRIO_LEVELS; pix++) {
+ int max_len = avg.prio[pix].max_len;
+ if (max_len != 0) {
+ int avail = avg.prio[pix].avail;
+ if (avail != 0) {
+ max_len = ((100*max_len - 1) / avail) + 1;
+ avg.prio[pix].max_len = max_len;
+ ASSERT(max_len >= 0);
+ }
+ }
+ }
+
+ /* Calculate migration limits for all priority queues in all
+ run queues */
+ for (qix = 0; qix < blnc_no_rqs; qix++) {
+ run_queue_info[qix].flags = 0; /* Reset for later use... */
+ for (pix = 0; pix < ERTS_NO_PRIO_LEVELS; pix++) {
+ int limit;
+ if (avg.prio[pix].max_len == 0
+ || run_queue_info[qix].prio[pix].avail == 0)
+ limit = 0;
+ else
+ limit = (((avg.prio[pix].max_len
+ * run_queue_info[qix].prio[pix].avail) - 1)
+ / 100 + 1);
+ run_queue_info[qix].prio[pix].migration_limit = limit;
+ }
+ }
+
+ /* Setup migration paths for all priorities */
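+ /*
+ * Queues whose max_len exceeds their migration limit should emigrate
+ * work; those below it can immigrate. Sort queues by the difference
+ * and pair the fullest with the emptiest.
+ */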
+ for (pix = 0; pix < ERTS_NO_PRIO_LEVELS; pix++) {
+ int low = 0, high = 0;
+ for (qix = 0; qix < blnc_no_rqs; qix++) {
+ int len_diff = run_queue_info[qix].prio[pix].max_len;
+ len_diff -= run_queue_info[qix].prio[pix].migration_limit;
+#ifdef DBG_PRINT
+if (pix == 2) erts_fprintf(stderr, "%d ", len_diff);
+#endif
+ run_queue_compare[qix].qix = qix;
+ run_queue_compare[qix].len = len_diff;
+ if (len_diff != 0) {
+ if (len_diff < 0)
+ low++;
+ else
+ high++;
+ }
+ }
+#ifdef DBG_PRINT
+if (pix == 2) erts_fprintf(stderr, "\n");
+#endif
+ if (low && high) {
+ int from_qix;
+ int to_qix;
+ int eof = 0;
+ int eot = 0;
+ int tix = 0;
+ int fix = blnc_no_rqs-1;
+ qsort(run_queue_compare,
+ blnc_no_rqs,
+ sizeof(ErtsRunQueueCompare),
+ rqc_len_cmp);
+
+ while (1) {
+ if (run_queue_compare[fix].len <= 0)
+ eof = 1;
+ if (run_queue_compare[tix].len >= 0)
+ eot = 1;
+ if (eof || eot)
+ break;
+ from_qix = run_queue_compare[fix].qix;
+ to_qix = run_queue_compare[tix].qix;
+ if (run_queue_info[from_qix].prio[pix].avail == 0) {
+ ERTS_SET_RUNQ_FLG_EVACUATE(run_queue_info[from_qix].flags,
+ pix);
+ ERTS_SET_RUNQ_FLG_EVACUATE(run_queue_info[to_qix].flags,
+ pix);
+ }
+ ERTS_SET_RUNQ_FLG_EMIGRATE(run_queue_info[from_qix].flags, pix);
+ ERTS_SET_RUNQ_FLG_IMMIGRATE(run_queue_info[to_qix].flags, pix);
+ run_queue_info[from_qix].prio[pix].emigrate_to = to_qix;
+ run_queue_info[to_qix].prio[pix].immigrate_from = from_qix;
+ tix++;
+ fix--;
+
+#ifdef DBG_PRINT
+if (pix == 2) erts_fprintf(stderr, "%d >--> %d\n", from_qix, to_qix);
+#endif
+ }
+
+ if (!eot && eof) {
+ if (fix < blnc_no_rqs-1)
+ fix++;
+
+ if (run_queue_compare[fix].len > 0) {
+ int fix2 = -1;
+ while (tix < fix) {
+ if (run_queue_compare[tix].len >= 0)
+ break;
+ if (fix2 < fix)
+ fix2 = blnc_no_rqs-1;
+ from_qix = run_queue_compare[fix2].qix;
+ to_qix = run_queue_compare[tix].qix;
+ ASSERT(to_qix != from_qix);
+ if (run_queue_info[from_qix].prio[pix].avail == 0)
+ ERTS_SET_RUNQ_FLG_EVACUATE(run_queue_info[to_qix].flags,
+ pix);
+ ERTS_SET_RUNQ_FLG_IMMIGRATE(run_queue_info[to_qix].flags, pix);
+ run_queue_info[to_qix].prio[pix].immigrate_from = from_qix;
+ tix++;
+ fix2--;
+#ifdef DBG_PRINT
+if (pix == 2) erts_fprintf(stderr, "%d --> %d\n", from_qix, to_qix);
+#endif
+ }
+ }
+ }
+ else if (!eof && eot) {
+ if (tix > 0)
+ tix--;
+ if (run_queue_compare[tix].len < 0) {
+ int tix2 = 0;
+ while (tix < fix) {
+ if (run_queue_compare[fix].len <= 0)
+ break;
+ if (tix2 > tix)
+ tix2 = 0;
+ from_qix = run_queue_compare[fix].qix;
+ to_qix = run_queue_compare[tix2].qix;
+ ASSERT(to_qix != from_qix);
+ if (run_queue_info[from_qix].prio[pix].avail == 0)
+ ERTS_SET_RUNQ_FLG_EVACUATE(run_queue_info[from_qix].flags,
+ pix);
+ ERTS_SET_RUNQ_FLG_EMIGRATE(run_queue_info[from_qix].flags, pix);
+ run_queue_info[from_qix].prio[pix].emigrate_to = to_qix;
+ fix--;
+ tix2++;
+#ifdef DBG_PRINT
+if (pix == 2) erts_fprintf(stderr, "%d >-- %d\n", from_qix, to_qix);
+#endif
+
+ }
+ }
+ }
+ }
+ }
+
+#ifdef DBG_PRINT
+erts_fprintf(stderr, "--------------------------------\n");
+#endif
+ }
+
+ balance_info.last_active_runqs = active;
+ erts_smp_atomic_set(&balance_info.active_runqs, active);
+
+ balance_info.halftime = 1;
+ erts_smp_atomic_set(&balance_info.checking_balance, 0);
+
+ /* Write migration paths and reset balance statistics in all queues */
+ for (qix = 0; qix < blnc_no_rqs; qix++) {
+ int mqix;
+ Uint32 flags;
+ ErtsRunQueue *rq = ERTS_RUNQ_IX(qix);
+ ErtsRunQueueInfo *rqi;
+ flags = run_queue_info[qix].flags;
+ erts_smp_runq_lock(rq);
+ flags |= (rq->flags & ~ERTS_RUNQ_FLGS_MIGRATION_INFO);
+ ASSERT(!(flags & ERTS_RUNQ_FLG_OUT_OF_WORK));
+ if (rq->waiting)
+ flags |= ERTS_RUNQ_FLG_OUT_OF_WORK;
+
+ rq->full_reds_history_sum
+ = run_queue_info[qix].full_reds_history_sum;
+ rq->full_reds_history[freds_hist_ix]
+ = run_queue_info[qix].full_reds_history_change;
+
+ ERTS_DBG_CHK_FULL_REDS_HISTORY(rq);
+
+ rq->out_of_work_count = 0;
+ rq->flags = flags;
+ rq->max_len = rq->len;
+ for (pix = 0; pix < ERTS_NO_PRIO_LEVELS; pix++) {
+ rqi = (pix == ERTS_PORT_PRIO_LEVEL
+ ? &rq->ports.info
+ : &rq->procs.prio_info[pix]);
+ rqi->max_len = rqi->len;
+ rqi->reds = 0;
+ if (!(ERTS_CHK_RUNQ_FLG_EMIGRATE(flags, pix)
+ | ERTS_CHK_RUNQ_FLG_IMMIGRATE(flags, pix))) {
+ ASSERT(run_queue_info[qix].prio[pix].immigrate_from < 0);
+ ASSERT(run_queue_info[qix].prio[pix].emigrate_to < 0);
+#ifdef DEBUG
+ rqi->migrate.limit.this = -1;
+ rqi->migrate.limit.other = -1;
+ ERTS_DBG_SET_INVALID_RUNQP(rqi->migrate.runq, 0x2);
+#endif
+
+ }
+ else if (ERTS_CHK_RUNQ_FLG_EMIGRATE(flags, pix)) {
+ ASSERT(!ERTS_CHK_RUNQ_FLG_IMMIGRATE(flags, pix));
+ ASSERT(run_queue_info[qix].prio[pix].immigrate_from < 0);
+ ASSERT(run_queue_info[qix].prio[pix].emigrate_to >= 0);
+
+ mqix = run_queue_info[qix].prio[pix].emigrate_to;
+ rqi->migrate.limit.this
+ = run_queue_info[qix].prio[pix].migration_limit;
+ rqi->migrate.limit.other
+ = run_queue_info[mqix].prio[pix].migration_limit;
+ rqi->migrate.runq = ERTS_RUNQ_IX(mqix);
+ }
+ else {
+ ASSERT(ERTS_CHK_RUNQ_FLG_IMMIGRATE(flags, pix));
+ ASSERT(run_queue_info[qix].prio[pix].emigrate_to < 0);
+ ASSERT(run_queue_info[qix].prio[pix].immigrate_from >= 0);
+
+ mqix = run_queue_info[qix].prio[pix].immigrate_from;
+ rqi->migrate.limit.this
+ = run_queue_info[qix].prio[pix].migration_limit;
+ rqi->migrate.limit.other
+ = run_queue_info[mqix].prio[pix].migration_limit;
+ rqi->migrate.runq = ERTS_RUNQ_IX(mqix);
+ }
+ }
+
+ rq->check_balance_reds = ERTS_RUNQ_CALL_CHECK_BALANCE_REDS;
+ erts_smp_runq_unlock(rq);
+ }
+
+ balance_info.n++;
+ erts_smp_mtx_unlock(&balance_info.update_mtx);
+
+ erts_smp_runq_lock(c_rq);
+}
+
+#endif /* #ifdef ERTS_SMP */
+
+Uint
+erts_debug_nbalance(void)
+{
+#ifdef ERTS_SMP
+ Uint n;
+ erts_smp_mtx_lock(&balance_info.update_mtx);
+ n = balance_info.n;
+ erts_smp_mtx_unlock(&balance_info.update_mtx);
+ return n;
+#else
+ return 0;
+#endif
+}
+
+void
+erts_early_init_scheduling(void)
+{
+ early_cpu_bind_init();
+}
+
+void
+erts_init_scheduling(int mrq, int no_schedulers, int no_schedulers_online)
+{
+ int ix, n;
+
+#ifndef ERTS_SMP
+ mrq = 0;
+#endif
+
+ init_misc_op_list_alloc();
+
+ ASSERT(no_schedulers_online <= no_schedulers);
+ ASSERT(no_schedulers_online >= 1);
+ ASSERT(no_schedulers >= 1);
+
+ /* Create and initialize run queues */
+
+ n = (int) (mrq ? no_schedulers : 1);
+
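+ /*
+ * Allocate one extra element so the array start can be adjusted to a
+ * cache line boundary, avoiding false sharing between run queues.
+ */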
+ erts_aligned_run_queues = erts_alloc(ERTS_ALC_T_RUNQS,
+ (sizeof(ErtsAlignedRunQueue)*(n+1)));
+ if ((((Uint) erts_aligned_run_queues) & ERTS_CACHE_LINE_MASK) != 0)
+ erts_aligned_run_queues = ((ErtsAlignedRunQueue *)
+ ((((Uint) erts_aligned_run_queues)
+ & ~ERTS_CACHE_LINE_MASK)
+ + ERTS_CACHE_LINE_SIZE));
+
+#ifdef ERTS_SMP
+ erts_smp_atomic_init(&no_empty_run_queues, 0);
+#endif
+
+ for (ix = 0; ix < n; ix++) {
+ int pix, rix;
+ ErtsRunQueue *rq = ERTS_RUNQ_IX(ix);
+
+ rq->ix = ix;
+ erts_smp_atomic_init(&rq->info_flags, ERTS_RUNQ_IFLG_NONEMPTY);
+
+ erts_smp_mtx_init(&rq->mtx, "run_queue");
+ erts_smp_cnd_init(&rq->cnd);
+
+ erts_smp_atomic_init(&rq->spin_waiter, 0);
+ erts_smp_atomic_init(&rq->spin_wake, 0);
+
+ rq->waiting = 0;
+ rq->woken = 0;
+ rq->flags = !mrq ? ERTS_RUNQ_FLG_SHARED_RUNQ : 0;
+ rq->check_balance_reds = ERTS_RUNQ_CALL_CHECK_BALANCE_REDS;
+ rq->full_reds_history_sum = 0;
+ for (rix = 0; rix < ERTS_FULL_REDS_HISTORY_SIZE; rix++) {
+ rq->full_reds_history_sum += ERTS_RUNQ_CHECK_BALANCE_REDS_PER_SCHED;
+ rq->full_reds_history[rix] = ERTS_RUNQ_CHECK_BALANCE_REDS_PER_SCHED;
+ }
+ rq->out_of_work_count = 0;
+ rq->max_len = 0;
+ rq->len = 0;
+ rq->wakeup_other = 0;
+ rq->wakeup_other_reds = 0;
+
+ rq->procs.len = 0;
+ rq->procs.pending_exiters = NULL;
+ rq->procs.context_switches = 0;
+ rq->procs.reductions = 0;
+
+ for (pix = 0; pix < ERTS_NO_PROC_PRIO_LEVELS; pix++) {
+ rq->procs.prio_info[pix].len = 0;
+ rq->procs.prio_info[pix].max_len = 0;
+ rq->procs.prio_info[pix].reds = 0;
+ rq->procs.prio_info[pix].migrate.limit.this = 0;
+ rq->procs.prio_info[pix].migrate.limit.other = 0;
+ ERTS_DBG_SET_INVALID_RUNQP(rq->procs.prio_info[pix].migrate.runq,
+ 0x0);
+ if (pix < ERTS_NO_PROC_PRIO_LEVELS - 1) {
+ rq->procs.prio[pix].first = NULL;
+ rq->procs.prio[pix].last = NULL;
+ }
+ }
+
+ rq->misc.start = NULL;
+ rq->misc.end = NULL;
+ rq->misc.evac_runq = NULL;
+
+ rq->ports.info.len = 0;
+ rq->ports.info.max_len = 0;
+ rq->ports.info.reds = 0;
+ rq->ports.info.migrate.limit.this = 0;
+ rq->ports.info.migrate.limit.other = 0;
+ rq->ports.info.migrate.runq = NULL;
+ rq->ports.start = NULL;
+ rq->ports.end = NULL;
+ }
+
+ erts_common_run_queue = !mrq ? ERTS_RUNQ_IX(0) : NULL;
+ erts_no_run_queues = n;
+
+#ifdef ERTS_SMP
+
+ if (erts_no_run_queues != 1) {
+ run_queue_info = erts_alloc(ERTS_ALC_T_RUNQ_BLNS,
+ (sizeof(ErtsRunQueueBalance)
+ * erts_no_run_queues));
+ run_queue_compare = erts_alloc(ERTS_ALC_T_RUNQ_BLNS,
+ (sizeof(ErtsRunQueueCompare)
+ * erts_no_run_queues));
+ }
+
+#endif
+
+ /* Create and initialize scheduler specific data */
+
+ n = (int) no_schedulers;
+ erts_aligned_scheduler_data = erts_alloc(ERTS_ALC_T_SCHDLR_DATA,
+ (sizeof(ErtsAlignedSchedulerData)
+ *(n+1)));
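+ /* Cache-line align the scheduler specific data, as for the run
+ * queues above */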
+ if ((((Uint) erts_aligned_scheduler_data) & ERTS_CACHE_LINE_MASK) != 0)
+ erts_aligned_scheduler_data = ((ErtsAlignedSchedulerData *)
+ ((((Uint) erts_aligned_scheduler_data)
+ & ~ERTS_CACHE_LINE_MASK)
+ + ERTS_CACHE_LINE_SIZE));
+ for (ix = 0; ix < n; ix++) {
+ ErtsSchedulerData *esdp = ERTS_SCHEDULER_IX(ix);
+#ifdef ERTS_SMP
+ erts_bits_init_state(&esdp->erl_bits_state);
+ esdp->match_pseudo_process = NULL;
+ esdp->free_process = NULL;
+#endif
+ esdp->no = (Uint) ix+1;
+ esdp->current_process = NULL;
+ esdp->current_port = NULL;
+
+ esdp->virtual_reds = 0;
+ esdp->cpu_id = -1;
+
+ erts_init_atom_cache_map(&esdp->atom_cache_map);
+
+ if (erts_common_run_queue) {
+ esdp->run_queue = erts_common_run_queue;
+ esdp->run_queue->scheduler = NULL;
+ }
+ else {
+ esdp->run_queue = ERTS_RUNQ_IX(ix);
+ esdp->run_queue->scheduler = esdp;
+ }
+
+#ifdef ERTS_SMP
+#ifdef ERTS_SMP_SCHEDULERS_NEED_TO_CHECK_CHILDREN
+ esdp->check_children = 0;
+ esdp->blocked_check_children = 0;
+#endif
+ erts_smp_atomic_init(&esdp->suspended, 0);
+ erts_smp_atomic_init(&esdp->chk_cpu_bind, 0);
+#endif
+ }
+
+#ifdef ERTS_SMP
+ erts_smp_mtx_init(&schdlr_sspnd.mtx, "schdlr_sspnd");
+ erts_smp_cnd_init(&schdlr_sspnd.cnd);
+
+ schdlr_sspnd.changing = 0;
+ schdlr_sspnd.online = no_schedulers_online;
+ schdlr_sspnd.curr_online = no_schedulers;
+ erts_smp_atomic_init(&schdlr_sspnd.msb.ongoing, 0);
+ erts_smp_atomic_init(&schdlr_sspnd.active, no_schedulers);
+ schdlr_sspnd.msb.procs = NULL;
+ erts_smp_atomic_init(&balance_info.used_runqs,
+ erts_common_run_queue ? 1 : no_schedulers_online);
+ erts_smp_atomic_init(&balance_info.active_runqs, no_schedulers);
+ balance_info.last_active_runqs = no_schedulers;
+ erts_smp_mtx_init(&balance_info.update_mtx, "migration_info_update");
+ balance_info.forced_check_balance = 0;
+ balance_info.halftime = 1;
+ balance_info.full_reds_history_index = 0;
+ erts_smp_atomic_init(&balance_info.checking_balance, 0);
+ balance_info.prev_rise.active_runqs = 0;
+ balance_info.prev_rise.max_len = 0;
+ balance_info.prev_rise.reds = 0;
+ balance_info.n = 0;
+
+ if (no_schedulers_online < no_schedulers) {
+ if (erts_common_run_queue) {
+ for (ix = no_schedulers_online; ix < no_schedulers; ix++)
+ erts_smp_atomic_set(&(ERTS_SCHEDULER_IX(ix)->suspended), 1);
+ }
+ else {
+ for (ix = no_schedulers_online; ix < erts_no_run_queues; ix++)
+ evacuate_run_queue(ERTS_RUNQ_IX(ix),
+ ERTS_RUNQ_IX(ix % no_schedulers_online));
+ }
+ }
+
+ schdlr_sspnd.wait_curr_online = no_schedulers_online;
+ schdlr_sspnd.curr_online *= 2; /* Bootstrapping: decremented once by
+ each scheduler thread at startup */
+ schdlr_sspnd.changing = ERTS_SCHED_CHANGING_ONLINE;
+
+ erts_smp_atomic_init(&doing_sys_schedule, 0);
+
+#else /* !ERTS_SMP */
+ {
+ ErtsSchedulerData *esdp;
+ esdp = ERTS_SCHEDULER_IX(0);
+ erts_scheduler_data = esdp;
+#ifdef USE_THREADS
+ erts_tsd_set(sched_data_key, (void *) esdp);
+#endif
+ }
+ erts_no_schedulers = 1;
+#endif
+
+ erts_smp_atomic_init(&function_calls, 0);
+
+ /* init port tasks */
+ erts_port_task_init();
+
+ late_cpu_bind_init();
+}
+
+ErtsRunQueue *
+erts_schedid2runq(Uint id)
+{
+ int ix;
+ if (erts_common_run_queue)
+ return erts_common_run_queue;
+ ix = (int) id - 1;
+ ASSERT(0 <= ix && ix < erts_no_run_queues);
+ return ERTS_RUNQ_IX(ix);
+}
+
+#ifdef USE_THREADS
+
+ErtsSchedulerData *
+erts_get_scheduler_data(void)
+{
+ return (ErtsSchedulerData *) erts_tsd_get(sched_data_key);
+}
+
+#endif
+
+static int remove_proc_from_runq(ErtsRunQueue *rq, Process *p, int to_inactive);
+
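+/*
+ * Suspends nest: suspend_process() bumps p->rcount, and
+ * resume_process() only makes the process runnable again when the
+ * count drops back to zero.
+ */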
+static ERTS_INLINE void
+suspend_process(ErtsRunQueue *rq, Process *p)
+{
+ ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_STATUS & erts_proc_lc_my_proc_locks(p));
+ ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq));
+ p->rcount++; /* count the number of nested suspends */
+#ifdef ERTS_SMP
+ ASSERT(!(p->runq_flags & ERTS_PROC_RUNQ_FLG_RUNNING)
+ || p == erts_get_current_process());
+ ASSERT(p->status != P_RUNNING
+ || p->runq_flags & ERTS_PROC_RUNQ_FLG_RUNNING);
+ if (p->status_flags & ERTS_PROC_SFLG_PENDADD2SCHEDQ)
+ goto runable;
+#endif
+ switch(p->status) {
+ case P_SUSPENDED:
+ break;
+ case P_RUNABLE:
+#ifdef ERTS_SMP
+ runable:
+ if (!ERTS_PROC_PENDING_EXIT(p))
+#endif
+ remove_proc_from_runq(rq, p, 1);
+ /* else:
+ * leave process in schedq so it will discover the pending exit
+ */
+ p->rstatus = P_RUNABLE; /* wakeup as runnable */
+ break;
+ case P_RUNNING:
+ p->rstatus = P_RUNABLE; /* wakeup as runnable */
+ break;
+ case P_WAITING:
+ p->rstatus = P_WAITING; /* wakeup as waiting */
+ break;
+ case P_EXITING:
+ return; /* ignore this */
+ case P_GARBING:
+ case P_FREE:
+ erl_exit(1, "bad state in suspend_process()\n");
+ }
+
+ if (erts_system_profile_flags.runnable_procs
+ && p->rcount == 1
+ && p->status != P_WAITING) {
+ profile_runnable_proc(p, am_inactive);
+ }
+
+ p->status = P_SUSPENDED;
+}
+
+static ERTS_INLINE void
+resume_process(Process *p)
+{
+ Uint32 *statusp;
+ ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_STATUS & erts_proc_lc_my_proc_locks(p));
+ switch (p->status) {
+ case P_SUSPENDED:
+ statusp = &p->status;
+ break;
+ case P_GARBING:
+ if (p->gcstatus == P_SUSPENDED) {
+ statusp = &p->gcstatus;
+ break;
+ }
+ /* Fall through */
+ default:
+ return;
+ }
+
+ ASSERT(p->rcount > 0);
+
+ if (--p->rcount > 0) /* multiple suspend */
+ return;
+ switch(p->rstatus) {
+ case P_RUNABLE:
+ *statusp = P_WAITING; /* make erts_add_to_runq work */
+ erts_add_to_runq(p);
+ break;
+ case P_WAITING:
+ *statusp = P_WAITING;
+ break;
+ default:
+ erl_exit(1, "bad state in resume_process()\n");
+ }
+ p->rstatus = P_FREE;
+}
+
+#ifdef ERTS_SMP
+
+static void
+susp_sched_prep_block(void *unused)
+{
+ erts_smp_mtx_unlock(&schdlr_sspnd.mtx);
+}
+
+static void
+susp_sched_resume_block(void *unused)
+{
+ erts_smp_mtx_lock(&schdlr_sspnd.mtx);
+}
+
+static void
+suspend_scheduler(ErtsSchedulerData *esdp)
+{
+ long no = (long) esdp->no;
+ ErtsRunQueue *rq = esdp->run_queue;
+ long active_schedulers;
+ int curr_online = 1;
+ int wake = 0;
+
+ /*
+ * Schedulers may be suspended in two different ways:
+ * - A scheduler may be suspended because it is not online.
+ * All schedulers with scheduler ids greater than
+ * schdlr_sspnd.online are suspended.
+ * - Multi-scheduling is blocked. All schedulers except the
+ * scheduler with scheduler id 1 are suspended.
+ *
+ * Regardless of why a scheduler is suspended, it ends up here.
+ */
+
+ ASSERT(no != 1);
+
+ erts_smp_runq_unlock(esdp->run_queue);
+
+ /* Unbind from cpu */
+ erts_smp_rwmtx_rwlock(&erts_cpu_bind_rwmtx);
+ if (scheduler2cpu_map[esdp->no].bound_id >= 0
+ && erts_unbind_from_cpu(erts_cpuinfo) == 0) {
+ esdp->cpu_id = scheduler2cpu_map[esdp->no].bound_id = -1;
+ }
+ erts_smp_rwmtx_rwunlock(&erts_cpu_bind_rwmtx);
+
+ if (erts_system_profile_flags.scheduler)
+ profile_scheduler(make_small(esdp->no), am_inactive);
+
+ erts_smp_mtx_lock(&schdlr_sspnd.mtx);
+
+ active_schedulers = erts_smp_atomic_dectest(&schdlr_sspnd.active);
+ ASSERT(active_schedulers >= 1);
+ if (schdlr_sspnd.changing == ERTS_SCHED_CHANGING_MULTI_SCHED) {
+ if (active_schedulers == schdlr_sspnd.msb.wait_active)
+ wake = 1;
+ if (active_schedulers == 1)
+ schdlr_sspnd.changing = 0;
+ }
+
+ while (1) {
+
+#ifdef ERTS_SMP_SCHEDULERS_NEED_TO_CHECK_CHILDREN
+ int check_children;
+ erts_smp_runq_lock(esdp->run_queue);
+ check_children = esdp->check_children;
+ esdp->check_children = 0;
+ erts_smp_runq_unlock(esdp->run_queue);
+ if (check_children) {
+ erts_smp_mtx_unlock(&schdlr_sspnd.mtx);
+ erts_check_children();
+ erts_smp_mtx_lock(&schdlr_sspnd.mtx);
+ }
+#endif
+
+ if (schdlr_sspnd.changing == ERTS_SCHED_CHANGING_ONLINE) {
+ int changed = 0;
+ if (no > schdlr_sspnd.online && curr_online) {
+ schdlr_sspnd.curr_online--;
+ curr_online = 0;
+ changed = 1;
+ }
+ else if (no <= schdlr_sspnd.online && !curr_online) {
+ schdlr_sspnd.curr_online++;
+ curr_online = 1;
+ changed = 1;
+ }
+ if (changed
+ && schdlr_sspnd.curr_online == schdlr_sspnd.wait_curr_online)
+ wake = 1;
+ if (schdlr_sspnd.online == schdlr_sspnd.curr_online)
+ schdlr_sspnd.changing = 0;
+ }
+
+ if (wake) {
+ erts_smp_cnd_broadcast(&schdlr_sspnd.cnd);
+ wake = 0;
+ }
+
+ if (!(rq->flags & (ERTS_RUNQ_FLG_SHARED_RUNQ|ERTS_RUNQ_FLG_SUSPENDED)))
+ break;
+ if ((rq->flags & ERTS_RUNQ_FLG_SHARED_RUNQ)
+ && !erts_smp_atomic_read(&esdp->suspended))
+ break;
+
+ erts_smp_activity_begin(ERTS_ACTIVITY_WAIT,
+ susp_sched_prep_block,
+ susp_sched_resume_block,
+ NULL);
+ while (1) {
+
+#ifdef ERTS_SMP_SCHEDULERS_NEED_TO_CHECK_CHILDREN
+ if (esdp->blocked_check_children)
+ break;
+#endif
+
+ erts_smp_cnd_wait(&schdlr_sspnd.cnd, &schdlr_sspnd.mtx);
+
+ if (schdlr_sspnd.changing == ERTS_SCHED_CHANGING_ONLINE)
+ break;
+
+ if (!(rq->flags & (ERTS_RUNQ_FLG_SHARED_RUNQ
+ | ERTS_RUNQ_FLG_SUSPENDED)))
+ break;
+ if ((rq->flags & ERTS_RUNQ_FLG_SHARED_RUNQ)
+ && !erts_smp_atomic_read(&esdp->suspended))
+ break;
+ }
+
+#ifdef ERTS_SMP_SCHEDULERS_NEED_TO_CHECK_CHILDREN
+ esdp->blocked_check_children = 0;
+#endif
+
+ erts_smp_activity_end(ERTS_ACTIVITY_WAIT,
+ susp_sched_prep_block,
+ susp_sched_resume_block,
+ NULL);
+ }
+
+ erts_smp_atomic_inc(&schdlr_sspnd.active);
+
+ erts_smp_mtx_unlock(&schdlr_sspnd.mtx);
+
+ if (erts_system_profile_flags.scheduler)
+ profile_scheduler(make_small(esdp->no), am_active);
+
+ erts_smp_runq_lock(esdp->run_queue);
+ non_empty_runq(esdp->run_queue);
+
+ /* Make sure we check if we should bind to a cpu or not... */
+ if (rq->flags & ERTS_RUNQ_FLG_SHARED_RUNQ)
+ erts_smp_atomic_set(&esdp->chk_cpu_bind, 1);
+ else
+ rq->flags |= ERTS_RUNQ_FLG_CHK_CPU_BIND;
+}
+
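+/*
+ * Reset a run queue that is being brought back online: clear all
+ * migration and evacuation state as well as the suspended flag, and
+ * reset the balancing statistics so that the next balance check
+ * starts from a clean state.
+ */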
+#define ERTS_RUNQ_RESET_SUSPEND_INFO(RQ, DBG_ID) \
+do { \
+ int pix__; \
+ (RQ)->misc.evac_runq = NULL; \
+ (RQ)->ports.info.migrate.runq = NULL; \
+ (RQ)->flags &= ~(ERTS_RUNQ_FLGS_IMMIGRATE_QMASK \
+ | ERTS_RUNQ_FLGS_EMIGRATE_QMASK \
+ | ERTS_RUNQ_FLGS_EVACUATE_QMASK \
+ | ERTS_RUNQ_FLG_SUSPENDED); \
+ (RQ)->flags |= (ERTS_RUNQ_FLG_OUT_OF_WORK \
+ | ERTS_RUNQ_FLG_HALFTIME_OUT_OF_WORK); \
+ (RQ)->check_balance_reds = ERTS_RUNQ_CALL_CHECK_BALANCE_REDS; \
+ erts_smp_atomic_band(&(RQ)->info_flags, ~ERTS_RUNQ_IFLG_SUSPENDED); \
+ for (pix__ = 0; pix__ < ERTS_NO_PROC_PRIO_LEVELS; pix__++) { \
+ (RQ)->procs.prio_info[pix__].max_len = 0; \
+ (RQ)->procs.prio_info[pix__].reds = 0; \
+ ERTS_DBG_SET_INVALID_RUNQP((RQ)->procs.prio_info[pix__].migrate.runq,\
+ (DBG_ID)); \
+ } \
+ (RQ)->ports.info.max_len = 0; \
+ (RQ)->ports.info.reds = 0; \
+} while (0)
+
+#define ERTS_RUNQ_RESET_MIGRATION_PATHS__(RQ) \
+do { \
+ ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked((RQ))); \
+ (RQ)->misc.evac_runq = NULL; \
+ (RQ)->ports.info.migrate.runq = NULL; \
+ (RQ)->flags &= ~(ERTS_RUNQ_FLGS_IMMIGRATE_QMASK \
+ | ERTS_RUNQ_FLGS_EMIGRATE_QMASK \
+ | ERTS_RUNQ_FLGS_EVACUATE_QMASK); \
+} while (0)
+
+#ifdef DEBUG
+#define ERTS_RUNQ_RESET_MIGRATION_PATHS(RQ, DBG_ID) \
+do { \
+ int pix__; \
+ ERTS_RUNQ_RESET_MIGRATION_PATHS__((RQ)); \
+ for (pix__ = 0; pix__ < ERTS_NO_PROC_PRIO_LEVELS; pix__++) \
+ ERTS_DBG_SET_INVALID_RUNQP((RQ)->procs.prio_info[pix__].migrate.runq,\
+ (DBG_ID)); \
+} while (0)
+#else
+#define ERTS_RUNQ_RESET_MIGRATION_PATHS(RQ, DBG_ID) \
+ ERTS_RUNQ_RESET_MIGRATION_PATHS__((RQ))
+#endif
+
+ErtsSchedSuspendResult
+erts_schedulers_state(Uint *total,
+ Uint *online,
+ Uint *active,
+ int yield_allowed)
+{
+ int res;
+ erts_smp_mtx_lock(&schdlr_sspnd.mtx);
+ if (yield_allowed && schdlr_sspnd.changing)
+ res = ERTS_SCHDLR_SSPND_YIELD_RESTART;
+ else {
+ *active = *online = schdlr_sspnd.online;
+ if (ongoing_multi_scheduling_block())
+ *active = 1;
+ res = ERTS_SCHDLR_SSPND_DONE;
+ }
+ erts_smp_mtx_unlock(&schdlr_sspnd.mtx);
+ *total = erts_no_schedulers;
+ return res;
+}
+
+ErtsSchedSuspendResult
+erts_set_schedulers_online(Process *p,
+ ErtsProcLocks plocks,
+ Sint new_no,
+ Sint *old_no)
+{
+ int ix, res, no, have_unlocked_plocks;
+
+ if (new_no < 1 || erts_no_schedulers < new_no)
+ return ERTS_SCHDLR_SSPND_EINVAL;
+
+ erts_smp_mtx_lock(&schdlr_sspnd.mtx);
+
+ have_unlocked_plocks = 0;
+ no = (int) new_no;
+
+ if (schdlr_sspnd.changing) {
+ res = ERTS_SCHDLR_SSPND_YIELD_RESTART;
+ }
+ else {
+ int online = *old_no = schdlr_sspnd.online;
+ if (no == schdlr_sspnd.online) {
+ res = ERTS_SCHDLR_SSPND_DONE;
+ }
+ else {
+ schdlr_sspnd.changing = ERTS_SCHED_CHANGING_ONLINE;
+ schdlr_sspnd.online = no;
+ if (no > online) {
+ int ix;
+ schdlr_sspnd.wait_curr_online = no;
+ if (ongoing_multi_scheduling_block())
+ /* No schedulers to resume */;
+ else if (erts_common_run_queue) {
+ for (ix = online; ix < no; ix++)
+ erts_smp_atomic_set(&ERTS_SCHEDULER_IX(ix)->suspended,
+ 0);
+ }
+ else {
+ if (plocks) {
+ have_unlocked_plocks = 1;
+ erts_smp_proc_unlock(p, plocks);
+ }
+ erts_smp_mtx_unlock(&schdlr_sspnd.mtx);
+ erts_smp_mtx_lock(&balance_info.update_mtx);
+ for (ix = online; ix < no; ix++) {
+ ErtsRunQueue *rq = ERTS_RUNQ_IX(ix);
+ erts_smp_runq_lock(rq);
+ ERTS_RUNQ_RESET_SUSPEND_INFO(rq, 0x5);
+ erts_smp_runq_unlock(rq);
+ }
+ /*
+ * Spread evacuation paths among all online
+ * run queues.
+ */
+ for (ix = no; ix < erts_no_run_queues; ix++) {
+ ErtsRunQueue *from_rq = ERTS_RUNQ_IX(ix);
+ ErtsRunQueue *to_rq = ERTS_RUNQ_IX(ix % no);
+ evacuate_run_queue(from_rq, to_rq);
+ }
+ erts_smp_atomic_set(&balance_info.used_runqs, no);
+ erts_smp_mtx_unlock(&balance_info.update_mtx);
+ erts_smp_mtx_lock(&schdlr_sspnd.mtx);
+ }
+ erts_smp_cnd_broadcast(&schdlr_sspnd.cnd);
+ res = ERTS_SCHDLR_SSPND_DONE;
+ }
+ else /* if (no < online) */ {
+ if (p->scheduler_data->no <= no) {
+ res = ERTS_SCHDLR_SSPND_DONE;
+ schdlr_sspnd.wait_curr_online = no;
+ }
+ else {
+ /*
+ * Yield! The current process needs to migrate
+ * before the BIF returns.
+ */
+ res = ERTS_SCHDLR_SSPND_YIELD_DONE;
+ schdlr_sspnd.wait_curr_online = no+1;
+ }
+
+ if (ongoing_multi_scheduling_block())
+ erts_smp_cnd_broadcast(&schdlr_sspnd.cnd);
+ else if (erts_common_run_queue) {
+ for (ix = no; ix < online; ix++)
+ erts_smp_atomic_set(&ERTS_SCHEDULER_IX(ix)->suspended,
+ 1);
+ wake_all_schedulers();
+ }
+ else {
+ if (plocks) {
+ have_unlocked_plocks = 1;
+ erts_smp_proc_unlock(p, plocks);
+ }
+ erts_smp_mtx_unlock(&schdlr_sspnd.mtx);
+ erts_smp_mtx_lock(&balance_info.update_mtx);
+
+ for (ix = 0; ix < online; ix++) {
+ ErtsRunQueue *rq = ERTS_RUNQ_IX(ix);
+ erts_smp_runq_lock(rq);
+ ERTS_RUNQ_RESET_MIGRATION_PATHS(rq, 0x6);
+ erts_smp_runq_unlock(rq);
+ }
+ /*
+ * Evacuation order is important! Newly suspended run
+ * queues have to be evacuated last.
+ */
+ for (ix = erts_no_run_queues-1; ix >= no; ix--)
+ evacuate_run_queue(ERTS_RUNQ_IX(ix),
+ ERTS_RUNQ_IX(ix % no));
+ erts_smp_atomic_set(&balance_info.used_runqs, no);
+ erts_smp_mtx_unlock(&balance_info.update_mtx);
+ erts_smp_mtx_lock(&schdlr_sspnd.mtx);
+ ERTS_FOREACH_OP_RUNQ(rq, wake_scheduler(rq, 0));
+ }
+ }
+
+ erts_smp_activity_begin(ERTS_ACTIVITY_WAIT,
+ susp_sched_prep_block,
+ susp_sched_resume_block,
+ NULL);
+ while (schdlr_sspnd.curr_online != schdlr_sspnd.wait_curr_online)
+ erts_smp_cnd_wait(&schdlr_sspnd.cnd, &schdlr_sspnd.mtx);
+ erts_smp_activity_end(ERTS_ACTIVITY_WAIT,
+ susp_sched_prep_block,
+ susp_sched_resume_block,
+ NULL);
+ }
+ }
+
+ erts_smp_mtx_unlock(&schdlr_sspnd.mtx);
+ if (have_unlocked_plocks)
+ erts_smp_proc_lock(p, plocks);
+
+ return res;
+}
+
+ErtsSchedSuspendResult
+erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int all)
+{
+ int ix, res, have_unlocked_plocks = 0;
+ ErtsProcList *plp;
+
+ erts_smp_mtx_lock(&schdlr_sspnd.mtx);
+ if (on) {
+ if (schdlr_sspnd.changing) {
+ res = ERTS_SCHDLR_SSPND_YIELD_RESTART; /* Yield */
+ }
+ else if (erts_is_multi_scheduling_blocked()) {
+ plp = proclist_create(p);
+ plp->next = schdlr_sspnd.msb.procs;
+ schdlr_sspnd.msb.procs = plp;
+ p->flags |= F_HAVE_BLCKD_MSCHED;
+ ASSERT(erts_smp_atomic_read(&schdlr_sspnd.active) == 1);
+ ASSERT(p->scheduler_data->no == 1);
+ res = ERTS_SCHDLR_SSPND_DONE_MSCHED_BLOCKED;
+ }
+ else {
+ p->flags |= F_HAVE_BLCKD_MSCHED;
+ if (plocks) {
+ have_unlocked_plocks = 1;
+ erts_smp_proc_unlock(p, plocks);
+ }
+ erts_smp_atomic_set(&schdlr_sspnd.msb.ongoing, 1);
+ if (schdlr_sspnd.online == 1) {
+ res = ERTS_SCHDLR_SSPND_DONE_MSCHED_BLOCKED;
+ ASSERT(erts_smp_atomic_read(&schdlr_sspnd.active) == 1);
+ ASSERT(p->scheduler_data->no == 1);
+ }
+ else {
+ schdlr_sspnd.changing = ERTS_SCHED_CHANGING_MULTI_SCHED;
+ if (p->scheduler_data->no == 1) {
+ res = ERTS_SCHDLR_SSPND_DONE_MSCHED_BLOCKED;
+ schdlr_sspnd.msb.wait_active = 1;
+ }
+ else {
+ /*
+ * Yield! The current process needs to migrate
+ * before the BIF returns.
+ */
+ res = ERTS_SCHDLR_SSPND_YIELD_DONE_MSCHED_BLOCKED;
+ schdlr_sspnd.msb.wait_active = 2;
+ }
+ if (erts_common_run_queue) {
+ for (ix = 1; ix < schdlr_sspnd.online; ix++)
+ erts_smp_atomic_set(&ERTS_SCHEDULER_IX(ix)->suspended, 1);
+ wake_all_schedulers();
+ }
+ else {
+ erts_smp_mtx_unlock(&schdlr_sspnd.mtx);
+ erts_smp_mtx_lock(&balance_info.update_mtx);
+ erts_smp_atomic_set(&balance_info.used_runqs, 1);
+ for (ix = 0; ix < schdlr_sspnd.online; ix++) {
+ ErtsRunQueue *rq = ERTS_RUNQ_IX(ix);
+ erts_smp_runq_lock(rq);
+ ERTS_RUNQ_RESET_MIGRATION_PATHS(rq, 0x7);
+ erts_smp_runq_unlock(rq);
+ }
+ /*
+ * Evacuate all activities in all other run queues
+ * into the first run queue. Note that the order is
+ * important: online run queues have to be evacuated
+ * last.
+ */
+ for (ix = erts_no_run_queues-1; ix >= 1; ix--)
+ evacuate_run_queue(ERTS_RUNQ_IX(ix), ERTS_RUNQ_IX(0));
+ erts_smp_mtx_unlock(&balance_info.update_mtx);
+ erts_smp_mtx_lock(&schdlr_sspnd.mtx);
+ }
+ erts_smp_activity_begin(ERTS_ACTIVITY_WAIT,
+ susp_sched_prep_block,
+ susp_sched_resume_block,
+ NULL);
+ while (erts_smp_atomic_read(&schdlr_sspnd.active)
+ != schdlr_sspnd.msb.wait_active)
+ erts_smp_cnd_wait(&schdlr_sspnd.cnd, &schdlr_sspnd.mtx);
+ erts_smp_activity_end(ERTS_ACTIVITY_WAIT,
+ susp_sched_prep_block,
+ susp_sched_resume_block,
+ NULL);
+ }
+ plp = proclist_create(p);
+ plp->next = schdlr_sspnd.msb.procs;
+ schdlr_sspnd.msb.procs = plp;
+#ifdef DEBUG
+ ERTS_FOREACH_RUNQ(srq,
+ {
+ if (srq != ERTS_RUNQ_IX(0)) {
+ ASSERT(ERTS_EMPTY_RUNQ(srq));
+ ASSERT(srq->flags & ERTS_RUNQ_FLG_SUSPENDED);
+ }
+ });
+#endif
+ ASSERT(p->scheduler_data);
+ }
+ }
+ else if (!ongoing_multi_scheduling_block()) {
+ ASSERT(!schdlr_sspnd.msb.procs);
+ res = ERTS_SCHDLR_SSPND_DONE;
+ }
+ else {
+ if (p->flags & F_HAVE_BLCKD_MSCHED) {
+ ErtsProcList **plpp = &schdlr_sspnd.msb.procs;
+ plp = schdlr_sspnd.msb.procs;
+
+ while (plp) {
+ if (!proclist_same(plp, p)){
+ plpp = &plp->next;
+ plp = plp->next;
+ }
+ else {
+ *plpp = plp->next;
+ proclist_destroy(plp);
+ if (!all)
+ break;
+ plp = *plpp;
+ }
+ }
+ }
+ if (schdlr_sspnd.msb.procs)
+ res = ERTS_SCHDLR_SSPND_DONE_MSCHED_BLOCKED;
+ else {
+ schdlr_sspnd.changing = ERTS_SCHED_CHANGING_MULTI_SCHED;
+#ifdef DEBUG
+ ERTS_FOREACH_RUNQ(rq,
+ {
+ if (rq != p->scheduler_data->run_queue) {
+ if (!ERTS_EMPTY_RUNQ(rq)) {
+ Process *rp;
+ int pix;
+ ASSERT(rq->ports.info.len == 0);
+ for (pix = 0; pix < ERTS_NO_PROC_PRIO_LEVELS; pix++) {
+ for (rp = rq->procs.prio[pix].first;
+ rp;
+ rp = rp->next) {
+ ASSERT(rp->bound_runq);
+ }
+ }
+ }
+
+ ASSERT(rq->flags & ERTS_RUNQ_FLG_SUSPENDED);
+ }
+ });
+#endif
+ p->flags &= ~F_HAVE_BLCKD_MSCHED;
+ erts_smp_atomic_set(&schdlr_sspnd.msb.ongoing, 0);
+ if (schdlr_sspnd.online == 1)
+ /* No schedulers to resume */;
+ else if (erts_common_run_queue) {
+ for (ix = 1; ix < schdlr_sspnd.online; ix++)
+ erts_smp_atomic_set(&ERTS_SCHEDULER_IX(ix)->suspended, 0);
+ wake_all_schedulers();
+ }
+ else {
+ int online = schdlr_sspnd.online;
+ erts_smp_mtx_unlock(&schdlr_sspnd.mtx);
+ if (plocks) {
+ have_unlocked_plocks = 1;
+ erts_smp_proc_unlock(p, plocks);
+ }
+ erts_smp_mtx_lock(&balance_info.update_mtx);
+
+ /* Resume all online run queues */
+ for (ix = 1; ix < online; ix++) {
+ ErtsRunQueue *rq = ERTS_RUNQ_IX(ix);
+ erts_smp_runq_lock(rq);
+ ERTS_RUNQ_RESET_SUSPEND_INFO(rq, 0x4);
+ erts_smp_runq_unlock(rq);
+ }
+
+ /* Spread evacuation paths among all online run queues */
+ for (ix = online; ix < erts_no_run_queues; ix++)
+ evacuate_run_queue(ERTS_RUNQ_IX(ix),
+ ERTS_RUNQ_IX(ix % online));
+
+ erts_smp_atomic_set(&balance_info.used_runqs, online);
+ /* Make sure that we balance soon... */
+ balance_info.forced_check_balance = 1;
+ erts_smp_runq_lock(ERTS_RUNQ_IX(0));
+ ERTS_RUNQ_IX(0)->check_balance_reds = 0;
+ erts_smp_runq_unlock(ERTS_RUNQ_IX(0));
+ erts_smp_mtx_unlock(&balance_info.update_mtx);
+ erts_smp_mtx_lock(&schdlr_sspnd.mtx);
+ }
+ erts_smp_cnd_broadcast(&schdlr_sspnd.cnd);
+ schdlr_sspnd.changing = 0;
+ res = ERTS_SCHDLR_SSPND_DONE;
+ }
+ }
+
+ erts_smp_mtx_unlock(&schdlr_sspnd.mtx);
+ if (have_unlocked_plocks)
+ erts_smp_proc_lock(p, plocks);
+ return res;
+}
+
+#ifdef DEBUG
+void
+erts_dbg_multi_scheduling_return_trap(Process *p, Eterm return_value)
+{
+ if (return_value == am_blocked) {
+ long active = erts_smp_atomic_read(&schdlr_sspnd.active);
+ ASSERT(1 <= active && active <= 2);
+ ASSERT(ERTS_PROC_GET_SCHDATA(p)->no == 1);
+ }
+}
+#endif
+
+int
+erts_is_multi_scheduling_blocked(void)
+{
+ return (erts_smp_atomic_read(&schdlr_sspnd.msb.ongoing)
+ && erts_smp_atomic_read(&schdlr_sspnd.active) == 1);
+}
+
+Eterm
+erts_multi_scheduling_blockers(Process *p)
+{
+ Eterm res = NIL;
+
+ erts_smp_mtx_lock(&schdlr_sspnd.mtx);
+ if (erts_is_multi_scheduling_blocked()) {
+ Eterm *hp, *hp_end;
+ ErtsProcList *plp1, *plp2;
+ Uint max_size;
+ ASSERT(schdlr_sspnd.msb.procs);
+ for (max_size = 0, plp1 = schdlr_sspnd.msb.procs;
+ plp1;
+ plp1 = plp1->next) {
+ max_size += 2;
+ }
+ ASSERT(max_size);
+ hp = HAlloc(p, max_size);
+ hp_end = hp + max_size;
+ for (plp1 = schdlr_sspnd.msb.procs; plp1; plp1 = plp1->next) {
+ for (plp2 = schdlr_sspnd.msb.procs;
+ plp2->pid != plp1->pid;
+ plp2 = plp2->next);
+ if (plp2 == plp1) {
+ res = CONS(hp, plp1->pid, res);
+ hp += 2;
+ }
+ /* else: already in result list */
+ }
+ HRelease(p, hp_end, hp);
+ }
+ erts_smp_mtx_unlock(&schdlr_sspnd.mtx);
+ return res;
+}
+
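+/*
+ * Entry point for each scheduler thread: set up thread local data,
+ * take part in the curr_online handshake with the other schedulers,
+ * and then enter process_main(), which never returns.
+ */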
+static void *
+sched_thread_func(void *vesdp)
+{
+#ifdef ERTS_ENABLE_LOCK_CHECK
+ {
+ char buf[31];
+ Uint no = ((ErtsSchedulerData *) vesdp)->no;
+ erts_snprintf(&buf[0], 31, "scheduler %bpu", no);
+ erts_lc_set_thread_name(&buf[0]);
+ }
+#endif
+ erts_alloc_reg_scheduler_id(((ErtsSchedulerData *) vesdp)->no);
+ erts_tsd_set(sched_data_key, vesdp);
+#ifdef ERTS_SMP
+ erts_proc_lock_prepare_proc_lock_waiter();
+#endif
+ erts_register_blockable_thread();
+#ifdef HIPE
+ hipe_thread_signal_init();
+#endif
+ erts_thread_init_float();
+ erts_smp_mtx_lock(&schdlr_sspnd.mtx);
+
+ ASSERT(schdlr_sspnd.changing == ERTS_SCHED_CHANGING_ONLINE);
+
+ schdlr_sspnd.curr_online--;
+
+ if (((ErtsSchedulerData *) vesdp)->no != 1) {
+ if (schdlr_sspnd.online == schdlr_sspnd.curr_online) {
+ schdlr_sspnd.changing = 0;
+ erts_smp_cnd_broadcast(&schdlr_sspnd.cnd);
+ }
+ }
+ else if (schdlr_sspnd.curr_online == schdlr_sspnd.wait_curr_online)
+ schdlr_sspnd.changing = 0;
+ else {
+ erts_smp_activity_begin(ERTS_ACTIVITY_WAIT,
+ susp_sched_prep_block,
+ susp_sched_resume_block,
+ NULL);
+ while (schdlr_sspnd.curr_online != schdlr_sspnd.wait_curr_online)
+ erts_smp_cnd_wait(&schdlr_sspnd.cnd, &schdlr_sspnd.mtx);
+ erts_smp_activity_end(ERTS_ACTIVITY_WAIT,
+ susp_sched_prep_block,
+ susp_sched_resume_block,
+ NULL);
+ ASSERT(!schdlr_sspnd.changing);
+ }
+ erts_smp_mtx_unlock(&schdlr_sspnd.mtx);
+
+ process_main();
+ /* No schedulers should *ever* terminate */
+ erl_exit(ERTS_ABORT_EXIT, "Scheduler thread number %bpu terminated\n",
+ ((ErtsSchedulerData *) vesdp)->no);
+ return NULL;
+}
+
+void
+erts_start_schedulers(void)
+{
+ int res = 0;
+ Uint actual = 0;
+ Uint wanted = erts_no_schedulers;
+ Uint wanted_no_schedulers = erts_no_schedulers;
+ ethr_thr_opts opts = ETHR_THR_OPTS_DEFAULT_INITER;
+
+ opts.detached = 1;
+ opts.suggested_stack_size = erts_sched_thread_suggested_stack_size;
+
+ if (wanted < 1)
+ wanted = 1;
+ if (wanted > ERTS_MAX_NO_OF_SCHEDULERS) {
+ wanted = ERTS_MAX_NO_OF_SCHEDULERS;
+ res = ENOTSUP;
+ }
+
+ erts_block_system(0);
+
+ while (actual < wanted) {
+ ErtsSchedulerData *esdp = ERTS_SCHEDULER_IX(actual);
+ actual++;
+ ASSERT(actual == esdp->no);
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ res = erts_lcnt_thr_create(&esdp->tid,sched_thread_func,(void*)esdp,&opts);
+#else
+ res = ethr_thr_create(&esdp->tid,sched_thread_func,(void*)esdp,&opts);
+#endif
+ if (res != 0) {
+ actual--;
+ break;
+ }
+ }
+
+ erts_no_schedulers = actual;
+ erts_release_system();
+
+ if (actual < 1)
+ erl_exit(1,
+ "Failed to create any scheduler-threads: %s (%d)\n",
+ erl_errno_id(res),
+ res);
+ if (res != 0) {
+ erts_dsprintf_buf_t *dsbufp = erts_create_logger_dsbuf();
+ ASSERT(actual != wanted_no_schedulers);
+ erts_dsprintf(dsbufp,
+ "Failed to create %bpu scheduler-threads (%s:%d); "
+ "only %bpu scheduler-thread%s created.\n",
+ wanted_no_schedulers, erl_errno_id(res), res,
+ actual, actual == 1 ? " was" : "s were");
+ erts_send_error_to_logger_nogl(dsbufp);
+ }
+}
+
+#endif /* ERTS_SMP */
+
+static int
+int_cmp(const void *vx, const void *vy)
+{
+ return *((int *) vx) - *((int *) vy);
+}
+
+static int
+cpu_spread_order_cmp(const void *vx, const void *vy)
+{
+ erts_cpu_topology_t *x = (erts_cpu_topology_t *) vx;
+ erts_cpu_topology_t *y = (erts_cpu_topology_t *) vy;
+
+ if (x->thread != y->thread)
+ return x->thread - y->thread;
+ if (x->core != y->core)
+ return x->core - y->core;
+ if (x->processor_node != y->processor_node)
+ return x->processor_node - y->processor_node;
+ if (x->processor != y->processor)
+ return x->processor - y->processor;
+ if (x->node != y->node)
+ return x->node - y->node;
+ return 0;
+}
+
+static int
+cpu_processor_spread_order_cmp(const void *vx, const void *vy)
+{
+ erts_cpu_topology_t *x = (erts_cpu_topology_t *) vx;
+ erts_cpu_topology_t *y = (erts_cpu_topology_t *) vy;
+
+ if (x->thread != y->thread)
+ return x->thread - y->thread;
+ if (x->processor_node != y->processor_node)
+ return x->processor_node - y->processor_node;
+ if (x->core != y->core)
+ return x->core - y->core;
+ if (x->node != y->node)
+ return x->node - y->node;
+ if (x->processor != y->processor)
+ return x->processor - y->processor;
+ return 0;
+}
+
+static int
+cpu_thread_spread_order_cmp(const void *vx, const void *vy)
+{
+ erts_cpu_topology_t *x = (erts_cpu_topology_t *) vx;
+ erts_cpu_topology_t *y = (erts_cpu_topology_t *) vy;
+
+ if (x->thread != y->thread)
+ return x->thread - y->thread;
+ if (x->node != y->node)
+ return x->node - y->node;
+ if (x->processor != y->processor)
+ return x->processor - y->processor;
+ if (x->processor_node != y->processor_node)
+ return x->processor_node - y->processor_node;
+ if (x->core != y->core)
+ return x->core - y->core;
+ return 0;
+}
+
+static int
+cpu_thread_no_node_processor_spread_order_cmp(const void *vx, const void *vy)
+{
+ erts_cpu_topology_t *x = (erts_cpu_topology_t *) vx;
+ erts_cpu_topology_t *y = (erts_cpu_topology_t *) vy;
+
+ if (x->thread != y->thread)
+ return x->thread - y->thread;
+ if (x->node != y->node)
+ return x->node - y->node;
+ if (x->core != y->core)
+ return x->core - y->core;
+ if (x->processor != y->processor)
+ return x->processor - y->processor;
+ return 0;
+}
+
+static int
+cpu_no_node_processor_spread_order_cmp(const void *vx, const void *vy)
+{
+ erts_cpu_topology_t *x = (erts_cpu_topology_t *) vx;
+ erts_cpu_topology_t *y = (erts_cpu_topology_t *) vy;
+
+ if (x->node != y->node)
+ return x->node - y->node;
+ if (x->thread != y->thread)
+ return x->thread - y->thread;
+ if (x->core != y->core)
+ return x->core - y->core;
+ if (x->processor != y->processor)
+ return x->processor - y->processor;
+ return 0;
+}
+
+static int
+cpu_no_node_thread_spread_order_cmp(const void *vx, const void *vy)
+{
+ erts_cpu_topology_t *x = (erts_cpu_topology_t *) vx;
+ erts_cpu_topology_t *y = (erts_cpu_topology_t *) vy;
+
+ if (x->node != y->node)
+ return x->node - y->node;
+ if (x->thread != y->thread)
+ return x->thread - y->thread;
+ if (x->processor != y->processor)
+ return x->processor - y->processor;
+ if (x->core != y->core)
+ return x->core - y->core;
+ return 0;
+}
+
+static int
+cpu_no_spread_order_cmp(const void *vx, const void *vy)
+{
+ erts_cpu_topology_t *x = (erts_cpu_topology_t *) vx;
+ erts_cpu_topology_t *y = (erts_cpu_topology_t *) vy;
+
+ if (x->node != y->node)
+ return x->node - y->node;
+ if (x->processor != y->processor)
+ return x->processor - y->processor;
+ if (x->processor_node != y->processor_node)
+ return x->processor_node - y->processor_node;
+ if (x->core != y->core)
+ return x->core - y->core;
+ if (x->thread != y->thread)
+ return x->thread - y->thread;
+ return 0;
+}
+
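+/*
+ * Renumber the topology ids into dense sequences (0, 1, 2, ...) at
+ * each level, restarting the numbering when the enclosing level
+ * changes, so that the spread comparators above order the entries
+ * evenly.
+ */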
+static ERTS_INLINE void
+make_cpudata_id_seq(erts_cpu_topology_t *cpudata, int size, int no_node)
+{
+ int ix;
+ int node = -1;
+ int processor = -1;
+ int processor_node = -1;
+ int processor_node_node = -1;
+ int core = -1;
+ int thread = -1;
+ int old_node = -1;
+ int old_processor = -1;
+ int old_processor_node = -1;
+ int old_core = -1;
+ int old_thread = -1;
+
+ for (ix = 0; ix < size; ix++) {
+ if (!no_node || cpudata[ix].node >= 0) {
+ if (old_node == cpudata[ix].node)
+ cpudata[ix].node = node;
+ else {
+ old_node = cpudata[ix].node;
+ old_processor = processor = -1;
+ if (!no_node)
+ old_processor_node = processor_node = -1;
+ old_core = core = -1;
+ old_thread = thread = -1;
+ if (no_node || cpudata[ix].node >= 0)
+ cpudata[ix].node = ++node;
+ }
+ }
+ if (old_processor == cpudata[ix].processor)
+ cpudata[ix].processor = processor;
+ else {
+ old_processor = cpudata[ix].processor;
+ if (!no_node)
+ processor_node_node = old_processor_node = processor_node = -1;
+ old_core = core = -1;
+ old_thread = thread = -1;
+ cpudata[ix].processor = ++processor;
+ }
+ if (no_node && cpudata[ix].processor_node < 0)
+ old_processor_node = -1;
+ else {
+ if (old_processor_node == cpudata[ix].processor_node) {
+ if (no_node)
+ cpudata[ix].node = cpudata[ix].processor_node = node;
+ else {
+ if (processor_node_node >= 0)
+ cpudata[ix].node = processor_node_node;
+ cpudata[ix].processor_node = processor_node;
+ }
+ }
+ else {
+ old_processor_node = cpudata[ix].processor_node;
+ old_core = core = -1;
+ old_thread = thread = -1;
+ if (no_node)
+ cpudata[ix].node = cpudata[ix].processor_node = ++node;
+ else {
+ cpudata[ix].node = processor_node_node = ++node;
+ cpudata[ix].processor_node = ++processor_node;
+ }
+ }
+ }
+ if (!no_node && cpudata[ix].processor_node < 0)
+ cpudata[ix].processor_node = 0;
+ if (old_core == cpudata[ix].core)
+ cpudata[ix].core = core;
+ else {
+ old_core = cpudata[ix].core;
+ old_thread = thread = -1;
+ cpudata[ix].core = ++core;
+ }
+ if (old_thread == cpudata[ix].thread)
+ cpudata[ix].thread = thread;
+ else
+ old_thread = cpudata[ix].thread = ++thread;
+ }
+}
+
+static void
+cpu_bind_order_sort(erts_cpu_topology_t *cpudata,
+ int size,
+ ErtsCpuBindOrder bind_order,
+ int mk_seq)
+{
+ if (size > 1) {
+ int no_node = 0;
+ int (*cmp_func)(const void *, const void *);
+ switch (bind_order) {
+ case ERTS_CPU_BIND_SPREAD:
+ cmp_func = cpu_spread_order_cmp;
+ break;
+ case ERTS_CPU_BIND_PROCESSOR_SPREAD:
+ cmp_func = cpu_processor_spread_order_cmp;
+ break;
+ case ERTS_CPU_BIND_THREAD_SPREAD:
+ cmp_func = cpu_thread_spread_order_cmp;
+ break;
+ case ERTS_CPU_BIND_THREAD_NO_NODE_PROCESSOR_SPREAD:
+ no_node = 1;
+ cmp_func = cpu_thread_no_node_processor_spread_order_cmp;
+ break;
+ case ERTS_CPU_BIND_NO_NODE_PROCESSOR_SPREAD:
+ no_node = 1;
+ cmp_func = cpu_no_node_processor_spread_order_cmp;
+ break;
+ case ERTS_CPU_BIND_NO_NODE_THREAD_SPREAD:
+ no_node = 1;
+ cmp_func = cpu_no_node_thread_spread_order_cmp;
+ break;
+ case ERTS_CPU_BIND_NO_SPREAD:
+ cmp_func = cpu_no_spread_order_cmp;
+ break;
+ default:
+ cmp_func = NULL;
+ erl_exit(ERTS_ABORT_EXIT,
+ "Bad cpu bind type: %d\n",
+ (int) bind_order);
+ break;
+ }
+
+ if (mk_seq)
+ make_cpudata_id_seq(cpudata, size, no_node);
+
+ qsort(cpudata, size, sizeof(erts_cpu_topology_t), cmp_func);
+ }
+}
+
+static int
+processor_order_cmp(const void *vx, const void *vy)
+{
+ erts_cpu_topology_t *x = (erts_cpu_topology_t *) vx;
+ erts_cpu_topology_t *y = (erts_cpu_topology_t *) vy;
+
+ if (x->processor != y->processor)
+ return x->processor - y->processor;
+ if (x->node != y->node)
+ return x->node - y->node;
+ if (x->processor_node != y->processor_node)
+ return x->processor_node - y->processor_node;
+ if (x->core != y->core)
+ return x->core - y->core;
+ if (x->thread != y->thread)
+ return x->thread - y->thread;
+ return 0;
+}
+
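+/*
+ * Called by a scheduler when a binding change has been signaled:
+ * bind to the requested cpu, or unbind if bind_id is -1, logging an
+ * error if the operation fails.
+ */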
+static void
+check_cpu_bind(ErtsSchedulerData *esdp)
+{
+ int res;
+ int cpu_id;
+ erts_smp_runq_unlock(esdp->run_queue);
+ erts_smp_rwmtx_rwlock(&erts_cpu_bind_rwmtx);
+ cpu_id = scheduler2cpu_map[esdp->no].bind_id;
+ if (cpu_id >= 0 && cpu_id != scheduler2cpu_map[esdp->no].bound_id) {
+ res = erts_bind_to_cpu(erts_cpuinfo, cpu_id);
+ if (res == 0)
+ esdp->cpu_id = scheduler2cpu_map[esdp->no].bound_id = cpu_id;
+ else {
+ erts_dsprintf_buf_t *dsbufp = erts_create_logger_dsbuf();
+ erts_dsprintf(dsbufp, "Scheduler %d failed to bind to cpu %d: %s\n",
+ (int) esdp->no, cpu_id, erl_errno_id(-res));
+ erts_send_error_to_logger_nogl(dsbufp);
+ if (scheduler2cpu_map[esdp->no].bound_id >= 0)
+ goto unbind;
+ }
+ }
+ else if (cpu_id < 0 && scheduler2cpu_map[esdp->no].bound_id >= 0) {
+ unbind:
+ /* Get rid of old binding */
+ res = erts_unbind_from_cpu(erts_cpuinfo);
+ if (res == 0)
+ esdp->cpu_id = scheduler2cpu_map[esdp->no].bound_id = -1;
+ else {
+ erts_dsprintf_buf_t *dsbufp = erts_create_logger_dsbuf();
+ erts_dsprintf(dsbufp, "Scheduler %d failed to unbind from cpu %d: %s\n",
+ (int) esdp->no, cpu_id, erl_errno_id(-res));
+ erts_send_error_to_logger_nogl(dsbufp);
+ }
+ }
+ erts_smp_runq_lock(esdp->run_queue);
+#ifdef ERTS_SMP
+ if (erts_common_run_queue)
+ erts_smp_atomic_set(&esdp->chk_cpu_bind, 0);
+ else {
+ esdp->run_queue->flags &= ~ERTS_RUNQ_FLG_CHK_CPU_BIND;
+ }
+#endif
+ erts_smp_rwmtx_rwunlock(&erts_cpu_bind_rwmtx);
+}
+
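+/*
+ * Recompute the scheduler to cpu map from the sorted topology and
+ * poke every scheduler so that each one re-checks its binding via
+ * check_cpu_bind().
+ */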
+static void
+signal_schedulers_bind_change(erts_cpu_topology_t *cpudata, int size)
+{
+ int s_ix = 1;
+ int cpu_ix;
+
+ if (cpu_bind_order != ERTS_CPU_BIND_NONE) {
+
+ cpu_bind_order_sort(cpudata, size, cpu_bind_order, 1);
+
+ for (cpu_ix = 0; cpu_ix < size && cpu_ix < erts_no_schedulers; cpu_ix++)
+ if (erts_is_cpu_available(erts_cpuinfo, cpudata[cpu_ix].logical))
+ scheduler2cpu_map[s_ix++].bind_id = cpudata[cpu_ix].logical;
+ }
+
+ if (s_ix <= erts_no_schedulers)
+ for (; s_ix <= erts_no_schedulers; s_ix++)
+ scheduler2cpu_map[s_ix].bind_id = -1;
+
+#ifdef ERTS_SMP
+ if (erts_common_run_queue) {
+ for (s_ix = 0; s_ix < erts_no_schedulers; s_ix++)
+ erts_smp_atomic_set(&ERTS_SCHEDULER_IX(s_ix)->chk_cpu_bind, 1);
+ wake_all_schedulers();
+ }
+ else {
+ ERTS_FOREACH_RUNQ(rq,
+ {
+ rq->flags |= ERTS_RUNQ_FLG_CHK_CPU_BIND;
+ wake_scheduler(rq, 0);
+ });
+ }
+#else
+ check_cpu_bind(erts_get_scheduler_data());
+#endif
+}
+
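+/* Map the argument of the +sbt emulator flag to a bind order; "db"
+ * (default bind) is an alias for "tnnps". */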
+int
+erts_init_scheduler_bind_type(char *how)
+{
+ if (erts_bind_to_cpu(erts_cpuinfo, -1) == -ENOTSUP)
+ return ERTS_INIT_SCHED_BIND_TYPE_NOT_SUPPORTED;
+
+ if (!system_cpudata && !user_cpudata)
+ return ERTS_INIT_SCHED_BIND_TYPE_ERROR_NO_CPU_TOPOLOGY;
+
+ if (sys_strcmp(how, "s") == 0)
+ cpu_bind_order = ERTS_CPU_BIND_SPREAD;
+ else if (sys_strcmp(how, "ps") == 0)
+ cpu_bind_order = ERTS_CPU_BIND_PROCESSOR_SPREAD;
+ else if (sys_strcmp(how, "ts") == 0)
+ cpu_bind_order = ERTS_CPU_BIND_THREAD_SPREAD;
+ else if (sys_strcmp(how, "db") == 0
+ || sys_strcmp(how, "tnnps") == 0)
+ cpu_bind_order = ERTS_CPU_BIND_THREAD_NO_NODE_PROCESSOR_SPREAD;
+ else if (sys_strcmp(how, "nnps") == 0)
+ cpu_bind_order = ERTS_CPU_BIND_NO_NODE_PROCESSOR_SPREAD;
+ else if (sys_strcmp(how, "nnts") == 0)
+ cpu_bind_order = ERTS_CPU_BIND_NO_NODE_THREAD_SPREAD;
+ else if (sys_strcmp(how, "ns") == 0)
+ cpu_bind_order = ERTS_CPU_BIND_NO_SPREAD;
+ else if (sys_strcmp(how, "u") == 0)
+ cpu_bind_order = ERTS_CPU_BIND_NONE;
+ else
+ return ERTS_INIT_SCHED_BIND_TYPE_ERROR_NO_BAD_TYPE;
+
+ return ERTS_INIT_SCHED_BIND_TYPE_SUCCESS;
+}
+
+typedef struct {
+ int *id;
+ int used;
+ int size;
+} ErtsCpuTopIdSeq;
+
+typedef struct {
+ ErtsCpuTopIdSeq logical;
+ ErtsCpuTopIdSeq thread;
+ ErtsCpuTopIdSeq core;
+ ErtsCpuTopIdSeq processor_node;
+ ErtsCpuTopIdSeq processor;
+ ErtsCpuTopIdSeq node;
+} ErtsCpuTopEntry;
+
+static void
+init_cpu_top_entry(ErtsCpuTopEntry *cte)
+{
+ int size = 10;
+ cte->logical.id = erts_alloc(ERTS_ALC_T_TMP_CPU_IDS,
+ sizeof(int)*size);
+ cte->logical.size = size;
+ cte->thread.id = erts_alloc(ERTS_ALC_T_TMP_CPU_IDS,
+ sizeof(int)*size);
+ cte->thread.size = size;
+ cte->core.id = erts_alloc(ERTS_ALC_T_TMP_CPU_IDS,
+ sizeof(int)*size);
+ cte->core.size = size;
+ cte->processor_node.id = erts_alloc(ERTS_ALC_T_TMP_CPU_IDS,
+ sizeof(int)*size);
+ cte->processor_node.size = size;
+ cte->processor.id = erts_alloc(ERTS_ALC_T_TMP_CPU_IDS,
+ sizeof(int)*size);
+ cte->processor.size = size;
+ cte->node.id = erts_alloc(ERTS_ALC_T_TMP_CPU_IDS,
+ sizeof(int)*size);
+ cte->node.size = size;
+}
+
+static void
+destroy_cpu_top_entry(ErtsCpuTopEntry *cte)
+{
+ erts_free(ERTS_ALC_T_TMP_CPU_IDS, cte->logical.id);
+ erts_free(ERTS_ALC_T_TMP_CPU_IDS, cte->thread.id);
+ erts_free(ERTS_ALC_T_TMP_CPU_IDS, cte->core.id);
+ erts_free(ERTS_ALC_T_TMP_CPU_IDS, cte->processor_node.id);
+ erts_free(ERTS_ALC_T_TMP_CPU_IDS, cte->processor.id);
+ erts_free(ERTS_ALC_T_TMP_CPU_IDS, cte->node.id);
+}
+
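+/* Parse an id or an id range, e.g. "5" gives *v = 5, and "0-3" gives
+ * *v = 0 and *vr = 3; *str is advanced past the consumed characters. */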
+static int
+get_cput_value_or_range(int *v, int *vr, char **str)
+{
+ long l;
+ char *c = *str;
+ errno = 0;
+ if (!isdigit((unsigned char)*c))
+ return ERTS_INIT_CPU_TOPOLOGY_INVALID_ID;
+ l = strtol(c, &c, 10);
+ if (errno != 0 || l < 0 || ERTS_MAX_CPU_TOPOLOGY_ID < l)
+ return ERTS_INIT_CPU_TOPOLOGY_INVALID_ID;
+ *v = (int) l;
+ if (*c == '-') {
+ c++;
+ if (!isdigit((unsigned char)*c))
+ return ERTS_INIT_CPU_TOPOLOGY_INVALID_ID_RANGE;
+ l = strtol(c, &c, 10);
+ if (errno != 0 || l < 0 || ERTS_MAX_CPU_TOPOLOGY_ID < l)
+ return ERTS_INIT_CPU_TOPOLOGY_INVALID_ID_RANGE;
+ *vr = (int) l;
+ }
+ *str = c;
+ return ERTS_INIT_CPU_TOPOLOGY_OK;
+}
+
+static int
+get_cput_id_seq(ErtsCpuTopIdSeq *idseq, char **str)
+{
+ int ix = 0;
+ int need_size = 0;
+ char *c = *str;
+
+ while (1) {
+ int res;
+ int val;
+ int nids;
+ int val_range = -1;
+ res = get_cput_value_or_range(&val, &val_range, &c);
+ if (res != ERTS_INIT_CPU_TOPOLOGY_OK)
+ return res;
+ if (val_range < 0 || val_range == val)
+ nids = 1;
+ else {
+ if (val_range > val)
+ nids = val_range - val + 1;
+ else
+ nids = val - val_range + 1;
+ }
+ need_size += nids;
+ if (need_size > idseq->size) {
+ idseq->size = need_size + 10;
+ idseq->id = erts_realloc(ERTS_ALC_T_TMP_CPU_IDS,
+ idseq->id,
+ sizeof(int)*idseq->size);
+ }
+ if (nids == 1)
+ idseq->id[ix++] = val;
+ else if (val_range > val) {
+ for (; val <= val_range; val++)
+ idseq->id[ix++] = val;
+ }
+ else {
+ for (; val >= val_range; val--)
+ idseq->id[ix++] = val;
+ }
+ if (*c != ',')
+ break;
+ c++;
+ }
+ *str = c;
+ idseq->used = ix;
+ return ERTS_INIT_CPU_TOPOLOGY_OK;
+}
+
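+/* Parse one topology entry, e.g. "L0-3c0-3" (logical cpus 0-3 on
+ * cores 0-3). A level that is left out defaults to id 0, except nodes
+ * which default to -1 (no NUMA information). */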
+static int
+get_cput_entry(ErtsCpuTopEntry *cput, char **str)
+{
+ int h;
+ char *c = *str;
+
+ cput->logical.used = 0;
+ cput->thread.id[0] = 0;
+ cput->thread.used = 1;
+ cput->core.id[0] = 0;
+ cput->core.used = 1;
+ cput->processor_node.id[0] = -1;
+ cput->processor_node.used = 1;
+ cput->processor.id[0] = 0;
+ cput->processor.used = 1;
+ cput->node.id[0] = -1;
+ cput->node.used = 1;
+
+ h = ERTS_TOPOLOGY_MAX_DEPTH;
+ while (*c != ':' && *c != '\0') {
+ int res;
+ ErtsCpuTopIdSeq *idseqp;
+ switch (*c++) {
+ case 'L':
+ if (h <= ERTS_TOPOLOGY_LOGICAL)
+ return ERTS_INIT_CPU_TOPOLOGY_INVALID_HIERARCHY;
+ idseqp = &cput->logical;
+ h = ERTS_TOPOLOGY_LOGICAL;
+ break;
+ case 't':
+ case 'T':
+ if (h <= ERTS_TOPOLOGY_THREAD)
+ return ERTS_INIT_CPU_TOPOLOGY_INVALID_HIERARCHY;
+ idseqp = &cput->thread;
+ h = ERTS_TOPOLOGY_THREAD;
+ break;
+ case 'c':
+ case 'C':
+ if (h <= ERTS_TOPOLOGY_CORE)
+ return ERTS_INIT_CPU_TOPOLOGY_INVALID_HIERARCHY;
+ idseqp = &cput->core;
+ h = ERTS_TOPOLOGY_CORE;
+ break;
+ case 'p':
+ case 'P':
+ if (h <= ERTS_TOPOLOGY_PROCESSOR)
+ return ERTS_INIT_CPU_TOPOLOGY_INVALID_HIERARCHY;
+ idseqp = &cput->processor;
+ h = ERTS_TOPOLOGY_PROCESSOR;
+ break;
+ case 'n':
+ case 'N':
+ if (h <= ERTS_TOPOLOGY_PROCESSOR) {
+ do_node:
+ if (h <= ERTS_TOPOLOGY_NODE)
+ return ERTS_INIT_CPU_TOPOLOGY_INVALID_HIERARCHY;
+ idseqp = &cput->node;
+ h = ERTS_TOPOLOGY_NODE;
+ }
+ else {
+ int p_node = 0;
+ char *p_chk = c;
+ while (*p_chk != '\0' && *p_chk != ':') {
+ if (*p_chk == 'p' || *p_chk == 'P') {
+ p_node = 1;
+ break;
+ }
+ p_chk++;
+ }
+ if (!p_node)
+ goto do_node;
+ if (h <= ERTS_TOPOLOGY_PROCESSOR_NODE)
+ return ERTS_INIT_CPU_TOPOLOGY_INVALID_HIERARCHY;
+ idseqp = &cput->processor_node;
+ h = ERTS_TOPOLOGY_PROCESSOR_NODE;
+ }
+ break;
+ default:
+ return ERTS_INIT_CPU_TOPOLOGY_INVALID_ID_TYPE;
+ }
+ res = get_cput_id_seq(idseqp, &c);
+ if (res != ERTS_INIT_CPU_TOPOLOGY_OK)
+ return res;
+ }
+
+ if (cput->logical.used < 1)
+ return ERTS_INIT_CPU_TOPOLOGY_MISSING_LID;
+
+ if (*c == ':') {
+ c++;
+ }
+
+ if (cput->thread.used != 1
+ && cput->thread.used != cput->logical.used)
+ return ERTS_INIT_CPU_TOPOLOGY_INVALID_ID_RANGE;
+ if (cput->core.used != 1
+ && cput->core.used != cput->logical.used)
+ return ERTS_INIT_CPU_TOPOLOGY_INVALID_ID_RANGE;
+ if (cput->processor_node.used != 1
+ && cput->processor_node.used != cput->logical.used)
+ return ERTS_INIT_CPU_TOPOLOGY_INVALID_ID_RANGE;
+ if (cput->processor.used != 1
+ && cput->processor.used != cput->logical.used)
+ return ERTS_INIT_CPU_TOPOLOGY_INVALID_ID_RANGE;
+ if (cput->node.used != 1
+ && cput->node.used != cput->logical.used)
+ return ERTS_INIT_CPU_TOPOLOGY_INVALID_ID_RANGE;
+
+ *str = c;
+ return ERTS_INIT_CPU_TOPOLOGY_OK;
+}
+
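+/*
+ * Sanity check a topology: logical ids must be unique, all entities
+ * must be unique, and NUMA information must be consistent (each entry
+ * uses either node or processor_node, and node-less entries cannot be
+ * mixed with entries that have nodes).
+ */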
+static int
+verify_topology(erts_cpu_topology_t *cpudata, int size)
+{
+ if (size > 0) {
+ int *logical;
+ int node, processor, no_nodes, i;
+
+ /* Verify logical ids */
+ logical = erts_alloc(ERTS_ALC_T_TMP, sizeof(int)*size);
+
+ for (i = 0; i < size; i++)
+ logical[i] = cpudata[i].logical;
+
+ qsort(logical, size, sizeof(int), int_cmp);
+ for (i = 0; i < size-1; i++) {
+ if (logical[i] == logical[i+1]) {
+ erts_free(ERTS_ALC_T_TMP, logical);
+ return ERTS_INIT_CPU_TOPOLOGY_NOT_UNIQUE_LIDS;
+ }
+ }
+
+ erts_free(ERTS_ALC_T_TMP, logical);
+
+ qsort(cpudata, size, sizeof(erts_cpu_topology_t), processor_order_cmp);
+
+ /* Verify unique entities */
+
+ for (i = 1; i < size; i++) {
+ if (cpudata[i-1].processor == cpudata[i].processor
+ && cpudata[i-1].node == cpudata[i].node
+ && (cpudata[i-1].processor_node
+ == cpudata[i].processor_node)
+ && cpudata[i-1].core == cpudata[i].core
+ && cpudata[i-1].thread == cpudata[i].thread) {
+ return ERTS_INIT_CPU_TOPOLOGY_NOT_UNIQUE_ENTITIES;
+ }
+ }
+
+ /* Verify numa nodes */
+ node = cpudata[0].node;
+ processor = cpudata[0].processor;
+ no_nodes = cpudata[0].node < 0 && cpudata[0].processor_node < 0;
+ for (i = 1; i < size; i++) {
+ if (no_nodes) {
+ if (cpudata[i].node >= 0 || cpudata[i].processor_node >= 0)
+ return ERTS_INIT_CPU_TOPOLOGY_INVALID_NODES;
+ }
+ else {
+ if (cpudata[i].processor == processor && cpudata[i].node != node)
+ return ERTS_INIT_CPU_TOPOLOGY_INVALID_NODES;
+ node = cpudata[i].node;
+ processor = cpudata[i].processor;
+ if (node >= 0 && cpudata[i].processor_node >= 0)
+ return ERTS_INIT_CPU_TOPOLOGY_INVALID_NODES;
+ if (node < 0 && cpudata[i].processor_node < 0)
+ return ERTS_INIT_CPU_TOPOLOGY_INVALID_NODES;
+ }
+ }
+ }
+
+ return ERTS_INIT_CPU_TOPOLOGY_OK;
+}
+
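+/*
+ * Parse the argument of the +sct emulator flag, a ':' separated list
+ * of topology entries (see get_cput_entry() above), into user_cpudata.
+ */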
+int
+erts_init_cpu_topology(char *topology_str)
+{
+ ErtsCpuTopEntry cput;
+ int need_size;
+ char *c;
+ int ix;
+ int error = ERTS_INIT_CPU_TOPOLOGY_OK;
+
+ if (user_cpudata)
+ erts_free(ERTS_ALC_T_CPUDATA, user_cpudata);
+ user_cpudata_size = 10;
+
+ user_cpudata = erts_alloc(ERTS_ALC_T_CPUDATA,
+ (sizeof(erts_cpu_topology_t)
+ * user_cpudata_size));
+
+ init_cpu_top_entry(&cput);
+
+ ix = 0;
+ need_size = 0;
+
+ c = topology_str;
+ if (*c == '\0') {
+ error = ERTS_INIT_CPU_TOPOLOGY_MISSING;
+ goto fail;
+ }
+ do {
+ int r;
+ error = get_cput_entry(&cput, &c);
+ if (error != ERTS_INIT_CPU_TOPOLOGY_OK)
+ goto fail;
+ need_size += cput.logical.used;
+ if (user_cpudata_size < need_size) {
+ user_cpudata_size = need_size + 10;
+ user_cpudata = erts_realloc(ERTS_ALC_T_CPUDATA,
+ user_cpudata,
+ (sizeof(erts_cpu_topology_t)
+ * user_cpudata_size));
+ }
+
+ ASSERT(cput.thread.used == 1
+ || cput.thread.used == cput.logical.used);
+ ASSERT(cput.core.used == 1
+ || cput.core.used == cput.logical.used);
+ ASSERT(cput.processor_node.used == 1
+ || cput.processor_node.used == cput.logical.used);
+ ASSERT(cput.processor.used == 1
+ || cput.processor.used == cput.logical.used);
+ ASSERT(cput.node.used == 1
+ || cput.node.used == cput.logical.used);
+
+ for (r = 0; r < cput.logical.used; r++) {
+ user_cpudata[ix].logical = cput.logical.id[r];
+ user_cpudata[ix].thread =
+ cput.thread.id[cput.thread.used == 1 ? 0 : r];
+ user_cpudata[ix].core =
+ cput.core.id[cput.core.used == 1 ? 0 : r];
+ user_cpudata[ix].processor_node =
+ cput.processor_node.id[cput.processor_node.used == 1 ? 0 : r];
+ user_cpudata[ix].processor =
+ cput.processor.id[cput.processor.used == 1 ? 0 : r];
+ user_cpudata[ix].node =
+ cput.node.id[cput.node.used == 1 ? 0 : r];
+ ix++;
+ }
+ } while (*c != '\0');
+
+ if (user_cpudata_size != ix) {
+ user_cpudata_size = ix;
+ user_cpudata = erts_realloc(ERTS_ALC_T_CPUDATA,
+ user_cpudata,
+ (sizeof(erts_cpu_topology_t)
+ * user_cpudata_size));
+ }
+
+ error = verify_topology(user_cpudata, user_cpudata_size);
+ if (error == ERTS_INIT_CPU_TOPOLOGY_OK) {
+ destroy_cpu_top_entry(&cput);
+ return ERTS_INIT_CPU_TOPOLOGY_OK;
+ }
+
+ fail:
+ if (user_cpudata)
+ erts_free(ERTS_ALC_T_CPUDATA, user_cpudata);
+ user_cpudata_size = 0;
+ destroy_cpu_top_entry(&cput);
+ return error;
+}
+
+#define ERTS_GET_CPU_TOPOLOGY_ERROR -1
+#define ERTS_GET_USED_CPU_TOPOLOGY 0
+#define ERTS_GET_DETECTED_CPU_TOPOLOGY 1
+#define ERTS_GET_DEFINED_CPU_TOPOLOGY 2
+
+static Eterm get_cpu_topology_term(Process *c_p, int type);
+
+Eterm
+erts_set_cpu_topology(Process *c_p, Eterm term)
+{
+ erts_cpu_topology_t *cpudata = NULL;
+ int cpudata_size = 0;
+ Eterm res;
+
+ erts_smp_rwmtx_rwlock(&erts_cpu_bind_rwmtx);
+ res = get_cpu_topology_term(c_p, ERTS_GET_USED_CPU_TOPOLOGY);
+ if (term == am_undefined) {
+ if (user_cpudata)
+ erts_free(ERTS_ALC_T_CPUDATA, user_cpudata);
+ user_cpudata = NULL;
+ user_cpudata_size = 0;
+
+ if (cpu_bind_order != ERTS_CPU_BIND_NONE && system_cpudata) {
+ cpudata_size = system_cpudata_size;
+ cpudata = erts_alloc(ERTS_ALC_T_TMP,
+ (sizeof(erts_cpu_topology_t)
+ * cpudata_size));
+
+ sys_memcpy((void *) cpudata,
+ (void *) system_cpudata,
+ sizeof(erts_cpu_topology_t)*cpudata_size);
+ }
+ }
+ else if (is_not_list(term)) {
+ error:
+ res = THE_NON_VALUE;
+ goto done;
+ }
+ else {
+ Eterm list = term;
+ int ix = 0;
+
+ cpudata_size = 100;
+ cpudata = erts_alloc(ERTS_ALC_T_TMP,
+ (sizeof(erts_cpu_topology_t)
+ * cpudata_size));
+
+ while (is_list(list)) {
+ Eterm *lp = list_val(list);
+ Eterm cpu = CAR(lp);
+ Eterm* tp;
+ Sint id;
+
+ if (is_not_tuple(cpu))
+ goto error;
+
+ tp = tuple_val(cpu);
+
+ if (arityval(tp[0]) != 7 || tp[1] != am_cpu)
+ goto error;
+
+ /* all id fields must be small integers */
+ if (is_not_small(tp[2]) || is_not_small(tp[3])
+ || is_not_small(tp[4]) || is_not_small(tp[5])
+ || is_not_small(tp[6]) || is_not_small(tp[7]))
+ goto error;
+
+ if (ix >= cpudata_size) {
+ cpudata_size += 100;
+ cpudata = erts_realloc(ERTS_ALC_T_TMP,
+ cpudata,
+ (sizeof(erts_cpu_topology_t)
+ * cpudata_size));
+ }
+
+ id = signed_val(tp[2]);
+ if (id < -1 || ERTS_MAX_CPU_TOPOLOGY_ID < id)
+ goto error;
+ cpudata[ix].node = (int) id;
+
+ id = signed_val(tp[3]);
+ if (id < -1 || ERTS_MAX_CPU_TOPOLOGY_ID < id)
+ goto error;
+ cpudata[ix].processor = (int) id;
+
+ id = signed_val(tp[4]);
+ if (id < -1 || ERTS_MAX_CPU_TOPOLOGY_ID < id)
+ goto error;
+ cpudata[ix].processor_node = (int) id;
+
+ id = signed_val(tp[5]);
+ if (id < -1 || ERTS_MAX_CPU_TOPOLOGY_ID < id)
+ goto error;
+ cpudata[ix].core = (int) id;
+
+ id = signed_val(tp[6]);
+ if (id < -1 || ERTS_MAX_CPU_TOPOLOGY_ID < id)
+ goto error;
+ cpudata[ix].thread = (int) id;
+
+ id = signed_val(tp[7]);
+ if (id < -1 || ERTS_MAX_CPU_TOPOLOGY_ID < id)
+ goto error;
+ cpudata[ix].logical = (int) id;
+
+ list = CDR(lp);
+ ix++;
+ }
+
+ if (is_not_nil(list))
+ goto error;
+
+ cpudata_size = ix;
+
+ if (ERTS_INIT_CPU_TOPOLOGY_OK != verify_topology(cpudata, cpudata_size))
+ goto error;
+
+ if (user_cpudata_size != cpudata_size) {
+ if (user_cpudata)
+ erts_free(ERTS_ALC_T_CPUDATA, user_cpudata);
+ user_cpudata = erts_alloc(ERTS_ALC_T_CPUDATA,
+ sizeof(erts_cpu_topology_t)*cpudata_size);
+ user_cpudata_size = cpudata_size;
+ }
+
+ sys_memcpy((void *) user_cpudata,
+ (void *) cpudata,
+ sizeof(erts_cpu_topology_t)*cpudata_size);
+ }
+
+ signal_schedulers_bind_change(cpudata, cpudata_size);
+
+ done:
+ erts_smp_rwmtx_rwunlock(&erts_cpu_bind_rwmtx);
+
+ if (cpudata)
+ erts_free(ERTS_ALC_T_TMP, cpudata);
+
+ return res;
+}
+
+static Eterm
+bound_schedulers_term(ErtsCpuBindOrder order)
+{
+ switch (order) {
+ case ERTS_CPU_BIND_SPREAD: {
+ ERTS_DECL_AM(spread);
+ return AM_spread;
+ }
+ case ERTS_CPU_BIND_PROCESSOR_SPREAD: {
+ ERTS_DECL_AM(processor_spread);
+ return AM_processor_spread;
+ }
+ case ERTS_CPU_BIND_THREAD_SPREAD: {
+ ERTS_DECL_AM(thread_spread);
+ return AM_thread_spread;
+ }
+ case ERTS_CPU_BIND_THREAD_NO_NODE_PROCESSOR_SPREAD: {
+ ERTS_DECL_AM(thread_no_node_processor_spread);
+ return AM_thread_no_node_processor_spread;
+ }
+ case ERTS_CPU_BIND_NO_NODE_PROCESSOR_SPREAD: {
+ ERTS_DECL_AM(no_node_processor_spread);
+ return AM_no_node_processor_spread;
+ }
+ case ERTS_CPU_BIND_NO_NODE_THREAD_SPREAD: {
+ ERTS_DECL_AM(no_node_thread_spread);
+ return AM_no_node_thread_spread;
+ }
+ case ERTS_CPU_BIND_NO_SPREAD: {
+ ERTS_DECL_AM(no_spread);
+ return AM_no_spread;
+ }
+ case ERTS_CPU_BIND_NONE: {
+ ERTS_DECL_AM(unbound);
+ return AM_unbound;
+ }
+ default:
+ ASSERT(0);
+ return THE_NON_VALUE;
+ }
+}
+
+Eterm
+erts_bound_schedulers_term(Process *c_p)
+{
+ ErtsCpuBindOrder order;
+ erts_smp_rwmtx_rlock(&erts_cpu_bind_rwmtx);
+ order = cpu_bind_order;
+ erts_smp_rwmtx_runlock(&erts_cpu_bind_rwmtx);
+ return bound_schedulers_term(order);
+}
+
+static void
+create_tmp_cpu_topology_copy(erts_cpu_topology_t **cpudata, int *cpudata_size)
+{
+ if (user_cpudata) {
+ *cpudata_size = user_cpudata_size;
+ *cpudata = erts_alloc(ERTS_ALC_T_TMP,
+ (sizeof(erts_cpu_topology_t)
+ * (*cpudata_size)));
+ sys_memcpy((void *) *cpudata,
+ (void *) user_cpudata,
+ sizeof(erts_cpu_topology_t)*(*cpudata_size));
+ }
+ else if (system_cpudata) {
+ *cpudata_size = system_cpudata_size;
+ *cpudata = erts_alloc(ERTS_ALC_T_TMP,
+ (sizeof(erts_cpu_topology_t)
+ * (*cpudata_size)));
+ sys_memcpy((void *) *cpudata,
+ (void *) system_cpudata,
+ sizeof(erts_cpu_topology_t)*(*cpudata_size));
+ }
+ else {
+ *cpudata = NULL;
+ *cpudata_size = 0;
+ }
+}
+
+static void
+destroy_tmp_cpu_topology_copy(erts_cpu_topology_t *cpudata)
+{
+ if (cpudata)
+ erts_free(ERTS_ALC_T_TMP, cpudata);
+}
+
+Eterm
+erts_bind_schedulers(Process *c_p, Eterm how)
+{
+ Eterm res;
+ erts_cpu_topology_t *cpudata;
+ int cpudata_size;
+ ErtsCpuBindOrder old_cpu_bind_order;
+
+ erts_smp_rwmtx_rwlock(&erts_cpu_bind_rwmtx);
+
+ if (erts_bind_to_cpu(erts_cpuinfo, -1) == -ENOTSUP) {
+ ERTS_BIF_PREP_ERROR(res, c_p, EXC_NOTSUP);
+ }
+ else {
+
+ old_cpu_bind_order = cpu_bind_order;
+
+ if (ERTS_IS_ATOM_STR("spread", how))
+ cpu_bind_order = ERTS_CPU_BIND_SPREAD;
+ else if (ERTS_IS_ATOM_STR("processor_spread", how))
+ cpu_bind_order = ERTS_CPU_BIND_PROCESSOR_SPREAD;
+ else if (ERTS_IS_ATOM_STR("thread_spread", how))
+ cpu_bind_order = ERTS_CPU_BIND_THREAD_SPREAD;
+ else if (ERTS_IS_ATOM_STR("default_bind", how)
+ || ERTS_IS_ATOM_STR("thread_no_node_processor_spread", how))
+ cpu_bind_order = ERTS_CPU_BIND_THREAD_NO_NODE_PROCESSOR_SPREAD;
+ else if (ERTS_IS_ATOM_STR("no_node_processor_spread", how))
+ cpu_bind_order = ERTS_CPU_BIND_NO_NODE_PROCESSOR_SPREAD;
+ else if (ERTS_IS_ATOM_STR("no_node_thread_spread", how))
+ cpu_bind_order = ERTS_CPU_BIND_NO_NODE_THREAD_SPREAD;
+ else if (ERTS_IS_ATOM_STR("no_spread", how))
+ cpu_bind_order = ERTS_CPU_BIND_NO_SPREAD;
+ else if (ERTS_IS_ATOM_STR("unbound", how))
+ cpu_bind_order = ERTS_CPU_BIND_NONE;
+ else {
+ cpu_bind_order = old_cpu_bind_order;
+ ERTS_BIF_PREP_ERROR(res, c_p, BADARG);
+ goto done;
+ }
+
+ create_tmp_cpu_topology_copy(&cpudata, &cpudata_size);
+
+ if (!cpudata) {
+ cpu_bind_order = old_cpu_bind_order;
+ ERTS_BIF_PREP_ERROR(res, c_p, BADARG);
+ goto done;
+ }
+
+ signal_schedulers_bind_change(cpudata, cpudata_size);
+
+ destroy_tmp_cpu_topology_copy(cpudata);
+
+ res = bound_schedulers_term(old_cpu_bind_order);
+ }
+
+ done:
+
+ erts_smp_rwmtx_rwunlock(&erts_cpu_bind_rwmtx);
+
+ return res;
+}
+
+Eterm
+erts_fake_scheduler_bindings(Process *p, Eterm how)
+{
+ ErtsCpuBindOrder fake_cpu_bind_order;
+ erts_cpu_topology_t *cpudata;
+ int cpudata_size;
+ Eterm res;
+
+ if (ERTS_IS_ATOM_STR("spread", how))
+ fake_cpu_bind_order = ERTS_CPU_BIND_SPREAD;
+ else if (ERTS_IS_ATOM_STR("processor_spread", how))
+ fake_cpu_bind_order = ERTS_CPU_BIND_PROCESSOR_SPREAD;
+ else if (ERTS_IS_ATOM_STR("thread_spread", how))
+ fake_cpu_bind_order = ERTS_CPU_BIND_THREAD_SPREAD;
+ else if (ERTS_IS_ATOM_STR("default_bind", how)
+ || ERTS_IS_ATOM_STR("thread_no_node_processor_spread", how))
+ fake_cpu_bind_order = ERTS_CPU_BIND_THREAD_NO_NODE_PROCESSOR_SPREAD;
+ else if (ERTS_IS_ATOM_STR("no_node_processor_spread", how))
+ fake_cpu_bind_order = ERTS_CPU_BIND_NO_NODE_PROCESSOR_SPREAD;
+ else if (ERTS_IS_ATOM_STR("no_node_thread_spread", how))
+ fake_cpu_bind_order = ERTS_CPU_BIND_NO_NODE_THREAD_SPREAD;
+ else if (ERTS_IS_ATOM_STR("no_spread", how))
+ fake_cpu_bind_order = ERTS_CPU_BIND_NO_SPREAD;
+ else if (ERTS_IS_ATOM_STR("unbound", how))
+ fake_cpu_bind_order = ERTS_CPU_BIND_NONE;
+ else {
+ ERTS_BIF_PREP_ERROR(res, p, BADARG);
+ return res;
+ }
+
+ erts_smp_rwmtx_rlock(&erts_cpu_bind_rwmtx);
+ create_tmp_cpu_topology_copy(&cpudata, &cpudata_size);
+ erts_smp_rwmtx_runlock(&erts_cpu_bind_rwmtx);
+
+ if (!cpudata || fake_cpu_bind_order == ERTS_CPU_BIND_NONE)
+ ERTS_BIF_PREP_RET(res, am_false);
+ else {
+ int i;
+ Eterm *hp;
+
+ cpu_bind_order_sort(cpudata, cpudata_size, fake_cpu_bind_order, 1);
+
+#ifdef ERTS_FAKE_SCHED_BIND_PRINT_SORTED_CPU_DATA
+
+ erts_fprintf(stderr, "node: ");
+ for (i = 0; i < cpudata_size; i++)
+ erts_fprintf(stderr, " %2d", cpudata[i].node);
+ erts_fprintf(stderr, "\n");
+ erts_fprintf(stderr, "processor: ");
+ for (i = 0; i < cpudata_size; i++)
+ erts_fprintf(stderr, " %2d", cpudata[i].processor);
+ erts_fprintf(stderr, "\n");
+ if (fake_cpu_bind_order != ERTS_CPU_BIND_THREAD_NO_NODE_PROCESSOR_SPREAD
+ && fake_cpu_bind_order != ERTS_CPU_BIND_NO_NODE_PROCESSOR_SPREAD
+ && fake_cpu_bind_order != ERTS_CPU_BIND_NO_NODE_THREAD_SPREAD) {
+ erts_fprintf(stderr, "processor_node:");
+ for (i = 0; i < cpudata_size; i++)
+ erts_fprintf(stderr, " %2d", cpudata[i].processor_node);
+ erts_fprintf(stderr, "\n");
+ }
+ erts_fprintf(stderr, "core: ");
+ for (i = 0; i < cpudata_size; i++)
+ erts_fprintf(stderr, " %2d", cpudata[i].core);
+ erts_fprintf(stderr, "\n");
+ erts_fprintf(stderr, "thread: ");
+ for (i = 0; i < cpudata_size; i++)
+ erts_fprintf(stderr, " %2d", cpudata[i].thread);
+ erts_fprintf(stderr, "\n");
+ erts_fprintf(stderr, "logical: ");
+ for (i = 0; i < cpudata_size; i++)
+ erts_fprintf(stderr, " %2d", cpudata[i].logical);
+ erts_fprintf(stderr, "\n");
+#endif
+
+ hp = HAlloc(p, cpudata_size+1);
+ ERTS_BIF_PREP_RET(res, make_tuple(hp));
+ *hp++ = make_arityval((Uint) cpudata_size);
+ for (i = 0; i < cpudata_size; i++)
+ *hp++ = make_small((Uint) cpudata[i].logical);
+ }
+
+ destroy_tmp_cpu_topology_copy(cpudata);
+
+ return res;
+}
+
+Eterm
+erts_get_schedulers_binds(Process *c_p)
+{
+ int ix;
+ ERTS_DECL_AM(unbound);
+ Eterm *hp = HAlloc(c_p, erts_no_schedulers+1);
+ Eterm res = make_tuple(hp);
+
+ *(hp++) = make_arityval(erts_no_schedulers);
+ erts_smp_rwmtx_rlock(&erts_cpu_bind_rwmtx);
+ for (ix = 1; ix <= erts_no_schedulers; ix++)
+ *(hp++) = (scheduler2cpu_map[ix].bound_id >= 0
+ ? make_small(scheduler2cpu_map[ix].bound_id)
+ : AM_unbound);
+ erts_smp_rwmtx_runlock(&erts_cpu_bind_rwmtx);
+ return res;
+}
+
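+/*
+ * Build the list of {cpu, Node, Processor, ProcessorNode, Core,
+ * Thread, Logical} tuples; called twice, first with hszp to compute
+ * the needed heap size and then with hpp to build the actual term.
+ */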
+static Eterm
+bld_topology_term(Eterm **hpp,
+ Uint *hszp,
+ erts_cpu_topology_t *cpudata,
+ int size)
+{
+ Eterm res = NIL;
+ int i;
+
+ if (size == 0)
+ return am_undefined;
+
+ for (i = size-1; i >= 0; i--) {
+ res = erts_bld_cons(hpp,
+ hszp,
+ erts_bld_tuple(hpp,
+ hszp,
+ 7,
+ am_cpu,
+ make_small(cpudata[i].node),
+ make_small(cpudata[i].processor),
+ make_small(cpudata[i].processor_node),
+ make_small(cpudata[i].core),
+ make_small(cpudata[i].thread),
+ make_small(cpudata[i].logical)),
+ res);
+ }
+ return res;
+}
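+
+/*
+ * bld_topology_term() follows the usual two-pass erts_bld_*() idiom: a
+ * first call with hpp == NULL and hszp != NULL only accumulates the
+ * needed heap size; a second call with hpp != NULL and hszp == NULL
+ * builds the term on the preallocated heap. A minimal sketch of the
+ * calling pattern (this is exactly what get_cpu_topology_term() below
+ * does):
+ *
+ *     Uint hsz = 0;
+ *     Eterm *hp, res;
+ *     (void) bld_topology_term(NULL, &hsz, cpudata, size);  (size pass)
+ *     hp = HAlloc(c_p, hsz);
+ *     res = bld_topology_term(&hp, NULL, cpudata, size);    (build pass)
+ */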
+
+static Eterm
+get_cpu_topology_term(Process *c_p, int type)
+{
+#ifdef DEBUG
+ Eterm *hp_end;
+#endif
+ Eterm *hp;
+ Uint hsz;
+ Eterm res = THE_NON_VALUE;
+ erts_cpu_topology_t *cpudata = NULL;
+ int size = 0;
+
+ switch (type) {
+ case ERTS_GET_USED_CPU_TOPOLOGY:
+ if (user_cpudata)
+ goto defined;
+ else
+ goto detected;
+ case ERTS_GET_DETECTED_CPU_TOPOLOGY:
+ detected:
+ if (!system_cpudata)
+ res = am_undefined;
+ else {
+ size = system_cpudata_size;
+ cpudata = erts_alloc(ERTS_ALC_T_TMP,
+ (sizeof(erts_cpu_topology_t)
+ * size));
+ sys_memcpy((void *) cpudata,
+ (void *) system_cpudata,
+ sizeof(erts_cpu_topology_t)*size);
+ }
+ break;
+ case ERTS_GET_DEFINED_CPU_TOPOLOGY:
+ defined:
+ if (!user_cpudata)
+ res = am_undefined;
+ else {
+ size = user_cpudata_size;
+ cpudata = user_cpudata;
+ }
+ break;
+ default:
+ erl_exit(ERTS_ABORT_EXIT, "Bad cpu topology type: %d\n", type);
+ break;
+ }
+
+ if (res == am_undefined) {
+ ASSERT(!cpudata);
+ return res;
+ }
+
+ hsz = 0;
+
+ bld_topology_term(NULL, &hsz,
+ cpudata, size);
+
+ hp = HAlloc(c_p, hsz);
+
+#ifdef DEBUG
+ hp_end = hp + hsz;
+#endif
+
+ res = bld_topology_term(&hp, NULL,
+ cpudata, size);
+
+ ASSERT(hp_end == hp);
+
+ if (cpudata && cpudata != system_cpudata && cpudata != user_cpudata)
+ erts_free(ERTS_ALC_T_TMP, cpudata);
+
+ return res;
+}
+
+Eterm
+erts_get_cpu_topology_term(Process *c_p, Eterm which)
+{
+ Eterm res;
+ int type;
+ erts_smp_rwmtx_rlock(&erts_cpu_bind_rwmtx);
+ if (ERTS_IS_ATOM_STR("used", which))
+ type = ERTS_GET_USED_CPU_TOPOLOGY;
+ else if (ERTS_IS_ATOM_STR("detected", which))
+ type = ERTS_GET_DETECTED_CPU_TOPOLOGY;
+ else if (ERTS_IS_ATOM_STR("defined", which))
+ type = ERTS_GET_DEFINED_CPU_TOPOLOGY;
+ else
+ type = ERTS_GET_CPU_TOPOLOGY_ERROR;
+ if (type == ERTS_GET_CPU_TOPOLOGY_ERROR)
+ res = THE_NON_VALUE;
+ else
+ res = get_cpu_topology_term(c_p, type);
+ erts_smp_rwmtx_runlock(&erts_cpu_bind_rwmtx);
+ return res;
+}
+
+static void
+early_cpu_bind_init(void)
+{
+ user_cpudata = NULL;
+ user_cpudata_size = 0;
+
+ system_cpudata_size = erts_get_cpu_topology_size(erts_cpuinfo);
+ system_cpudata = erts_alloc(ERTS_ALC_T_CPUDATA,
+ (sizeof(erts_cpu_topology_t)
+ * system_cpudata_size));
+
+ cpu_bind_order = ERTS_CPU_BIND_NONE;
+
+ if (!erts_get_cpu_topology(erts_cpuinfo, system_cpudata)
+ || ERTS_INIT_CPU_TOPOLOGY_OK != verify_topology(system_cpudata,
+ system_cpudata_size)) {
+ erts_free(ERTS_ALC_T_CPUDATA, system_cpudata);
+ system_cpudata = NULL;
+ system_cpudata_size = 0;
+ }
+}
+
+static void
+late_cpu_bind_init(void)
+{
+ int ix;
+
+ erts_smp_rwmtx_init(&erts_cpu_bind_rwmtx, "cpu_bind");
+
+ scheduler2cpu_map = erts_alloc(ERTS_ALC_T_CPUDATA,
+ (sizeof(ErtsCpuBindData)
+ * (erts_no_schedulers+1)));
+ for (ix = 1; ix <= erts_no_schedulers; ix++) {
+ scheduler2cpu_map[ix].bind_id = -1;
+ scheduler2cpu_map[ix].bound_id = -1;
+ }
+
+ if (cpu_bind_order != ERTS_CPU_BIND_NONE) {
+ erts_cpu_topology_t *cpudata;
+ int cpudata_size;
+ create_tmp_cpu_topology_copy(&cpudata, &cpudata_size);
+ ASSERT(cpudata);
+ signal_schedulers_bind_change(cpudata, cpudata_size);
+ destroy_tmp_cpu_topology_copy(cpudata);
+ }
+}
+
+#ifdef ERTS_SMP
+
+static void
+add_pend_suspend(Process *suspendee,
+ Eterm originator_pid,
+ void (*handle_func)(Process *,
+ ErtsProcLocks,
+ int,
+ Eterm))
+{
+ ErtsPendingSuspend *psp = erts_alloc(ERTS_ALC_T_PEND_SUSPEND,
+ sizeof(ErtsPendingSuspend));
+ psp->next = NULL;
+#ifdef DEBUG
+#ifdef ARCH_64
+ psp->end = (ErtsPendingSuspend *) 0xdeaddeaddeaddead;
+#else
+ psp->end = (ErtsPendingSuspend *) 0xdeaddead;
+#endif
+#endif
+ psp->pid = originator_pid;
+ psp->handle_func = handle_func;
+
+ if (suspendee->pending_suspenders)
+ suspendee->pending_suspenders->end->next = psp;
+ else
+ suspendee->pending_suspenders = psp;
+ suspendee->pending_suspenders->end = psp;
+}
+
+static void
+handle_pending_suspend(Process *p, ErtsProcLocks p_locks)
+{
+ ErtsPendingSuspend *psp;
+ int is_alive = !ERTS_PROC_IS_EXITING(p);
+
+ ERTS_SMP_LC_ASSERT(p_locks & ERTS_PROC_LOCK_STATUS);
+
+ /*
+ * New pending suspenders might appear while we are processing
+ * (since we may release the status lock on p while processing).
+ */
+ while (p->pending_suspenders) {
+ psp = p->pending_suspenders;
+ p->pending_suspenders = NULL;
+ while (psp) {
+ ErtsPendingSuspend *free_psp;
+ (*psp->handle_func)(p, p_locks, is_alive, psp->pid);
+ free_psp = psp;
+ psp = psp->next;
+ erts_free(ERTS_ALC_T_PEND_SUSPEND, (void *) free_psp);
+ }
+ }
+
+}
+
+static ERTS_INLINE void
+cancel_suspend_of_suspendee(Process *p, ErtsProcLocks p_locks)
+{
+ if (is_not_nil(p->suspendee)) {
+ Process *rp;
+ if (!(p_locks & ERTS_PROC_LOCK_STATUS))
+ erts_smp_proc_lock(p, ERTS_PROC_LOCK_STATUS);
+ rp = erts_pid2proc(p, p_locks|ERTS_PROC_LOCK_STATUS,
+ p->suspendee, ERTS_PROC_LOCK_STATUS);
+ if (rp) {
+ erts_resume(rp, ERTS_PROC_LOCK_STATUS);
+ erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_STATUS);
+ }
+ if (!(p_locks & ERTS_PROC_LOCK_STATUS))
+ erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
+ p->suspendee = NIL;
+ }
+}
+
+static void
+handle_pend_sync_suspend(Process *suspendee,
+ ErtsProcLocks suspendee_locks,
+ int suspendee_alive,
+ Eterm suspender_pid)
+{
+ Process *suspender;
+
+ ERTS_SMP_LC_ASSERT(suspendee_locks & ERTS_PROC_LOCK_STATUS);
+
+ suspender = erts_pid2proc(suspendee,
+ suspendee_locks,
+ suspender_pid,
+ ERTS_PROC_LOCK_STATUS);
+ if (suspender) {
+ ASSERT(is_nil(suspender->suspendee));
+ if (suspendee_alive) {
+ ErtsRunQueue *rq = erts_get_runq_proc(suspendee);
+ erts_smp_runq_lock(rq);
+ suspend_process(rq, suspendee);
+ erts_smp_runq_unlock(rq);
+ suspender->suspendee = suspendee->id;
+ }
+ /* suspender is suspended waiting for suspendee to suspend;
+ resume suspender */
+ resume_process(suspender);
+ erts_smp_proc_unlock(suspender, ERTS_PROC_LOCK_STATUS);
+ }
+}
+
+/*
+ * Like erts_pid2proc() but:
+ *
+ * * At least ERTS_PROC_LOCK_MAIN has to be held on c_p.
+ * * At least ERTS_PROC_LOCK_MAIN has to be taken on pid.
+ * * It also waits for the process to be in a state != running and garbing.
+ * * If ERTS_PROC_LOCK_BUSY is returned, the calling process has to
+ * yield (ERTS_BIF_YIELD[0-3]()). c_p might in this case have been
+ * suspended.
+ */
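+
+/*
+ * An illustrative calling pattern from a BIF (a sketch only; BIF_foo_1
+ * is a made-up export, but the lock and yield handling follows the
+ * contract above):
+ *
+ *     rp = erts_pid2proc_not_running(BIF_P, ERTS_PROC_LOCK_MAIN,
+ *                                    BIF_ARG_1, ERTS_PROC_LOCK_MAIN);
+ *     if (rp == ERTS_PROC_LOCK_BUSY)
+ *         ERTS_BIF_YIELD1(bif_export[BIF_foo_1], BIF_P, BIF_ARG_1);
+ *     if (!rp)
+ *         BIF_ERROR(BIF_P, BADARG);
+ *     ... here rp is neither running nor garbing ...
+ */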
+
+
+Process *
+erts_pid2proc_not_running(Process *c_p, ErtsProcLocks c_p_locks,
+ Eterm pid, ErtsProcLocks pid_locks)
+{
+ Process *rp;
+ int unlock_c_p_status;
+
+ ERTS_SMP_LC_ASSERT(c_p_locks == erts_proc_lc_my_proc_locks(c_p));
+
+ ERTS_SMP_LC_ASSERT(c_p_locks & ERTS_PROC_LOCK_MAIN);
+ ERTS_SMP_LC_ASSERT(pid_locks & (ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_STATUS));
+
+ if (c_p->id == pid)
+ return erts_pid2proc(c_p, c_p_locks, pid, pid_locks);
+
+ if (c_p_locks & ERTS_PROC_LOCK_STATUS)
+ unlock_c_p_status = 0;
+ else {
+ unlock_c_p_status = 1;
+ erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_STATUS);
+ }
+
+ if (c_p->suspendee == pid) {
+ /* Process previously suspended by c_p (below)... */
+ ErtsProcLocks rp_locks = pid_locks|ERTS_PROC_LOCK_STATUS;
+ rp = erts_pid2proc(c_p, c_p_locks|ERTS_PROC_LOCK_STATUS, pid, rp_locks);
+ c_p->suspendee = NIL;
+ ASSERT(c_p->flags & F_P2PNR_RESCHED);
+ c_p->flags &= ~F_P2PNR_RESCHED;
+ if (rp)
+ resume_process(rp);
+ }
+ else {
+ ErtsRunQueue *cp_rq, *rp_rq;
+
+ rp = erts_pid2proc(c_p, c_p_locks|ERTS_PROC_LOCK_STATUS,
+ pid, ERTS_PROC_LOCK_STATUS);
+
+ if (!rp) {
+ c_p->flags &= ~F_P2PNR_RESCHED;
+ goto done;
+ }
+
+ ASSERT(!(c_p->flags & F_P2PNR_RESCHED));
+
+ cp_rq = erts_get_runq_proc(c_p);
+ rp_rq = erts_get_runq_proc(rp);
+ erts_smp_runqs_lock(cp_rq, rp_rq);
+ if (rp->runq_flags & ERTS_PROC_RUNQ_FLG_RUNNING) {
+ running:
+ /* Phiu... */
+
+ /*
+	     * If we have pending suspenders and suspend ourselves waiting
+	     * to suspend another process, we might deadlock.
+	     * In this case we have to yield, be suspended by
+	     * someone else, and then do it all over again.
+ */
+ if (!c_p->pending_suspenders) {
+ /* Mark rp pending for suspend by c_p */
+ add_pend_suspend(rp, c_p->id, handle_pend_sync_suspend);
+ ASSERT(is_nil(c_p->suspendee));
+
+ /* Suspend c_p; when rp is suspended c_p will be resumed. */
+ suspend_process(cp_rq, c_p);
+ c_p->flags |= F_P2PNR_RESCHED;
+ }
+ /* Yield (caller is assumed to yield immediately in bif). */
+ erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_STATUS);
+ rp = ERTS_PROC_LOCK_BUSY;
+ }
+ else {
+ ErtsProcLocks need_locks = pid_locks & ~ERTS_PROC_LOCK_STATUS;
+ if (need_locks && erts_smp_proc_trylock(rp, need_locks) == EBUSY) {
+ erts_smp_runqs_unlock(cp_rq, rp_rq);
+ erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_STATUS);
+ rp = erts_pid2proc(c_p, c_p_locks|ERTS_PROC_LOCK_STATUS,
+ pid, pid_locks|ERTS_PROC_LOCK_STATUS);
+ if (!rp)
+ goto done;
+ /* run-queues may have changed */
+ cp_rq = erts_get_runq_proc(c_p);
+ rp_rq = erts_get_runq_proc(rp);
+ erts_smp_runqs_lock(cp_rq, rp_rq);
+ if (rp->runq_flags & ERTS_PROC_RUNQ_FLG_RUNNING) {
+ /* Ahh... */
+ erts_smp_proc_unlock(rp,
+ pid_locks & ~ERTS_PROC_LOCK_STATUS);
+ goto running;
+ }
+ }
+
+ /* rp is not running and we got the locks we want... */
+ }
+ erts_smp_runqs_unlock(cp_rq, rp_rq);
+ }
+
+ done:
+ if (rp && rp != ERTS_PROC_LOCK_BUSY && !(pid_locks & ERTS_PROC_LOCK_STATUS))
+ erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_STATUS);
+ if (unlock_c_p_status)
+ erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_STATUS);
+ return rp;
+}
+
+/*
+ * erts_pid2proc_nropt() is normally the same as
+ * erts_pid2proc_not_running(). However, it is only
+ * to be used when 'not running' is a pure optimization,
+ * not a requirement.
+ */
+
+Process *
+erts_pid2proc_nropt(Process *c_p, ErtsProcLocks c_p_locks,
+ Eterm pid, ErtsProcLocks pid_locks)
+{
+ if (erts_disable_proc_not_running_opt)
+ return erts_pid2proc(c_p, c_p_locks, pid, pid_locks);
+ else
+ return erts_pid2proc_not_running(c_p, c_p_locks, pid, pid_locks);
+}
+
+static ERTS_INLINE void
+do_bif_suspend_process(ErtsSuspendMonitor *smon,
+ Process *suspendee,
+ ErtsRunQueue *locked_runq)
+{
+ ASSERT(suspendee);
+ ASSERT(!suspendee->is_exiting);
+ ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_STATUS
+ & erts_proc_lc_my_proc_locks(suspendee));
+ if (smon) {
+ if (!smon->active) {
+ ErtsRunQueue *rq;
+
+ if (locked_runq)
+ rq = locked_runq;
+ else {
+ rq = erts_get_runq_proc(suspendee);
+ erts_smp_runq_lock(rq);
+ }
+
+ suspend_process(rq, suspendee);
+
+ if (!locked_runq)
+ erts_smp_runq_unlock(rq);
+ }
+ smon->active += smon->pending;
+ ASSERT(smon->active);
+ smon->pending = 0;
+ }
+
+}
+
+static void
+handle_pend_bif_sync_suspend(Process *suspendee,
+ ErtsProcLocks suspendee_locks,
+ int suspendee_alive,
+ Eterm suspender_pid)
+{
+ Process *suspender;
+
+ ERTS_SMP_LC_ASSERT(suspendee_locks & ERTS_PROC_LOCK_STATUS);
+
+ suspender = erts_pid2proc(suspendee,
+ suspendee_locks,
+ suspender_pid,
+ ERTS_PROC_LOCK_LINK|ERTS_PROC_LOCK_STATUS);
+ if (suspender) {
+ ASSERT(is_nil(suspender->suspendee));
+ if (!suspendee_alive)
+ erts_delete_suspend_monitor(&suspender->suspend_monitors,
+ suspendee->id);
+ else {
+ ErtsSuspendMonitor *smon;
+ smon = erts_lookup_suspend_monitor(suspender->suspend_monitors,
+ suspendee->id);
+ do_bif_suspend_process(smon, suspendee, NULL);
+ suspender->suspendee = suspendee->id;
+ }
+ /* suspender is suspended waiting for suspendee to suspend;
+ resume suspender */
+ resume_process(suspender);
+ erts_smp_proc_unlock(suspender,
+ ERTS_PROC_LOCK_LINK|ERTS_PROC_LOCK_STATUS);
+ }
+}
+
+static void
+handle_pend_bif_async_suspend(Process *suspendee,
+ ErtsProcLocks suspendee_locks,
+ int suspendee_alive,
+ Eterm suspender_pid)
+{
+
+ Process *suspender;
+
+ ERTS_SMP_LC_ASSERT(suspendee_locks & ERTS_PROC_LOCK_STATUS);
+
+ suspender = erts_pid2proc(suspendee,
+ suspendee_locks,
+ suspender_pid,
+ ERTS_PROC_LOCK_LINK);
+ if (suspender) {
+ ASSERT(is_nil(suspender->suspendee));
+ if (!suspendee_alive)
+ erts_delete_suspend_monitor(&suspender->suspend_monitors,
+ suspendee->id);
+ else {
+ ErtsSuspendMonitor *smon;
+ smon = erts_lookup_suspend_monitor(suspender->suspend_monitors,
+ suspendee->id);
+ do_bif_suspend_process(smon, suspendee, NULL);
+ }
+ erts_smp_proc_unlock(suspender, ERTS_PROC_LOCK_LINK);
+ }
+}
+
+#endif /* ERTS_SMP */
+
+/*
+ * The erlang:suspend_process/2 BIF
+ */
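+
+/*
+ * Accepts an option list with the atoms 'unless_suspending' and/or
+ * 'asynchronous' (parsed below). Roughly (a sketch of the Erlang-level
+ * behaviour, Pid being some other process):
+ *
+ *     true = erlang:suspend_process(Pid, [asynchronous]),
+ *     false = erlang:suspend_process(Pid, [unless_suspending])
+ *
+ * With 'unless_suspending' the suspend count is not increased if the
+ * process is already suspended; without 'asynchronous' the caller does
+ * not return until the suspend has taken effect.
+ */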
+
+BIF_RETTYPE
+suspend_process_2(BIF_ALIST_2)
+{
+ Eterm res;
+ Process* suspendee = NULL;
+ ErtsSuspendMonitor *smon;
+ ErtsProcLocks xlocks = (ErtsProcLocks) 0;
+
+ /* Options and default values: */
+ int asynchronous = 0;
+ int unless_suspending = 0;
+
+
+ if (BIF_P->id == BIF_ARG_1)
+ goto badarg; /* We are not allowed to suspend ourselves */
+
+ if (is_not_nil(BIF_ARG_2)) {
+ /* Parse option list */
+ Eterm arg = BIF_ARG_2;
+
+ while (is_list(arg)) {
+ Eterm *lp = list_val(arg);
+ arg = CAR(lp);
+ switch (arg) {
+ case am_unless_suspending:
+ unless_suspending = 1;
+ break;
+ case am_asynchronous:
+ asynchronous = 1;
+ break;
+ default:
+ goto badarg;
+ }
+ arg = CDR(lp);
+ }
+ if (is_not_nil(arg))
+ goto badarg;
+ }
+
+ xlocks = ERTS_PROC_LOCK_LINK | (asynchronous
+ ? (ErtsProcLocks) 0
+ : ERTS_PROC_LOCK_STATUS);
+
+ erts_smp_proc_lock(BIF_P, xlocks);
+
+ suspendee = erts_pid2proc(BIF_P,
+ ERTS_PROC_LOCK_MAIN|xlocks,
+ BIF_ARG_1,
+ ERTS_PROC_LOCK_STATUS);
+ if (!suspendee)
+ goto no_suspendee;
+
+ smon = erts_add_or_lookup_suspend_monitor(&BIF_P->suspend_monitors,
+ BIF_ARG_1);
+#ifndef ERTS_SMP /* no ERTS_SMP */
+
+ /* This is really a piece of cake without SMP support... */
+ if (!smon->active) {
+ suspend_process(erts_common_run_queue, suspendee);
+ smon->active++;
+ res = am_true;
+ }
+ else if (unless_suspending)
+ res = am_false;
+ else if (smon->active == INT_MAX)
+ goto system_limit;
+ else {
+ smon->active++;
+ res = am_true;
+ }
+
+#else /* ERTS_SMP */
+
+ /* ... but a little trickier with SMP support ... */
+
+ if (asynchronous) {
+ /* --- Asynchronous suspend begin ---------------------------------- */
+
+ ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_LINK
+ & erts_proc_lc_my_proc_locks(BIF_P));
+ ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_STATUS
+ == erts_proc_lc_my_proc_locks(suspendee));
+
+ if (smon->active) {
+ smon->active += smon->pending;
+ smon->pending = 0;
+ if (unless_suspending)
+ res = am_false;
+ else if (smon->active == INT_MAX)
+ goto system_limit;
+ else {
+ smon->active++;
+ res = am_true;
+ }
+ /* done */
+ }
+ else {
+	    /* We haven't got any active suspends on the suspendee */
+ if (smon->pending && unless_suspending)
+ res = am_false;
+ else {
+ ErtsRunQueue *rq;
+ if (smon->pending == INT_MAX)
+ goto system_limit;
+
+ smon->pending++;
+ rq = erts_get_runq_proc(suspendee);
+ erts_smp_runq_lock(rq);
+
+ if (suspendee->runq_flags & ERTS_PROC_RUNQ_FLG_RUNNING)
+ add_pend_suspend(suspendee,
+ BIF_P->id,
+ handle_pend_bif_async_suspend);
+ else
+ do_bif_suspend_process(smon, suspendee, rq);
+ erts_smp_runq_unlock(rq);
+
+ res = am_true;
+ }
+ /* done */
+ }
+ /* --- Asynchronous suspend end ------------------------------------ */
+ }
+ else /* if (!asynchronous) */ {
+ /* --- Synchronous suspend begin ----------------------------------- */
+
+ ERTS_SMP_LC_ASSERT(((ERTS_PROC_LOCK_LINK|ERTS_PROC_LOCK_STATUS)
+ & erts_proc_lc_my_proc_locks(BIF_P))
+ == (ERTS_PROC_LOCK_LINK|ERTS_PROC_LOCK_STATUS));
+ ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_STATUS
+ == erts_proc_lc_my_proc_locks(suspendee));
+
+ if (BIF_P->suspendee == BIF_ARG_1) {
+ /* We are back after a yield and the suspendee
+	       has been suspended on our behalf. */
+ ASSERT(smon->active >= 1);
+ BIF_P->suspendee = NIL;
+ res = (!unless_suspending || smon->active == 1
+ ? am_true
+ : am_false);
+ /* done */
+ }
+ else if (smon->active) {
+ if (unless_suspending)
+ res = am_false;
+ else {
+ smon->active++;
+ res = am_true;
+ }
+ /* done */
+ }
+ else {
+ ErtsRunQueue *cp_rq, *s_rq;
+ /* We haven't got any active suspends on the suspendee */
+
+ /*
+	     * If we have pending suspenders and suspend ourselves waiting
+	     * to suspend another process, or if we suspend another process,
+	     * we might deadlock. In this case we have to yield,
+ * be suspended by someone else, and then do it all over again.
+ */
+ if (BIF_P->pending_suspenders)
+ goto yield;
+
+ if (!unless_suspending && smon->pending == INT_MAX)
+ goto system_limit;
+ if (!unless_suspending || smon->pending == 0)
+ smon->pending++;
+
+ cp_rq = erts_get_runq_proc(BIF_P);
+ s_rq = erts_get_runq_proc(suspendee);
+ erts_smp_runqs_lock(cp_rq, s_rq);
+ if (!(suspendee->runq_flags & ERTS_PROC_RUNQ_FLG_RUNNING)) {
+ do_bif_suspend_process(smon, suspendee, s_rq);
+ erts_smp_runqs_unlock(cp_rq, s_rq);
+ res = (!unless_suspending || smon->active == 1
+ ? am_true
+ : am_false);
+ /* done */
+ }
+ else {
+ /* Mark suspendee pending for suspend by BIF_P */
+ add_pend_suspend(suspendee,
+ BIF_P->id,
+ handle_pend_bif_sync_suspend);
+
+ ASSERT(is_nil(BIF_P->suspendee));
+
+ /*
+ * Suspend BIF_P; when suspendee is suspended, BIF_P
+ * will be resumed and this BIF will be called again.
+ * This time with BIF_P->suspendee == BIF_ARG_1 (see
+ * above).
+ */
+ suspend_process(cp_rq, BIF_P);
+ erts_smp_runqs_unlock(cp_rq, s_rq);
+ goto yield;
+ }
+ }
+ /* --- Synchronous suspend end ------------------------------------- */
+ }
+
+#endif /* ERTS_SMP */
+
+ ASSERT(suspendee->status == P_SUSPENDED || (asynchronous && smon->pending));
+ ASSERT(suspendee->status == P_SUSPENDED || !smon->active);
+
+ erts_smp_proc_unlock(suspendee, ERTS_PROC_LOCK_STATUS);
+ erts_smp_proc_unlock(BIF_P, xlocks);
+ BIF_RET(res);
+
+ system_limit:
+ ERTS_BIF_PREP_ERROR(res, BIF_P, SYSTEM_LIMIT);
+ goto do_return;
+
+ no_suspendee:
+#ifdef ERTS_SMP
+ BIF_P->suspendee = NIL;
+#endif
+ erts_delete_suspend_monitor(&BIF_P->suspend_monitors, BIF_ARG_1);
+
+ badarg:
+ ERTS_BIF_PREP_ERROR(res, BIF_P, BADARG);
+#ifdef ERTS_SMP
+ goto do_return;
+
+ yield:
+ ERTS_BIF_PREP_YIELD2(res, bif_export[BIF_suspend_process_2],
+ BIF_P, BIF_ARG_1, BIF_ARG_2);
+#endif
+
+ do_return:
+ if (suspendee)
+ erts_smp_proc_unlock(suspendee, ERTS_PROC_LOCK_STATUS);
+ if (xlocks)
+ erts_smp_proc_unlock(BIF_P, xlocks);
+ return res;
+
+}
+
+
+/*
+ * The erlang:resume_process/1 BIF
+ */
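+
+/*
+ * Note that suspends are counted (see smon->active and smon->pending
+ * below): each successful suspend_process/2 call must be matched by a
+ * resume_process/1 call, and the suspendee does not run again until
+ * the last outstanding suspend has been removed.
+ */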
+
+BIF_RETTYPE
+resume_process_1(BIF_ALIST_1)
+{
+ ErtsSuspendMonitor *smon;
+ Process *suspendee;
+ int is_active;
+
+ if (BIF_P->id == BIF_ARG_1)
+ BIF_ERROR(BIF_P, BADARG);
+
+ erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_LINK);
+ smon = erts_lookup_suspend_monitor(BIF_P->suspend_monitors, BIF_ARG_1);
+
+ if (!smon) {
+ /* No previous suspend or dead suspendee */
+ goto error;
+ }
+ else if (smon->pending) {
+ smon->pending--;
+ ASSERT(smon->pending >= 0);
+ if (smon->active) {
+ smon->active += smon->pending;
+ smon->pending = 0;
+ }
+ is_active = smon->active;
+ }
+ else if (smon->active) {
+ smon->active--;
+ ASSERT(smon->pending >= 0);
+ is_active = 1;
+ }
+ else {
+ /* No previous suspend or dead suspendee */
+ goto error;
+ }
+
+ if (smon->active || smon->pending || !is_active) {
+ /* Leave the suspendee as it is; just verify that it is still alive */
+ suspendee = erts_pid2proc(BIF_P,
+ ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_LINK,
+ BIF_ARG_1,
+ 0);
+ if (!suspendee)
+ goto no_suspendee;
+
+ }
+ else {
+ /* Resume */
+ suspendee = erts_pid2proc(BIF_P,
+ ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_LINK,
+ BIF_ARG_1,
+ ERTS_PROC_LOCK_STATUS);
+ if (!suspendee)
+ goto no_suspendee;
+
+ ASSERT(suspendee->status == P_SUSPENDED
+ || (suspendee->status == P_GARBING
+ && suspendee->gcstatus == P_SUSPENDED));
+ resume_process(suspendee);
+
+ erts_smp_proc_unlock(suspendee, ERTS_PROC_LOCK_STATUS);
+ }
+
+ if (!smon->active && !smon->pending)
+ erts_delete_suspend_monitor(&BIF_P->suspend_monitors, BIF_ARG_1);
+
+ erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_LINK);
+
+ BIF_RET(am_true);
+
+ no_suspendee:
+ /* cleanup */
+ erts_delete_suspend_monitor(&BIF_P->suspend_monitors, BIF_ARG_1);
+
+ error:
+ erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_LINK);
+ BIF_ERROR(BIF_P, BADARG);
+}
+
+Uint
+erts_run_queues_len(Uint *qlen)
+{
+ int i = 0;
+ Uint len = 0;
+ ERTS_ATOMIC_FOREACH_RUNQ(rq,
+ {
+ if (qlen)
+ qlen[i++] = rq->procs.len;
+ len += rq->procs.len;
+ }
+ );
+ return len;
+}
+
+#ifdef HARDDEBUG_RUNQS
+static void
+check_procs_runq(ErtsRunQueue *runq, Process *p_in_q, Process *p_not_in_q)
+{
+ int len[ERTS_NO_PROC_PRIO_LEVELS] = {0};
+ int tot_len;
+ int prioq, prio;
+ int found_p_in_q;
+ Process *p, *prevp;
+
+ found_p_in_q = 0;
+ for (prioq = 0; prioq < ERTS_NO_PROC_PRIO_LEVELS - 1; prioq++) {
+ prevp = NULL;
+ for (p = runq->procs.prio[prioq].first; p; p = p->next) {
+ ASSERT(p != p_not_in_q);
+ if (p == p_in_q)
+ found_p_in_q = 1;
+ switch (p->prio) {
+ case PRIORITY_MAX:
+ case PRIORITY_HIGH:
+ case PRIORITY_NORMAL:
+ ASSERT(prioq == p->prio);
+ break;
+ case PRIORITY_LOW:
+ ASSERT(prioq == PRIORITY_NORMAL);
+ break;
+ default:
+ ASSERT(!"Bad prio on process");
+ }
+ len[p->prio]++;
+ ASSERT(prevp == p->prev);
+ if (p->prev) {
+ ASSERT(p->prev->next == p);
+ }
+ else {
+ ASSERT(runq->procs.prio[prioq].first == p);
+ }
+ if (p->next) {
+ ASSERT(p->next->prev == p);
+ }
+ else {
+ ASSERT(runq->procs.prio[prioq].last == p);
+ }
+ ASSERT(p->run_queue == runq);
+ prevp = p;
+ }
+ }
+
+ ASSERT(!p_in_q || found_p_in_q);
+
+ tot_len = 0;
+ for (prio = 0; prio < ERTS_NO_PROC_PRIO_LEVELS; prio++) {
+ ASSERT(len[prio] == runq->procs.prio_info[prio].len);
+ if (len[prio]) {
+ ASSERT(runq->flags & (1 << prio));
+ }
+ else {
+ ASSERT(!(runq->flags & (1 << prio)));
+ }
+ tot_len += len[prio];
+ }
+ ASSERT(runq->procs.len == tot_len);
+}
+# define ERTS_DBG_CHK_PROCS_RUNQ(RQ) check_procs_runq((RQ), NULL, NULL)
+# define ERTS_DBG_CHK_PROCS_RUNQ_PROC(RQ, P) check_procs_runq((RQ), (P), NULL)
+# define ERTS_DBG_CHK_PROCS_RUNQ_NOPROC(RQ, P) check_procs_runq((RQ), NULL, (P))
+#else
+# define ERTS_DBG_CHK_PROCS_RUNQ(RQ)
+# define ERTS_DBG_CHK_PROCS_RUNQ_PROC(RQ, P)
+# define ERTS_DBG_CHK_PROCS_RUNQ_NOPROC(RQ, P)
+#endif
+
+
+static ERTS_INLINE void
+enqueue_process(ErtsRunQueue *runq, Process *p)
+{
+ ErtsRunPrioQueue *rpq;
+ ErtsRunQueueInfo *rqi;
+
+ ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(runq));
+ ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_STATUS & erts_proc_lc_my_proc_locks(p));
+
+ ASSERT(p->bound_runq || !(runq->flags & ERTS_RUNQ_FLG_SUSPENDED));
+
+ rqi = &runq->procs.prio_info[p->prio];
+ rqi->len++;
+ if (rqi->max_len < rqi->len)
+ rqi->max_len = rqi->len;
+
+ runq->procs.len++;
+ runq->len++;
+ if (runq->max_len < runq->len)
+ runq->max_len = runq->len;
+
+ runq->flags |= (1 << p->prio);
+
+ rpq = (p->prio == PRIORITY_LOW
+ ? &runq->procs.prio[PRIORITY_NORMAL]
+ : &runq->procs.prio[p->prio]);
+
+ p->next = NULL;
+ p->prev = rpq->last;
+ if (rpq->last)
+ rpq->last->next = p;
+ else
+ rpq->first = p;
+ rpq->last = p;
+
+ switch (p->status) {
+ case P_EXITING:
+ break;
+ case P_GARBING:
+ p->gcstatus = P_RUNABLE;
+ break;
+ default:
+ p->status = P_RUNABLE;
+ break;
+ }
+
+#ifdef ERTS_SMP
+ p->status_flags |= ERTS_PROC_SFLG_INRUNQ;
+#endif
+
+ ERTS_DBG_CHK_PROCS_RUNQ_PROC(runq, p);
+}
+
+
+static ERTS_INLINE int
+dequeue_process(ErtsRunQueue *runq, Process *p)
+{
+ ErtsRunPrioQueue *rpq;
+ int res = 1;
+
+ ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(runq));
+ ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_STATUS & erts_proc_lc_my_proc_locks(p));
+
+ ERTS_DBG_CHK_PROCS_RUNQ(runq);
+
+ rpq = &runq->procs.prio[p->prio == PRIORITY_LOW ? PRIORITY_NORMAL : p->prio];
+ if (p->prev) {
+ p->prev->next = p->next;
+ }
+ else if (rpq->first == p) {
+ rpq->first = p->next;
+ }
+ else {
+ res = 0;
+ }
+ if (p->next) {
+ p->next->prev = p->prev;
+ }
+ else if (rpq->last == p) {
+ rpq->last = p->prev;
+ }
+ else {
+ ASSERT(res == 0);
+ }
+
+ if (res) {
+
+ if (--runq->procs.prio_info[p->prio].len == 0)
+ runq->flags &= ~(1 << p->prio);
+ runq->procs.len--;
+ runq->len--;
+
+#ifdef ERTS_SMP
+ p->status_flags &= ~ERTS_PROC_SFLG_INRUNQ;
+#endif
+ }
+
+ ERTS_DBG_CHK_PROCS_RUNQ_NOPROC(runq, p);
+ return res;
+}
+
+/* schedule a process */
+static ERTS_INLINE void
+internal_add_to_runq(ErtsRunQueue *runq, Process *p)
+{
+ Uint32 prev_status = p->status;
+ ErtsRunQueue *add_runq;
+#ifdef ERTS_SMP
+
+ ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_STATUS & erts_proc_lc_my_proc_locks(p));
+ ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(runq));
+
+ if (p->status_flags & ERTS_PROC_SFLG_INRUNQ)
+ return;
+ else if (p->runq_flags & ERTS_PROC_RUNQ_FLG_RUNNING) {
+ ASSERT(p->status != P_SUSPENDED);
+ ERTS_DBG_CHK_PROCS_RUNQ_NOPROC(runq, p);
+ p->status_flags |= ERTS_PROC_SFLG_PENDADD2SCHEDQ;
+ return;
+ }
+ ASSERT(!p->scheduler_data);
+#endif
+
+ ERTS_DBG_CHK_PROCS_RUNQ_NOPROC(runq, p);
+#ifndef ERTS_SMP
+ /* Never schedule a suspended process (ok in smp case) */
+ ASSERT(p->status != P_SUSPENDED);
+ add_runq = runq;
+
+#else
+ ASSERT(!p->bound_runq || p->bound_runq == p->run_queue);
+ if (p->bound_runq) {
+ if (p->bound_runq == runq)
+ add_runq = runq;
+ else {
+ add_runq = p->bound_runq;
+ erts_smp_xrunq_lock(runq, add_runq);
+ }
+ }
+ else {
+ add_runq = erts_check_emigration_need(runq, p->prio);
+ if (!add_runq)
+ add_runq = runq;
+ else /* Process emigrated */
+ p->run_queue = add_runq;
+ }
+#endif
+
+ /* Enqueue the process */
+ enqueue_process(add_runq, p);
+
+ if ((erts_system_profile_flags.runnable_procs)
+ && (prev_status == P_WAITING
+ || prev_status == P_SUSPENDED)) {
+ profile_runnable_proc(p, am_active);
+ }
+
+ smp_notify_inc_runq(add_runq);
+
+ if (add_runq != runq)
+ erts_smp_runq_unlock(add_runq);
+}
+
+
+void
+erts_add_to_runq(Process *p)
+{
+ ErtsRunQueue *runq = erts_get_runq_proc(p);
+ erts_smp_runq_lock(runq);
+ internal_add_to_runq(runq, p);
+ erts_smp_runq_unlock(runq);
+}
+
+/* Possibly remove a scheduled process we need to suspend */
+
+static int
+remove_proc_from_runq(ErtsRunQueue *rq, Process *p, int to_inactive)
+{
+ int res;
+
+ ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_STATUS & erts_proc_lc_my_proc_locks(p));
+
+#ifdef ERTS_SMP
+ if (p->status_flags & ERTS_PROC_SFLG_PENDADD2SCHEDQ) {
+ p->status_flags &= ~ERTS_PROC_SFLG_PENDADD2SCHEDQ;
+ ASSERT(!remove_proc_from_runq(rq, p, 0));
+ return 1;
+ }
+#endif
+
+ res = dequeue_process(rq, p);
+
+ if (res && erts_system_profile_flags.runnable_procs && to_inactive)
+ profile_runnable_proc(p, am_inactive);
+
+#ifdef ERTS_SMP
+ ASSERT(!(p->status_flags & ERTS_PROC_SFLG_INRUNQ));
+#endif
+
+ return res;
+}
+
+#ifdef ERTS_SMP
+
+ErtsMigrateResult
+erts_proc_migrate(Process *p, ErtsProcLocks *plcks,
+ ErtsRunQueue *from_rq, int *from_locked,
+ ErtsRunQueue *to_rq, int *to_locked)
+{
+ ERTS_SMP_LC_ASSERT(*plcks == erts_proc_lc_my_proc_locks(p));
+ ERTS_SMP_LC_ASSERT((ERTS_PROC_LOCK_STATUS & *plcks)
+ || from_locked);
+ ERTS_SMP_LC_CHK_RUNQ_LOCK(from_rq, *from_locked);
+ ERTS_SMP_LC_CHK_RUNQ_LOCK(to_rq, *to_locked);
+
+ ASSERT(!erts_common_run_queue);
+
+ /*
+ * If we have the lock on the run queue to migrate to,
+ * check that it isn't suspended. If it is suspended,
+ * we will refuse to migrate to it anyway.
+ */
+ if (*to_locked && (to_rq->flags & ERTS_RUNQ_FLG_SUSPENDED))
+ return ERTS_MIGRATE_FAILED_RUNQ_SUSPENDED;
+
+ /* We need status lock on process and locks on both run queues */
+
+ if (!(ERTS_PROC_LOCK_STATUS & *plcks)) {
+ if (erts_smp_proc_trylock(p, ERTS_PROC_LOCK_STATUS) == EBUSY) {
+ ErtsProcLocks lcks = *plcks;
+ Eterm pid = p->id;
+ Process *proc = *plcks ? p : NULL;
+
+ if (*from_locked) {
+ *from_locked = 0;
+ erts_smp_runq_unlock(from_rq);
+ }
+ if (*to_locked) {
+ *to_locked = 0;
+ erts_smp_runq_unlock(to_rq);
+ }
+
+ proc = erts_pid2proc_opt(proc,
+ lcks,
+ pid,
+ lcks|ERTS_PROC_LOCK_STATUS,
+ ERTS_P2P_FLG_ALLOW_OTHER_X);
+ if (!proc) {
+ *plcks = 0;
+ return ERTS_MIGRATE_FAILED_NOT_IN_RUNQ;
+ }
+ ASSERT(proc == p);
+ }
+ *plcks |= ERTS_PROC_LOCK_STATUS;
+ }
+
+ ASSERT(!p->bound_runq);
+
+ ERTS_SMP_LC_CHK_RUNQ_LOCK(from_rq, *from_locked);
+ ERTS_SMP_LC_CHK_RUNQ_LOCK(to_rq, *to_locked);
+
+ if (p->run_queue != from_rq)
+ return ERTS_MIGRATE_FAILED_RUNQ_CHANGED;
+
+ if (!*from_locked || !*to_locked) {
+ if (from_rq < to_rq) {
+ if (!*to_locked) {
+ if (!*from_locked)
+ erts_smp_runq_lock(from_rq);
+ erts_smp_runq_lock(to_rq);
+ }
+ else if (erts_smp_runq_trylock(from_rq) == EBUSY) {
+ erts_smp_runq_unlock(to_rq);
+ erts_smp_runq_lock(from_rq);
+ erts_smp_runq_lock(to_rq);
+ }
+ }
+ else {
+ if (!*from_locked) {
+ if (!*to_locked)
+ erts_smp_runq_lock(to_rq);
+ erts_smp_runq_lock(from_rq);
+ }
+ else if (erts_smp_runq_trylock(to_rq) == EBUSY) {
+ erts_smp_runq_unlock(from_rq);
+ erts_smp_runq_lock(to_rq);
+ erts_smp_runq_lock(from_rq);
+ }
+ }
+ *to_locked = *from_locked = 1;
+ }
+
+ ERTS_SMP_LC_CHK_RUNQ_LOCK(from_rq, *from_locked);
+ ERTS_SMP_LC_CHK_RUNQ_LOCK(to_rq, *to_locked);
+
+    /* Ok, we now have all the locks we need; do it... */
+
+ /* Refuse to migrate to a suspended run queue */
+ if (to_rq->flags & ERTS_RUNQ_FLG_SUSPENDED)
+ return ERTS_MIGRATE_FAILED_RUNQ_SUSPENDED;
+
+ if ((p->runq_flags & ERTS_PROC_RUNQ_FLG_RUNNING)
+ || !(p->status_flags & ERTS_PROC_SFLG_INRUNQ))
+ return ERTS_MIGRATE_FAILED_NOT_IN_RUNQ;
+
+ dequeue_process(from_rq, p);
+ p->run_queue = to_rq;
+ enqueue_process(to_rq, p);
+
+ return ERTS_MIGRATE_SUCCESS;
+}
+#endif /* ERTS_SMP */
+
+Eterm
+erts_process_status(Process *c_p, ErtsProcLocks c_p_locks,
+ Process *rp, Eterm rpid)
+{
+ Eterm res = am_undefined;
+ Process *p;
+
+ if (rp) {
+ ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_STATUS
+ & erts_proc_lc_my_proc_locks(rp));
+ p = rp;
+ }
+ else {
+ p = erts_pid2proc_opt(c_p, c_p_locks,
+ rpid, ERTS_PROC_LOCK_STATUS,
+ ERTS_P2P_FLG_ALLOW_OTHER_X);
+ }
+
+ if (p) {
+ switch (p->status) {
+ case P_RUNABLE:
+ res = am_runnable;
+ break;
+ case P_WAITING:
+ res = am_waiting;
+ break;
+ case P_RUNNING:
+ res = am_running;
+ break;
+ case P_EXITING:
+ res = am_exiting;
+ break;
+ case P_GARBING:
+ res = am_garbage_collecting;
+ break;
+ case P_SUSPENDED:
+ res = am_suspended;
+ break;
+ case P_FREE: /* We cannot look up a process in P_FREE... */
+ default: /* Not a valid status... */
+ erl_exit(1, "Bad status (%b32u) found for process %T\n",
+ p->status, p->id);
+ break;
+ }
+
+#ifdef ERTS_SMP
+ if (!rp && (p != c_p || !(ERTS_PROC_LOCK_STATUS & c_p_locks)))
+ erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
+ }
+ else {
+ int i;
+ ErtsSchedulerData *esdp;
+
+ if (erts_common_run_queue)
+ erts_smp_runq_lock(erts_common_run_queue);
+
+ for (i = 0; i < erts_no_schedulers; i++) {
+ esdp = ERTS_SCHEDULER_IX(i);
+ if (!erts_common_run_queue)
+ erts_smp_runq_lock(esdp->run_queue);
+ if (esdp->free_process && esdp->free_process->id == rpid) {
+ res = am_free;
+ if (!erts_common_run_queue)
+ erts_smp_runq_unlock(esdp->run_queue);
+ break;
+ }
+ if (!erts_common_run_queue)
+ erts_smp_runq_unlock(esdp->run_queue);
+ }
+
+ if (erts_common_run_queue)
+ erts_smp_runq_unlock(erts_common_run_queue);
+#endif
+
+ }
+
+ return res;
+}
+
+/*
+** Suspend a process.
+** If the suspension is caused by a busy port, busy_port identifies
+** that port; otherwise busy_port is NULL.
+*/
+
+void
+erts_suspend(Process* process, ErtsProcLocks process_locks, Port *busy_port)
+{
+ ErtsRunQueue *rq;
+
+ ERTS_SMP_LC_ASSERT(process_locks == erts_proc_lc_my_proc_locks(process));
+ if (!(process_locks & ERTS_PROC_LOCK_STATUS))
+ erts_smp_proc_lock(process, ERTS_PROC_LOCK_STATUS);
+
+ rq = erts_get_runq_proc(process);
+
+ erts_smp_runq_lock(rq);
+
+ suspend_process(rq, process);
+
+ erts_smp_runq_unlock(rq);
+
+ if (busy_port)
+ erts_wake_process_later(busy_port, process);
+
+ if (!(process_locks & ERTS_PROC_LOCK_STATUS))
+ erts_smp_proc_unlock(process, ERTS_PROC_LOCK_STATUS);
+
+}
+
+void
+erts_resume(Process* process, ErtsProcLocks process_locks)
+{
+ ERTS_SMP_LC_ASSERT(process_locks == erts_proc_lc_my_proc_locks(process));
+ if (!(process_locks & ERTS_PROC_LOCK_STATUS))
+ erts_smp_proc_lock(process, ERTS_PROC_LOCK_STATUS);
+ resume_process(process);
+ if (!(process_locks & ERTS_PROC_LOCK_STATUS))
+ erts_smp_proc_unlock(process, ERTS_PROC_LOCK_STATUS);
+}
+
+int
+erts_resume_processes(ErtsProcList *plp)
+{
+ int nresumed = 0;
+ while (plp) {
+ Process *proc;
+ ErtsProcList *fplp;
+ ASSERT(is_internal_pid(plp->pid));
+ proc = erts_pid2proc(NULL, 0, plp->pid, ERTS_PROC_LOCK_STATUS);
+ if (proc) {
+ if (proclist_same(plp, proc)) {
+ resume_process(proc);
+ nresumed++;
+ }
+ erts_smp_proc_unlock(proc, ERTS_PROC_LOCK_STATUS);
+ }
+ fplp = plp;
+ plp = plp->next;
+ proclist_destroy(fplp);
+ }
+ return nresumed;
+}
+
+Eterm
+erts_get_process_priority(Process *p)
+{
+ ErtsRunQueue *rq;
+ Eterm value;
+ ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_STATUS & erts_proc_lc_my_proc_locks(p));
+ rq = erts_get_runq_proc(p);
+ erts_smp_runq_lock(rq);
+ switch(p->prio) {
+ case PRIORITY_MAX: value = am_max; break;
+ case PRIORITY_HIGH: value = am_high; break;
+ case PRIORITY_NORMAL: value = am_normal; break;
+ case PRIORITY_LOW: value = am_low; break;
+ default: ASSERT(0); value = am_undefined; break;
+ }
+ erts_smp_runq_unlock(rq);
+ return value;
+}
+
+Eterm
+erts_set_process_priority(Process *p, Eterm new_value)
+{
+ ErtsRunQueue *rq;
+ Eterm old_value;
+ ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_STATUS & erts_proc_lc_my_proc_locks(p));
+ rq = erts_get_runq_proc(p);
+#ifdef ERTS_SMP
+ ASSERT(!(p->status_flags & ERTS_PROC_SFLG_INRUNQ));
+#endif
+ erts_smp_runq_lock(rq);
+ switch(p->prio) {
+ case PRIORITY_MAX: old_value = am_max; break;
+ case PRIORITY_HIGH: old_value = am_high; break;
+ case PRIORITY_NORMAL: old_value = am_normal; break;
+ case PRIORITY_LOW: old_value = am_low; break;
+ default: ASSERT(0); old_value = am_undefined; break;
+ }
+ switch (new_value) {
+ case am_max: p->prio = PRIORITY_MAX; break;
+ case am_high: p->prio = PRIORITY_HIGH; break;
+ case am_normal: p->prio = PRIORITY_NORMAL; break;
+ case am_low: p->prio = PRIORITY_LOW; break;
+ default: old_value = THE_NON_VALUE; break;
+ }
+ erts_smp_runq_unlock(rq);
+ return old_value;
+}
+
+#ifdef ERTS_SMP
+
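+/*
+ * Try to claim the (single) erl_sys_schedule() slot. The atomic xchg
+ * on doing_sys_schedule both tests and sets the flag; outstanding port
+ * I/O tasks are re-checked after the xchg to close the race where such
+ * a task is scheduled in between. Returns non-zero if the caller has
+ * claimed the slot and should do the system schedule.
+ */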
+static ERTS_INLINE int
+prepare_for_sys_schedule(void)
+{
+ while (!erts_port_task_have_outstanding_io_tasks()
+ && !erts_smp_atomic_xchg(&doing_sys_schedule, 1)) {
+ if (!erts_port_task_have_outstanding_io_tasks())
+ return 1;
+ erts_smp_atomic_set(&doing_sys_schedule, 0);
+ }
+ return 0;
+}
+
+#else
+
+static ERTS_INLINE int
+prepare_for_sys_schedule(void)
+{
+ return !erts_port_task_have_outstanding_io_tasks();
+}
+
+#endif
+
+/* Note that P_RUNNING is only set so that we don't try to remove
+** running processes from the schedule queue if they exit; a running
+** process is not in the schedule queue.
+**
+** Schedule for up to INPUT_REDUCTIONS context switches;
+** return 1 if there is more to do.
+*/
+
+/*
+ * schedule() is called from BEAM (process_main()) or HiPE
+ * (hipe_mode_switch()) when the current process is to be
+ * replaced by a new process. 'calls' is the number of reduction
+ * steps the current process consumed.
+ * schedule() returns the new process, and the new process'
+ * ->fcalls field is initialised with its allowable number of
+ * reduction steps.
+ *
+ * When no process is runnable, or when sufficiently many reduction
+ * steps have been made, schedule() calls erl_sys_schedule() to
+ * schedule system-level activities.
+ *
+ * We use the same queue for normal and low prio processes.
+ * We reschedule low prio processes a certain number of times
+ * so that normal processes get to run more frequently.
+ */
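+
+/*
+ * A sketch of the caller's contract (illustrative only, not the actual
+ * emulator loop in process_main()):
+ *
+ *     Process *p = schedule(NULL, 0);        (very first call)
+ *     for (;;) {
+ *         int reds = p->fcalls;              (allowed reductions)
+ *         ... run p, counting consumed reduction steps in 'calls' ...
+ *         p = schedule(p, calls);
+ *     }
+ */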
+
+Process *schedule(Process *p, int calls)
+{
+ ErtsRunQueue *rq;
+ ErtsRunPrioQueue *rpq;
+ long dt;
+ ErtsSchedulerData *esdp;
+ int context_reds;
+ long fcalls;
+ int input_reductions;
+ int actual_reds;
+ int reds;
+
+ if (ERTS_USE_MODIFIED_TIMING()) {
+ context_reds = ERTS_MODIFIED_TIMING_CONTEXT_REDS;
+ input_reductions = ERTS_MODIFIED_TIMING_INPUT_REDS;
+ }
+ else {
+ context_reds = CONTEXT_REDS;
+ input_reductions = INPUT_REDUCTIONS;
+ }
+
+ ERTS_SMP_LC_ASSERT(!ERTS_LC_IS_BLOCKING);
+
+ /*
+ * Clean up after the process being scheduled out.
+ */
+ if (!p) { /* NULL in the very first schedule() call */
+ esdp = erts_get_scheduler_data();
+ rq = erts_get_runq_current(esdp);
+ ASSERT(esdp);
+ fcalls = erts_smp_atomic_read(&function_calls);
+ actual_reds = reds = 0;
+ erts_smp_runq_lock(rq);
+ } else {
+#ifdef ERTS_SMP
+ ERTS_SMP_CHK_HAVE_ONLY_MAIN_PROC_LOCK(p);
+ esdp = p->scheduler_data;
+ ASSERT(esdp->current_process == p
+ || esdp->free_process == p);
+#else
+ esdp = erts_scheduler_data;
+ ASSERT(esdp->current_process == p);
+#endif
+ reds = actual_reds = calls - esdp->virtual_reds;
+ if (reds < ERTS_PROC_MIN_CONTEXT_SWITCH_REDS_COST)
+ reds = ERTS_PROC_MIN_CONTEXT_SWITCH_REDS_COST;
+ esdp->virtual_reds = 0;
+
+ fcalls = erts_smp_atomic_addtest(&function_calls, reds);
+ ASSERT(esdp && esdp == erts_get_scheduler_data());
+
+ rq = erts_get_runq_current(esdp);
+
+ p->reds += actual_reds;
+
+ erts_smp_proc_lock(p, ERTS_PROC_LOCK_STATUS);
+
+ if ((erts_system_profile_flags.runnable_procs)
+ && (p->status == P_WAITING)) {
+ profile_runnable_proc(p, am_inactive);
+ }
+
+ if (IS_TRACED(p)) {
+ switch (p->status) {
+ case P_EXITING:
+ if (ARE_TRACE_FLAGS_ON(p, F_TRACE_SCHED_EXIT))
+ trace_sched(p, am_out_exiting);
+ break;
+ case P_FREE:
+ if (ARE_TRACE_FLAGS_ON(p, F_TRACE_SCHED_EXIT))
+ trace_sched(p, am_out_exited);
+ break;
+ default:
+ if (ARE_TRACE_FLAGS_ON(p, F_TRACE_SCHED))
+ trace_sched(p, am_out);
+ else if (ARE_TRACE_FLAGS_ON(p, F_TRACE_SCHED_PROCS))
+ trace_virtual_sched(p, am_out);
+ break;
+ }
+ }
+
+#ifdef ERTS_SMP
+ if (ERTS_PROC_PENDING_EXIT(p)) {
+ erts_handle_pending_exit(p,
+ ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_STATUS);
+ p->status_flags |= ERTS_PROC_SFLG_PENDADD2SCHEDQ;
+ }
+
+ if (p->pending_suspenders) {
+ handle_pending_suspend(p,
+ ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_STATUS);
+ ASSERT(!(p->status_flags & ERTS_PROC_SFLG_PENDADD2SCHEDQ)
+ || p->status != P_SUSPENDED);
+ }
+#endif
+ erts_smp_runq_lock(rq);
+
+ ERTS_PROC_REDUCTIONS_EXECUTED(rq, p->prio, reds, actual_reds);
+
+ esdp->current_process = NULL;
+#ifdef ERTS_SMP
+ p->scheduler_data = NULL;
+ p->runq_flags &= ~ERTS_PROC_RUNQ_FLG_RUNNING;
+ p->status_flags &= ~ERTS_PROC_SFLG_RUNNING;
+
+ if (p->status_flags & ERTS_PROC_SFLG_PENDADD2SCHEDQ) {
+ p->status_flags &= ~ERTS_PROC_SFLG_PENDADD2SCHEDQ;
+ internal_add_to_runq(rq, p);
+ }
+#endif
+
+
+ if (p->status == P_FREE) {
+#ifdef ERTS_SMP
+ ASSERT(esdp->free_process == p);
+ esdp->free_process = NULL;
+ erts_smp_proc_unlock(p, ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_STATUS);
+ erts_smp_proc_dec_refc(p);
+#else
+ erts_free_proc(p);
+#endif
+ } else {
+ erts_smp_proc_unlock(p, ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_STATUS);
+ }
+
+#ifdef ERTS_SMP
+ {
+ ErtsProcList *pnd_xtrs = rq->procs.pending_exiters;
+ rq->procs.pending_exiters = NULL;
+
+ if (pnd_xtrs) {
+ erts_smp_runq_unlock(rq);
+ handle_pending_exiters(pnd_xtrs);
+ erts_smp_runq_lock(rq);
+ }
+
+ }
+ ASSERT(!esdp->free_process);
+#endif
+ ASSERT(!esdp->current_process);
+
+ ERTS_SMP_CHK_NO_PROC_LOCKS;
+
+ dt = do_time_read_and_reset();
+ if (dt) {
+ erts_smp_runq_unlock(rq);
+ bump_timer(dt);
+ erts_smp_runq_lock(rq);
+ }
+ BM_STOP_TIMER(system);
+
+ }
+
+ ERTS_SMP_LC_ASSERT(!ERTS_LC_IS_BLOCKING);
+ check_activities_to_run: {
+
+#ifdef ERTS_SMP
+
+ if (!(rq->flags & ERTS_RUNQ_FLG_SHARED_RUNQ)
+ && rq->check_balance_reds <= 0) {
+ check_balance(rq);
+ }
+
+ ERTS_SMP_LC_ASSERT(!ERTS_LC_IS_BLOCKING);
+ ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq));
+
+ if (rq->flags & ERTS_RUNQ_FLGS_IMMIGRATE_QMASK)
+ immigrate(rq);
+
+ continue_check_activities_to_run:
+
+ if (rq->flags & (ERTS_RUNQ_FLG_SHARED_RUNQ
+ | ERTS_RUNQ_FLG_CHK_CPU_BIND
+ | ERTS_RUNQ_FLG_SUSPENDED)) {
+ if ((rq->flags & ERTS_RUNQ_FLG_SUSPENDED)
+ || erts_smp_atomic_read(&esdp->suspended)) {
+ suspend_scheduler(esdp);
+ }
+ if ((rq->flags & ERTS_RUNQ_FLG_CHK_CPU_BIND)
+ || erts_smp_atomic_read(&esdp->chk_cpu_bind)) {
+ check_cpu_bind(esdp);
+ }
+ }
+
+#ifdef ERTS_SMP_SCHEDULERS_NEED_TO_CHECK_CHILDREN
+ if (esdp->check_children) {
+ esdp->check_children = 0;
+ erts_smp_runq_unlock(rq);
+ erts_check_children();
+ erts_smp_runq_lock(rq);
+ }
+#endif
+
+ erts_smp_chk_system_block(prepare_for_block,
+ resume_after_block,
+ (void *) rq);
+
+ ERTS_SMP_LC_ASSERT(!ERTS_LC_IS_BLOCKING);
+ ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq));
+
+#endif
+
+ ASSERT(rq->len == rq->procs.len + rq->ports.info.len);
+
+#ifndef ERTS_SMP
+
+ if (rq->len == 0 && !rq->misc.start)
+ goto do_sys_schedule;
+
+#else /* ERTS_SMP */
+ if (rq->len == 0 && !rq->misc.start) {
+ ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq));
+
+ rq->wakeup_other = 0;
+ rq->wakeup_other_reds = 0;
+
+ empty_runq(rq);
+
+ if (rq->flags & (ERTS_RUNQ_FLG_SHARED_RUNQ
+ | ERTS_RUNQ_FLG_SUSPENDED)) {
+ if ((rq->flags & ERTS_RUNQ_FLG_SUSPENDED)
+ || erts_smp_atomic_read(&esdp->suspended)) {
+ non_empty_runq(rq);
+ goto continue_check_activities_to_run;
+ }
+ }
+ else if (!(rq->flags & ERTS_RUNQ_FLG_INACTIVE)) {
+ /*
+		 * The check for ERTS_RUNQ_FLG_SUSPENDED has to be done
+ * after trying to steal a task.
+ */
+ if (try_steal_task(rq)
+ || (rq->flags & ERTS_RUNQ_FLG_SUSPENDED)) {
+ non_empty_runq(rq);
+ goto continue_check_activities_to_run;
+ }
+ }
+
+ if (prepare_for_sys_schedule()) {
+ erts_smp_atomic_set(&function_calls, 0);
+ fcalls = 0;
+ sched_sys_wait(esdp->no, rq);
+ erts_smp_atomic_set(&doing_sys_schedule, 0);
+ }
+ else {
+ /* If all schedulers are waiting, one of them *should*
+ be waiting in erl_sys_schedule() */
+ sched_cnd_wait(esdp->no, rq);
+ }
+
+ non_empty_runq(rq);
+
+ goto check_activities_to_run;
+ }
+ else
+#endif /* ERTS_SMP */
+ if (fcalls > input_reductions && prepare_for_sys_schedule()) {
+ int runnable;
+
+#ifdef ERTS_SMP
+ runnable = 1;
+#else
+ do_sys_schedule:
+ runnable = rq->len != 0;
+ if (!runnable)
+ sched_waiting_sys(esdp->no, rq);
+#endif
+
+ /*
+ * Schedule system-level activities.
+ */
+
+ erts_smp_atomic_set(&function_calls, 0);
+ fcalls = 0;
+ ASSERT(!erts_port_task_have_outstanding_io_tasks());
+#ifdef ERTS_SMP
+ /* erts_sys_schedule_interrupt(0); */
+#endif
+ erts_smp_runq_unlock(rq);
+ erl_sys_schedule(runnable);
+ dt = do_time_read_and_reset();
+ if (dt) bump_timer(dt);
+#ifdef ERTS_SMP
+ erts_smp_runq_lock(rq);
+ erts_smp_atomic_set(&doing_sys_schedule, 0);
+ goto continue_check_activities_to_run;
+#else
+ if (!runnable)
+ sched_active_sys(esdp->no, rq);
+ goto check_activities_to_run;
+#endif
+ }
+
+ if (rq->misc.start)
+ exec_misc_ops(rq);
+
+#ifdef ERTS_SMP
+ {
+ int wo_reds = rq->wakeup_other_reds;
+ if (wo_reds) {
+ if (rq->len < 2) {
+ rq->wakeup_other -= ERTS_WAKEUP_OTHER_DEC*wo_reds;
+ if (rq->wakeup_other < 0)
+ rq->wakeup_other = 0;
+ }
+ else if (rq->wakeup_other < ERTS_WAKEUP_OTHER_LIMIT)
+ rq->wakeup_other += rq->len*wo_reds + ERTS_WAKEUP_OTHER_FIXED_INC;
+ else {
+ if (erts_common_run_queue) {
+ if (erts_common_run_queue->waiting)
+ wake_one_scheduler();
+ }
+ else if (erts_smp_atomic_read(&no_empty_run_queues) != 0) {
+ wake_scheduler_on_empty_runq(rq);
+ rq->wakeup_other = 0;
+ }
+ rq->wakeup_other = 0;
+ }
+ }
+ rq->wakeup_other_reds = 0;
+ }
+#endif
+
+ /*
+ * Find a new port to run.
+ */
+
+ if (rq->ports.info.len) {
+ int have_outstanding_io;
+ have_outstanding_io = erts_port_task_execute(rq, &esdp->current_port);
+ if (have_outstanding_io && fcalls > 2*input_reductions) {
+ /*
+ * If we have performed more than 2*INPUT_REDUCTIONS since
+ * last call to erl_sys_schedule() and we still haven't
+ * handled all I/O tasks we stop running processes and
+ * focus completely on ports.
+ *
+ * One could argue that this is a strange behavior. The
+ * reason for doing it this way is that it is similar
+ * to the behavior before port tasks were introduced.
+ * We don't want to change the behavior too much, at
+ * least not at the time of writing. This behavior
+ * might change in the future.
+ *
+ * /rickard
+ */
+ goto check_activities_to_run;
+ }
+ }
+
+ /*
+ * Find a new process to run.
+ */
+ pick_next_process:
+
+ ERTS_DBG_CHK_PROCS_RUNQ(rq);
+
+ switch (rq->flags & ERTS_RUNQ_FLGS_PROCS_QMASK) {
+ case MAX_BIT:
+ case MAX_BIT|HIGH_BIT:
+ case MAX_BIT|NORMAL_BIT:
+ case MAX_BIT|LOW_BIT:
+ case MAX_BIT|HIGH_BIT|NORMAL_BIT:
+ case MAX_BIT|HIGH_BIT|LOW_BIT:
+ case MAX_BIT|NORMAL_BIT|LOW_BIT:
+ case MAX_BIT|HIGH_BIT|NORMAL_BIT|LOW_BIT:
+ rpq = &rq->procs.prio[PRIORITY_MAX];
+ break;
+ case HIGH_BIT:
+ case HIGH_BIT|NORMAL_BIT:
+ case HIGH_BIT|LOW_BIT:
+ case HIGH_BIT|NORMAL_BIT|LOW_BIT:
+ rpq = &rq->procs.prio[PRIORITY_HIGH];
+ break;
+ case NORMAL_BIT:
+ rpq = &rq->procs.prio[PRIORITY_NORMAL];
+ break;
+ case LOW_BIT:
+ rpq = &rq->procs.prio[PRIORITY_NORMAL];
+ break;
+ case NORMAL_BIT|LOW_BIT:
+ rpq = &rq->procs.prio[PRIORITY_NORMAL];
+ ASSERT(rpq->first != NULL);
+ p = rpq->first;
+ if (p->prio == PRIORITY_LOW) {
+ if (p == rpq->last || p->skipped >= RESCHEDULE_LOW-1)
+ p->skipped = 0;
+ else {
+ /* skip it */
+ p->skipped++;
+ rpq->first = p->next;
+ rpq->first->prev = NULL;
+ rpq->last->next = p;
+ p->prev = rpq->last;
+ p->next = NULL;
+ rpq->last = p;
+ goto pick_next_process;
+ }
+ }
+ break;
+ case 0: /* No process at all */
+ default:
+ ASSERT((rq->flags & ERTS_RUNQ_FLGS_PROCS_QMASK) == 0);
+ ASSERT(rq->procs.len == 0);
+ goto check_activities_to_run;
+ }
+
+ BM_START_TIMER(system);
+
+ /*
+ * Take the chosen process out of the queue.
+ */
+ ASSERT(rpq->first); /* Wrong qmask in rq->flags? */
+ p = rpq->first;
+#ifdef ERTS_SMP
+ ERTS_SMP_LC_ASSERT(rq == p->run_queue);
+#endif
+ rpq->first = p->next;
+ if (!rpq->first)
+ rpq->last = NULL;
+ else
+ rpq->first->prev = NULL;
+
+ p->next = p->prev = NULL;
+
+ if (--rq->procs.prio_info[p->prio].len == 0)
+ rq->flags &= ~(1 << p->prio);
+ ASSERT(rq->procs.len > 0);
+ rq->procs.len--;
+ ASSERT(rq->len > 0);
+ rq->len--;
+
+ {
+ Uint32 ee_flgs = (ERTS_RUNQ_FLG_EVACUATE(p->prio)
+ | ERTS_RUNQ_FLG_EMIGRATE(p->prio));
+
+ if ((rq->flags & (ERTS_RUNQ_FLG_SUSPENDED|ee_flgs)) == ee_flgs)
+ ERTS_UNSET_RUNQ_FLG_EVACUATE(rq->flags, p->prio);
+ }
+
+ ERTS_DBG_CHK_PROCS_RUNQ_NOPROC(rq, p);
+
+ rq->procs.context_switches++;
+
+ esdp->current_process = p;
+
+#ifdef ERTS_SMP
+ p->runq_flags |= ERTS_PROC_RUNQ_FLG_RUNNING;
+ erts_smp_runq_unlock(rq);
+
+ ERTS_SMP_CHK_NO_PROC_LOCKS;
+
+ erts_smp_proc_lock(p, ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_STATUS);
+
+ if (erts_sched_stat.enabled) {
+ Uint old = ERTS_PROC_SCHED_ID(p,
+ (ERTS_PROC_LOCK_MAIN
+ | ERTS_PROC_LOCK_STATUS),
+ esdp->no);
+ int migrated = old && old != esdp->no;
+
+ erts_smp_spin_lock(&erts_sched_stat.lock);
+ erts_sched_stat.prio[p->prio].total_executed++;
+ erts_sched_stat.prio[p->prio].executed++;
+ if (migrated) {
+ erts_sched_stat.prio[p->prio].total_migrated++;
+ erts_sched_stat.prio[p->prio].migrated++;
+ }
+ erts_smp_spin_unlock(&erts_sched_stat.lock);
+ }
+
+ p->status_flags |= ERTS_PROC_SFLG_RUNNING;
+ p->status_flags &= ~ERTS_PROC_SFLG_INRUNQ;
+ if (ERTS_PROC_PENDING_EXIT(p)) {
+ erts_handle_pending_exit(p,
+ ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_STATUS);
+ }
+ ASSERT(!p->scheduler_data);
+ p->scheduler_data = esdp;
+
+#endif
+ ASSERT(p->status != P_SUSPENDED); /* Never run a suspended process */
+
+ ACTIVATE(p);
+ reds = context_reds;
+
+ if (IS_TRACED(p)) {
+ switch (p->status) {
+ case P_EXITING:
+ if (ARE_TRACE_FLAGS_ON(p, F_TRACE_SCHED_EXIT))
+ trace_sched(p, am_in_exiting);
+ break;
+ default:
+ if (ARE_TRACE_FLAGS_ON(p, F_TRACE_SCHED))
+ trace_sched(p, am_in);
+ else if (ARE_TRACE_FLAGS_ON(p, F_TRACE_SCHED_PROCS))
+ trace_virtual_sched(p, am_in);
+ break;
+ }
+ }
+ if (p->status != P_EXITING)
+ p->status = P_RUNNING;
+
+ erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
+
+#ifdef ERTS_SMP
+ if (is_not_nil(p->tracer_proc))
+ erts_check_my_tracer_proc(p);
+#endif
+
+ if ((FLAGS(p) & F_FORCE_GC) || (MSO(p).overhead >= BIN_VHEAP_SZ(p))) {
+ reds -= erts_garbage_collect(p, 0, p->arg_reg, p->arity);
+ if (reds < 0) {
+ reds = 1;
+ }
+ }
+
+ p->fcalls = reds;
+ ASSERT(IS_ACTIVE(p));
+ ERTS_SMP_CHK_HAVE_ONLY_MAIN_PROC_LOCK(p);
+ return p;
+ }
+}
+
+void
+erts_sched_stat_modify(int what)
+{
+ int ix;
+ switch (what) {
+ case ERTS_SCHED_STAT_MODIFY_ENABLE:
+ erts_smp_block_system(0);
+ erts_sched_stat.enabled = 1;
+ erts_smp_release_system();
+ break;
+ case ERTS_SCHED_STAT_MODIFY_DISABLE:
+ erts_smp_block_system(0);
+	erts_sched_stat.enabled = 0;
+ erts_smp_release_system();
+ break;
+ case ERTS_SCHED_STAT_MODIFY_CLEAR:
+ erts_smp_spin_lock(&erts_sched_stat.lock);
+ for (ix = 0; ix < ERTS_NO_PRIO_LEVELS; ix++) {
+ erts_sched_stat.prio[ix].total_executed = 0;
+ erts_sched_stat.prio[ix].executed = 0;
+ erts_sched_stat.prio[ix].total_migrated = 0;
+ erts_sched_stat.prio[ix].migrated = 0;
+ }
+ erts_smp_spin_unlock(&erts_sched_stat.lock);
+ break;
+ }
+}
+
+Eterm
+erts_sched_stat_term(Process *p, int total)
+{
+ Uint sz;
+ Uint *hp;
+ Eterm prio[ERTS_NO_PRIO_LEVELS];
+ Uint executed[ERTS_NO_PRIO_LEVELS];
+ Uint migrated[ERTS_NO_PRIO_LEVELS];
+
+ erts_smp_spin_lock(&erts_sched_stat.lock);
+ if (total) {
+ int i;
+ for (i = 0; i < ERTS_NO_PRIO_LEVELS; i++) {
+ prio[i] = erts_sched_stat.prio[i].name;
+ executed[i] = erts_sched_stat.prio[i].total_executed;
+ migrated[i] = erts_sched_stat.prio[i].total_migrated;
+ }
+ }
+ else {
+ int i;
+ for (i = 0; i < ERTS_NO_PRIO_LEVELS; i++) {
+ prio[i] = erts_sched_stat.prio[i].name;
+ executed[i] = erts_sched_stat.prio[i].executed;
+ erts_sched_stat.prio[i].executed = 0;
+ migrated[i] = erts_sched_stat.prio[i].migrated;
+ erts_sched_stat.prio[i].migrated = 0;
+ }
+ }
+ erts_smp_spin_unlock(&erts_sched_stat.lock);
+
+ sz = 0;
+ (void) erts_bld_atom_2uint_3tup_list(NULL, &sz, ERTS_NO_PRIO_LEVELS,
+ prio, executed, migrated);
+ hp = HAlloc(p, sz);
+ return erts_bld_atom_2uint_3tup_list(&hp, NULL, ERTS_NO_PRIO_LEVELS,
+ prio, executed, migrated);
+}
+
+/*
+ * Scheduling of misc stuff
+ */
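+
+/*
+ * erts_schedule_misc_op() queues a callback to be run later in a
+ * scheduler context (see exec_misc_ops()). A sketch of the intended
+ * use, with do_cleanup being a made-up example callback:
+ *
+ *     static void do_cleanup(void *arg)
+ *     {
+ *         ... runs on some scheduler thread, outside any process ...
+ *     }
+ *
+ *     erts_schedule_misc_op(do_cleanup, (void *) state);
+ */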
+
+void
+erts_schedule_misc_op(void (*func)(void *), void *arg)
+{
+ ErtsRunQueue *rq = erts_get_runq_current(NULL);
+ ErtsMiscOpList *molp = misc_op_list_alloc();
+
+ erts_smp_runq_lock(rq);
+
+ while (rq->misc.evac_runq) {
+ ErtsRunQueue *tmp_rq = rq->misc.evac_runq;
+ erts_smp_runq_unlock(rq);
+ rq = tmp_rq;
+ erts_smp_runq_lock(rq);
+ }
+
+ ASSERT(!(rq->flags & ERTS_RUNQ_FLG_SUSPENDED));
+
+ molp->next = NULL;
+ molp->func = func;
+ molp->arg = arg;
+ if (rq->misc.end)
+ rq->misc.end->next = molp;
+ else
+ rq->misc.start = molp;
+ rq->misc.end = molp;
+ smp_notify_inc_runq(rq);
+ erts_smp_runq_unlock(rq);
+}
+
+static void
+exec_misc_ops(ErtsRunQueue *rq)
+{
+ int i;
+ ErtsMiscOpList *molp = rq->misc.start;
+ ErtsMiscOpList *tmp_molp = molp;
+
+ for (i = 0; i < ERTS_MAX_MISC_OPS-1; i++) {
+ if (!tmp_molp)
+ goto mtq;
+ tmp_molp = tmp_molp->next;
+ }
+
+ if (!tmp_molp) {
+ mtq:
+ rq->misc.start = NULL;
+ rq->misc.end = NULL;
+ }
+ else {
+ rq->misc.start = tmp_molp->next;
+ tmp_molp->next = NULL;
+ if (!rq->misc.start)
+ rq->misc.end = NULL;
+ }
+
+ erts_smp_runq_unlock(rq);
+
+ while (molp) {
+ tmp_molp = molp;
+ (*molp->func)(molp->arg);
+ molp = molp->next;
+ misc_op_list_free(tmp_molp);
+ }
+
+ erts_smp_runq_lock(rq);
+}
+
+Uint
+erts_get_total_context_switches(void)
+{
+ Uint res = 0;
+ ERTS_ATOMIC_FOREACH_RUNQ(rq, res += rq->procs.context_switches);
+ return res;
+}
+
+void
+erts_get_total_reductions(Uint *redsp, Uint *diffp)
+{
+ Uint reds = 0;
+ ERTS_ATOMIC_FOREACH_RUNQ_X(rq,
+
+ reds += rq->procs.reductions,
+
+ if (redsp) *redsp = reds;
+ if (diffp) *diffp = reds - last_reductions;
+ last_reductions = reds);
+}
+
+void
+erts_get_exact_total_reductions(Process *c_p, Uint *redsp, Uint *diffp)
+{
+ Uint reds = erts_current_reductions(c_p, c_p);
+ int ix;
+ erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN);
+ /*
+ * Wait for other schedulers to schedule out their processes
+ * and update 'reductions'.
+ */
+ erts_smp_block_system(0);
+ for (reds = 0, ix = 0; ix < erts_no_run_queues; ix++)
+ reds += ERTS_RUNQ_IX(ix)->procs.reductions;
+ if (redsp)
+ *redsp = reds;
+ if (diffp)
+ *diffp = reds - last_exact_reductions;
+ last_exact_reductions = reds;
+ erts_smp_release_system();
+ erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MAIN);
+}
+
+/*
+ * erts_test_next_pid() is only used for testing.
+ */
+Sint
+erts_test_next_pid(int set, Uint next)
+{
+ Sint res;
+ Sint p_prev;
+
+
+ erts_smp_mtx_lock(&proc_tab_mtx);
+
+ if (!set) {
+ res = p_next < 0 ? -1 : (p_serial << p_serial_shift | p_next);
+ }
+ else {
+
+ p_serial = (Sint) ((next >> p_serial_shift) & p_serial_mask);
+ p_next = (Sint) (erts_process_tab_index_mask & next);
+
+ if (p_next >= erts_max_processes) {
+ p_next = 0;
+ p_serial++;
+ p_serial &= p_serial_mask;
+ }
+
+ p_prev = p_next;
+
+ do {
+ if (!process_tab[p_next])
+ break;
+ p_next++;
+ if(p_next >= erts_max_processes) {
+ p_next = 0;
+ p_serial++;
+ p_serial &= p_serial_mask;
+ }
+ } while (p_prev != p_next);
+
+ res = process_tab[p_next] ? -1 : (p_serial << p_serial_shift | p_next);
+
+ }
+
+ erts_smp_mtx_unlock(&proc_tab_mtx);
+
+ return res;
+
+}
+
+Uint erts_process_count(void)
+{
+ long res = erts_smp_atomic_read(&process_count);
+ ASSERT(res >= 0);
+ return (Uint) res;
+}
+
+void
+erts_free_proc(Process *p)
+{
+#if defined(ERTS_ENABLE_LOCK_COUNT) && defined(ERTS_SMP)
+ erts_lcnt_proc_lock_destroy(p);
+#endif
+ erts_free(ERTS_ALC_T_PROC, (void *) p);
+}
+
+
+/*
+** Allocate process and find out where to place next process.
+*/
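+/*
+** A pid is composed as (p_serial << p_serial_shift) | p_next, where
+** p_next is the slot index in process_tab and p_serial is bumped each
+** time the slot search below wraps around the table, so a reused slot
+** yields a fresh pid. For example, with slot 17 and serial 3 the value
+** passed to make_internal_pid() is (3 << p_serial_shift) | 17.
+*/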
+static Process*
+alloc_process(void)
+{
+#ifdef ERTS_SMP
+ erts_pix_lock_t *pix_lock;
+#endif
+ Process* p;
+ int p_prev;
+
+ erts_smp_mtx_lock(&proc_tab_mtx);
+
+ if (p_next == -1) {
+ p = NULL;
+ goto error; /* Process table full! */
+ }
+
+ p = (Process*) erts_alloc_fnf(ERTS_ALC_T_PROC, sizeof(Process));
+ if (!p)
+ goto error; /* ENOMEM */
+
+ p_last = p_next;
+
+ erts_get_emu_time(&p->started);
+
+#ifdef ERTS_SMP
+ pix_lock = ERTS_PIX2PIXLOCK(p_next);
+ erts_pix_lock(pix_lock);
+#endif
+ ASSERT(!process_tab[p_next]);
+
+ process_tab[p_next] = p;
+ erts_smp_atomic_inc(&process_count);
+ p->id = make_internal_pid(p_serial << p_serial_shift | p_next);
+ if (p->id == ERTS_INVALID_PID) {
+ /* Do not use the invalid pid; change serial */
+ p_serial++;
+ p_serial &= p_serial_mask;
+ p->id = make_internal_pid(p_serial << p_serial_shift | p_next);
+ ASSERT(p->id != ERTS_INVALID_PID);
+ }
+ ASSERT(internal_pid_serial(p->id) <= (erts_use_r9_pids_ports
+ ? ERTS_MAX_PID_R9_SERIAL
+ : ERTS_MAX_PID_SERIAL));
+
+#ifdef ERTS_SMP
+ erts_proc_lock_init(p); /* All locks locked */
+ erts_pix_unlock(pix_lock);
+#endif
+
+ p->rstatus = P_FREE;
+ p->rcount = 0;
+
+ /*
+ * set p_next to the next available slot
+ */
+
+ p_prev = p_next;
+
+ while (1) {
+ p_next++;
+ if(p_next >= erts_max_processes) {
+ p_serial++;
+ p_serial &= p_serial_mask;
+ p_next = 0;
+ }
+
+ if (p_prev == p_next) {
+ p_next = -1;
+ break; /* Table full! */
+ }
+
+ if (!process_tab[p_next])
+ break; /* found a free slot */
+ }
+
+ error:
+
+ erts_smp_mtx_unlock(&proc_tab_mtx);
+
+ return p;
+
+}
+
+Eterm
+erl_create_process(Process* parent, /* Parent of process (default group leader). */
+ Eterm mod, /* Tagged atom for module. */
+ Eterm func, /* Tagged atom for function. */
+ Eterm args, /* Arguments for function (must be well-formed list). */
+ ErlSpawnOpts* so) /* Options for spawn. */
+{
+ ErtsRunQueue *rq;
+ Process *p;
+ Sint arity; /* Number of arguments. */
+#ifndef HYBRID
+ Uint arg_size; /* Size of arguments. */
+#endif
+ Uint sz; /* Needed words on heap. */
+ Uint heap_need; /* Size needed on heap. */
+ Eterm res = THE_NON_VALUE;
+
+#ifdef ERTS_SMP
+ erts_smp_proc_lock(parent, ERTS_PROC_LOCKS_ALL_MINOR);
+#endif
+
+#ifdef HYBRID
+ /*
+     * Copy the arguments to the global heap.
+     * Since global GC might occur, we want to do this before adding the
+ * new process to the process_tab.
+ */
+ BM_SWAP_TIMER(system,copy);
+ LAZY_COPY(parent,args);
+ BM_SWAP_TIMER(copy,system);
+ heap_need = 0;
+#endif /* HYBRID */
+ /*
+ * Check for errors.
+ */
+
+ if (is_not_atom(mod) || is_not_atom(func) || ((arity = list_length(args)) < 0)) {
+ so->error_code = BADARG;
+ goto error;
+ }
+ p = alloc_process(); /* All proc locks are locked by this thread
+ on success */
+ if (!p) {
+ erts_send_error_to_logger_str(parent->group_leader,
+ "Too many processes\n");
+ so->error_code = SYSTEM_LIMIT;
+ goto error;
+ }
+
+ processes_busy++;
+ BM_COUNT(processes_spawned);
+
+#ifndef HYBRID
+ BM_SWAP_TIMER(system,size);
+ arg_size = size_object(args);
+ BM_SWAP_TIMER(size,system);
+ heap_need = arg_size;
+#endif
+
+ p->flags = erts_default_process_flags;
+
+    /* The scheduler queue mutex should be locked when changing
+     * prio. In this case we don't have to lock it, since
+     * no one except us has access to the process.
+ */
+ if (so->flags & SPO_USE_ARGS) {
+ p->min_heap_size = so->min_heap_size;
+ p->prio = so->priority;
+ p->max_gen_gcs = so->max_gen_gcs;
+ } else {
+ p->min_heap_size = H_MIN_SIZE;
+ p->prio = PRIORITY_NORMAL;
+ p->max_gen_gcs = (Uint16) erts_smp_atomic_read(&erts_max_gen_gcs);
+ }
+ p->skipped = 0;
+ ASSERT(p->min_heap_size == erts_next_heap_size(p->min_heap_size, 0));
+
+ p->initial[INITIAL_MOD] = mod;
+ p->initial[INITIAL_FUN] = func;
+ p->initial[INITIAL_ARI] = (Uint) arity;
+
+ /*
+ * Must initialize binary lists here before copying binaries to process.
+ */
+ p->off_heap.mso = NULL;
+#ifndef HYBRID /* FIND ME! */
+ p->off_heap.funs = NULL;
+#endif
+ p->off_heap.externals = NULL;
+ p->off_heap.overhead = 0;
+
+ heap_need +=
+ IS_CONST(parent->group_leader) ? 0 : NC_HEAP_SIZE(parent->group_leader);
+
+ if (heap_need < p->min_heap_size) {
+ sz = heap_need = p->min_heap_size;
+ } else {
+ sz = erts_next_heap_size(heap_need, 0);
+ }
+
+#ifdef HIPE
+ hipe_init_process(&p->hipe);
+#ifdef ERTS_SMP
+ hipe_init_process_smp(&p->hipe_smp);
+#endif
+#endif
+
+ p->heap = (Eterm *) ERTS_HEAP_ALLOC(ERTS_ALC_T_HEAP, sizeof(Eterm)*sz);
+ p->old_hend = p->old_htop = p->old_heap = NULL;
+ p->high_water = p->heap;
+#ifdef INCREMENTAL
+ p->scan_top = p->high_water;
+#endif
+ p->gen_gcs = 0;
+ p->stop = p->hend = p->heap + sz;
+ p->htop = p->heap;
+ p->heap_sz = sz;
+ p->catches = 0;
+
+ p->bin_vheap_sz = H_MIN_SIZE;
+ p->bin_old_vheap_sz = H_MIN_SIZE;
+ p->bin_old_vheap = 0;
+
+ /* No need to initialize p->fcalls. */
+
+ p->current = p->initial+INITIAL_MOD;
+
+ p->i = (Eterm *) beam_apply;
+ p->cp = (Eterm *) beam_apply+1;
+
+ p->arg_reg = p->def_arg_reg;
+ p->max_arg_reg = sizeof(p->def_arg_reg)/sizeof(p->def_arg_reg[0]);
+ p->arg_reg[0] = mod;
+ p->arg_reg[1] = func;
+ BM_STOP_TIMER(system);
+ BM_MESSAGE(args,p,parent);
+ BM_START_TIMER(system);
+#ifdef HYBRID
+ p->arg_reg[2] = args;
+#ifdef INCREMENTAL
+ p->active = 0;
+ if (ptr_val(args) >= inc_fromspc && ptr_val(args) < inc_fromend)
+ INC_ACTIVATE(p);
+#endif
+#else
+ BM_SWAP_TIMER(system,copy);
+ p->arg_reg[2] = copy_struct(args, arg_size, &p->htop, &p->off_heap);
+ BM_MESSAGE_COPIED(arg_size);
+ BM_SWAP_TIMER(copy,system);
+#endif
+ p->arity = 3;
+
+ p->fvalue = NIL;
+ p->freason = EXC_NULL;
+ p->ftrace = NIL;
+ p->reds = 0;
+
+#ifdef ERTS_SMP
+ p->u.ptimer = NULL;
+#else
+ sys_memset(&p->u.tm, 0, sizeof(ErlTimer));
+#endif
+
+ p->reg = NULL;
+ p->nlinks = NULL;
+ p->monitors = NULL;
+ p->nodes_monitors = NULL;
+ p->suspend_monitors = NULL;
+
+ ASSERT(is_pid(parent->group_leader));
+
+ if (parent->group_leader == ERTS_INVALID_PID)
+ p->group_leader = p->id;
+ else {
+ /* Needs to be done after the heap has been set up */
+ p->group_leader =
+ IS_CONST(parent->group_leader)
+ ? parent->group_leader
+ : STORE_NC(&p->htop, &p->off_heap.externals, parent->group_leader);
+ }
+
+ erts_get_default_tracing(&p->trace_flags, &p->tracer_proc);
+
+ p->msg.first = NULL;
+ p->msg.last = &p->msg.first;
+ p->msg.save = &p->msg.first;
+ p->msg.len = 0;
+#ifdef ERTS_SMP
+ p->msg_inq.first = NULL;
+ p->msg_inq.last = &p->msg_inq.first;
+ p->msg_inq.len = 0;
+ p->bound_runq = NULL;
+#endif
+ p->bif_timers = NULL;
+ p->mbuf = NULL;
+ p->mbuf_sz = 0;
+ p->psd = NULL;
+ p->dictionary = NULL;
+ p->seq_trace_lastcnt = 0;
+ p->seq_trace_clock = 0;
+ SEQ_TRACE_TOKEN(p) = NIL;
+ p->parent = parent->id == ERTS_INVALID_PID ? NIL : parent->id;
+
+#ifdef HYBRID
+ p->rrma = NULL;
+ p->rrsrc = NULL;
+ p->nrr = 0;
+ p->rrsz = 0;
+#endif
+
+ INIT_HOLE_CHECK(p);
+#ifdef DEBUG
+ p->last_old_htop = NULL;
+#endif
+
+ if (IS_TRACED(parent)) {
+ if (parent->trace_flags & F_TRACE_SOS) {
+ p->trace_flags |= (parent->trace_flags & TRACEE_FLAGS);
+ p->tracer_proc = parent->tracer_proc;
+ }
+ if (ARE_TRACE_FLAGS_ON(parent, F_TRACE_PROCS)) {
+ trace_proc_spawn(parent, p->id, mod, func, args);
+ }
+ if (parent->trace_flags & F_TRACE_SOS1) { /* Overrides TRACE_CHILDREN */
+ p->trace_flags |= (parent->trace_flags & TRACEE_FLAGS);
+ p->tracer_proc = parent->tracer_proc;
+ p->trace_flags &= ~(F_TRACE_SOS1 | F_TRACE_SOS);
+ parent->trace_flags &= ~(F_TRACE_SOS1 | F_TRACE_SOS);
+ }
+ }
+
+ /*
+ * Check if this process should be initially linked to its parent.
+ */
+
+ if (so->flags & SPO_LINK) {
+#ifdef DEBUG
+ int ret;
+#endif
+ if (IS_TRACED_FL(parent, F_TRACE_PROCS)) {
+ trace_proc(parent, parent, am_link, p->id);
+ }
+
+#ifdef DEBUG
+ ret = erts_add_link(&(parent->nlinks), LINK_PID, p->id);
+ ASSERT(ret == 0);
+ ret = erts_add_link(&(p->nlinks), LINK_PID, parent->id);
+ ASSERT(ret == 0);
+#else
+ erts_add_link(&(parent->nlinks), LINK_PID, p->id);
+ erts_add_link(&(p->nlinks), LINK_PID, parent->id);
+#endif
+
+ if (IS_TRACED(parent)) {
+ if (parent->trace_flags & (F_TRACE_SOL|F_TRACE_SOL1)) {
+ p->trace_flags |= (parent->trace_flags & TRACEE_FLAGS);
+ p->tracer_proc = parent->tracer_proc; /* maybe steal */
+
+ if (parent->trace_flags & F_TRACE_SOL1) { /* maybe override */
+		    p->trace_flags &= ~(F_TRACE_SOL1 | F_TRACE_SOL);
+ parent->trace_flags &= ~(F_TRACE_SOL1 | F_TRACE_SOL);
+ }
+ }
+ }
+ }
+
+ /*
+ * Test whether this process should be initially monitored by its parent.
+ */
+ if (so->flags & SPO_MONITOR) {
+ Eterm mref;
+
+ mref = erts_make_ref(parent);
+ erts_add_monitor(&(parent->monitors), MON_ORIGIN, mref, p->id, NIL);
+ erts_add_monitor(&(p->monitors), MON_TARGET, mref, parent->id, NIL);
+ so->mref = mref;
+ }
+
+#ifdef HYBRID
+ /*
+ * Add process to the array of active processes.
+ */
+ ACTIVATE(p);
+ p->active_index = erts_num_active_procs++;
+ erts_active_procs[p->active_index] = p;
+#endif
+
+#ifdef ERTS_SMP
+ p->scheduler_data = NULL;
+ p->is_exiting = 0;
+ p->status_flags = 0;
+ p->runq_flags = 0;
+ p->suspendee = NIL;
+ p->pending_suspenders = NULL;
+ p->pending_exit.reason = THE_NON_VALUE;
+ p->pending_exit.bp = NULL;
+#endif
+
+#if !defined(NO_FPE_SIGNALS)
+ p->fp_exception = 0;
+#endif
+
+ /*
+ * Schedule process for execution.
+ */
+
+ if (!((so->flags & SPO_USE_ARGS) && so->scheduler))
+ rq = erts_get_runq_proc(parent);
+ else {
+ int ix = so->scheduler-1;
+ ASSERT(0 <= ix && ix < erts_no_run_queues);
+ rq = ERTS_RUNQ_IX(ix);
+ p->bound_runq = rq;
+ }
+
+ erts_smp_runq_lock(rq);
+
+#ifdef ERTS_SMP
+ p->run_queue = rq;
+#endif
+
+ p->status = P_WAITING;
+ internal_add_to_runq(rq, p);
+
+ erts_smp_runq_unlock(rq);
+
+ res = p->id;
+ erts_smp_proc_unlock(p, ERTS_PROC_LOCKS_ALL);
+
+ VERBOSE(DEBUG_PROCESSES, ("Created a new process: %T\n",p->id));
+
+ error:
+
+ erts_smp_proc_unlock(parent, ERTS_PROC_LOCKS_ALL_MINOR);
+
+ return res;
+}
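+
+/*
+ * A hedged caller sketch (not compiled in): spawning a child that is both
+ * linked to and monitored by its parent via erl_create_process(). The
+ * helper name is hypothetical; fields other than 'flags' are only read
+ * when SPO_USE_ARGS is set, so they are left untouched here.
+ */
+#if 0
+static Eterm
+example_spawn_link_monitor(Process *parent, Eterm mod, Eterm func, Eterm args)
+{
+    ErlSpawnOpts so;
+    Eterm res;
+
+    so.flags = SPO_LINK | SPO_MONITOR;  /* link and monitor the child */
+    res = erl_create_process(parent, mod, func, args, &so);
+    if (is_non_value(res)) {
+        /* so.error_code is BADARG or SYSTEM_LIMIT */
+    }
+    else {
+        /* res is the child pid; so.mref is the monitor reference */
+    }
+    return res;
+}
+#endif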
+
+/*
+ * Initiates a pseudo process that can be used
+ * for arithmetic BIFs.
+ */
+
+void erts_init_empty_process(Process *p)
+{
+ p->htop = NULL;
+ p->stop = NULL;
+ p->hend = NULL;
+ p->heap = NULL;
+ p->gen_gcs = 0;
+ p->max_gen_gcs = 0;
+ p->min_heap_size = 0;
+ p->status = P_RUNABLE;
+ p->gcstatus = P_RUNABLE;
+ p->rstatus = P_RUNABLE;
+ p->rcount = 0;
+ p->id = ERTS_INVALID_PID;
+ p->prio = PRIORITY_NORMAL;
+ p->reds = 0;
+ p->tracer_proc = NIL;
+ p->trace_flags = F_INITIAL_TRACE_FLAGS;
+ p->group_leader = ERTS_INVALID_PID;
+ p->flags = 0;
+ p->fvalue = NIL;
+ p->freason = EXC_NULL;
+ p->ftrace = NIL;
+ p->fcalls = 0;
+
+ p->bin_vheap_sz=H_MIN_SIZE;
+ p->bin_old_vheap_sz=H_MIN_SIZE;
+ p->bin_old_vheap = 0;
+#ifdef ERTS_SMP
+ p->u.ptimer = NULL;
+ p->bound_runq = NULL;
+#else
+ memset(&(p->u.tm), 0, sizeof(ErlTimer));
+#endif
+ p->next = NULL;
+ p->off_heap.mso = NULL;
+#ifndef HYBRID /* FIND ME! */
+ p->off_heap.funs = NULL;
+#endif
+ p->off_heap.externals = NULL;
+ p->off_heap.overhead = 0;
+ p->reg = NULL;
+ p->heap_sz = 0;
+ p->high_water = NULL;
+#ifdef INCREMENTAL
+ p->scan_top = NULL;
+#endif
+ p->old_hend = NULL;
+ p->old_htop = NULL;
+ p->old_heap = NULL;
+ p->mbuf = NULL;
+ p->mbuf_sz = 0;
+ p->psd = NULL;
+ p->monitors = NULL;
+ p->nlinks = NULL; /* List of links */
+ p->nodes_monitors = NULL;
+ p->suspend_monitors = NULL;
+ p->msg.first = NULL;
+ p->msg.last = &p->msg.first;
+ p->msg.save = &p->msg.first;
+ p->msg.len = 0;
+ p->bif_timers = NULL;
+ p->dictionary = NULL;
+ p->seq_trace_clock = 0;
+ p->seq_trace_lastcnt = 0;
+ p->seq_trace_token = NIL;
+ p->initial[0] = 0;
+ p->initial[1] = 0;
+ p->initial[2] = 0;
+ p->catches = 0;
+ p->cp = NULL;
+ p->i = NULL;
+ p->current = NULL;
+
+ /*
+ * Saved x registers.
+ */
+ p->arity = 0;
+ p->arg_reg = NULL;
+ p->max_arg_reg = 0;
+ p->def_arg_reg[0] = 0;
+ p->def_arg_reg[1] = 0;
+ p->def_arg_reg[2] = 0;
+ p->def_arg_reg[3] = 0;
+ p->def_arg_reg[4] = 0;
+ p->def_arg_reg[5] = 0;
+
+ p->parent = NIL;
+ p->started.tv_sec = 0;
+ p->started.tv_usec = 0;
+
+#ifdef HIPE
+ hipe_init_process(&p->hipe);
+#ifdef ERTS_SMP
+ hipe_init_process_smp(&p->hipe_smp);
+#endif
+#endif
+
+ ACTIVATE(p);
+
+#ifdef HYBRID
+ p->rrma = NULL;
+ p->rrsrc = NULL;
+ p->nrr = 0;
+ p->rrsz = 0;
+#endif
+ INIT_HOLE_CHECK(p);
+#ifdef DEBUG
+ p->last_old_htop = NULL;
+#endif
+
+
+#ifdef ERTS_SMP
+ p->scheduler_data = NULL;
+ p->is_exiting = 0;
+ p->status_flags = 0;
+ p->runq_flags = 0;
+ p->msg_inq.first = NULL;
+ p->msg_inq.last = &p->msg_inq.first;
+ p->msg_inq.len = 0;
+ p->suspendee = NIL;
+ p->pending_suspenders = NULL;
+ p->pending_exit.reason = THE_NON_VALUE;
+ p->pending_exit.bp = NULL;
+ erts_proc_lock_init(p);
+ erts_smp_proc_unlock(p, ERTS_PROC_LOCKS_ALL);
+ p->run_queue = ERTS_RUNQ_IX(0);
+#endif
+
+#if !defined(NO_FPE_SIGNALS)
+ p->fp_exception = 0;
+#endif
+
+}
+
+#ifdef DEBUG
+
+void
+erts_debug_verify_clean_empty_process(Process* p)
+{
+ /* Things that erts_cleanup_empty_process() will *not* cleanup... */
+ ASSERT(p->htop == NULL);
+ ASSERT(p->stop == NULL);
+ ASSERT(p->hend == NULL);
+ ASSERT(p->heap == NULL);
+ ASSERT(p->id == ERTS_INVALID_PID);
+ ASSERT(p->tracer_proc == NIL);
+ ASSERT(p->trace_flags == F_INITIAL_TRACE_FLAGS);
+ ASSERT(p->group_leader == ERTS_INVALID_PID);
+ ASSERT(p->next == NULL);
+ ASSERT(p->reg == NULL);
+ ASSERT(p->heap_sz == 0);
+ ASSERT(p->high_water == NULL);
+#ifdef INCREMENTAL
+ ASSERT(p->scan_top == NULL);
+#endif
+ ASSERT(p->old_hend == NULL);
+ ASSERT(p->old_htop == NULL);
+ ASSERT(p->old_heap == NULL);
+
+ ASSERT(p->monitors == NULL);
+ ASSERT(p->nlinks == NULL);
+ ASSERT(p->nodes_monitors == NULL);
+ ASSERT(p->suspend_monitors == NULL);
+ ASSERT(p->msg.first == NULL);
+ ASSERT(p->msg.len == 0);
+ ASSERT(p->bif_timers == NULL);
+ ASSERT(p->dictionary == NULL);
+ ASSERT(p->catches == 0);
+ ASSERT(p->cp == NULL);
+ ASSERT(p->i == NULL);
+ ASSERT(p->current == NULL);
+
+ ASSERT(p->parent == NIL);
+
+#ifdef ERTS_SMP
+ ASSERT(p->msg_inq.first == NULL);
+ ASSERT(p->msg_inq.len == 0);
+ ASSERT(p->suspendee == NIL);
+ ASSERT(p->pending_suspenders == NULL);
+ ASSERT(p->pending_exit.reason == THE_NON_VALUE);
+ ASSERT(p->pending_exit.bp == NULL);
+#endif
+
+ /* Thing that erts_cleanup_empty_process() cleans up */
+
+ ASSERT(p->off_heap.mso == NULL);
+#ifndef HYBRID /* FIND ME! */
+ ASSERT(p->off_heap.funs == NULL);
+#endif
+ ASSERT(p->off_heap.externals == NULL);
+ ASSERT(p->off_heap.overhead == 0);
+
+ ASSERT(p->mbuf == NULL);
+}
+
+#endif
+
+void
+erts_cleanup_empty_process(Process* p)
+{
+ ErlHeapFragment* mbufp;
+
+ /* We only check fields that are known to be used... */
+
+ erts_cleanup_offheap(&p->off_heap);
+ p->off_heap.mso = NULL;
+#ifndef HYBRID /* FIND ME! */
+ p->off_heap.funs = NULL;
+#endif
+ p->off_heap.externals = NULL;
+ p->off_heap.overhead = 0;
+
+ mbufp = p->mbuf;
+ while (mbufp) {
+ ErlHeapFragment *next = mbufp->next;
+ free_message_buffer(mbufp);
+ mbufp = next;
+ }
+ p->mbuf = NULL;
+#if defined(ERTS_ENABLE_LOCK_COUNT) && defined(ERTS_SMP)
+ erts_lcnt_proc_lock_destroy(p);
+#endif
+#ifdef DEBUG
+ erts_debug_verify_clean_empty_process(p);
+#endif
+}
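+
+/*
+ * A small usage sketch (not compiled in) of the empty-process API above:
+ * initialize a stack-allocated pseudo process, use it as a scratch
+ * context (e.g. for arithmetic), and clean it up again. Hypothetical
+ * helper; purely illustrative.
+ */
+#if 0
+static void
+example_empty_process_usage(void)
+{
+    Process p;
+    erts_init_empty_process(&p);
+    /* ... use &p as a scratch process for arithmetic BIFs ... */
+    erts_cleanup_empty_process(&p);
+}
+#endif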
+
+/*
+ * p must be the currently executing process.
+ */
+static void
+delete_process(Process* p)
+{
+ ErlMessage* mp;
+ ErlHeapFragment* bp;
+
+ VERBOSE(DEBUG_PROCESSES, ("Removing process: %T\n",p->id));
+
+ /* Cleanup psd */
+
+ if (p->psd)
+ erts_free(ERTS_ALC_T_PSD, p->psd);
+
+ /* Clean binaries and funs */
+ erts_cleanup_offheap(&p->off_heap);
+
+ /*
+ * The mso list should not be used anymore, but if it is, make sure that
+ * we'll notice.
+ */
+ p->off_heap.mso = (void *) 0x8DEFFACD;
+
+ if (p->arg_reg != p->def_arg_reg) {
+ erts_free(ERTS_ALC_T_ARG_REG, p->arg_reg);
+ }
+
+ /*
+ * Release heaps. Clobber contents in DEBUG build.
+ */
+
+
+#ifdef DEBUG
+ sys_memset(p->heap, DEBUG_BAD_BYTE, p->heap_sz*sizeof(Eterm));
+#endif
+
+#ifdef HIPE
+ hipe_delete_process(&p->hipe);
+#endif
+
+ ERTS_HEAP_FREE(ERTS_ALC_T_HEAP, (void*) p->heap, p->heap_sz*sizeof(Eterm));
+ if (p->old_heap != NULL) {
+
+#ifdef DEBUG
+ sys_memset(p->old_heap, DEBUG_BAD_BYTE,
+ (p->old_hend-p->old_heap)*sizeof(Eterm));
+#endif
+ ERTS_HEAP_FREE(ERTS_ALC_T_OLD_HEAP,
+ p->old_heap,
+ (p->old_hend-p->old_heap)*sizeof(Eterm));
+ }
+
+ /*
+ * Free all pending message buffers.
+ */
+ bp = p->mbuf;
+ while (bp != NULL) {
+ ErlHeapFragment* next_bp = bp->next;
+ free_message_buffer(bp);
+ bp = next_bp;
+ }
+
+ erts_erase_dicts(p);
+
+ /* free all pending messages */
+ mp = p->msg.first;
+ while(mp != NULL) {
+ ErlMessage* next_mp = mp->next;
+ if (mp->data.attached) {
+ if (is_value(mp->m[0]))
+ free_message_buffer(mp->data.heap_frag);
+ else {
+ if (is_not_nil(mp->m[1])) {
+ ErlHeapFragment *heap_frag;
+ heap_frag = (ErlHeapFragment *) mp->data.dist_ext->ext_endp;
+ erts_cleanup_offheap(&heap_frag->off_heap);
+ }
+ erts_free_dist_ext_copy(mp->data.dist_ext);
+ }
+ }
+ free_message(mp);
+ mp = next_mp;
+ }
+
+ ASSERT(!p->monitors);
+ ASSERT(!p->nlinks);
+ ASSERT(!p->nodes_monitors);
+ ASSERT(!p->suspend_monitors);
+
+ p->fvalue = NIL;
+
+#ifdef HYBRID
+ erts_active_procs[p->active_index] =
+ erts_active_procs[--erts_num_active_procs];
+ erts_active_procs[p->active_index]->active_index = p->active_index;
+#ifdef INCREMENTAL
+ if (INC_IS_ACTIVE(p))
+ INC_DEACTIVATE(p);
+#endif
+
+ if (p->rrma != NULL) {
+ erts_free(ERTS_ALC_T_ROOTSET,p->rrma);
+ erts_free(ERTS_ALC_T_ROOTSET,p->rrsrc);
+ }
+#endif
+
+}
+
+static ERTS_INLINE void
+set_proc_exiting(Process *p, Eterm reason, ErlHeapFragment *bp)
+{
+#ifdef ERTS_SMP
+ erts_pix_lock_t *pix_lock = ERTS_PID2PIXLOCK(p->id);
+ ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks(p) == ERTS_PROC_LOCKS_ALL);
+ /*
+     * You are required to hold all proc locks and the pix lock when going
+     * to status P_EXITING. This makes it sufficient to take any one lock
+     * when looking up a process (pid2proc()) to prevent the looked-up
+     * process from exiting until the lock has been released.
+ */
+
+ erts_pix_lock(pix_lock);
+ p->is_exiting = 1;
+#endif
+ p->status = P_EXITING;
+#ifdef ERTS_SMP
+ erts_pix_unlock(pix_lock);
+#endif
+ p->fvalue = reason;
+ if (bp)
+ erts_link_mbuf_to_proc(p, bp);
+ /*
+ * We used to set freason to EXC_EXIT here, but there is no need to
+ * save the stack trace since this process irreversibly is going to
+ * exit.
+ */
+ p->freason = EXTAG_EXIT;
+ KILL_CATCHES(p);
+ cancel_timer(p);
+ p->i = (Eterm *) beam_exit;
+}
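+
+/*
+ * An illustrative sketch (not compiled in) of the invariant described in
+ * set_proc_exiting() above: since P_EXITING is only entered with all proc
+ * locks (and the pix lock) held, holding any single proc lock across a
+ * lookup keeps the looked-up process from exiting. Hypothetical helper.
+ */
+#if 0
+static void
+example_safe_lookup(Eterm pid)
+{
+    Process *rp = erts_pid2proc(NULL, 0, pid, ERTS_PROC_LOCK_STATUS);
+    if (rp) {
+        /* rp cannot reach P_EXITING while we hold its status lock */
+        erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_STATUS);
+    }
+}
+#endif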
+
+
+#ifdef ERTS_SMP
+
+void
+erts_handle_pending_exit(Process *c_p, ErtsProcLocks locks)
+{
+ ErtsProcLocks xlocks;
+ ASSERT(is_value(c_p->pending_exit.reason));
+ ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p) == locks);
+ ERTS_SMP_LC_ASSERT(locks & ERTS_PROC_LOCK_MAIN);
+ ERTS_SMP_LC_ASSERT(c_p->status != P_EXITING);
+ ERTS_SMP_LC_ASSERT(c_p->status != P_FREE);
+
+ /* Ensure that all locks on c_p are locked before proceeding... */
+ if (locks == ERTS_PROC_LOCKS_ALL)
+ xlocks = 0;
+ else {
+ xlocks = ~locks & ERTS_PROC_LOCKS_ALL;
+ if (erts_smp_proc_trylock(c_p, xlocks) == EBUSY) {
+ erts_smp_proc_unlock(c_p, locks & ~ERTS_PROC_LOCK_MAIN);
+ erts_smp_proc_lock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
+ }
+ }
+
+ set_proc_exiting(c_p, c_p->pending_exit.reason, c_p->pending_exit.bp);
+ c_p->pending_exit.reason = THE_NON_VALUE;
+ c_p->pending_exit.bp = NULL;
+
+ if (xlocks)
+ erts_smp_proc_unlock(c_p, xlocks);
+}
+
+static void
+handle_pending_exiters(ErtsProcList *pnd_xtrs)
+{
+ ErtsProcList *plp = pnd_xtrs;
+ ErtsProcList *free_plp;
+ while (plp) {
+ Process *p = erts_pid2proc(NULL, 0, plp->pid, ERTS_PROC_LOCKS_ALL);
+ if (p) {
+ if (proclist_same(plp, p)
+ && !(p->status_flags & ERTS_PROC_SFLG_RUNNING)) {
+ ASSERT(p->status_flags & ERTS_PROC_SFLG_INRUNQ);
+ ASSERT(ERTS_PROC_PENDING_EXIT(p));
+ erts_handle_pending_exit(p, ERTS_PROC_LOCKS_ALL);
+ }
+ erts_smp_proc_unlock(p, ERTS_PROC_LOCKS_ALL);
+ }
+ free_plp = plp;
+ plp = plp->next;
+ proclist_destroy(free_plp);
+ }
+}
+
+static void
+save_pending_exiter(Process *p)
+{
+ ErtsProcList *plp;
+ ErtsRunQueue *rq;
+
+ ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_STATUS & erts_proc_lc_my_proc_locks(p));
+
+ rq = erts_get_runq_current(NULL);
+
+ plp = proclist_create(p);
+
+ erts_smp_runq_lock(rq);
+
+ plp->next = rq->procs.pending_exiters;
+ rq->procs.pending_exiters = plp;
+
+ erts_smp_runq_unlock(rq);
+
+}
+
+#endif
+
+/*
+ * This function delivers an EXIT message to a process
+ * which is trapping EXITs.
+ */
+
+static ERTS_INLINE void
+send_exit_message(Process *to, ErtsProcLocks *to_locksp,
+ Eterm exit_term, Uint term_size, Eterm token)
+{
+ if (token == NIL) {
+ Eterm* hp;
+ Eterm mess;
+ ErlHeapFragment* bp;
+ ErlOffHeap *ohp;
+
+ hp = erts_alloc_message_heap(term_size, &bp, &ohp, to, to_locksp);
+ mess = copy_struct(exit_term, term_size, &hp, ohp);
+ erts_queue_message(to, to_locksp, bp, mess, NIL);
+ } else {
+ ErlHeapFragment* bp;
+ Eterm* hp;
+ Eterm mess;
+ Eterm temp_token;
+ Uint sz_token;
+
+ ASSERT(is_tuple(token));
+ sz_token = size_object(token);
+ bp = new_message_buffer(term_size+sz_token);
+ hp = bp->mem;
+ mess = copy_struct(exit_term, term_size, &hp, &bp->off_heap);
+ /* the trace token must in this case be updated by the caller */
+ seq_trace_output(token, mess, SEQ_TRACE_SEND, to->id, NULL);
+ temp_token = copy_struct(token, sz_token, &hp, &bp->off_heap);
+ erts_queue_message(to, to_locksp, bp, mess, temp_token);
+ }
+}
+
+/*
+ *
+ * *** Exit signal behavior ***
+ *
+ * Exit signals are asynchronous (truly asynchronous in the
+ * SMP emulator). When the signal is received the receiver receives an
+ * 'EXIT' message if it is trapping exits; otherwise, it will either
+ * ignore the signal if the exit reason is normal, or go into an
+ * exiting state (status P_EXITING). When a process has gone into the
+ * exiting state it will not execute any more Erlang code, but it might
+ * take a while before it actually exits. The exit signal is considered
+ * received when the 'EXIT' message is put in the message queue, when
+ * the signal is dropped, or when the receiver enters the exiting
+ * state. The time it
+ * is in the exiting state before actually exiting is undefined (it
+ * might take a really long time under certain conditions). The
+ * receiver of the exit signal does not break links or trigger monitors
+ * until it actually exits.
+ *
+ * Exit signals and other signals, e.g. messages, have to be received
+ * by a receiver in the same order as sent by a sender.
+ *
+ *
+ *
+ * Exit signal implementation in the SMP emulator:
+ *
+ * If the receiver is trapping exits, the signal is transformed
+ * into an 'EXIT' message and sent as a normal message; if the
+ * reason is normal, the signal is dropped; otherwise, the process
+ * is to be exited. The interesting case is when the
+ * process is to be exited, and that is what is described below.
+ *
+ * If it is possible, the receiver is set in the exiting state straight
+ * away and we are done; otherwise, the sender places the exit reason
+ * in the pending_exit field of the process struct and if necessary
+ * adds the receiver to the run queue. It is typically not possible to
+ * immediately set a process in the exiting state if it is scheduled, or
+ * if we cannot get all locks on it without first releasing locks we
+ * already hold on it.
+ *
+ * The receiver will poll the pending_exit field when it reaches certain
+ * places during its execution. When it discovers the pending exit
+ * it will go into the exiting state. If the receiver wasn't
+ * scheduled when the pending exit was set, the first scheduler that
+ * schedules a new process will set the receiving process in the exiting
+ * state just before it schedules the next process.
+ *
+ * When the exit signal is placed in the pending_exit field, the signal
+ * is considered to be in transit on the Erlang level. The signal is
+ * actually in a kind of semi-transit state, since we have already
+ * determined how it will be received. If it is received, it will exit
+ * the process no matter what (the process may, however, exit by itself
+ * before reception of the exit signal). The signal is received when it is
+ * discovered in the pending_exit field by the receiver.
+ *
+ * The receiver has to poll the pending_exit field at least before:
+ * - moving messages from the message in queue to the private message
+ *   queue. This is in order to preserve signal order.
+ * - unlink. Otherwise the process might get exited on a link that
+ *   has been removed.
+ * - changing the trap_exit flag to true. This is in order to simplify the
+ * implementation; otherwise, we would have to transform the signal
+ * into an 'EXIT' message when setting the trap_exit flag to true. We
+ * would also have to maintain a queue of exit signals in transit.
+ * - being scheduled in or out.
+ */
+
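+/*
+ * A minimal sketch (not compiled in) of the receiver-side polling
+ * described above: at each of the listed points, check the pending_exit
+ * field and, if an exit is pending, enter the exiting state. Mirrors the
+ * SMP-only code below; lock handling is elided and the helper name is
+ * hypothetical.
+ */
+#if 0
+static void
+example_poll_pending_exit(Process *c_p, ErtsProcLocks locks)
+{
+    if (ERTS_PROC_PENDING_EXIT(c_p))
+        erts_handle_pending_exit(c_p, locks); /* goes to P_EXITING */
+}
+#endif
+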
+static ERTS_INLINE int
+send_exit_signal(Process *c_p, /* current process if and only
+ if reason is stored on it */
+ Eterm from, /* Id of sender of signal */
+ Process *rp, /* receiving process */
+ ErtsProcLocks *rp_locks,/* current locks on receiver */
+ Eterm reason, /* exit reason */
+		 Eterm exit_tuple,      /* Prebuilt exit tuple
+ or THE_NON_VALUE */
+ Uint exit_tuple_sz, /* Size of prebuilt exit tuple
+ (if exit_tuple != THE_NON_VALUE) */
+ Eterm token, /* token */
+ Process *token_update, /* token updater */
+ Uint32 flags /* flags */
+ )
+{
+ Eterm rsn = reason == am_kill ? am_killed : reason;
+
+ ERTS_SMP_LC_ASSERT(*rp_locks == erts_proc_lc_my_proc_locks(rp));
+ ERTS_SMP_LC_ASSERT((*rp_locks & ERTS_PROC_LOCKS_XSIG_SEND)
+ == ERTS_PROC_LOCKS_XSIG_SEND);
+
+ ASSERT(reason != THE_NON_VALUE);
+
+ if (ERTS_PROC_IS_TRAPPING_EXITS(rp)
+ && (reason != am_kill || (flags & ERTS_XSIG_FLG_IGN_KILL))) {
+ if (is_not_nil(token) && token_update)
+ seq_trace_update_send(token_update);
+ if (is_value(exit_tuple))
+ send_exit_message(rp, rp_locks, exit_tuple, exit_tuple_sz, token);
+ else
+ erts_deliver_exit_message(from, rp, rp_locks, rsn, token);
+ return 1; /* Receiver will get a message */
+ }
+ else if (reason != am_normal || (flags & ERTS_XSIG_FLG_NO_IGN_NORMAL)) {
+#ifdef ERTS_SMP
+ if (!ERTS_PROC_PENDING_EXIT(rp) && !rp->is_exiting) {
+ ASSERT(rp->status != P_EXITING);
+ ASSERT(rp->status != P_FREE);
+ ASSERT(!rp->pending_exit.bp);
+
+ if (rp == c_p && (*rp_locks & ERTS_PROC_LOCK_MAIN)) {
+ /* Ensure that all locks on c_p are locked before
+ proceeding... */
+ if (*rp_locks != ERTS_PROC_LOCKS_ALL) {
+ ErtsProcLocks need_locks = (~(*rp_locks)
+ & ERTS_PROC_LOCKS_ALL);
+ if (erts_smp_proc_trylock(c_p, need_locks) == EBUSY) {
+ erts_smp_proc_unlock(c_p,
+ *rp_locks & ~ERTS_PROC_LOCK_MAIN);
+ erts_smp_proc_lock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
+ }
+ *rp_locks = ERTS_PROC_LOCKS_ALL;
+ }
+ set_proc_exiting(c_p, rsn, NULL);
+ }
+ else if (!(rp->status_flags & ERTS_PROC_SFLG_RUNNING)) {
+ /* Process not running ... */
+ ErtsProcLocks need_locks = ~(*rp_locks) & ERTS_PROC_LOCKS_ALL;
+ if (need_locks
+ && erts_smp_proc_trylock(rp, need_locks) == EBUSY) {
+		    /* ... but we haven't got all locks on it ... */
+ save_pending_exiter(rp);
+ /*
+		     * The pending exit will be discovered when the next
+		     * process is scheduled in.
+ */
+ goto set_pending_exit;
+ }
+ else {
+ /* ...and we have all locks on it... */
+ *rp_locks = ERTS_PROC_LOCKS_ALL;
+ set_proc_exiting(rp,
+ (is_immed(rsn)
+ ? rsn
+ : copy_object(rsn, rp)),
+ NULL);
+ }
+ }
+ else { /* Process running... */
+
+ /*
+ * The pending exit will be discovered when the process
+ * is scheduled out if not discovered earlier.
+ */
+
+ set_pending_exit:
+ if (is_immed(rsn)) {
+ rp->pending_exit.reason = rsn;
+ }
+ else {
+ Eterm *hp;
+ Uint sz = size_object(rsn);
+ ErlHeapFragment *bp = new_message_buffer(sz);
+
+ hp = &bp->mem[0];
+ rp->pending_exit.reason = copy_struct(rsn,
+ sz,
+ &hp,
+ &bp->off_heap);
+ rp->pending_exit.bp = bp;
+ }
+ ASSERT(ERTS_PROC_PENDING_EXIT(rp));
+ }
+ if (!(rp->status_flags
+ & (ERTS_PROC_SFLG_INRUNQ|ERTS_PROC_SFLG_RUNNING)))
+ erts_add_to_runq(rp);
+ }
+ /* else:
+ *
+ * The receiver already has a pending exit (or is exiting)
+ * so we drop this signal.
+ *
+ * NOTE: dropping this exit signal is based on the assumption
+ * that the receiver *will* exit; either on the pending
+ * exit or by itself before seeing the pending exit.
+ */
+#else /* !ERTS_SMP */
+ if (c_p == rp) {
+ rp->status = P_EXITING;
+ c_p->fvalue = rsn;
+ }
+ else if (rp->status != P_EXITING) { /* No recursive process exits /PaN */
+ Eterm old_status = rp->status;
+ set_proc_exiting(rp,
+ is_immed(rsn) ? rsn : copy_object(rsn, rp),
+ NULL);
+ ACTIVATE(rp);
+ if (old_status != P_RUNABLE && old_status != P_RUNNING)
+ erts_add_to_runq(rp);
+ }
+#endif
+ return -1; /* Receiver will exit */
+ }
+
+ return 0; /* Receiver unaffected */
+}
+
+
+int
+erts_send_exit_signal(Process *c_p,
+ Eterm from,
+ Process *rp,
+ ErtsProcLocks *rp_locks,
+ Eterm reason,
+ Eterm token,
+ Process *token_update,
+ Uint32 flags)
+{
+ return send_exit_signal(c_p,
+ from,
+ rp,
+ rp_locks,
+ reason,
+ THE_NON_VALUE,
+ 0,
+ token,
+ token_update,
+ flags);
+}
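+
+/*
+ * A hedged caller sketch (not compiled in): how the three return values
+ * of erts_send_exit_signal() are interpreted, per the comments in
+ * send_exit_signal() above. The helper name is hypothetical.
+ */
+#if 0
+static void
+example_send_exit(Process *c_p, Process *rp, ErtsProcLocks *rp_locks)
+{
+    int res = erts_send_exit_signal(c_p, c_p->id, rp, rp_locks,
+                                    am_normal, NIL, NULL, 0);
+    if (res > 0) {
+        /* receiver traps exits and got an 'EXIT' message */
+    }
+    else if (res < 0) {
+        /* receiver will exit */
+    }
+    else {
+        /* receiver unaffected (normal reason, not trapping exits) */
+    }
+}
+#endif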
+
+typedef struct {
+ Eterm reason;
+ Process *p;
+} ExitMonitorContext;
+
+static void doit_exit_monitor(ErtsMonitor *mon, void *vpcontext)
+{
+ ExitMonitorContext *pcontext = vpcontext;
+ DistEntry *dep;
+ ErtsMonitor *rmon;
+ Process *rp;
+
+ if (mon->type == MON_ORIGIN) {
+ /* We are monitoring someone else, we need to demonitor that one.. */
+ if (is_atom(mon->pid)) { /* remote by name */
+ ASSERT(is_node_name_atom(mon->pid));
+ dep = erts_sysname_to_connected_dist_entry(mon->pid);
+ if (dep) {
+ erts_smp_de_links_lock(dep);
+ rmon = erts_remove_monitor(&(dep->monitors), mon->ref);
+ erts_smp_de_links_unlock(dep);
+ if (rmon) {
+ ErtsDSigData dsd;
+ int code = erts_dsig_prepare(&dsd, dep, NULL,
+ ERTS_DSP_NO_LOCK, 0);
+ if (code == ERTS_DSIG_PREP_CONNECTED) {
+ code = erts_dsig_send_demonitor(&dsd,
+ rmon->pid,
+ mon->name,
+ mon->ref,
+ 1);
+ ASSERT(code == ERTS_DSIG_SEND_OK);
+ }
+ erts_destroy_monitor(rmon);
+ }
+ erts_deref_dist_entry(dep);
+ }
+ } else {
+ ASSERT(is_pid(mon->pid));
+ if (is_internal_pid(mon->pid)) { /* local by pid or name */
+ rp = erts_pid2proc(NULL, 0, mon->pid, ERTS_PROC_LOCK_LINK);
+ if (!rp) {
+ goto done;
+ }
+ rmon = erts_remove_monitor(&(rp->monitors),mon->ref);
+ erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK);
+ if (rmon == NULL) {
+ goto done;
+ }
+ erts_destroy_monitor(rmon);
+ } else { /* remote by pid */
+ ASSERT(is_external_pid(mon->pid));
+ dep = external_pid_dist_entry(mon->pid);
+ ASSERT(dep != NULL);
+ if (dep) {
+ erts_smp_de_links_lock(dep);
+ rmon = erts_remove_monitor(&(dep->monitors), mon->ref);
+ erts_smp_de_links_unlock(dep);
+ if (rmon) {
+ ErtsDSigData dsd;
+ int code = erts_dsig_prepare(&dsd, dep, NULL,
+ ERTS_DSP_NO_LOCK, 0);
+ if (code == ERTS_DSIG_PREP_CONNECTED) {
+ code = erts_dsig_send_demonitor(&dsd,
+ rmon->pid,
+ mon->pid,
+ mon->ref,
+ 1);
+ ASSERT(code == ERTS_DSIG_SEND_OK);
+ }
+ erts_destroy_monitor(rmon);
+ }
+ }
+ }
+ }
+ } else { /* type == MON_TARGET */
+ ASSERT(mon->type == MON_TARGET);
+ ASSERT(is_pid(mon->pid) || is_internal_port(mon->pid));
+ if (is_internal_port(mon->pid)) {
+ Port *prt = erts_id2port(mon->pid, NULL, 0);
+ if (prt == NULL) {
+ goto done;
+ }
+ erts_fire_port_monitor(prt, mon->ref);
+ erts_port_release(prt);
+ } else if (is_internal_pid(mon->pid)) {/* local by name or pid */
+ Eterm watched;
+ Eterm lhp[3];
+ ErtsProcLocks rp_locks = (ERTS_PROC_LOCK_LINK
+ | ERTS_PROC_LOCKS_MSG_SEND);
+ rp = erts_pid2proc(NULL, 0, mon->pid, rp_locks);
+ if (rp == NULL) {
+ goto done;
+ }
+ rmon = erts_remove_monitor(&(rp->monitors),mon->ref);
+ if (rmon) {
+ erts_destroy_monitor(rmon);
+ watched = (is_atom(mon->name)
+ ? TUPLE2(lhp, mon->name,
+ erts_this_dist_entry->sysname)
+ : pcontext->p->id);
+ erts_queue_monitor_message(rp, &rp_locks, mon->ref, am_process,
+ watched, pcontext->reason);
+ }
+ /* else: demonitor while we exited, i.e. do nothing... */
+ erts_smp_proc_unlock(rp, rp_locks);
+ } else { /* external by pid or name */
+ ASSERT(is_external_pid(mon->pid));
+ dep = external_pid_dist_entry(mon->pid);
+ ASSERT(dep != NULL);
+ if (dep) {
+ erts_smp_de_links_lock(dep);
+ rmon = erts_remove_monitor(&(dep->monitors), mon->ref);
+ erts_smp_de_links_unlock(dep);
+ if (rmon) {
+ ErtsDSigData dsd;
+ int code = erts_dsig_prepare(&dsd, dep, NULL,
+ ERTS_DSP_NO_LOCK, 0);
+ if (code == ERTS_DSIG_PREP_CONNECTED) {
+ code = erts_dsig_send_m_exit(&dsd,
+ mon->pid,
+ (rmon->name != NIL
+ ? rmon->name
+ : rmon->pid),
+ mon->ref,
+ pcontext->reason);
+ ASSERT(code == ERTS_DSIG_SEND_OK);
+ }
+ erts_destroy_monitor(rmon);
+ }
+ }
+ }
+ }
+ done:
+    /* Since the monitors have already been removed from the process,
+       distribution operations will not cause monitors to disappear,
+       so we can safely delete this one. */
+
+ erts_destroy_monitor(mon);
+}
+
+typedef struct {
+ Process *p;
+ Eterm reason;
+ Eterm exit_tuple;
+ Uint exit_tuple_sz;
+} ExitLinkContext;
+
+static void doit_exit_link(ErtsLink *lnk, void *vpcontext)
+{
+ ExitLinkContext *pcontext = vpcontext;
+ /* Unpack context, it's readonly */
+ Process *p = pcontext->p;
+ Eterm reason = pcontext->reason;
+ Eterm exit_tuple = pcontext->exit_tuple;
+ Uint exit_tuple_sz = pcontext->exit_tuple_sz;
+ Eterm item = lnk->pid;
+ ErtsLink *rlnk;
+ DistEntry *dep;
+ Process *rp;
+
+ switch(lnk->type) {
+ case LINK_PID:
+ if(is_internal_port(item)) {
+ Port *prt = erts_id2port(item, NULL, 0);
+ if (prt) {
+ rlnk = erts_remove_link(&prt->nlinks, p->id);
+ if (rlnk)
+ erts_destroy_link(rlnk);
+ erts_do_exit_port(prt, p->id, reason);
+ erts_port_release(prt);
+ }
+ }
+ else if(is_external_port(item)) {
+ erts_dsprintf_buf_t *dsbufp = erts_create_logger_dsbuf();
+ erts_dsprintf(dsbufp,
+ "Erroneous link between %T and external port %T "
+ "found\n",
+ p->id,
+ item);
+ erts_send_error_to_logger_nogl(dsbufp);
+ ASSERT(0); /* It isn't possible to setup such a link... */
+ }
+ else if (is_internal_pid(item)) {
+ ErtsProcLocks rp_locks = (ERTS_PROC_LOCK_LINK
+ | ERTS_PROC_LOCKS_XSIG_SEND);
+ rp = erts_pid2proc(NULL, 0, item, rp_locks);
+ if (rp) {
+ rlnk = erts_remove_link(&(rp->nlinks), p->id);
+ /* If rlnk == NULL, we got unlinked while exiting,
+ i.e., do nothing... */
+ if (rlnk) {
+ int xres;
+ erts_destroy_link(rlnk);
+ xres = send_exit_signal(NULL,
+ p->id,
+ rp,
+ &rp_locks,
+ reason,
+ exit_tuple,
+ exit_tuple_sz,
+ SEQ_TRACE_TOKEN(p),
+ p,
+ ERTS_XSIG_FLG_IGN_KILL);
+		if (xres >= 0 && IS_TRACED_FL(rp, F_TRACE_PROCS)) {
+		    /* We didn't exit the process and it is traced */
+		    trace_proc(p, rp, am_getting_unlinked, p->id);
+		}
+ }
+ ASSERT(rp != p);
+ erts_smp_proc_unlock(rp, rp_locks);
+ }
+ }
+ else if (is_external_pid(item)) {
+ dep = external_pid_dist_entry(item);
+ if(dep != erts_this_dist_entry) {
+ ErtsDSigData dsd;
+ int code;
+ ErtsDistLinkData dld;
+ erts_remove_dist_link(&dld, p->id, item, dep);
+ erts_smp_proc_lock(p, ERTS_PROC_LOCK_MAIN);
+ code = erts_dsig_prepare(&dsd, dep, p, ERTS_DSP_NO_LOCK, 0);
+ if (code == ERTS_DSIG_PREP_CONNECTED) {
+ code = erts_dsig_send_exit_tt(&dsd, p->id, item, reason,
+ SEQ_TRACE_TOKEN(p));
+ ASSERT(code == ERTS_DSIG_SEND_OK);
+ }
+ erts_smp_proc_unlock(p, ERTS_PROC_LOCK_MAIN);
+ erts_destroy_dist_link(&dld);
+ }
+ }
+ break;
+ case LINK_NODE:
+ ASSERT(is_node_name_atom(item));
+ dep = erts_sysname_to_connected_dist_entry(item);
+ if(dep) {
+ /* dist entries have node links in a separate structure to
+ avoid confusion */
+ erts_smp_de_links_lock(dep);
+ rlnk = erts_remove_link(&(dep->node_links), p->id);
+ erts_smp_de_links_unlock(dep);
+ if (rlnk)
+ erts_destroy_link(rlnk);
+ erts_deref_dist_entry(dep);
+ } else {
+#ifndef ERTS_SMP
+	    /* XXX Is this possible? Shouldn't this link
+	       have been removed previously if the node
+	       had been disconnected? */
+ ASSERT(0);
+#endif
+	    /* This is possible when SMP support is enabled, and the
+	       dist port and the process exit simultaneously. */
+ }
+ break;
+
+ default:
+ erl_exit(1, "bad type in link list\n");
+ break;
+ }
+ erts_destroy_link(lnk);
+}
+
+static void
+resume_suspend_monitor(ErtsSuspendMonitor *smon, void *vc_p)
+{
+ Process *suspendee = erts_pid2proc((Process *) vc_p, ERTS_PROC_LOCK_MAIN,
+ smon->pid, ERTS_PROC_LOCK_STATUS);
+ if (suspendee) {
+ if (smon->active)
+ resume_process(suspendee);
+ erts_smp_proc_unlock(suspendee, ERTS_PROC_LOCK_STATUS);
+ }
+ erts_destroy_suspend_monitor(smon);
+}
+
+static void
+continue_exit_process(Process *p
+#ifdef ERTS_SMP
+ , erts_pix_lock_t *pix_lock
+#endif
+ );
+
+/* This function finishes a process and propagates exit messages; it is
+   called by process_main when a process dies. */
+void
+erts_do_exit_process(Process* p, Eterm reason)
+{
+#ifdef ERTS_SMP
+ erts_pix_lock_t *pix_lock = ERTS_PID2PIXLOCK(p->id);
+#endif
+
+ p->arity = 0; /* No live registers */
+ p->fvalue = reason;
+
+#ifdef ERTS_SMP
+ ERTS_SMP_CHK_HAVE_ONLY_MAIN_PROC_LOCK(p);
+ /* By locking all locks (main lock is already locked) when going
+ to status P_EXITING, it is enough to take any lock when
+ looking up a process (erts_pid2proc()) to prevent the looked up
+ process from exiting until the lock has been released. */
+ erts_smp_proc_lock(p, ERTS_PROC_LOCKS_ALL_MINOR);
+#endif
+
+ if (erts_system_profile_flags.runnable_procs && (p->status != P_WAITING)) {
+ profile_runnable_proc(p, am_inactive);
+ }
+
+#ifdef ERTS_SMP
+ erts_pix_lock(pix_lock);
+ p->is_exiting = 1;
+#endif
+
+ p->status = P_EXITING;
+
+#ifdef ERTS_SMP
+ erts_pix_unlock(pix_lock);
+
+ if (ERTS_PROC_PENDING_EXIT(p)) {
+ /* Process exited before pending exit was received... */
+ p->pending_exit.reason = THE_NON_VALUE;
+ if (p->pending_exit.bp) {
+ free_message_buffer(p->pending_exit.bp);
+ p->pending_exit.bp = NULL;
+ }
+ }
+
+ cancel_suspend_of_suspendee(p, ERTS_PROC_LOCKS_ALL);
+
+ ERTS_SMP_MSGQ_MV_INQ2PRIVQ(p);
+#endif
+
+ if (IS_TRACED_FL(p,F_TRACE_PROCS))
+ trace_proc(p, p, am_exit, reason);
+
+ erts_trace_check_exiting(p->id);
+
+ ASSERT((p->trace_flags & F_INITIAL_TRACE_FLAGS) == F_INITIAL_TRACE_FLAGS);
+
+ cancel_timer(p); /* Always cancel timer just in case */
+
+ /*
+ * The timer of this process can *not* be used anymore. The field used
+ * for the timer is now used for misc exiting data.
+ */
+ p->u.exit_data = NULL;
+
+ if (p->bif_timers)
+ erts_cancel_bif_timers(p, ERTS_PROC_LOCKS_ALL);
+
+#ifdef ERTS_SMP
+ if (p->flags & F_HAVE_BLCKD_MSCHED)
+ erts_block_multi_scheduling(p, ERTS_PROC_LOCKS_ALL, 0, 1);
+#endif
+
+ erts_smp_proc_unlock(p, ERTS_PROC_LOCKS_ALL_MINOR);
+
+#ifdef ERTS_SMP
+ continue_exit_process(p, pix_lock);
+#else
+ continue_exit_process(p);
+#endif
+}
+
+void
+erts_continue_exit_process(Process *c_p)
+{
+#ifdef ERTS_SMP
+ continue_exit_process(c_p, ERTS_PID2PIXLOCK(c_p->id));
+#else
+ continue_exit_process(c_p);
+#endif
+}
+
+static void
+continue_exit_process(Process *p
+#ifdef ERTS_SMP
+ , erts_pix_lock_t *pix_lock
+#endif
+ )
+{
+ ErtsLink* lnk;
+ ErtsMonitor *mon;
+ ErtsProcLocks curr_locks = ERTS_PROC_LOCK_MAIN;
+ Eterm reason = p->fvalue;
+ DistEntry *dep;
+ struct saved_calls *scb;
+#ifdef DEBUG
+ int yield_allowed = 1;
+#endif
+
+ ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN == erts_proc_lc_my_proc_locks(p));
+
+#ifdef DEBUG
+ erts_smp_proc_lock(p, ERTS_PROC_LOCK_STATUS);
+ ASSERT(p->status == P_EXITING);
+ erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
+#endif
+
+ if (p->flags & F_USING_DB) {
+ if (erts_db_process_exiting(p, ERTS_PROC_LOCK_MAIN))
+ goto yield;
+ p->flags &= ~F_USING_DB;
+ }
+
+ if (p->flags & F_USING_DDLL) {
+ erts_ddll_proc_dead(p, ERTS_PROC_LOCK_MAIN);
+ p->flags &= ~F_USING_DDLL;
+ }
+
+ if (p->nodes_monitors) {
+ erts_delete_nodes_monitors(p, ERTS_PROC_LOCK_MAIN);
+ p->nodes_monitors = NULL;
+ }
+
+
+ if (p->suspend_monitors) {
+ erts_sweep_suspend_monitors(p->suspend_monitors,
+ resume_suspend_monitor,
+ p);
+ p->suspend_monitors = NULL;
+ }
+
+ /*
+     * The registered name *should* be the last "erlang resource" to
+     * be cleaned up.
+ */
+ if (p->reg) {
+ (void) erts_unregister_name(p, ERTS_PROC_LOCK_MAIN, NULL, THE_NON_VALUE);
+ ASSERT(!p->reg);
+ }
+
+ erts_smp_proc_lock(p, ERTS_PROC_LOCKS_ALL_MINOR);
+ curr_locks = ERTS_PROC_LOCKS_ALL;
+
+ /*
+ * From this point on we are no longer allowed to yield
+ * this process.
+ */
+#ifdef DEBUG
+ yield_allowed = 0;
+#endif
+
+ {
+ int pix;
+ /* Do *not* use erts_get_runq_proc() */
+ ErtsRunQueue *rq;
+ rq = erts_get_runq_current(ERTS_GET_SCHEDULER_DATA_FROM_PROC(p));
+
+ ASSERT(internal_pid_index(p->id) < erts_max_processes);
+ pix = internal_pid_index(p->id);
+
+ erts_smp_mtx_lock(&proc_tab_mtx);
+ erts_smp_runq_lock(rq);
+
+#ifdef ERTS_SMP
+ erts_pix_lock(pix_lock);
+
+ ASSERT(p->scheduler_data);
+ ASSERT(p->scheduler_data->current_process == p);
+ ASSERT(p->scheduler_data->free_process == NULL);
+
+ p->scheduler_data->current_process = NULL;
+ p->scheduler_data->free_process = p;
+ p->status_flags = 0;
+#endif
+ process_tab[pix] = NULL; /* Time of death! */
+ ASSERT(erts_smp_atomic_read(&process_count) > 0);
+ erts_smp_atomic_dec(&process_count);
+
+#ifdef ERTS_SMP
+ erts_pix_unlock(pix_lock);
+#endif
+ erts_smp_runq_unlock(rq);
+
+ if (p_next < 0) {
+ if (p_last >= p_next) {
+ p_serial++;
+ p_serial &= p_serial_mask;
+ }
+ p_next = pix;
+ }
+
+ ERTS_MAYBE_SAVE_TERMINATING_PROCESS(p);
+
+ erts_smp_mtx_unlock(&proc_tab_mtx);
+ }
+
+ /*
+ * All "erlang resources" have to be deallocated before this point,
+ * e.g. registered name, so monitoring and linked processes can
+ * be sure that all interesting resources have been deallocated
+ * when the monitors and/or links hit.
+ */
+
+ mon = p->monitors;
+ p->monitors = NULL; /* to avoid recursive deletion during traversal */
+
+ lnk = p->nlinks;
+ p->nlinks = NULL;
+ p->status = P_FREE;
+ dep = ((p->flags & F_DISTRIBUTION)
+ ? ERTS_PROC_SET_DIST_ENTRY(p, ERTS_PROC_LOCKS_ALL, NULL)
+ : NULL);
+ scb = ERTS_PROC_SET_SAVED_CALLS_BUF(p, ERTS_PROC_LOCKS_ALL, NULL);
+
+ erts_smp_proc_unlock(p, ERTS_PROC_LOCKS_ALL);
+ processes_busy--;
+
+ if (dep) {
+ erts_do_net_exits(dep, reason);
+ if(dep)
+ erts_deref_dist_entry(dep);
+ }
+
+ /*
+ * Pre-build the EXIT tuple if there are any links.
+ */
+ if (lnk) {
+ Eterm tmp_heap[4];
+ Eterm exit_tuple;
+ Uint exit_tuple_sz;
+ Eterm* hp;
+
+ hp = &tmp_heap[0];
+
+ exit_tuple = TUPLE3(hp, am_EXIT, p->id, reason);
+
+ exit_tuple_sz = size_object(exit_tuple);
+
+ {
+ ExitLinkContext context = {p, reason, exit_tuple, exit_tuple_sz};
+ erts_sweep_links(lnk, &doit_exit_link, &context);
+ }
+ }
+
+ {
+ ExitMonitorContext context = {reason, p};
+ erts_sweep_monitors(mon,&doit_exit_monitor,&context);
+ }
+
+ if (scb)
+ erts_free(ERTS_ALC_T_CALLS_BUF, (void *) scb);
+
+ delete_process(p);
+
+ erts_smp_proc_lock(p, ERTS_PROC_LOCK_MAIN);
+ ERTS_SMP_CHK_HAVE_ONLY_MAIN_PROC_LOCK(p);
+
+ return;
+
+ yield:
+
+#ifdef DEBUG
+ ASSERT(yield_allowed);
+#endif
+
+ ERTS_SMP_LC_ASSERT(curr_locks == erts_proc_lc_my_proc_locks(p));
+ ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN & curr_locks);
+
+ ASSERT(p->status == P_EXITING);
+
+ p->i = (Eterm *) beam_continue_exit;
+
+ if (!(curr_locks & ERTS_PROC_LOCK_STATUS)) {
+ erts_smp_proc_lock(p, ERTS_PROC_LOCK_STATUS);
+ curr_locks |= ERTS_PROC_LOCK_STATUS;
+ }
+
+ erts_add_to_runq(p);
+
+ if (curr_locks != ERTS_PROC_LOCK_MAIN)
+ erts_smp_proc_unlock(p, ~ERTS_PROC_LOCK_MAIN & curr_locks);
+
+ ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN == erts_proc_lc_my_proc_locks(p));
+
+}
+
+/* Callback for process timeout */
+static void
+timeout_proc(Process* p)
+{
+ p->i = (Eterm *) p->def_arg_reg[0];
+ p->flags |= F_TIMO;
+ p->flags &= ~F_INSLPQUEUE;
+
+ if (p->status == P_WAITING)
+ erts_add_to_runq(p);
+ if (p->status == P_SUSPENDED)
+ p->rstatus = P_RUNABLE; /* MUST set resume status to runnable */
+}
+
+
+void
+cancel_timer(Process* p)
+{
+ ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN & erts_proc_lc_my_proc_locks(p));
+ p->flags &= ~(F_INSLPQUEUE|F_TIMO);
+#ifdef ERTS_SMP
+ erts_cancel_smp_ptimer(p->u.ptimer);
+#else
+ erl_cancel_timer(&p->u.tm);
+#endif
+}
+
+/*
+ * Insert a process into the time queue, with a timeout 'timeout' in ms.
+ */
+void
+set_timer(Process* p, Uint timeout)
+{
+ ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN & erts_proc_lc_my_proc_locks(p));
+
+    /* Check for the special case timeout = 0: do not add to the time queue. */
+ if (timeout == 0) {
+ p->flags |= F_TIMO;
+ return;
+ }
+ p->flags |= F_INSLPQUEUE;
+ p->flags &= ~F_TIMO;
+
+#ifdef ERTS_SMP
+ erts_create_smp_ptimer(&p->u.ptimer,
+ p->id,
+ (ErlTimeoutProc) timeout_proc,
+ timeout);
+#else
+ erl_set_timer(&p->u.tm,
+ (ErlTimeoutProc) timeout_proc,
+ NULL,
+ (void*) p,
+ timeout);
+#endif
+}
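+
+/*
+ * A small usage sketch (not compiled in) of the timer helpers above: arm
+ * a timeout and, when the process runs again, check F_TIMO to see whether
+ * the timer fired. Lock handling is elided; the helper is hypothetical.
+ */
+#if 0
+static void
+example_timer_usage(Process *p)
+{
+    set_timer(p, 1000);                /* time out after 1000 ms */
+    /* ... later, when the process is scheduled in again ... */
+    if (p->flags & F_TIMO) {
+        /* timeout_proc() fired and set F_TIMO */
+    }
+    else {
+        cancel_timer(p);               /* woken for another reason; disarm */
+    }
+}
+#endif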
+
+/*
+ * Stack dump functions follow.
+ */
+
+void
+erts_stack_dump(int to, void *to_arg, Process *p)
+{
+ Eterm* sp;
+ int yreg = -1;
+
+ if (p->trace_flags & F_SENSITIVE) {
+ return;
+ }
+ erts_program_counter_info(to, to_arg, p);
+ for (sp = p->stop; sp < STACK_START(p); sp++) {
+ yreg = stack_element_dump(to, to_arg, p, sp, yreg);
+ }
+}
+
+void
+erts_program_counter_info(int to, void *to_arg, Process *p)
+{
+ int i;
+
+ erts_print(to, to_arg, "Program counter: %p (", p->i);
+ print_function_from_pc(to, to_arg, p->i);
+ erts_print(to, to_arg, ")\n");
+ erts_print(to, to_arg, "CP: %p (", p->cp);
+ print_function_from_pc(to, to_arg, p->cp);
+ erts_print(to, to_arg, ")\n");
+ if (!((p->status == P_RUNNING) || (p->status == P_GARBING))) {
+ erts_print(to, to_arg, "arity = %d\n",p->arity);
+ if (!ERTS_IS_CRASH_DUMPING) {
+ /*
+ * Only print the arguments if we are not writing a
+ * crash dump file. The arguments cannot be interpreted
+ * by the crashdump_viewer application and will therefore
+ * only cause problems.
+ */
+ for (i = 0; i < p->arity; i++)
+ erts_print(to, to_arg, " %T\n", p->arg_reg[i]);
+ }
+ }
+}
+
+static void
+print_function_from_pc(int to, void *to_arg, Eterm* x)
+{
+ Eterm* addr = find_function_from_pc(x);
+ if (addr == NULL) {
+ if (x == beam_exit) {
+ erts_print(to, to_arg, "<terminate process>");
+ } else if (x == beam_continue_exit) {
+ erts_print(to, to_arg, "<continue terminate process>");
+ } else if (x == beam_apply+1) {
+ erts_print(to, to_arg, "<terminate process normally>");
+ } else if (x == 0) {
+ erts_print(to, to_arg, "invalid");
+ } else {
+ erts_print(to, to_arg, "unknown function");
+ }
+ } else {
+ erts_print(to, to_arg, "%T:%T/%d + %d",
+ addr[0], addr[1], addr[2], ((x-addr)-2) * sizeof(Eterm));
+ }
+}
+
+static int
+stack_element_dump(int to, void *to_arg, Process* p, Eterm* sp, int yreg)
+{
+ Eterm x = *sp;
+
+ if (yreg < 0 || is_CP(x)) {
+ erts_print(to, to_arg, "\n%p ", sp);
+ } else {
+ char sbuf[16];
+ sprintf(sbuf, "y(%d)", yreg);
+ erts_print(to, to_arg, "%-8s ", sbuf);
+ yreg++;
+ }
+
+ if (is_CP(x)) {
+ erts_print(to, to_arg, "Return addr %p (", (Eterm *) x);
+ print_function_from_pc(to, to_arg, cp_val(x));
+ erts_print(to, to_arg, ")\n");
+ yreg = 0;
+    } else if (is_catch(x)) {
+ erts_print(to, to_arg, "Catch %p (", catch_pc(x));
+ print_function_from_pc(to, to_arg, catch_pc(x));
+ erts_print(to, to_arg, ")\n");
+ } else {
+ erts_print(to, to_arg, "%T\n", x);
+ }
+ return yreg;
+}
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\
+ * The processes/0 BIF implementation. *
+\* */
+
+
+#define ERTS_PROCESSES_BIF_TAB_INSPECT_INDICES_PER_RED 25
+#define ERTS_PROCESSES_BIF_TAB_CHUNK_SIZE 1000
+#define ERTS_PROCESSES_BIF_MIN_START_REDS \
+ (ERTS_PROCESSES_BIF_TAB_CHUNK_SIZE \
+ / ERTS_PROCESSES_BIF_TAB_INSPECT_INDICES_PER_RED)
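+
+/* With the defaults above, one chunk covers 1000 table indices, and
+ * inspecting a full chunk costs 1000/25 = 40 reductions; as the macro
+ * name suggests, this is presumably also the minimum number of
+ * reductions required before the processes/0 BIF starts. */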
+
+#define ERTS_PROCESSES_BIF_TAB_FREE_TERM_PROC_REDS 1
+
+#define ERTS_PROCESSES_BIF_INSPECT_TERM_PROC_PER_RED 10
+
+#define ERTS_PROCESSES_INSPECT_TERM_PROC_MAX_REDS \
+ (ERTS_PROCESSES_BIF_TAB_CHUNK_SIZE \
+ / ERTS_PROCESSES_BIF_TAB_INSPECT_INDICES_PER_RED)
+
+
+#define ERTS_PROCESSES_BIF_BUILD_RESULT_CONSES_PER_RED 75
+
+#define ERTS_PROCS_DBG_DO_TRACE 0
+
+#ifdef DEBUG
+# define ERTS_PROCESSES_BIF_DEBUGLEVEL 100
+#else
+# define ERTS_PROCESSES_BIF_DEBUGLEVEL 0
+#endif
+
+#define ERTS_PROCS_DBGLVL_CHK_HALLOC 1
+#define ERTS_PROCS_DBGLVL_CHK_FOUND_PIDS 5
+#define ERTS_PROCS_DBGLVL_CHK_PIDS 10
+#define ERTS_PROCS_DBGLVL_CHK_TERM_PROC_LIST 20
+#define ERTS_PROCS_DBGLVL_CHK_RESLIST 20
+
+#if ERTS_PROCESSES_BIF_DEBUGLEVEL == 0
+# define ERTS_PROCS_ASSERT(EXP)
+#else
+# define ERTS_PROCS_ASSERT(EXP) \
+ ((void) ((EXP) \
+ ? 1 \
+ : (debug_processes_assert_error(#EXP, __FILE__, __LINE__), 0)))
+#endif
+
+
+#if ERTS_PROCESSES_BIF_DEBUGLEVEL >= ERTS_PROCS_DBGLVL_CHK_HALLOC
+# define ERTS_PROCS_DBG_SAVE_HEAP_ALLOC(PBDP, HP, SZ) \
+do { \
+ ERTS_PROCS_ASSERT(!(PBDP)->debug.heap); \
+ ERTS_PROCS_ASSERT(!(PBDP)->debug.heap_size); \
+ (PBDP)->debug.heap = (HP); \
+ (PBDP)->debug.heap_size = (SZ); \
+} while (0)
+# define ERTS_PROCS_DBG_VERIFY_HEAP_ALLOC_USED(PBDP, HP) \
+do { \
+ ERTS_PROCS_ASSERT((PBDP)->debug.heap); \
+ ERTS_PROCS_ASSERT((PBDP)->debug.heap_size); \
+ ERTS_PROCS_ASSERT((PBDP)->debug.heap + (PBDP)->debug.heap_size == (HP));\
+ (PBDP)->debug.heap = NULL; \
+ (PBDP)->debug.heap_size = 0; \
+} while (0)
+# define ERTS_PROCS_DBG_HEAP_ALLOC_INIT(PBDP) \
+do { \
+ (PBDP)->debug.heap = NULL; \
+ (PBDP)->debug.heap_size = 0; \
+} while (0)
+#else
+# define ERTS_PROCS_DBG_SAVE_HEAP_ALLOC(PBDP, HP, SZ)
+# define ERTS_PROCS_DBG_VERIFY_HEAP_ALLOC_USED(PBDP, HP)
+# define ERTS_PROCS_DBG_HEAP_ALLOC_INIT(PBDP)
+#endif
+
+#if ERTS_PROCESSES_BIF_DEBUGLEVEL >= ERTS_PROCS_DBGLVL_CHK_RESLIST
+# define ERTS_PROCS_DBG_CHK_RESLIST(R) debug_processes_check_res_list((R))
+#else
+# define ERTS_PROCS_DBG_CHK_RESLIST(R)
+#endif
+
+#if ERTS_PROCESSES_BIF_DEBUGLEVEL >= ERTS_PROCS_DBGLVL_CHK_PIDS
+# define ERTS_PROCS_DBG_SAVE_PIDS(PBDP) debug_processes_save_all_pids((PBDP))
+# define ERTS_PROCS_DBG_VERIFY_PIDS(PBDP) \
+do { \
+ if (!(PBDP)->debug.correct_pids_verified) \
+ debug_processes_verify_all_pids((PBDP)); \
+} while (0)
+# define ERTS_PROCS_DBG_CLEANUP_CHK_PIDS(PBDP) \
+do { \
+ if ((PBDP)->debug.correct_pids) { \
+ erts_free(ERTS_ALC_T_PROCS_PIDS, \
+ (PBDP)->debug.correct_pids); \
+ (PBDP)->debug.correct_pids = NULL; \
+ } \
+} while(0)
+# define ERTS_PROCS_DBG_CHK_PIDS_INIT(PBDP) \
+do { \
+ (PBDP)->debug.correct_pids_verified = 0; \
+ (PBDP)->debug.correct_pids = NULL; \
+} while (0)
+#else
+# define ERTS_PROCS_DBG_SAVE_PIDS(PBDP)
+# define ERTS_PROCS_DBG_VERIFY_PIDS(PBDP)
+# define ERTS_PROCS_DBG_CLEANUP_CHK_PIDS(PBDP)
+# define ERTS_PROCS_DBG_CHK_PIDS_INIT(PBDP)
+#endif
+
+#if ERTS_PROCESSES_BIF_DEBUGLEVEL >= ERTS_PROCS_DBGLVL_CHK_FOUND_PIDS
+# define ERTS_PROCS_DBG_CHK_PID_FOUND(PBDP, PID, TVP) \
+ debug_processes_check_found_pid((PBDP), (PID), (TVP), 1)
+# define ERTS_PROCS_DBG_CHK_PID_NOT_FOUND(PBDP, PID, TVP) \
+ debug_processes_check_found_pid((PBDP), (PID), (TVP), 0)
+#else
+# define ERTS_PROCS_DBG_CHK_PID_FOUND(PBDP, PID, TVP)
+# define ERTS_PROCS_DBG_CHK_PID_NOT_FOUND(PBDP, PID, TVP)
+#endif
+
+#if ERTS_PROCESSES_BIF_DEBUGLEVEL >= ERTS_PROCS_DBGLVL_CHK_TERM_PROC_LIST
+# define ERTS_PROCS_DBG_CHK_TPLIST() \
+ debug_processes_check_term_proc_list()
+# define ERTS_PROCS_DBG_CHK_FREELIST(FL) \
+ debug_processes_check_term_proc_free_list(FL)
+#else
+# define ERTS_PROCS_DBG_CHK_TPLIST()
+# define ERTS_PROCS_DBG_CHK_FREELIST(FL)
+#endif
+
+#if ERTS_PROCESSES_BIF_DEBUGLEVEL == 0
+#if ERTS_PROCS_DBG_DO_TRACE
+# define ERTS_PROCS_DBG_INIT(P, PBDP) (PBDP)->debug.caller = (P)->id
+# else
+# define ERTS_PROCS_DBG_INIT(P, PBDP)
+# endif
+# define ERTS_PROCS_DBG_CLEANUP(PBDP)
+#else
+# define ERTS_PROCS_DBG_INIT(P, PBDP) \
+do { \
+ (PBDP)->debug.caller = (P)->id; \
+ ERTS_PROCS_DBG_HEAP_ALLOC_INIT((PBDP)); \
+ ERTS_PROCS_DBG_CHK_PIDS_INIT((PBDP)); \
+} while (0)
+# define ERTS_PROCS_DBG_CLEANUP(PBDP) \
+do { \
+ ERTS_PROCS_DBG_CLEANUP_CHK_PIDS((PBDP)); \
+} while (0)
+#endif
+
+#if ERTS_PROCS_DBG_DO_TRACE
+# define ERTS_PROCS_DBG_TRACE(PID, FUNC, WHAT) \
+ erts_fprintf(stderr, "%T %s:%d:%s(): %s\n", \
+ (PID), __FILE__, __LINE__, #FUNC, #WHAT)
+#else
+# define ERTS_PROCS_DBG_TRACE(PID, FUNC, WHAT)
+#endif
+
+static Uint processes_bif_tab_chunks;
+static Export processes_trap_export;
+
+typedef struct {
+ SysTimeval time;
+} ErtsProcessesBifChunkInfo;
+
+typedef enum {
+ INITIALIZING,
+ INSPECTING_TABLE,
+ INSPECTING_TERMINATED_PROCESSES,
+ BUILDING_RESULT,
+ RETURN_RESULT
+} ErtsProcessesBifState;
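+
+/*
+ * A hedged reading of the states above, based on the engine code below:
+ * INITIALIZING sets up chunk bookkeeping and allocates the pid array,
+ * INSPECTING_TABLE scans process_tab one chunk at a time, and (when more
+ * than one chunk is needed) INSPECTING_TERMINATED_PROCESSES also collects
+ * processes that terminated during the scan. BUILDING_RESULT and
+ * RETURN_RESULT presumably build and return the resulting pid list.
+ */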
+
+typedef struct {
+ ErtsProcessesBifState state;
+ Eterm caller;
+ ErtsProcessesBifChunkInfo *chunk;
+ int tix;
+ int pid_ix;
+ int pid_sz;
+ Eterm *pid;
+ ErtsTermProcElement *bif_invocation; /* Only used when > 1 chunk */
+
+#if ERTS_PROCESSES_BIF_DEBUGLEVEL != 0 || ERTS_PROCS_DBG_DO_TRACE
+ struct {
+ Eterm caller;
+#if ERTS_PROCESSES_BIF_DEBUGLEVEL >= ERTS_PROCS_DBGLVL_CHK_FOUND_PIDS
+ SysTimeval *pid_started;
+#endif
+#if ERTS_PROCESSES_BIF_DEBUGLEVEL >= ERTS_PROCS_DBGLVL_CHK_HALLOC
+ Eterm *heap;
+ Uint heap_size;
+#endif
+#if ERTS_PROCESSES_BIF_DEBUGLEVEL >= ERTS_PROCS_DBGLVL_CHK_PIDS
+ int correct_pids_verified;
+ Eterm *correct_pids;
+#endif
+ } debug;
+#endif
+
+} ErtsProcessesBifData;
+
+
+#if ERTS_PROCESSES_BIF_DEBUGLEVEL != 0
+static void debug_processes_assert_error(char* expr, char* file, int line);
+#endif
+#if ERTS_PROCESSES_BIF_DEBUGLEVEL >= ERTS_PROCS_DBGLVL_CHK_RESLIST
+static void debug_processes_check_res_list(Eterm list);
+#endif
+#if ERTS_PROCESSES_BIF_DEBUGLEVEL >= ERTS_PROCS_DBGLVL_CHK_PIDS
+static void debug_processes_save_all_pids(ErtsProcessesBifData *pbdp);
+static void debug_processes_verify_all_pids(ErtsProcessesBifData *pbdp);
+#endif
+#if ERTS_PROCESSES_BIF_DEBUGLEVEL >= ERTS_PROCS_DBGLVL_CHK_FOUND_PIDS
+static void debug_processes_check_found_pid(ErtsProcessesBifData *pbdp,
+ Eterm pid,
+ SysTimeval *started,
+ int pid_should_be_found);
+#endif
+#if ERTS_PROCESSES_BIF_DEBUGLEVEL >= ERTS_PROCS_DBGLVL_CHK_TERM_PROC_LIST
+static SysTimeval debug_tv_start;
+static void debug_processes_check_term_proc_list(void);
+static void debug_processes_check_term_proc_free_list(ErtsTermProcElement *tpep);
+#endif
+
+static void
+save_terminating_process(Process *p)
+{
+ ErtsTermProcElement *tpep = erts_alloc(ERTS_ALC_T_PROCS_TPROC_EL,
+ sizeof(ErtsTermProcElement));
+ ERTS_PROCS_ASSERT(saved_term_procs.start && saved_term_procs.end);
+ ERTS_SMP_LC_ASSERT(erts_lc_mtx_is_locked(&proc_tab_mtx));
+
+ ERTS_PROCS_DBG_CHK_TPLIST();
+
+ tpep->prev = saved_term_procs.end;
+ tpep->next = NULL;
+ tpep->ix = internal_pid_index(p->id);
+ tpep->u.process.pid = p->id;
+ tpep->u.process.spawned = p->started;
+ erts_get_emu_time(&tpep->u.process.exited);
+
+ saved_term_procs.end->next = tpep;
+ saved_term_procs.end = tpep;
+
+ ERTS_PROCS_DBG_CHK_TPLIST();
+
+ ERTS_PROCS_ASSERT((tpep->prev->ix >= 0
+ ? erts_cmp_timeval(&tpep->u.process.exited,
+ &tpep->prev->u.process.exited)
+ : erts_cmp_timeval(&tpep->u.process.exited,
+ &tpep->prev->u.bif_invocation.time)) > 0);
+}
+
+static void
+cleanup_processes_bif_data(Binary *bp)
+{
+ ErtsProcessesBifData *pbdp = ERTS_MAGIC_BIN_DATA(bp);
+
+ ERTS_PROCS_DBG_TRACE(pbdp->debug.caller, cleanup_processes_bif_data, call);
+
+ if (pbdp->state != INITIALIZING) {
+
+ if (pbdp->chunk) {
+ erts_free(ERTS_ALC_T_PROCS_CNKINF, pbdp->chunk);
+ pbdp->chunk = NULL;
+ }
+ if (pbdp->pid) {
+ erts_free(ERTS_ALC_T_PROCS_PIDS, pbdp->pid);
+ pbdp->pid = NULL;
+ }
+
+#if ERTS_PROCESSES_BIF_DEBUGLEVEL >= ERTS_PROCS_DBGLVL_CHK_FOUND_PIDS
+ if (pbdp->debug.pid_started) {
+ erts_free(ERTS_ALC_T_PROCS_PIDS, pbdp->debug.pid_started);
+ pbdp->debug.pid_started = NULL;
+ }
+#endif
+
+ if (pbdp->bif_invocation) {
+ ErtsTermProcElement *tpep;
+
+ erts_smp_mtx_lock(&proc_tab_mtx);
+
+ ERTS_PROCS_DBG_TRACE(pbdp->debug.caller,
+ cleanup_processes_bif_data,
+ term_proc_cleanup);
+
+ tpep = pbdp->bif_invocation;
+ pbdp->bif_invocation = NULL;
+
+ ERTS_PROCS_DBG_CHK_TPLIST();
+
+ if (tpep->prev) {
+ /*
+		 * Only remove this bif invocation when we
+		 * have preceding invocations.
+ */
+ tpep->prev->next = tpep->next;
+ if (tpep->next)
+ tpep->next->prev = tpep->prev;
+ else {
+ /*
+ * At the time of writing this branch cannot be
+ * reached. I don't want to remove this code though
+ * since it may be possible to reach this line
+ * in the future if the cleanup order in
+ * erts_do_exit_process() is changed. The ASSERT(0)
+ * is only here to make us aware that the reorder
+ * has happened. /rickard
+ */
+ ASSERT(0);
+ saved_term_procs.end = tpep->prev;
+ }
+ erts_free(ERTS_ALC_T_PROCS_TPROC_EL, tpep);
+ }
+ else {
+ /*
+	     * Free all elements until the next bif invocation
+	     * is found.
+ */
+ ERTS_PROCS_ASSERT(saved_term_procs.start == tpep);
+ do {
+ ErtsTermProcElement *ftpep = tpep;
+ tpep = tpep->next;
+ erts_free(ERTS_ALC_T_PROCS_TPROC_EL, ftpep);
+ } while (tpep && tpep->ix >= 0);
+ saved_term_procs.start = tpep;
+ if (tpep)
+ tpep->prev = NULL;
+ else
+ saved_term_procs.end = NULL;
+ }
+
+ ERTS_PROCS_DBG_CHK_TPLIST();
+
+ erts_smp_mtx_unlock(&proc_tab_mtx);
+
+ }
+ }
+
+ ERTS_PROCS_DBG_TRACE(pbdp->debug.caller,
+ cleanup_processes_bif_data,
+ return);
+ ERTS_PROCS_DBG_CLEANUP(pbdp);
+}
+
+static int
+processes_bif_engine(Process *p, Eterm *res_accp, Binary *mbp)
+{
+ ErtsProcessesBifData *pbdp = ERTS_MAGIC_BIN_DATA(mbp);
+ int have_reds;
+ int reds;
+ int locked = 0;
+
+ do {
+ switch (pbdp->state) {
+ case INITIALIZING:
+ pbdp->chunk = erts_alloc(ERTS_ALC_T_PROCS_CNKINF,
+ (sizeof(ErtsProcessesBifChunkInfo)
+ * processes_bif_tab_chunks));
+ pbdp->tix = 0;
+ pbdp->pid_ix = 0;
+
+ erts_smp_mtx_lock(&proc_tab_mtx);
+ locked = 1;
+
+ ERTS_PROCS_DBG_TRACE(p->id, processes_bif_engine, init);
+
+ pbdp->pid_sz = erts_process_count();
+ pbdp->pid = erts_alloc(ERTS_ALC_T_PROCS_PIDS,
+ sizeof(Eterm)*pbdp->pid_sz);
+
+#if ERTS_PROCESSES_BIF_DEBUGLEVEL >= ERTS_PROCS_DBGLVL_CHK_FOUND_PIDS
+ pbdp->debug.pid_started = erts_alloc(ERTS_ALC_T_PROCS_PIDS,
+ sizeof(SysTimeval)*pbdp->pid_sz);
+#endif
+
+ ERTS_PROCS_DBG_SAVE_PIDS(pbdp);
+
+ if (processes_bif_tab_chunks == 1)
+ pbdp->bif_invocation = NULL;
+ else {
+ /*
+		 * We will have to access the table multiple times,
+		 * releasing the table lock between chunks.
+ */
+ pbdp->bif_invocation = erts_alloc(ERTS_ALC_T_PROCS_TPROC_EL,
+ sizeof(ErtsTermProcElement));
+ pbdp->bif_invocation->ix = -1;
+ erts_get_emu_time(&pbdp->bif_invocation->u.bif_invocation.time);
+ ERTS_PROCS_DBG_CHK_TPLIST();
+
+ pbdp->bif_invocation->next = NULL;
+ if (saved_term_procs.end) {
+ pbdp->bif_invocation->prev = saved_term_procs.end;
+ saved_term_procs.end->next = pbdp->bif_invocation;
+ ERTS_PROCS_ASSERT(saved_term_procs.start);
+ }
+ else {
+ pbdp->bif_invocation->prev = NULL;
+ saved_term_procs.start = pbdp->bif_invocation;
+ }
+ saved_term_procs.end = pbdp->bif_invocation;
+
+ ERTS_PROCS_DBG_CHK_TPLIST();
+
+ }
+
+ pbdp->state = INSPECTING_TABLE;
+ /* Fall through */
+
+ case INSPECTING_TABLE: {
+ int ix = pbdp->tix;
+ int indices = ERTS_PROCESSES_BIF_TAB_CHUNK_SIZE;
+ int cix = ix / ERTS_PROCESSES_BIF_TAB_CHUNK_SIZE;
+ int end_ix = ix + indices;
+ SysTimeval *invocation_timep;
+
+ invocation_timep = (pbdp->bif_invocation
+ ? &pbdp->bif_invocation->u.bif_invocation.time
+ : NULL);
+
+ ERTS_PROCS_ASSERT(is_nil(*res_accp));
+ if (!locked) {
+ erts_smp_mtx_lock(&proc_tab_mtx);
+ locked = 1;
+ }
+
+ ERTS_SMP_LC_ASSERT(erts_lc_mtx_is_locked(&proc_tab_mtx));
+ ERTS_PROCS_DBG_TRACE(p->id, processes_bif_engine, insp_table);
+
+ if (cix != 0)
+ erts_get_emu_time(&pbdp->chunk[cix].time);
+ else if (pbdp->bif_invocation)
+ pbdp->chunk[0].time = *invocation_timep;
+ /* else: Time is irrelevant */
+
+ if (end_ix >= erts_max_processes) {
+ ERTS_PROCS_ASSERT(cix+1 == processes_bif_tab_chunks);
+ end_ix = erts_max_processes;
+ indices = end_ix - ix;
+ /* What to do when done with this chunk */
+ pbdp->state = (processes_bif_tab_chunks == 1
+ ? BUILDING_RESULT
+ : INSPECTING_TERMINATED_PROCESSES);
+ }
+
+ for (; ix < end_ix; ix++) {
+ Process *rp = process_tab[ix];
+ if (rp
+ && (!invocation_timep
+ || erts_cmp_timeval(&rp->started,
+ invocation_timep) < 0)) {
+ ERTS_PROCS_ASSERT(is_internal_pid(rp->id));
+ pbdp->pid[pbdp->pid_ix] = rp->id;
+
+#if ERTS_PROCESSES_BIF_DEBUGLEVEL >= ERTS_PROCS_DBGLVL_CHK_FOUND_PIDS
+ pbdp->debug.pid_started[pbdp->pid_ix] = rp->started;
+#endif
+
+ pbdp->pid_ix++;
+ ERTS_PROCS_ASSERT(pbdp->pid_ix <= pbdp->pid_sz);
+ }
+ }
+
+ pbdp->tix = end_ix;
+
+ erts_smp_mtx_unlock(&proc_tab_mtx);
+ locked = 0;
+
+ reds = indices/ERTS_PROCESSES_BIF_TAB_INSPECT_INDICES_PER_RED;
+ BUMP_REDS(p, reds);
+
+ have_reds = ERTS_BIF_REDS_LEFT(p);
+
+ if (have_reds && pbdp->state == INSPECTING_TABLE) {
+ ix = pbdp->tix;
+ indices = ERTS_PROCESSES_BIF_TAB_CHUNK_SIZE;
+ end_ix = ix + indices;
+ if (end_ix > erts_max_processes) {
+ end_ix = erts_max_processes;
+ indices = end_ix - ix;
+ }
+
+ reds = indices/ERTS_PROCESSES_BIF_TAB_INSPECT_INDICES_PER_RED;
+
+ /* Pretend we have no reds left if we haven't got enough
+ reductions to complete next chunk */
+ if (reds > have_reds)
+ have_reds = 0;
+ }
+
+ break;
+ }
+
+ case INSPECTING_TERMINATED_PROCESSES: {
+ int i;
+ int max_reds;
+ int free_term_procs = 0;
+ SysTimeval *invocation_timep;
+ ErtsTermProcElement *tpep;
+ ErtsTermProcElement *free_list = NULL;
+
+ tpep = pbdp->bif_invocation;
+ ERTS_PROCS_ASSERT(tpep);
+ invocation_timep = &tpep->u.bif_invocation.time;
+
+ max_reds = have_reds = ERTS_BIF_REDS_LEFT(p);
+ if (max_reds > ERTS_PROCESSES_INSPECT_TERM_PROC_MAX_REDS)
+ max_reds = ERTS_PROCESSES_INSPECT_TERM_PROC_MAX_REDS;
+
+ reds = 0;
+ erts_smp_mtx_lock(&proc_tab_mtx);
+ ERTS_PROCS_DBG_TRACE(p->id, processes_bif_engine, insp_term_procs);
+
+ ERTS_PROCS_DBG_CHK_TPLIST();
+
+ if (tpep->prev)
+ tpep->prev->next = tpep->next;
+ else {
+ ERTS_PROCS_ASSERT(saved_term_procs.start == tpep);
+ saved_term_procs.start = tpep->next;
+
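+	    /* Our invocation marker was first in the list. Terminated-
+	     * process elements now at the head are not referenced by any
+	     * older processes/0 invocation, so they can be unlinked and
+	     * freed once we have inspected them. */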
+ if (saved_term_procs.start && saved_term_procs.start->ix >= 0) {
+ free_list = saved_term_procs.start;
+ free_term_procs = 1;
+ }
+ }
+
+ if (tpep->next)
+ tpep->next->prev = tpep->prev;
+ else
+ saved_term_procs.end = tpep->prev;
+
+ tpep = tpep->next;
+
+ i = 0;
+ while (reds < max_reds && tpep) {
+ if (tpep->ix < 0) {
+ if (free_term_procs) {
+ ERTS_PROCS_ASSERT(free_list);
+ ERTS_PROCS_ASSERT(tpep->prev);
+
+ tpep->prev->next = NULL; /* end of free_list */
+ saved_term_procs.start = tpep;
+ tpep->prev = NULL;
+ free_term_procs = 0;
+ }
+ }
+ else {
+ int cix = tpep->ix/ERTS_PROCESSES_BIF_TAB_CHUNK_SIZE;
+ SysTimeval *chunk_timep = &pbdp->chunk[cix].time;
+ Eterm pid = tpep->u.process.pid;
+ ERTS_PROCS_ASSERT(is_internal_pid(pid));
+
+ if (erts_cmp_timeval(&tpep->u.process.spawned,
+ invocation_timep) < 0) {
+ if (erts_cmp_timeval(&tpep->u.process.exited,
+ chunk_timep) < 0) {
+ ERTS_PROCS_DBG_CHK_PID_NOT_FOUND(pbdp,
+ pid,
+ &tpep->u.process.spawned);
+ pbdp->pid[pbdp->pid_ix] = pid;
+#if ERTS_PROCESSES_BIF_DEBUGLEVEL >= ERTS_PROCS_DBGLVL_CHK_FOUND_PIDS
+ pbdp->debug.pid_started[pbdp->pid_ix] = tpep->u.process.spawned;
+#endif
+ pbdp->pid_ix++;
+ ERTS_PROCS_ASSERT(pbdp->pid_ix <= pbdp->pid_sz);
+ }
+ else {
+ ERTS_PROCS_DBG_CHK_PID_FOUND(pbdp,
+ pid,
+ &tpep->u.process.spawned);
+ }
+ }
+ else {
+ ERTS_PROCS_DBG_CHK_PID_NOT_FOUND(pbdp,
+ pid,
+ &tpep->u.process.spawned);
+ }
+
+ i++;
+ if (i == ERTS_PROCESSES_BIF_INSPECT_TERM_PROC_PER_RED) {
+ reds++;
+ i = 0;
+ }
+ if (free_term_procs)
+ reds += ERTS_PROCESSES_BIF_TAB_FREE_TERM_PROC_REDS;
+ }
+ tpep = tpep->next;
+ }
+
+ if (free_term_procs) {
+ ERTS_PROCS_ASSERT(free_list);
+ saved_term_procs.start = tpep;
+ if (!tpep)
+ saved_term_procs.end = NULL;
+ else {
+ ERTS_PROCS_ASSERT(tpep->prev);
+ tpep->prev->next = NULL; /* end of free_list */
+ tpep->prev = NULL;
+ }
+ }
+
+ if (!tpep) {
+ /* Done */
+ ERTS_PROCS_ASSERT(pbdp->pid_ix == pbdp->pid_sz);
+ pbdp->state = BUILDING_RESULT;
+ pbdp->bif_invocation->next = free_list;
+ free_list = pbdp->bif_invocation;
+ pbdp->bif_invocation = NULL;
+ }
+ else {
+ /* Link in bif_invocation again where we left off */
+ pbdp->bif_invocation->prev = tpep->prev;
+ pbdp->bif_invocation->next = tpep;
+ tpep->prev = pbdp->bif_invocation;
+ if (pbdp->bif_invocation->prev)
+ pbdp->bif_invocation->prev->next = pbdp->bif_invocation;
+ else {
+ ERTS_PROCS_ASSERT(saved_term_procs.start == tpep);
+ saved_term_procs.start = pbdp->bif_invocation;
+ }
+ }
+
+ ERTS_PROCS_DBG_CHK_TPLIST();
+ ERTS_PROCS_DBG_CHK_FREELIST(free_list);
+ erts_smp_mtx_unlock(&proc_tab_mtx);
+
+	/*
+	 * We do the actual freeing of term proc structures now, after
+	 * having released the table lock, rather than when we encountered
+	 * them. This is because free() isn't for free, and we don't want
+	 * to unnecessarily block other schedulers.
+	 */
+ while (free_list) {
+ tpep = free_list;
+ free_list = tpep->next;
+ erts_free(ERTS_ALC_T_PROCS_TPROC_EL, tpep);
+ }
+
+ have_reds -= reds;
+ if (have_reds < 0)
+ have_reds = 0;
+ BUMP_REDS(p, reds);
+ break;
+ }
+
+ case BUILDING_RESULT: {
+ int conses, ix, min_ix;
+ Eterm *hp;
+ Eterm res = *res_accp;
+
+ ERTS_PROCS_DBG_VERIFY_PIDS(pbdp);
+ ERTS_PROCS_DBG_CHK_RESLIST(res);
+
+ ERTS_PROCS_DBG_TRACE(p->id, processes_bif_engine, begin_build_res);
+
+ have_reds = ERTS_BIF_REDS_LEFT(p);
+ conses = ERTS_PROCESSES_BIF_BUILD_RESULT_CONSES_PER_RED*have_reds;
+ min_ix = pbdp->pid_ix - conses;
+ if (min_ix < 0) {
+ min_ix = 0;
+ conses = pbdp->pid_ix;
+ }
+
+ hp = HAlloc(p, conses*2);
+ ERTS_PROCS_DBG_SAVE_HEAP_ALLOC(pbdp, hp, conses*2);
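+
+	/* Cons from the high end of the pid array down toward min_ix, so
+	 * that the result list accumulated across traps ends up with
+	 * pid[0] at its head, i.e. in pid table order. */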
+
+ for (ix = pbdp->pid_ix - 1; ix >= min_ix; ix--) {
+ ERTS_PROCS_ASSERT(is_internal_pid(pbdp->pid[ix]));
+ res = CONS(hp, pbdp->pid[ix], res);
+ hp += 2;
+ }
+
+ ERTS_PROCS_DBG_VERIFY_HEAP_ALLOC_USED(pbdp, hp);
+
+ pbdp->pid_ix = min_ix;
+ if (min_ix == 0)
+ pbdp->state = RETURN_RESULT;
+ else {
+ pbdp->pid_sz = min_ix;
+ pbdp->pid = erts_realloc(ERTS_ALC_T_PROCS_PIDS,
+ pbdp->pid,
+ sizeof(Eterm)*pbdp->pid_sz);
+#if ERTS_PROCESSES_BIF_DEBUGLEVEL >= ERTS_PROCS_DBGLVL_CHK_FOUND_PIDS
+ pbdp->debug.pid_started = erts_realloc(ERTS_ALC_T_PROCS_PIDS,
+ pbdp->debug.pid_started,
+ sizeof(SysTimeval)*pbdp->pid_sz);
+#endif
+ }
+ reds = conses/ERTS_PROCESSES_BIF_BUILD_RESULT_CONSES_PER_RED;
+ BUMP_REDS(p, reds);
+ have_reds -= reds;
+
+ ERTS_PROCS_DBG_CHK_RESLIST(res);
+ ERTS_PROCS_DBG_TRACE(p->id, processes_bif_engine, end_build_res);
+ *res_accp = res;
+ break;
+ }
+ case RETURN_RESULT:
+ cleanup_processes_bif_data(mbp);
+ return 1;
+
+ default:
+ erl_exit(ERTS_ABORT_EXIT,
+ "erlang:processes/0: Invalid state: %d\n",
+ (int) pbdp->state);
+ }
+
+ } while (have_reds || pbdp->state == RETURN_RESULT);
+
+ return 0;
+}
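+
+/*
+ * Summary of the engine above: the work is driven by a small state
+ * machine (INITIALIZING -> INSPECTING_TABLE, then, when the table is
+ * scanned in more than one chunk, INSPECTING_TERMINATED_PROCESSES ->
+ * BUILDING_RESULT -> RETURN_RESULT). Each step consumes a bounded
+ * number of reductions; when they run out, the caller traps and later
+ * re-enters the engine where it left off.
+ */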
+
+/*
+ * processes_trap/2 is a hidden BIF that processes/0 traps to.
+ */
+
+static BIF_RETTYPE processes_trap(BIF_ALIST_2)
+{
+ Eterm res_acc;
+ Binary *mbp;
+
+    /*
+     * This BIF cannot be called from Erlang code. It can only be
+     * trapped to from processes/0; a bad argument is therefore a
+     * processes/0 internal error.
+     */
+
+ ERTS_PROCS_DBG_TRACE(BIF_P->id, processes_trap, call);
+ ERTS_PROCS_ASSERT(is_nil(BIF_ARG_1) || is_list(BIF_ARG_1));
+
+ res_acc = BIF_ARG_1;
+
+ ERTS_PROCS_ASSERT(ERTS_TERM_IS_MAGIC_BINARY(BIF_ARG_2));
+
+ mbp = ((ProcBin *) binary_val(BIF_ARG_2))->val;
+
+ ERTS_PROCS_ASSERT(ERTS_MAGIC_BIN_DESTRUCTOR(mbp)
+ == cleanup_processes_bif_data);
+ ERTS_PROCS_ASSERT(
+ ((ErtsProcessesBifData *) ERTS_MAGIC_BIN_DATA(mbp))->debug.caller
+ == BIF_P->id);
+
+ if (processes_bif_engine(BIF_P, &res_acc, mbp)) {
+ ERTS_PROCS_DBG_TRACE(BIF_P->id, processes_trap, return);
+ BIF_RET(res_acc);
+ }
+ else {
+ ERTS_PROCS_DBG_TRACE(BIF_P->id, processes_trap, trap);
+ ERTS_BIF_YIELD2(&processes_trap_export, BIF_P, res_acc, BIF_ARG_2);
+ }
+}
+
+/*
+ * The actual processes/0 BIF.
+ */
+
+BIF_RETTYPE processes_0(BIF_ALIST_0)
+{
+ /*
+ * A requirement: The list of pids returned should be a consistent
+ * snapshot of all processes existing at some point
+ * in time during the execution of processes/0. Since
+ * processes might terminate while processes/0 is
+ * executing, we have to keep track of terminated
+ * processes and add them to the result. We also
+ * ignore processes created after processes/0 has
+ * begun executing.
+ */
+ Eterm res_acc = NIL;
+ Binary *mbp = erts_create_magic_binary(sizeof(ErtsProcessesBifData),
+ cleanup_processes_bif_data);
+ ErtsProcessesBifData *pbdp = ERTS_MAGIC_BIN_DATA(mbp);
+
+ ERTS_PROCS_DBG_TRACE(BIF_P->id, processes_0, call);
+ pbdp->state = INITIALIZING;
+ ERTS_PROCS_DBG_INIT(BIF_P, pbdp);
+
+ if (ERTS_BIF_REDS_LEFT(BIF_P) >= ERTS_PROCESSES_BIF_MIN_START_REDS
+ && processes_bif_engine(BIF_P, &res_acc, mbp)) {
+ erts_bin_free(mbp);
+ ERTS_PROCS_DBG_CHK_RESLIST(res_acc);
+ ERTS_PROCS_DBG_TRACE(BIF_P->id, processes_0, return);
+ BIF_RET(res_acc);
+ }
+ else {
+ Eterm *hp;
+ Eterm magic_bin;
+ ERTS_PROCS_DBG_CHK_RESLIST(res_acc);
+ hp = HAlloc(BIF_P, PROC_BIN_SIZE);
+ ERTS_PROCS_DBG_SAVE_HEAP_ALLOC(pbdp, hp, PROC_BIN_SIZE);
+ magic_bin = erts_mk_magic_binary_term(&hp, &MSO(BIF_P), mbp);
+ ERTS_PROCS_DBG_VERIFY_HEAP_ALLOC_USED(pbdp, hp);
+ ERTS_PROCS_DBG_TRACE(BIF_P->id, processes_0, trap);
+ ERTS_BIF_YIELD2(&processes_trap_export, BIF_P, res_acc, magic_bin);
+ }
+}
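+
+/*
+ * The code above follows a common ERTS pattern for long-running BIFs:
+ * do a bounded amount of work, and if reductions run out, stash the
+ * state in a magic binary and yield back to a hidden trap BIF. A
+ * minimal sketch of that shape follows; my_bif, my_engine, MyState,
+ * cleanup_my_state and my_trap_export are hypothetical names used for
+ * illustration only.
+ */
+#if 0
+static BIF_RETTYPE my_bif(BIF_ALIST_0)
+{
+    Eterm acc = NIL;
+    Binary *mbp = erts_create_magic_binary(sizeof(MyState),
+					   cleanup_my_state);
+
+    if (my_engine(BIF_P, &acc, mbp)) {	/* done within our reductions */
+	erts_bin_free(mbp);
+	BIF_RET(acc);
+    }
+    else {				/* out of reductions: yield */
+	Eterm *hp = HAlloc(BIF_P, PROC_BIN_SIZE);
+	Eterm mb = erts_mk_magic_binary_term(&hp, &MSO(BIF_P), mbp);
+	ERTS_BIF_YIELD2(&my_trap_export, BIF_P, acc, mb);
+    }
+}
+#endif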
+
+static void
+init_processes_bif(void)
+{
+ saved_term_procs.start = NULL;
+ saved_term_procs.end = NULL;
+ processes_bif_tab_chunks = (((erts_max_processes - 1)
+ / ERTS_PROCESSES_BIF_TAB_CHUNK_SIZE)
+ + 1);
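+    /* I.e., the number of ERTS_PROCESSES_BIF_TAB_CHUNK_SIZE sized chunks
+       needed to cover erts_max_processes (ceiling division). */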
+
+ /* processes_trap/2 is a hidden BIF that the processes/0 BIF traps to. */
+ sys_memset((void *) &processes_trap_export, 0, sizeof(Export));
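+    /* Export entry layout: code[0] = module atom, code[1] = function
+     * atom, code[2] = arity, code[3] = the apply_bif instruction, and
+     * code[4] = the C function to call; 'address' points at the
+     * instruction slot, code[3]. */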
+ processes_trap_export.address = &processes_trap_export.code[3];
+ processes_trap_export.code[0] = am_erlang;
+ processes_trap_export.code[1] = am_processes_trap;
+ processes_trap_export.code[2] = 2;
+ processes_trap_export.code[3] = (Eterm) em_apply_bif;
+ processes_trap_export.code[4] = (Eterm) &processes_trap;
+
+#if ERTS_PROCESSES_BIF_DEBUGLEVEL >= ERTS_PROCS_DBGLVL_CHK_TERM_PROC_LIST
+ erts_get_emu_time(&debug_tv_start);
+#endif
+
+}
+
+/*
+ * Debug stuff
+ */
+
+Eterm
+erts_debug_processes(Process *c_p)
+{
+ /* This is the old processes/0 BIF. */
+ int i;
+ Uint need;
+ Eterm res;
+ Eterm* hp;
+ Process *p;
+#ifdef DEBUG
+ Eterm *hp_end;
+#endif
+
+ erts_smp_mtx_lock(&proc_tab_mtx);
+
+ res = NIL;
+ need = erts_process_count() * 2;
+ hp = HAlloc(c_p, need); /* we need two heap words for each pid */
+#ifdef DEBUG
+ hp_end = hp + need;
+#endif
+
+    /* make the list by scanning backward */
+
+ for (i = erts_max_processes-1; i >= 0; i--) {
+ if ((p = process_tab[i]) != NULL) {
+ res = CONS(hp, process_tab[i]->id, res);
+ hp += 2;
+ }
+ }
+ ASSERT(hp == hp_end);
+
+ erts_smp_mtx_unlock(&proc_tab_mtx);
+
+ return res;
+}
+
+Eterm
+erts_debug_processes_bif_info(Process *c_p)
+{
+ ERTS_DECL_AM(processes_bif_info);
+ Eterm elements[] = {
+ AM_processes_bif_info,
+ make_small((Uint) ERTS_PROCESSES_BIF_MIN_START_REDS),
+ make_small((Uint) processes_bif_tab_chunks),
+ make_small((Uint) ERTS_PROCESSES_BIF_TAB_CHUNK_SIZE),
+ make_small((Uint) ERTS_PROCESSES_BIF_TAB_INSPECT_INDICES_PER_RED),
+ make_small((Uint) ERTS_PROCESSES_BIF_TAB_FREE_TERM_PROC_REDS),
+ make_small((Uint) ERTS_PROCESSES_BIF_INSPECT_TERM_PROC_PER_RED),
+ make_small((Uint) ERTS_PROCESSES_INSPECT_TERM_PROC_MAX_REDS),
+ make_small((Uint) ERTS_PROCESSES_BIF_BUILD_RESULT_CONSES_PER_RED),
+ make_small((Uint) ERTS_PROCESSES_BIF_DEBUGLEVEL)
+ };
+ Uint sz = 0;
+ Eterm *hp;
+ (void) erts_bld_tuplev(NULL, &sz, sizeof(elements)/sizeof(Eterm), elements);
+ hp = HAlloc(c_p, sz);
+ return erts_bld_tuplev(&hp, NULL, sizeof(elements)/sizeof(Eterm), elements);
+}
+
+#if ERTS_PROCESSES_BIF_DEBUGLEVEL >= ERTS_PROCS_DBGLVL_CHK_FOUND_PIDS
+static void
+debug_processes_check_found_pid(ErtsProcessesBifData *pbdp,
+ Eterm pid,
+ SysTimeval *tvp,
+ int pid_should_be_found)
+{
+ int i;
+ for (i = 0; i < pbdp->pid_ix; i++) {
+ if (pbdp->pid[i] == pid
+ && pbdp->debug.pid_started[i].tv_sec == tvp->tv_sec
+ && pbdp->debug.pid_started[i].tv_usec == tvp->tv_usec) {
+ ERTS_PROCS_ASSERT(pid_should_be_found);
+ return;
+ }
+ }
+ ERTS_PROCS_ASSERT(!pid_should_be_found);
+}
+#endif
+
+#if ERTS_PROCESSES_BIF_DEBUGLEVEL >= ERTS_PROCS_DBGLVL_CHK_RESLIST
+static void
+debug_processes_check_res_list(Eterm list)
+{
+ while (is_list(list)) {
+ Eterm* consp = list_val(list);
+ Eterm hd = CAR(consp);
+ ERTS_PROCS_ASSERT(is_internal_pid(hd));
+ list = CDR(consp);
+ }
+
+ ERTS_PROCS_ASSERT(is_nil(list));
+}
+#endif
+
+#if ERTS_PROCESSES_BIF_DEBUGLEVEL >= ERTS_PROCS_DBGLVL_CHK_PIDS
+
+static void
+debug_processes_save_all_pids(ErtsProcessesBifData *pbdp)
+{
+ int ix, tix, cpix;
+ pbdp->debug.correct_pids_verified = 0;
+ pbdp->debug.correct_pids = erts_alloc(ERTS_ALC_T_PROCS_PIDS,
+ sizeof(Eterm)*pbdp->pid_sz);
+
+ for (tix = 0, cpix = 0; tix < erts_max_processes; tix++) {
+ Process *rp = process_tab[tix];
+ if (rp) {
+ ERTS_PROCS_ASSERT(is_internal_pid(rp->id));
+ pbdp->debug.correct_pids[cpix++] = rp->id;
+ ERTS_PROCS_ASSERT(cpix <= pbdp->pid_sz);
+ }
+ }
+ ERTS_PROCS_ASSERT(cpix == pbdp->pid_sz);
+
+ for (ix = 0; ix < pbdp->pid_sz; ix++)
+ pbdp->pid[ix] = make_small(ix);
+}
+
+static void
+debug_processes_verify_all_pids(ErtsProcessesBifData *pbdp)
+{
+ int ix, cpix;
+
+ ERTS_PROCS_ASSERT(pbdp->pid_ix == pbdp->pid_sz);
+
+ for (ix = 0; ix < pbdp->pid_sz; ix++) {
+ int found = 0;
+ Eterm pid = pbdp->pid[ix];
+ ERTS_PROCS_ASSERT(is_internal_pid(pid));
+ for (cpix = ix; cpix < pbdp->pid_sz; cpix++) {
+ if (pbdp->debug.correct_pids[cpix] == pid) {
+ pbdp->debug.correct_pids[cpix] = NIL;
+ found = 1;
+ break;
+ }
+ }
+ if (!found) {
+ for (cpix = 0; cpix < ix; cpix++) {
+ if (pbdp->debug.correct_pids[cpix] == pid) {
+ pbdp->debug.correct_pids[cpix] = NIL;
+ found = 1;
+ break;
+ }
+ }
+ }
+ ERTS_PROCS_ASSERT(found);
+ }
+ pbdp->debug.correct_pids_verified = 1;
+
+ erts_free(ERTS_ALC_T_PROCS_PIDS, pbdp->debug.correct_pids);
+ pbdp->debug.correct_pids = NULL;
+}
+#endif /* ERTS_PROCESSES_BIF_DEBUGLEVEL >= ERTS_PROCS_DBGLVL_CHK_PIDS */
+
+#if ERTS_PROCESSES_BIF_DEBUGLEVEL >= ERTS_PROCS_DBGLVL_CHK_TERM_PROC_LIST
+static void
+debug_processes_check_term_proc_list(void)
+{
+ ERTS_SMP_LC_ASSERT(erts_lc_mtx_is_locked(&proc_tab_mtx));
+ if (!saved_term_procs.start)
+ ERTS_PROCS_ASSERT(!saved_term_procs.end);
+ else {
+ SysTimeval tv_now;
+ SysTimeval *prev_xtvp = NULL;
+ ErtsTermProcElement *tpep;
+ erts_get_emu_time(&tv_now);
+
+ for (tpep = saved_term_procs.start; tpep; tpep = tpep->next) {
+ if (!tpep->prev)
+ ERTS_PROCS_ASSERT(saved_term_procs.start == tpep);
+ else
+ ERTS_PROCS_ASSERT(tpep->prev->next == tpep);
+ if (!tpep->next)
+ ERTS_PROCS_ASSERT(saved_term_procs.end == tpep);
+ else
+ ERTS_PROCS_ASSERT(tpep->next->prev == tpep);
+ if (tpep->ix < 0) {
+ SysTimeval *tvp = &tpep->u.bif_invocation.time;
+ ERTS_PROCS_ASSERT(erts_cmp_timeval(&debug_tv_start, tvp) < 0
+ && erts_cmp_timeval(tvp, &tv_now) < 0);
+ }
+ else {
+ SysTimeval *stvp = &tpep->u.process.spawned;
+ SysTimeval *xtvp = &tpep->u.process.exited;
+
+ ERTS_PROCS_ASSERT(erts_cmp_timeval(&debug_tv_start,
+ stvp) < 0);
+ ERTS_PROCS_ASSERT(erts_cmp_timeval(stvp, xtvp) < 0);
+ if (prev_xtvp)
+ ERTS_PROCS_ASSERT(erts_cmp_timeval(prev_xtvp, xtvp) < 0);
+ prev_xtvp = xtvp;
+ ERTS_PROCS_ASSERT(is_internal_pid(tpep->u.process.pid));
+ ERTS_PROCS_ASSERT(tpep->ix
+ == internal_pid_index(tpep->u.process.pid));
+ }
+ }
+
+ }
+}
+
+static void
+debug_processes_check_term_proc_free_list(ErtsTermProcElement *free_list)
+{
+ if (saved_term_procs.start) {
+ ErtsTermProcElement *ftpep;
+ ErtsTermProcElement *tpep;
+
+ for (ftpep = free_list; ftpep; ftpep = ftpep->next) {
+ for (tpep = saved_term_procs.start; tpep; tpep = tpep->next)
+ ERTS_PROCS_ASSERT(ftpep != tpep);
+ }
+ }
+}
+
+#endif
+
+#if ERTS_PROCESSES_BIF_DEBUGLEVEL != 0
+
+static void
+debug_processes_assert_error(char* expr, char* file, int line)
+{
+ fflush(stdout);
+ erts_fprintf(stderr, "%s:%d: Assertion failed: %s\n", file, line, expr);
+ fflush(stderr);
+ abort();
+}
+
+#endif
+
+/* *\
+ * End of the processes/0 BIF implementation. *
+\* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
diff --git a/erts/emulator/beam/erl_process.h b/erts/emulator/beam/erl_process.h
new file mode 100644
index 0000000000..7bae1e4efc
--- /dev/null
+++ b/erts/emulator/beam/erl_process.h
@@ -0,0 +1,1495 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 1996-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+#ifndef __PROCESS_H__
+#define __PROCESS_H__
+
+#undef ERTS_INCLUDE_SCHEDULER_INTERNALS
+#if (defined(ERL_PROCESS_C__) \
+ || defined(ERL_PORT_TASK_C__) \
+ || (ERTS_GLB_INLINE_INCL_FUNC_DEF \
+ && defined(ERTS_DO_INCL_GLB_INLINE_FUNC_DEF)))
+#define ERTS_INCLUDE_SCHEDULER_INTERNALS
+#endif
+
+typedef struct process Process;
+
+#include "sys.h"
+
+#define ERTS_PROCESS_LOCK_ONLY_PROC_LOCK_TYPE__
+#include "erl_process_lock.h" /* Only pull out important types... */
+#undef ERTS_PROCESS_LOCK_ONLY_PROC_LOCK_TYPE__
+
+#include "erl_vm.h"
+#include "erl_smp.h"
+#include "erl_message.h"
+#include "erl_process_dict.h"
+#include "erl_node_container_utils.h"
+#include "erl_node_tables.h"
+#include "erl_monitors.h"
+#include "erl_bif_timer.h"
+#include "erl_time.h"
+#include "erl_atom_table.h"
+#include "external.h"
+
+#ifdef HIPE
+#include "hipe_process.h"
+#endif
+
+struct ErtsNodesMonitor_;
+struct port;
+
+#define ERTS_MAX_NO_OF_SCHEDULERS 1024
+
+#define ERTS_DEFAULT_MAX_PROCESSES (1 << 15)
+
+#define ERTS_HEAP_ALLOC(Type, Size) \
+ erts_alloc((Type), (Size))
+
+#define ERTS_HEAP_REALLOC(Type, Ptr, OldSize, NewSize) \
+ erts_realloc((Type), (Ptr), (NewSize))
+
+#define ERTS_HEAP_FREE(Type, Ptr, Size) \
+ erts_free((Type), (Ptr))
+
+#define INITIAL_MOD 0
+#define INITIAL_FUN 1
+#define INITIAL_ARI 2
+
+#include "export.h"
+
+struct saved_calls {
+ int len;
+ int n;
+ int cur;
+ Export *ct[1];
+};
+
+extern Export exp_send, exp_receive, exp_timeout;
+extern Uint erts_no_schedulers;
+extern Uint erts_no_run_queues;
+extern int erts_sched_thread_suggested_stack_size;
+#define ERTS_SCHED_THREAD_MIN_STACK_SIZE 4 /* Kilo words */
+#define ERTS_SCHED_THREAD_MAX_STACK_SIZE 8192 /* Kilo words */
+
+#ifdef ERTS_SMP
+#include "erl_bits.h"
+#endif
+
+/* process priorities */
+#define PRIORITY_MAX 0
+#define PRIORITY_HIGH 1
+#define PRIORITY_NORMAL 2
+#define PRIORITY_LOW 3
+#define ERTS_NO_PROC_PRIO_LEVELS 4
+
+#define ERTS_PORT_PRIO_LEVEL ERTS_NO_PROC_PRIO_LEVELS
+
+#define ERTS_RUNQ_FLGS_PROCS_QMASK \
+ ((((Uint32) 1) << ERTS_NO_PROC_PRIO_LEVELS) - 1)
+
+#define ERTS_NO_PRIO_LEVELS (ERTS_NO_PROC_PRIO_LEVELS + 1)
+#define ERTS_RUNQ_FLGS_MIGRATE_QMASK \
+ ((((Uint32) 1) << ERTS_NO_PRIO_LEVELS) - 1)
+
+#define ERTS_RUNQ_FLGS_EMIGRATE_SHFT \
+ ERTS_NO_PROC_PRIO_LEVELS
+#define ERTS_RUNQ_FLGS_IMMIGRATE_SHFT \
+ (ERTS_RUNQ_FLGS_EMIGRATE_SHFT + ERTS_NO_PRIO_LEVELS)
+#define ERTS_RUNQ_FLGS_EVACUATE_SHFT \
+ (ERTS_RUNQ_FLGS_IMMIGRATE_SHFT + ERTS_NO_PRIO_LEVELS)
+#define ERTS_RUNQ_FLGS_EMIGRATE_QMASK \
+ (ERTS_RUNQ_FLGS_MIGRATE_QMASK << ERTS_RUNQ_FLGS_EMIGRATE_SHFT)
+#define ERTS_RUNQ_FLGS_IMMIGRATE_QMASK \
+ (ERTS_RUNQ_FLGS_MIGRATE_QMASK << ERTS_RUNQ_FLGS_IMMIGRATE_SHFT)
+#define ERTS_RUNQ_FLGS_EVACUATE_QMASK \
+ (ERTS_RUNQ_FLGS_MIGRATE_QMASK << ERTS_RUNQ_FLGS_EVACUATE_SHFT)
+
+#define ERTS_RUNQ_FLG_BASE2 \
+ (ERTS_RUNQ_FLGS_EVACUATE_SHFT + ERTS_NO_PRIO_LEVELS)
+
+#define ERTS_RUNQ_FLG_OUT_OF_WORK \
+ (((Uint32) 1) << (ERTS_RUNQ_FLG_BASE2 + 0))
+#define ERTS_RUNQ_FLG_HALFTIME_OUT_OF_WORK \
+ (((Uint32) 1) << (ERTS_RUNQ_FLG_BASE2 + 1))
+#define ERTS_RUNQ_FLG_SUSPENDED \
+ (((Uint32) 1) << (ERTS_RUNQ_FLG_BASE2 + 2))
+#define ERTS_RUNQ_FLG_SHARED_RUNQ \
+ (((Uint32) 1) << (ERTS_RUNQ_FLG_BASE2 + 3))
+#define ERTS_RUNQ_FLG_CHK_CPU_BIND \
+ (((Uint32) 1) << (ERTS_RUNQ_FLG_BASE2 + 4))
+#define ERTS_RUNQ_FLG_INACTIVE \
+ (((Uint32) 1) << (ERTS_RUNQ_FLG_BASE2 + 5))
+
+#define ERTS_RUNQ_FLGS_MIGRATION_QMASKS \
+ (ERTS_RUNQ_FLGS_EMIGRATE_QMASK \
+ | ERTS_RUNQ_FLGS_IMMIGRATE_QMASK \
+ | ERTS_RUNQ_FLGS_EVACUATE_QMASK)
+#define ERTS_RUNQ_FLGS_MIGRATION_INFO \
+ (ERTS_RUNQ_FLGS_MIGRATION_QMASKS \
+ | ERTS_RUNQ_FLG_INACTIVE \
+ | ERTS_RUNQ_FLG_OUT_OF_WORK \
+ | ERTS_RUNQ_FLG_HALFTIME_OUT_OF_WORK)
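+
+/*
+ * Derived layout of the 32-bit run queue flags word (with
+ * ERTS_NO_PROC_PRIO_LEVELS == 4 and ERTS_NO_PRIO_LEVELS == 5):
+ *
+ *   bits  0..3    procs qmask (one bit per process prio level)
+ *   bits  4..8    emigrate flags (one bit per prio level, incl. port)
+ *   bits  9..13   immigrate flags
+ *   bits 14..18   evacuate flags
+ *   bits 19..24   OUT_OF_WORK, HALFTIME_OUT_OF_WORK, SUSPENDED,
+ *                 SHARED_RUNQ, CHK_CPU_BIND, INACTIVE
+ */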
+
+#define ERTS_RUNQ_FLG_EMIGRATE(PRIO) \
+ (((Uint32) 1) << (ERTS_RUNQ_FLGS_EMIGRATE_SHFT + (PRIO)))
+#define ERTS_CHK_RUNQ_FLG_EMIGRATE(FLGS, PRIO) \
+ ((FLGS) & ERTS_RUNQ_FLG_EMIGRATE((PRIO)))
+#define ERTS_SET_RUNQ_FLG_EMIGRATE(FLGS, PRIO) \
+ ((FLGS) |= ERTS_RUNQ_FLG_EMIGRATE((PRIO)))
+#define ERTS_UNSET_RUNQ_FLG_EMIGRATE(FLGS, PRIO) \
+ ((FLGS) &= ~ERTS_RUNQ_FLG_EMIGRATE((PRIO)))
+
+#define ERTS_RUNQ_FLG_IMMIGRATE(PRIO) \
+ (((Uint32) 1) << (ERTS_RUNQ_FLGS_IMMIGRATE_SHFT + (PRIO)))
+#define ERTS_CHK_RUNQ_FLG_IMMIGRATE(FLGS, PRIO) \
+ ((FLGS) & ERTS_RUNQ_FLG_IMMIGRATE((PRIO)))
+#define ERTS_SET_RUNQ_FLG_IMMIGRATE(FLGS, PRIO) \
+ ((FLGS) |= ERTS_RUNQ_FLG_IMMIGRATE((PRIO)))
+#define ERTS_UNSET_RUNQ_FLG_IMMIGRATE(FLGS, PRIO) \
+ ((FLGS) &= ~ERTS_RUNQ_FLG_IMMIGRATE((PRIO)))
+
+#define ERTS_RUNQ_FLG_EVACUATE(PRIO) \
+ (((Uint32) 1) << (ERTS_RUNQ_FLGS_EVACUATE_SHFT + (PRIO)))
+#define ERTS_CHK_RUNQ_FLG_EVACUATE(FLGS, PRIO) \
+ ((FLGS) & ERTS_RUNQ_FLG_EVACUATE((PRIO)))
+#define ERTS_SET_RUNQ_FLG_EVACUATE(FLGS, PRIO) \
+ ((FLGS) |= ERTS_RUNQ_FLG_EVACUATE((PRIO)))
+#define ERTS_UNSET_RUNQ_FLG_EVACUATE(FLGS, PRIO) \
+ ((FLGS) &= ~ERTS_RUNQ_FLG_EVACUATE((PRIO)))
+
+#define ERTS_RUNQ_IFLG_SUSPENDED (((long) 1) << 0)
+#define ERTS_RUNQ_IFLG_NONEMPTY (((long) 1) << 1)
+
+
+#ifdef DEBUG
+# ifdef ARCH_64
+# define ERTS_DBG_SET_INVALID_RUNQP(RQP, N) \
+ (*((char **) &(RQP)) = (char *) (0xdeadbeefdead0003 | ((N) << 4)))
+# define ERTS_DBG_VERIFY_VALID_RUNQP(RQP) \
+do { \
+ ASSERT((RQP) != NULL); \
+ ASSERT(((((Uint) (RQP)) & ((Uint) 0x3))) == ((Uint) 0)); \
+ ASSERT((((Uint) (RQP)) & ~((Uint) 0xffff)) != ((Uint) 0xdeadbeefdead0000));\
+} while (0)
+# else
+# define ERTS_DBG_SET_INVALID_RUNQP(RQP, N) \
+ (*((char **) &(RQP)) = (char *) (0xdead0003 | ((N) << 4)))
+# define ERTS_DBG_VERIFY_VALID_RUNQP(RQP) \
+do { \
+ ASSERT((RQP) != NULL); \
+ ASSERT(((((Uint) (RQP)) & ((Uint) 1))) == ((Uint) 0)); \
+ ASSERT((((Uint) (RQP)) & ~((Uint) 0xffff)) != ((Uint) 0xdead0000)); \
+} while (0)
+# endif
+#else
+# define ERTS_DBG_SET_INVALID_RUNQP(RQP, N)
+# define ERTS_DBG_VERIFY_VALID_RUNQP(RQP)
+#endif
+
+typedef enum {
+ ERTS_SCHDLR_SSPND_DONE_MSCHED_BLOCKED,
+ ERTS_SCHDLR_SSPND_YIELD_DONE_MSCHED_BLOCKED,
+ ERTS_SCHDLR_SSPND_DONE,
+ ERTS_SCHDLR_SSPND_YIELD_RESTART,
+ ERTS_SCHDLR_SSPND_YIELD_DONE,
+ ERTS_SCHDLR_SSPND_EINVAL
+} ErtsSchedSuspendResult;
+
+typedef enum {
+ ERTS_MIGRATE_SUCCESS,
+ ERTS_MIGRATE_FAILED_NOT_IN_RUNQ,
+ ERTS_MIGRATE_FAILED_RUNQ_CHANGED,
+ ERTS_MIGRATE_FAILED_RUNQ_SUSPENDED
+} ErtsMigrateResult;
+
+/* times to reschedule low prio process before running */
+#define RESCHEDULE_LOW 8
+
+#define ERTS_MAX_MISC_OPS 5
+
+#define ERTS_FULL_REDS_HISTORY_AVG_SHFT 3
+#define ERTS_FULL_REDS_HISTORY_SIZE \
+ ((1 << ERTS_FULL_REDS_HISTORY_AVG_SHFT) - 1)
+
+typedef struct ErtsProcList_ ErtsProcList;
+struct ErtsProcList_ {
+ Eterm pid;
+ SysTimeval started;
+ ErtsProcList* next;
+};
+
+typedef struct ErtsMiscOpList_ ErtsMiscOpList;
+struct ErtsMiscOpList_ {
+ ErtsMiscOpList *next;
+ void (*func)(void *arg);
+ void *arg;
+};
+
+typedef struct {
+ Process* first;
+ Process* last;
+} ErtsRunPrioQueue;
+
+typedef struct ErtsSchedulerData_ ErtsSchedulerData;
+
+typedef struct ErtsRunQueue_ ErtsRunQueue;
+
+typedef struct {
+ int len;
+ int max_len;
+ int reds;
+ struct {
+ struct {
+ int this;
+ int other;
+ } limit;
+ ErtsRunQueue *runq;
+ } migrate;
+} ErtsRunQueueInfo;
+
+struct ErtsRunQueue_ {
+ int ix;
+ erts_smp_atomic_t info_flags;
+
+ erts_smp_mtx_t mtx;
+ erts_smp_cnd_t cnd;
+
+ erts_smp_atomic_t spin_waiter;
+ erts_smp_atomic_t spin_wake;
+
+ ErtsSchedulerData *scheduler;
+ int waiting; /* < 0 in sys schedule; > 0 on cnd variable */
+ int woken;
+ Uint32 flags;
+ int check_balance_reds;
+ int full_reds_history_sum;
+ int full_reds_history[ERTS_FULL_REDS_HISTORY_SIZE];
+ int out_of_work_count;
+ int max_len;
+ int len;
+ int wakeup_other;
+ int wakeup_other_reds;
+
+ struct {
+ int len;
+ ErtsProcList *pending_exiters;
+ Uint context_switches;
+ Uint reductions;
+
+ ErtsRunQueueInfo prio_info[ERTS_NO_PROC_PRIO_LEVELS];
+
+ /* We use the same prio queue for low and
+ normal prio processes */
+ ErtsRunPrioQueue prio[ERTS_NO_PROC_PRIO_LEVELS-1];
+ } procs;
+
+ struct {
+ ErtsMiscOpList *start;
+ ErtsMiscOpList *end;
+ ErtsRunQueue *evac_runq;
+ } misc;
+
+ struct {
+ ErtsRunQueueInfo info;
+ struct port *start;
+ struct port *end;
+ } ports;
+};
+
+typedef union {
+ ErtsRunQueue runq;
+ char align[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(ErtsRunQueue))];
+} ErtsAlignedRunQueue;
+
+extern ErtsAlignedRunQueue *erts_aligned_run_queues;
+extern ErtsRunQueue *erts_common_run_queue;
+
+#define ERTS_PROC_REDUCTIONS_EXECUTED(RQ, PRIO, REDS, AREDS) \
+do { \
+ (RQ)->procs.reductions += (AREDS); \
+    (RQ)->procs.prio_info[(PRIO)].reds += (REDS);			\
+ (RQ)->check_balance_reds -= (REDS); \
+ (RQ)->wakeup_other_reds += (AREDS); \
+} while (0)
+
+#define ERTS_PORT_REDUCTIONS_EXECUTED(RQ, REDS) \
+do { \
+ (RQ)->ports.info.reds += (REDS); \
+ (RQ)->check_balance_reds -= (REDS); \
+ (RQ)->wakeup_other_reds += (REDS); \
+} while (0)
+
+struct ErtsSchedulerData_ {
+
+#ifdef ERTS_SMP
+ ethr_tid tid; /* Thread id */
+ Eterm save_reg[ERTS_X_REGS_ALLOCATED]; /* X registers */
+ FloatDef freg[MAX_REG]; /* Floating point registers. */
+ struct erl_bits_state erl_bits_state; /* erl_bits.c state */
+ void *match_pseudo_process; /* erl_db_util.c:db_prog_match() */
+ Process *free_process;
+#endif
+
+ Process *current_process;
+ Uint no; /* Scheduler number */
+ struct port *current_port;
+ ErtsRunQueue *run_queue;
+ int virtual_reds;
+ int cpu_id; /* >= 0 when bound */
+
+ ErtsAtomCacheMap atom_cache_map;
+
+#ifdef ERTS_SMP
+ /* NOTE: These fields are modified under held mutexes by other threads */
+#ifdef ERTS_SMP_SCHEDULERS_NEED_TO_CHECK_CHILDREN
+ int check_children; /* run queue mutex */
+ int blocked_check_children; /* schdlr_sspnd mutex */
+#endif
+ erts_smp_atomic_t suspended; /* Only used when common run queue */
+ erts_smp_atomic_t chk_cpu_bind; /* Only used when common run queue */
+#endif
+};
+
+#ifndef ERTS_SMP
+extern ErtsSchedulerData *erts_scheduler_data;
+#endif
+
+/*
+ * Process Specific Data.
+ *
+ * NOTE: Only use PSD for very rarely used data.
+ */
+
+#define ERTS_PSD_ERROR_HANDLER 0
+#define ERTS_PSD_SAVED_CALLS_BUF 1
+#define ERTS_PSD_SCHED_ID 2
+#define ERTS_PSD_DIST_ENTRY 3
+
+#define ERTS_PSD_SIZE 4
+
+typedef struct {
+ void *data[ERTS_PSD_SIZE];
+} ErtsPSD;
+
+#ifdef ERTS_ENABLE_LOCK_CHECK
+#define ERTS_LC_PSD_ANY_LOCK (~ERTS_PROC_LOCKS_ALL)
+
+#define ERTS_PSD_ERROR_HANDLER_BUF_GET_LOCKS ERTS_PROC_LOCK_MAIN
+#define ERTS_PSD_ERROR_HANDLER_BUF_SET_LOCKS ERTS_PROC_LOCK_MAIN
+
+#define ERTS_PSD_SAVED_CALLS_BUF_GET_LOCKS ERTS_PROC_LOCK_MAIN
+#define ERTS_PSD_SAVED_CALLS_BUF_SET_LOCKS ERTS_PROC_LOCK_MAIN
+
+#define ERTS_PSD_SCHED_ID_GET_LOCKS ERTS_PROC_LOCK_STATUS
+#define ERTS_PSD_SCHED_ID_SET_LOCKS ERTS_PROC_LOCK_STATUS
+
+#define ERTS_PSD_DIST_ENTRY_GET_LOCKS ERTS_PROC_LOCK_MAIN
+#define ERTS_PSD_DIST_ENTRY_SET_LOCKS ERTS_PROC_LOCK_MAIN
+
+typedef struct {
+ ErtsProcLocks get_locks;
+ ErtsProcLocks set_locks;
+} ErtsLcPSDLocks;
+
+extern ErtsLcPSDLocks erts_psd_required_locks[ERTS_PSD_SIZE];
+
+#endif
+
+#define ERTS_SCHED_STAT_MODIFY_DISABLE 1
+#define ERTS_SCHED_STAT_MODIFY_ENABLE 2
+#define ERTS_SCHED_STAT_MODIFY_CLEAR 3
+
+typedef struct {
+ erts_smp_spinlock_t lock;
+ int enabled;
+ struct {
+ Eterm name;
+ Uint total_executed;
+ Uint executed;
+ Uint total_migrated;
+ Uint migrated;
+ } prio[ERTS_NO_PRIO_LEVELS];
+} erts_sched_stat_t;
+
+extern erts_sched_stat_t erts_sched_stat;
+
+typedef struct {
+ Eterm reason;
+ ErlHeapFragment *bp;
+} ErtsPendExit;
+
+#ifdef ERTS_SMP
+
+typedef struct ErtsPendingSuspend_ ErtsPendingSuspend;
+struct ErtsPendingSuspend_ {
+ ErtsPendingSuspend *next;
+ ErtsPendingSuspend *end;
+ Eterm pid;
+ void (*handle_func)(Process *suspendee,
+ ErtsProcLocks suspendee_locks,
+ int suspendee_alive,
+ Eterm pid);
+};
+
+#endif
+
+/* Defines to ease the change of memory architecture */
+# define HEAP_START(p) (p)->heap
+# define HEAP_TOP(p) (p)->htop
+# define HEAP_LIMIT(p) (p)->stop
+# define HEAP_END(p) (p)->hend
+# define HEAP_SIZE(p) (p)->heap_sz
+# define STACK_START(p) (p)->hend
+# define STACK_TOP(p) (p)->stop
+# define STACK_END(p) (p)->htop
+# define HIGH_WATER(p) (p)->high_water
+# define OLD_HEND(p) (p)->old_hend
+# define OLD_HTOP(p) (p)->old_htop
+# define OLD_HEAP(p) (p)->old_heap
+# define GEN_GCS(p) (p)->gen_gcs
+# define MAX_GEN_GCS(p) (p)->max_gen_gcs
+# define FLAGS(p) (p)->flags
+# define MBUF(p) (p)->mbuf
+# define HALLOC_MBUF(p) (p)->halloc_mbuf
+# define MBUF_SIZE(p) (p)->mbuf_sz
+# define MSO(p) (p)->off_heap
+# define MIN_HEAP_SIZE(p) (p)->min_heap_size
+
+# define BIN_VHEAP_SZ(p) (p)->bin_vheap_sz
+# define BIN_OLD_VHEAP_SZ(p) (p)->bin_old_vheap_sz
+# define BIN_OLD_VHEAP(p) (p)->bin_old_vheap
+
+struct process {
+    /* All fields in the PCB that differ between heap architectures
+     * have been moved to the end of this struct, to make sure that as
+     * few offsets as possible differ. An offset that differs between
+     * memory architectures means that native code has to use functions
+     * instead of constants to access the field.
+     */
+
+ Eterm* htop; /* Heap top */
+ Eterm* stop; /* Stack top */
+ Eterm* heap; /* Heap start */
+ Eterm* hend; /* Heap end */
+ Uint heap_sz; /* Size of heap in words */
+ Uint min_heap_size; /* Minimum size of heap (in words). */
+
+#if !defined(NO_FPE_SIGNALS)
+ volatile unsigned long fp_exception;
+#endif
+
+#ifdef HIPE
+ /* HiPE-specific process fields. Put it early in struct process,
+ to enable smaller & faster addressing modes on the x86. */
+ struct hipe_process_state hipe;
+#endif
+
+ /*
+ * Saved x registers.
+ */
+ Uint arity; /* Number of live argument registers (only valid
+ * when process is *not* running).
+ */
+ Eterm* arg_reg; /* Pointer to argument registers. */
+ unsigned max_arg_reg; /* Maximum number of argument registers available. */
+ Eterm def_arg_reg[6]; /* Default array for argument registers. */
+
+ Eterm* cp; /* Continuation pointer (for threaded code). */
+ Eterm* i; /* Program counter for threaded code. */
+ Sint catches; /* Number of catches on stack */
+ Sint fcalls; /*
+ * Number of reductions left to execute.
+ * Only valid for the current process.
+ */
+ Uint32 status; /* process STATE */
+ Uint32 gcstatus; /* process gc STATE */
+ Uint32 rstatus; /* process resume STATE */
+ Uint32 rcount; /* suspend count */
+ Eterm id; /* The pid of this process */
+ int prio; /* Priority of process */
+ int skipped; /* Times a low prio process has been rescheduled */
+ Uint reds; /* No of reductions for this process */
+ Eterm tracer_proc; /* If proc is traced, this is the tracer
+ (can NOT be boxed) */
+ Uint trace_flags; /* Trace flags (used to be in flags) */
+ Eterm group_leader; /* Pid in charge
+ (can be boxed) */
+ Uint flags; /* Trap exit, etc (no trace flags anymore) */
+ Eterm fvalue; /* Exit & Throw value (failure reason) */
+ Uint freason; /* Reason for detected failure */
+ Eterm ftrace; /* Latest exception stack trace dump */
+
+ Process *next; /* Pointer to next process in run queue */
+ Process *prev; /* Pointer to prev process in run queue */
+
+ struct reg_proc *reg; /* NULL iff not registered */
+ ErtsLink *nlinks;
+ ErtsMonitor *monitors; /* The process monitors, both ends */
+
+ struct ErtsNodesMonitor_ *nodes_monitors;
+
+ ErtsSuspendMonitor *suspend_monitors; /* Processes suspended by
+ this process via
+ erlang:suspend_process/1 */
+
+ ErlMessageQueue msg; /* Message queue */
+
+ ErtsBifTimer *bif_timers; /* Bif timers aiming at this process */
+
+ ProcDict *dictionary; /* Process dictionary, may be NULL */
+
+ Uint seq_trace_clock;
+ Uint seq_trace_lastcnt;
+ Eterm seq_trace_token; /* Sequential trace token (tuple size 5 see below) */
+
+ Eterm initial[3]; /* Initial module(0), function(1), arity(2) */
+ Eterm* current; /* Current Erlang function:
+ * module(0), function(1), arity(2)
+ * (module and functions are tagged atoms;
+ * arity an untagged integer).
+ */
+
+ /*
+ * Information mainly for post-mortem use (erl crash dump).
+ */
+ Eterm parent; /* Pid of process that created this process. */
+ SysTimeval started; /* Time when started. */
+
+
+    /* This is where all fields that differ between memory
+     * architectures have been placed.
+     */
+
+ Eterm *high_water;
+ Eterm *old_hend; /* Heap pointers for generational GC. */
+ Eterm *old_htop;
+ Eterm *old_heap;
+ Uint16 gen_gcs; /* Number of (minor) generational GCs. */
+ Uint16 max_gen_gcs; /* Max minor gen GCs before fullsweep. */
+ ErlOffHeap off_heap; /* Off-heap data updated by copy_struct(). */
+ ErlHeapFragment* mbuf; /* Pointer to message buffer list */
+ Uint mbuf_sz; /* Size of all message buffers */
+ ErtsPSD *psd; /* Rarely used process specific data */
+
+ Uint bin_vheap_sz; /* Virtual heap block size for binaries */
+ Uint bin_old_vheap_sz; /* Virtual old heap block size for binaries */
+ Uint bin_old_vheap; /* Virtual old heap size for binaries */
+
+ union {
+#ifdef ERTS_SMP
+ ErtsSmpPTimer *ptimer;
+#else
+ ErlTimer tm; /* Timer entry */
+#endif
+ void *exit_data; /* Misc data referred during termination */
+ } u;
+
+ ErtsRunQueue *bound_runq;
+
+#ifdef ERTS_SMP
+ erts_proc_lock_t lock;
+ ErtsSchedulerData *scheduler_data;
+ int is_exiting;
+ Uint32 runq_flags;
+ Uint32 status_flags;
+ ErlMessageInQueue msg_inq;
+ Eterm suspendee;
+ ErtsPendingSuspend *pending_suspenders;
+ ErtsPendExit pending_exit;
+ ErtsRunQueue *run_queue;
+#ifdef HIPE
+ struct hipe_process_state_smp hipe_smp;
+#endif
+#endif
+
+#ifdef HYBRID
+ Eterm *rrma; /* Remembered roots to Message Area */
+ Eterm **rrsrc; /* The source of the root */
+ Uint nrr; /* Number of remembered roots */
+ Uint rrsz; /* Size of root array */
+#endif
+
+#ifdef HYBRID
+ Uint active; /* Active since last major collection? */
+ Uint active_index; /* Index in the active process array */
+#endif
+
+#ifdef INCREMENTAL
+ Process *active_next; /* Active processes to scan for roots */
+ Process *active_prev; /* in collection of the message area */
+ Eterm *scan_top;
+#endif
+
+#ifdef CHECK_FOR_HOLES
+ Eterm* last_htop; /* No need to scan the heap below this point. */
+ ErlHeapFragment* last_mbuf; /* No need to scan beyond this mbuf. */
+#endif
+
+#ifdef DEBUG
+ Eterm* last_old_htop; /*
+ * No need to scan the old heap below this point
+ * when looking for invalid pointers into the new heap or
+ * heap fragments.
+ */
+#endif
+};
+
+#ifdef CHECK_FOR_HOLES
+# define INIT_HOLE_CHECK(p) \
+do { \
+ (p)->last_htop = 0; \
+ (p)->last_mbuf = 0; \
+} while (0)
+
+# define ERTS_HOLE_CHECK(p) erts_check_for_holes((p))
+void erts_check_for_holes(Process* p);
+#else
+# define INIT_HOLE_CHECK(p)
+# define ERTS_HOLE_CHECK(p)
+#endif
+
+/*
+ * The MBUF_GC_FACTOR decides how easily a process is subject to GC
+ * due to message buffers allocated outside the heap.
+ * The larger the factor, the easier the process gets GCed.
+ * On a small memory system with lots of processes, this makes a significant
+ * difference, especially since the GCs help fragmentation quite a bit too.
+ */
+#if defined(SMALL_MEMORY)
+#define MBUF_GC_FACTOR 4
+#else
+#define MBUF_GC_FACTOR 1
+#endif
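+
+/*
+ * Illustration (an assumption about how the factor is applied; the
+ * actual trigger lives elsewhere in the emulator): the factor scales
+ * the off-heap message buffer size when deciding whether to force a
+ * garbage collection, along the lines of
+ *
+ *     if (MBUF_SIZE(p) * MBUF_GC_FACTOR >= HEAP_SIZE(p))
+ *         ... force a GC ...
+ */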
+
+#define SEQ_TRACE_TOKEN(p) ((p)->seq_trace_token)
+
+/* The sequential tracing token is a tuple of size 5:
+ *
+ *    {Flags, Label, Serial, Sender, LastCnt}
+ */
+
+#define SEQ_TRACE_TOKEN_ARITY(p) (arityval(*(tuple_val(SEQ_TRACE_TOKEN(p)))))
+#define SEQ_TRACE_TOKEN_FLAGS(p) (*(tuple_val(SEQ_TRACE_TOKEN(p)) + 1))
+#define SEQ_TRACE_TOKEN_LABEL(p) (*(tuple_val(SEQ_TRACE_TOKEN(p)) + 2))
+#define SEQ_TRACE_TOKEN_SERIAL(p) (*(tuple_val(SEQ_TRACE_TOKEN(p)) + 3))
+#define SEQ_TRACE_TOKEN_SENDER(p) (*(tuple_val(SEQ_TRACE_TOKEN(p)) + 4))
+#define SEQ_TRACE_TOKEN_LASTCNT(p) (*(tuple_val(SEQ_TRACE_TOKEN(p)) + 5))
+
+/* Same accessors, for when the token term is held directly (not via a process) */
+#define SEQ_TRACE_T_ARITY(token) (arityval(*(tuple_val(token))))
+#define SEQ_TRACE_T_FLAGS(token) (*(tuple_val(token) + 1))
+#define SEQ_TRACE_T_LABEL(token) (*(tuple_val(token) + 2))
+#define SEQ_TRACE_T_SERIAL(token) (*(tuple_val(token) + 3))
+#define SEQ_TRACE_T_SENDER(token) (*(tuple_val(token) + 4))
+#define SEQ_TRACE_T_LASTCNT(token) (*(tuple_val(token) + 5))
+
+/*
+ * Possible flags for the flags field in ErlSpawnOpts below.
+ */
+
+#define SPO_LINK 1
+#define SPO_USE_ARGS 2
+#define SPO_MONITOR 4
+
+/*
+ * The following struct contains options for a process to be spawned.
+ */
+typedef struct {
+ Uint flags;
+ int error_code; /* Error code returned from create_process(). */
+ Eterm mref; /* Monitor ref returned (if SPO_MONITOR was given). */
+
+ /*
+ * The following items are only initialized if the SPO_USE_ARGS flag is set.
+ */
+    Uint min_heap_size;		/* Minimum heap size (must be a value returned
+ * from next_heap_size()).
+ */
+ int priority; /* Priority for process. */
+ Uint16 max_gen_gcs; /* Maximum number of gen GCs before fullsweep. */
+ int scheduler;
+} ErlSpawnOpts;
+
+/*
+ * The KILL_CATCHES(p) macro kills pending catches for process p.
+ */
+
+#define KILL_CATCHES(p) (p)->catches = -1
+
+void erts_arith_shrink(Process* p, Eterm* hp);
+Eterm* erts_heap_alloc(Process* p, Uint need);
+#ifdef CHECK_FOR_HOLES
+Eterm* erts_set_hole_marker(Eterm* ptr, Uint sz);
+#endif
+
+extern Process** process_tab;
+#ifdef HYBRID
+extern Uint erts_num_active_procs;
+extern Process** erts_active_procs;
+#endif
+extern Uint erts_max_processes;
+extern Uint erts_process_tab_index_mask;
+extern Uint erts_default_process_flags;
+extern erts_smp_rwmtx_t erts_cpu_bind_rwmtx;
+/* If any of the erts_system_monitor_* variables are set (enabled),
+** erts_system_monitor must be != NIL, to allow testing on just
+** the erts_system_monitor_* variables.
+*/
+extern Eterm erts_system_monitor;
+extern Uint erts_system_monitor_long_gc;
+extern Uint erts_system_monitor_large_heap;
+struct erts_system_monitor_flags_t {
+ unsigned int busy_port : 1;
+ unsigned int busy_dist_port : 1;
+};
+extern struct erts_system_monitor_flags_t erts_system_monitor_flags;
+
+/* system_profile, same rules as for system_monitor.
+   erts_system_profile must be != NIL when any
+   erts_system_profile_* flag is set. */
+
+extern Eterm erts_system_profile;
+struct erts_system_profile_flags_t {
+ unsigned int scheduler : 1;
+ unsigned int runnable_procs : 1;
+ unsigned int runnable_ports : 1;
+ unsigned int exclusive : 1;
+};
+extern struct erts_system_profile_flags_t erts_system_profile_flags;
+
+#define INVALID_PID(p, pid) ((p) == NULL \
+ || (p)->id != (pid) \
+ || (p)->status == P_EXITING)
+
+#define IS_TRACED(p) ( (p)->tracer_proc != NIL )
+#define ARE_TRACE_FLAGS_ON(p,tf) ( ((p)->trace_flags & (tf|F_SENSITIVE)) == (tf) )
+#define IS_TRACED_FL(p,tf) ( IS_TRACED(p) && ARE_TRACE_FLAGS_ON(p,tf) )
+
+/* process flags */
+#define F_TRAPEXIT (1 << 0)
+#define F_INSLPQUEUE (1 << 1) /* Set if in timer queue */
+#define F_TIMO (1 << 2) /* Set if timeout */
+#define F_HEAP_GROW (1 << 3)
+#define F_NEED_FULLSWEEP (1 << 4) /* If process has old binaries & funs. */
+#define F_USING_DB (1 << 5) /* If have created tables */
+#define F_DISTRIBUTION (1 << 6) /* Process used in distribution */
+#define F_USING_DDLL (1 << 7) /* Process has used the DDLL interface */
+#define F_HAVE_BLCKD_MSCHED (1 << 8) /* Process has blocked multi-scheduling */
+#define F_P2PNR_RESCHED (1 << 9) /* Process has been rescheduled via erts_pid2proc_not_running() */
+#define F_FORCE_GC (1 << 10) /* Force gc at process in-scheduling */
+
+/* process trace_flags */
+#define F_SENSITIVE (1 << 0)
+#define F_TRACE_SEND (1 << 1)
+#define F_TRACE_RECEIVE (1 << 2)
+#define F_TRACE_SOS (1 << 3) /* Set on spawn */
+#define F_TRACE_SOS1 (1 << 4) /* Set on first spawn */
+#define F_TRACE_SOL (1 << 5) /* Set on link */
+#define F_TRACE_SOL1 (1 << 6) /* Set on first link */
+#define F_TRACE_CALLS (1 << 7)
+#define F_TIMESTAMP (1 << 8)
+#define F_TRACE_PROCS (1 << 9)
+#define F_TRACE_FIRST_CHILD (1 << 10)
+#define F_TRACE_SCHED (1 << 11)
+#define F_TRACE_GC (1 << 12)
+#define F_TRACE_ARITY_ONLY (1 << 13)
+#define F_TRACE_RETURN_TO (1 << 14) /* Return_to trace when breakpoint tracing */
+#define F_TRACE_SILENT (1 << 15) /* No call trace msg suppress */
+#define F_TRACER (1 << 16) /* May be (has been) tracer */
+#define F_EXCEPTION_TRACE (1 << 17) /* May have exception trace on stack */
+
+/* port trace flags, currently the same as process trace flags */
+#define F_TRACE_SCHED_PORTS (1 << 18) /* Trace of port scheduling */
+#define F_TRACE_SCHED_PROCS (1 << 19) /* With virtual scheduling */
+#define F_TRACE_PORTS (1 << 20) /* Ports equivalent to F_TRACE_PROCS */
+#define F_TRACE_SCHED_NO (1 << 21) /* Trace with scheduler id */
+#define F_TRACE_SCHED_EXIT (1 << 22)
+
+#define F_NUM_FLAGS 23
+#ifdef DEBUG
+# define F_INITIAL_TRACE_FLAGS (5 << F_NUM_FLAGS)
+#else
+# define F_INITIAL_TRACE_FLAGS 0
+#endif
+
+#define TRACEE_FLAGS ( F_TRACE_PROCS | F_TRACE_CALLS \
+ | F_TRACE_SOS | F_TRACE_SOS1| F_TRACE_RECEIVE \
+ | F_TRACE_SOL | F_TRACE_SOL1 | F_TRACE_SEND \
+ | F_TRACE_SCHED | F_TIMESTAMP | F_TRACE_GC \
+ | F_TRACE_ARITY_ONLY | F_TRACE_RETURN_TO \
+ | F_TRACE_SILENT | F_TRACE_SCHED_PROCS | F_TRACE_PORTS \
+ | F_TRACE_SCHED_PORTS | F_TRACE_SCHED_NO \
+ | F_TRACE_SCHED_EXIT)
+
+#define ERTS_TRACEE_MODIFIER_FLAGS \
+ (F_TRACE_SILENT | F_TIMESTAMP | F_TRACE_SCHED_NO)
+#define ERTS_PORT_TRACEE_FLAGS \
+ (ERTS_TRACEE_MODIFIER_FLAGS | F_TRACE_PORTS | F_TRACE_SCHED_PORTS)
+#define ERTS_PROC_TRACEE_FLAGS \
+ ((TRACEE_FLAGS & ~ERTS_PORT_TRACEE_FLAGS) | ERTS_TRACEE_MODIFIER_FLAGS)
+
+/* Sequential trace flags */
+#define SEQ_TRACE_SEND (1 << 0)
+#define SEQ_TRACE_RECEIVE (1 << 1)
+#define SEQ_TRACE_PRINT (1 << 2)
+#define SEQ_TRACE_TIMESTAMP (1 << 3)
+
+#ifdef ERTS_SMP
+/* Status flags ... */
+#define ERTS_PROC_SFLG_PENDADD2SCHEDQ (((Uint32) 1) << 0) /* Pending
+ add to
+ schedule q */
+#define ERTS_PROC_SFLG_INRUNQ (((Uint32) 1) << 1) /* Process is
+ in run q */
+#define ERTS_PROC_SFLG_TRAPEXIT (((Uint32) 1) << 2) /* Process is
+ trapping
+ exit */
+#define ERTS_PROC_SFLG_RUNNING (((Uint32) 1) << 3) /* Process is
+ running */
+/* Scheduler flags in process struct... */
+#define ERTS_PROC_RUNQ_FLG_RUNNING (((Uint32) 1) << 0) /* Process is
+ running */
+
+#endif
+
+
+#ifdef ERTS_SMP
+#define ERTS_PROC_IS_TRAPPING_EXITS(P) \
+ (ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks((P)) \
+ & ERTS_PROC_LOCK_STATUS), \
+ (P)->status_flags & ERTS_PROC_SFLG_TRAPEXIT)
+
+#define ERTS_PROC_SET_TRAP_EXIT(P) \
+ (ERTS_SMP_LC_ASSERT(((ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_STATUS) \
+ & erts_proc_lc_my_proc_locks((P))) \
+ == (ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_STATUS)), \
+ (P)->status_flags |= ERTS_PROC_SFLG_TRAPEXIT, \
+ (P)->flags |= F_TRAPEXIT, \
+ 1)
+
+#define ERTS_PROC_UNSET_TRAP_EXIT(P) \
+ (ERTS_SMP_LC_ASSERT(((ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_STATUS) \
+ & erts_proc_lc_my_proc_locks((P))) \
+ == (ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_STATUS)), \
+ (P)->status_flags &= ~ERTS_PROC_SFLG_TRAPEXIT, \
+ (P)->flags &= ~F_TRAPEXIT, \
+ 0)
+#else
+#define ERTS_PROC_IS_TRAPPING_EXITS(P) ((P)->flags & F_TRAPEXIT)
+#define ERTS_PROC_SET_TRAP_EXIT(P) ((P)->flags |= F_TRAPEXIT, 1)
+#define ERTS_PROC_UNSET_TRAP_EXIT(P) ((P)->flags &= ~F_TRAPEXIT, 0)
+#endif
+
+/* Option flags to erts_send_exit_signal() */
+#define ERTS_XSIG_FLG_IGN_KILL (((Uint32) 1) << 0)
+#define ERTS_XSIG_FLG_NO_IGN_NORMAL (((Uint32) 1) << 1)
+
+
+/* Process status values */
+#define P_FREE 0
+#define P_RUNABLE 1
+#define P_WAITING 2
+#define P_RUNNING 3
+#define P_EXITING 4
+#define P_GARBING 5
+#define P_SUSPENDED 6
+
+#define CANCEL_TIMER(p) \
+ do { \
+ if ((p)->flags & (F_INSLPQUEUE)) \
+ cancel_timer(p); \
+ else \
+ (p)->flags &= ~F_TIMO; \
+ } while (0)
+
+
+#define ERTS_INIT_SCHED_BIND_TYPE_SUCCESS 0
+#define ERTS_INIT_SCHED_BIND_TYPE_NOT_SUPPORTED 1
+#define ERTS_INIT_SCHED_BIND_TYPE_ERROR_NO_CPU_TOPOLOGY 2
+#define ERTS_INIT_SCHED_BIND_TYPE_ERROR_NO_BAD_TYPE 3
+
+int erts_init_scheduler_bind_type(char *how);
+
+#define ERTS_INIT_CPU_TOPOLOGY_OK 0
+#define ERTS_INIT_CPU_TOPOLOGY_INVALID_ID 1
+#define ERTS_INIT_CPU_TOPOLOGY_INVALID_ID_RANGE 2
+#define ERTS_INIT_CPU_TOPOLOGY_INVALID_HIERARCHY 3
+#define ERTS_INIT_CPU_TOPOLOGY_INVALID_ID_TYPE 4
+#define ERTS_INIT_CPU_TOPOLOGY_INVALID_NODES 5
+#define ERTS_INIT_CPU_TOPOLOGY_MISSING_LID 6
+#define ERTS_INIT_CPU_TOPOLOGY_NOT_UNIQUE_LIDS 7
+#define ERTS_INIT_CPU_TOPOLOGY_NOT_UNIQUE_ENTITIES 8
+#define ERTS_INIT_CPU_TOPOLOGY_MISSING 9
+
+int erts_init_cpu_topology(char *topology_str);
+
+void erts_pre_init_process(void);
+void erts_late_init_process(void);
+void erts_early_init_scheduling(void);
+void erts_init_scheduling(int, int, int);
+
+ErtsProcList *erts_proclist_create(Process *);
+void erts_proclist_destroy(ErtsProcList *);
+int erts_proclist_same(ErtsProcList *, Process *);
+
+#ifdef DEBUG
+void erts_dbg_multi_scheduling_return_trap(Process *, Eterm);
+#endif
+#ifdef ERTS_SMP
+ErtsSchedSuspendResult
+erts_schedulers_state(Uint *, Uint *, Uint *, int);
+ErtsSchedSuspendResult
+erts_set_schedulers_online(Process *p,
+ ErtsProcLocks plocks,
+ Sint new_no,
+ Sint *old_no);
+ErtsSchedSuspendResult
+erts_block_multi_scheduling(Process *, ErtsProcLocks, int, int);
+int erts_is_multi_scheduling_blocked(void);
+Eterm erts_multi_scheduling_blockers(Process *);
+void erts_start_schedulers(void);
+void erts_smp_notify_check_children_needed(void);
+#endif
+Uint erts_active_schedulers(void);
+void erts_init_process(void);
+Eterm erts_process_status(Process *, ErtsProcLocks, Process *, Eterm);
+Uint erts_run_queues_len(Uint *);
+void erts_add_to_runq(Process *);
+Eterm erts_bound_schedulers_term(Process *c_p);
+Eterm erts_get_cpu_topology_term(Process *c_p, Eterm which);
+Eterm erts_get_schedulers_binds(Process *c_p);
+Eterm erts_set_cpu_topology(Process *c_p, Eterm term);
+Eterm erts_bind_schedulers(Process *c_p, Eterm how);
+ErtsRunQueue *erts_schedid2runq(Uint);
+#ifdef ERTS_SMP
+ErtsMigrateResult erts_proc_migrate(Process *,
+ ErtsProcLocks *,
+ ErtsRunQueue *,
+ int *,
+ ErtsRunQueue *,
+ int *);
+#endif
+Process *schedule(Process*, int);
+void erts_schedule_misc_op(void (*)(void *), void *);
+Eterm erl_create_process(Process*, Eterm, Eterm, Eterm, ErlSpawnOpts*);
+void erts_do_exit_process(Process*, Eterm);
+void erts_continue_exit_process(Process *);
+void set_timer(Process*, Uint);
+void cancel_timer(Process*);
+/* Begin System profile */
+Uint erts_runnable_process_count(void);
+Uint erts_process_count(void);
+/* End System profile */
+void erts_init_empty_process(Process *p);
+void erts_cleanup_empty_process(Process* p);
+#ifdef DEBUG
+void erts_debug_verify_clean_empty_process(Process* p);
+#endif
+void erts_stack_dump(int to, void *to_arg, Process *);
+void erts_program_counter_info(int to, void *to_arg, Process *);
+
+Eterm erts_get_process_priority(Process *p);
+Eterm erts_set_process_priority(Process *p, Eterm prio);
+
+Uint erts_get_total_context_switches(void);
+void erts_get_total_reductions(Uint *, Uint *);
+void erts_get_exact_total_reductions(Process *, Uint *, Uint *);
+
+Eterm erts_fake_scheduler_bindings(Process *p, Eterm how);
+
+void erts_sched_stat_modify(int what);
+Eterm erts_sched_stat_term(Process *p, int total);
+
+void erts_free_proc(Process *);
+
+void erts_suspend(Process*, ErtsProcLocks, struct port*);
+void erts_resume(Process*, ErtsProcLocks);
+int erts_resume_processes(ErtsProcList *);
+
+int erts_send_exit_signal(Process *,
+ Eterm,
+ Process *,
+ ErtsProcLocks *,
+ Eterm,
+ Eterm,
+ Process *,
+ Uint32);
+#ifdef ERTS_SMP
+void erts_handle_pending_exit(Process *, ErtsProcLocks);
+#define ERTS_PROC_PENDING_EXIT(P) \
+ (ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks((P)) & ERTS_PROC_LOCK_STATUS),\
+ (P)->pending_exit.reason != THE_NON_VALUE)
+#else
+#define ERTS_PROC_PENDING_EXIT(P) 0
+#endif
+
+void erts_deep_process_dump(int, void *);
+
+Sint erts_test_next_pid(int, Uint);
+Eterm erts_debug_processes(Process *c_p);
+Eterm erts_debug_processes_bif_info(Process *c_p);
+Uint erts_debug_nbalance(void);
+
+#ifdef ERTS_SMP
+# define ERTS_GET_SCHEDULER_DATA_FROM_PROC(PROC) ((PROC)->scheduler_data)
+# define ERTS_PROC_GET_SCHDATA(PROC) ((PROC)->scheduler_data)
+#else
+# define ERTS_GET_SCHEDULER_DATA_FROM_PROC(PROC) (erts_scheduler_data)
+# define ERTS_PROC_GET_SCHDATA(PROC) (erts_scheduler_data)
+#endif
+
+#if defined(ERTS_SMP) || defined(USE_THREADS)
+ErtsSchedulerData *erts_get_scheduler_data(void);
+#else
+ERTS_GLB_INLINE ErtsSchedulerData *erts_get_scheduler_data(void);
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+
+ERTS_GLB_INLINE
+ErtsSchedulerData *erts_get_scheduler_data(void)
+{
+ return erts_scheduler_data;
+}
+#endif
+#endif
+
+#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK)
+
+#define ERTS_PROCESS_LOCK_ONLY_LOCK_CHECK_PROTO__
+#include "erl_process_lock.h"
+#undef ERTS_PROCESS_LOCK_ONLY_LOCK_CHECK_PROTO__
+
+int erts_smp_lc_runq_is_locked(ErtsRunQueue *);
+#define ERTS_SMP_LC_CHK_RUNQ_LOCK(RQ, L) \
+do { \
+ if ((L)) \
+ ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked((RQ))); \
+ else \
+ ERTS_SMP_LC_ASSERT(!erts_smp_lc_runq_is_locked((RQ))); \
+} while (0)
+#else
+#define ERTS_SMP_LC_CHK_RUNQ_LOCK(RQ, L)
+#endif
+
+void *erts_psd_set_init(Process *p, ErtsProcLocks plocks, int ix, void *data);
+
+ERTS_GLB_INLINE void *
+erts_psd_get(Process *p, int ix);
+ERTS_GLB_INLINE void *
+erts_psd_set(Process *p, ErtsProcLocks plocks, int ix, void *new);
+
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+
+ERTS_GLB_INLINE void *
+erts_psd_get(Process *p, int ix)
+{
+#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK)
+ ErtsProcLocks locks = erts_proc_lc_my_proc_locks(p);
+ if (ERTS_LC_PSD_ANY_LOCK == erts_psd_required_locks[ix].get_locks)
+ ERTS_SMP_LC_ASSERT(locks
+ || erts_is_system_blocked(0)
+ || (ERTS_IS_CRASH_DUMPING
+ && erts_is_system_blocked(ERTS_BS_FLG_ALLOW_GC)));
+ else {
+ locks &= erts_psd_required_locks[ix].get_locks;
+ ERTS_SMP_LC_ASSERT(erts_psd_required_locks[ix].get_locks == locks
+ || erts_is_system_blocked(0)
+ || (ERTS_IS_CRASH_DUMPING
+ && erts_is_system_blocked(ERTS_BS_FLG_ALLOW_GC)));
+ }
+#endif
+ ASSERT(0 <= ix && ix < ERTS_PSD_SIZE);
+ return p->psd ? p->psd->data[ix] : NULL;
+}
+
+
+/*
+ * NOTE: erts_psd_set() might release and reacquire locks on 'p'.
+ */
+ERTS_GLB_INLINE void *
+erts_psd_set(Process *p, ErtsProcLocks plocks, int ix, void *data)
+{
+#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK)
+ ErtsProcLocks locks = erts_proc_lc_my_proc_locks(p);
+ if (ERTS_LC_PSD_ANY_LOCK == erts_psd_required_locks[ix].set_locks)
+ ERTS_SMP_LC_ASSERT(locks
+ || erts_is_system_blocked(0)
+ || (ERTS_IS_CRASH_DUMPING
+ && erts_is_system_blocked(ERTS_BS_FLG_ALLOW_GC)));
+ else {
+ locks &= erts_psd_required_locks[ix].set_locks;
+ ERTS_SMP_LC_ASSERT(erts_psd_required_locks[ix].set_locks == locks
+ || erts_is_system_blocked(0)
+ || (ERTS_IS_CRASH_DUMPING
+ && erts_is_system_blocked(ERTS_BS_FLG_ALLOW_GC)));
+ }
+#endif
+ ASSERT(0 <= ix && ix < ERTS_PSD_SIZE);
+ if (p->psd) {
+ void *old = p->psd->data[ix];
+ p->psd->data[ix] = data;
+ return old;
+ }
+ else {
+ if (!data)
+ return NULL;
+ else
+ return erts_psd_set_init(p, plocks, ix, data);
+ }
+}
+
+#endif
+
+#define ERTS_PROC_SCHED_ID(P, L, ID) \
+ ((Uint) erts_psd_set((P), (L), ERTS_PSD_SCHED_ID, (void *) (ID)))
+
+#define ERTS_PROC_GET_DIST_ENTRY(P) \
+ ((DistEntry *) erts_psd_get((P), ERTS_PSD_DIST_ENTRY))
+#define ERTS_PROC_SET_DIST_ENTRY(P, L, D) \
+ ((DistEntry *) erts_psd_set((P), (L), ERTS_PSD_DIST_ENTRY, (void *) (D)))
+
+#define ERTS_PROC_GET_SAVED_CALLS_BUF(P) \
+ ((struct saved_calls *) erts_psd_get((P), ERTS_PSD_SAVED_CALLS_BUF))
+#define ERTS_PROC_SET_SAVED_CALLS_BUF(P, L, SCB) \
+ ((struct saved_calls *) erts_psd_set((P), (L), ERTS_PSD_SAVED_CALLS_BUF, (void *) (SCB)))
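+
+/*
+ * Usage sketch (illustration only, not part of the original source):
+ * PSD slots are read and written through the wrapper macros above.
+ * The allocator type used below is an assumption for the example.
+ */
+#if 0
+static void attach_saved_calls(Process *p, struct saved_calls *scb)
+{
+    struct saved_calls *old;
+    /* Caller holds ERTS_PROC_LOCK_MAIN. Note that erts_psd_set() might
+     * release and reacquire locks on 'p' when the PSD array has to be
+     * allocated. */
+    old = ERTS_PROC_SET_SAVED_CALLS_BUF(p, ERTS_PROC_LOCK_MAIN, scb);
+    if (old)
+	erts_free(ERTS_ALC_T_CALLS_BUF, old); /* assumed allocator type */
+}
+#endif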
+
+ERTS_GLB_INLINE Eterm erts_proc_get_error_handler(Process *p);
+ERTS_GLB_INLINE Eterm erts_proc_set_error_handler(Process *p,
+ ErtsProcLocks plocks,
+ Eterm handler);
+
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+ERTS_GLB_INLINE Eterm
+erts_proc_get_error_handler(Process *p)
+{
+ void *val = erts_psd_get(p, ERTS_PSD_ERROR_HANDLER);
+ if (!val)
+ return am_error_handler;
+ else {
+ ASSERT(is_atom(((Eterm) val)));
+ return (Eterm) val;
+ }
+}
+
+ERTS_GLB_INLINE Eterm
+erts_proc_set_error_handler(Process *p, ErtsProcLocks plocks, Eterm handler)
+{
+ void *old_val;
+ void *new_val;
+ ASSERT(is_atom(handler));
+ new_val = handler == am_error_handler ? NULL : (void *) handler;
+ old_val = erts_psd_set(p, plocks, ERTS_PSD_ERROR_HANDLER, new_val);
+ if (!old_val)
+ return am_error_handler;
+ else {
+ ASSERT(is_atom(((Eterm) old_val)));
+ return (Eterm) old_val;
+ }
+}
+
+#endif
+
+#ifdef ERTS_SMP
+ErtsRunQueue *erts_prepare_emigrate(ErtsRunQueue *c_rq,
+ ErtsRunQueueInfo *c_rqi,
+ int prio);
+
+ERTS_GLB_INLINE ErtsRunQueue *erts_check_emigration_need(ErtsRunQueue *c_rq,
+ int prio);
+#endif
+
+ERTS_GLB_INLINE int erts_is_scheduler_bound(ErtsSchedulerData *esdp);
+ERTS_GLB_INLINE Process *erts_get_current_process(void);
+ERTS_GLB_INLINE Eterm erts_get_current_pid(void);
+ERTS_GLB_INLINE Uint erts_get_scheduler_id(void);
+ERTS_GLB_INLINE ErtsRunQueue *erts_get_runq_proc(Process *p);
+ERTS_GLB_INLINE ErtsRunQueue *erts_get_runq_current(ErtsSchedulerData *esdp);
+ERTS_GLB_INLINE void erts_smp_runq_lock(ErtsRunQueue *rq);
+ERTS_GLB_INLINE int erts_smp_runq_trylock(ErtsRunQueue *rq);
+ERTS_GLB_INLINE void erts_smp_runq_unlock(ErtsRunQueue *rq);
+ERTS_GLB_INLINE void erts_smp_xrunq_lock(ErtsRunQueue *rq, ErtsRunQueue *xrq);
+ERTS_GLB_INLINE void erts_smp_xrunq_unlock(ErtsRunQueue *rq, ErtsRunQueue *xrq);
+ERTS_GLB_INLINE void erts_smp_runqs_lock(ErtsRunQueue *rq1, ErtsRunQueue *rq2);
+ERTS_GLB_INLINE void erts_smp_runqs_unlock(ErtsRunQueue *rq1, ErtsRunQueue *rq2);
+
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+
+#ifdef ERTS_SMP
+ERTS_GLB_INLINE ErtsRunQueue *
+erts_check_emigration_need(ErtsRunQueue *c_rq, int prio)
+{
+ ErtsRunQueueInfo *c_rqi;
+
+ if (!ERTS_CHK_RUNQ_FLG_EMIGRATE(c_rq->flags, prio))
+ return NULL;
+
+ if (prio == ERTS_PORT_PRIO_LEVEL)
+ c_rqi = &c_rq->ports.info;
+ else
+ c_rqi = &c_rq->procs.prio_info[prio];
+
+ if (!ERTS_CHK_RUNQ_FLG_EVACUATE(c_rq->flags, prio)
+ && !(c_rq->flags & ERTS_RUNQ_FLG_INACTIVE)
+ && c_rqi->len <= c_rqi->migrate.limit.this)
+ return NULL;
+
+ return erts_prepare_emigrate(c_rq, c_rqi, prio);
+}
+#endif
+
+ERTS_GLB_INLINE
+int erts_is_scheduler_bound(ErtsSchedulerData *esdp)
+{
+ if (!esdp)
+ esdp = erts_get_scheduler_data();
+ ASSERT(esdp);
+ return esdp->cpu_id >= 0;
+}
+
+ERTS_GLB_INLINE
+Process *erts_get_current_process(void)
+{
+ ErtsSchedulerData *esdp = erts_get_scheduler_data();
+ return esdp ? esdp->current_process : NULL;
+}
+
+ERTS_GLB_INLINE
+Eterm erts_get_current_pid(void)
+{
+ Process *proc = erts_get_current_process();
+ return proc ? proc->id : THE_NON_VALUE;
+}
+
+ERTS_GLB_INLINE
+Uint erts_get_scheduler_id(void)
+{
+#ifdef ERTS_SMP
+ ErtsSchedulerData *esdp = erts_get_scheduler_data();
+ return esdp ? esdp->no : (Uint) 0;
+#else
+ return erts_get_scheduler_data() ? (Uint) 1 : (Uint) 0;
+#endif
+}
+
+ERTS_GLB_INLINE ErtsRunQueue *
+erts_get_runq_proc(Process *p)
+{
+ ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_STATUS & erts_proc_lc_my_proc_locks(p));
+#ifdef ERTS_SMP
+ ASSERT(p->run_queue);
+ return p->run_queue;
+#else
+ ASSERT(erts_common_run_queue);
+ return erts_common_run_queue;
+#endif
+}
+
+ERTS_GLB_INLINE ErtsRunQueue *
+erts_get_runq_current(ErtsSchedulerData *esdp)
+{
+ ASSERT(!esdp || esdp == erts_get_scheduler_data());
+#ifdef ERTS_SMP
+ if (!esdp)
+ esdp = erts_get_scheduler_data();
+ return esdp->run_queue;
+#else
+ ASSERT(erts_common_run_queue);
+ return erts_common_run_queue;
+#endif
+}
+
+ERTS_GLB_INLINE void
+erts_smp_runq_lock(ErtsRunQueue *rq)
+{
+#ifdef ERTS_SMP
+ erts_smp_mtx_lock(&rq->mtx);
+#endif
+}
+
+ERTS_GLB_INLINE int
+erts_smp_runq_trylock(ErtsRunQueue *rq)
+{
+#ifdef ERTS_SMP
+ return erts_smp_mtx_trylock(&rq->mtx);
+#else
+ return 0;
+#endif
+}
+
+ERTS_GLB_INLINE void
+erts_smp_runq_unlock(ErtsRunQueue *rq)
+{
+#ifdef ERTS_SMP
+ erts_smp_mtx_unlock(&rq->mtx);
+#endif
+}
+
+ERTS_GLB_INLINE void
+erts_smp_xrunq_lock(ErtsRunQueue *rq, ErtsRunQueue *xrq)
+{
+#ifdef ERTS_SMP
+ ERTS_SMP_LC_ASSERT(erts_smp_lc_mtx_is_locked(&rq->mtx));
+ if (xrq != rq) {
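+	/* Try to take xrq's mutex without blocking. On EBUSY, honor the
+	 * lock order (lower address first) to avoid deadlock: if rq
+	 * orders before xrq we may block on xrq directly; otherwise rq's
+	 * mutex is released and both mutexes are reacquired in order. */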
+ if (erts_smp_mtx_trylock(&xrq->mtx) == EBUSY) {
+ if (rq < xrq)
+ erts_smp_mtx_lock(&xrq->mtx);
+ else {
+ erts_smp_mtx_unlock(&rq->mtx);
+ erts_smp_mtx_lock(&xrq->mtx);
+ erts_smp_mtx_lock(&rq->mtx);
+ }
+ }
+ }
+#endif
+}
+
+ERTS_GLB_INLINE void
+erts_smp_xrunq_unlock(ErtsRunQueue *rq, ErtsRunQueue *xrq)
+{
+#ifdef ERTS_SMP
+ if (xrq != rq)
+ erts_smp_mtx_unlock(&xrq->mtx);
+#endif
+}
+
+ERTS_GLB_INLINE void
+erts_smp_runqs_lock(ErtsRunQueue *rq1, ErtsRunQueue *rq2)
+{
+#ifdef ERTS_SMP
+ ASSERT(rq1 && rq2);
+ if (rq1 == rq2)
+ erts_smp_mtx_lock(&rq1->mtx);
+ else if (rq1 < rq2) {
+ erts_smp_mtx_lock(&rq1->mtx);
+ erts_smp_mtx_lock(&rq2->mtx);
+ }
+ else {
+ erts_smp_mtx_lock(&rq2->mtx);
+ erts_smp_mtx_lock(&rq1->mtx);
+ }
+#endif
+}
+
+ERTS_GLB_INLINE void
+erts_smp_runqs_unlock(ErtsRunQueue *rq1, ErtsRunQueue *rq2)
+{
+#ifdef ERTS_SMP
+ ASSERT(rq1 && rq2);
+ erts_smp_mtx_unlock(&rq1->mtx);
+ if (rq1 != rq2)
+ erts_smp_mtx_unlock(&rq2->mtx);
+#endif
+}
+
+#endif /* #if ERTS_GLB_INLINE_INCL_FUNC_DEF */
+
+ERTS_GLB_INLINE ErtsAtomCacheMap *erts_get_atom_cache_map(Process *c_p);
+
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+ERTS_GLB_INLINE ErtsAtomCacheMap *
+erts_get_atom_cache_map(Process *c_p)
+{
+ ErtsSchedulerData *esdp = (c_p
+ ? ERTS_PROC_GET_SCHDATA(c_p)
+ : erts_get_scheduler_data());
+ ASSERT(esdp);
+ return &esdp->atom_cache_map;
+}
+#endif
+
+#ifdef ERTS_SMP
+
+Process *erts_pid2proc_not_running(Process *,
+ ErtsProcLocks,
+ Eterm,
+ ErtsProcLocks);
+Process *erts_pid2proc_nropt(Process *c_p,
+ ErtsProcLocks c_p_locks,
+ Eterm pid,
+ ErtsProcLocks pid_locks);
+extern int erts_disable_proc_not_running_opt;
+
+#ifdef DEBUG
+#define ERTS_SMP_ASSERT_IS_NOT_EXITING(P) \
+ do { ASSERT(!(P)->is_exiting); } while (0)
+#else
+#define ERTS_SMP_ASSERT_IS_NOT_EXITING(P)
+#endif
+
+/* NOTE: At least one process lock has to be held on P! */
+#ifdef ERTS_ENABLE_LOCK_CHECK
+#define ERTS_PROC_IS_EXITING(P) \
+ (ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks((P)) != 0 \
+ || erts_lc_pix_lock_is_locked(ERTS_PID2PIXLOCK((P)->id))),\
+ (P)->is_exiting)
+#else
+#define ERTS_PROC_IS_EXITING(P) ((P)->is_exiting)
+#endif
+
+#else /* !ERTS_SMP */
+
+#define ERTS_PROC_IS_EXITING(P) ((P)->status == P_EXITING)
+
+#define ERTS_SMP_ASSERT_IS_NOT_EXITING(P)
+
+#define erts_pid2proc_not_running erts_pid2proc
+#define erts_pid2proc_nropt erts_pid2proc
+
+#endif
+
+/* Minimum NUMBER of processes for a small system to start */
+#ifdef ERTS_SMP
+#define ERTS_MIN_PROCESSES ERTS_NO_OF_PIX_LOCKS
+#else
+#define ERTS_MIN_PROCESSES 16
+#endif
+
+#ifdef ERTS_INCLUDE_SCHEDULER_INTERNALS
+ERTS_GLB_INLINE void erts_smp_notify_inc_runq(ErtsRunQueue *runq);
+void erts_smp_notify_inc_runq__(ErtsRunQueue *runq);
+#endif /* ERTS_INCLUDE_SCHEDULER_INTERNALS */
+
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+
+#ifdef ERTS_INCLUDE_SCHEDULER_INTERNALS
+
+ERTS_GLB_INLINE void
+erts_smp_notify_inc_runq(ErtsRunQueue *runq)
+{
+#ifdef ERTS_SMP
+ ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(runq));
+ if (runq->waiting)
+ erts_smp_notify_inc_runq__(runq);
+#endif
+}
+
+#endif /* ERTS_INCLUDE_SCHEDULER_INTERNALS */
+
+#endif /* #if ERTS_GLB_INLINE_INCL_FUNC_DEF */
+
+#include "erl_process_lock.h"
+
+#undef ERTS_INCLUDE_SCHEDULER_INTERNALS
+
+#endif
+
+
+
diff --git a/erts/emulator/beam/erl_process_dict.c b/erts/emulator/beam/erl_process_dict.c
new file mode 100644
index 0000000000..93466da3aa
--- /dev/null
+++ b/erts/emulator/beam/erl_process_dict.c
@@ -0,0 +1,1001 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 1999-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Code for process dictionaries.
+ *
+ */
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+
+#include "sys.h"
+#include "erl_vm.h"
+#include "global.h"
+#include "erl_process.h" /* Will include erl_process_dict.h */
+#include "error.h"
+#include "erl_driver.h"
+#include "bif.h"
+#include "big.h"
+#include "dist.h"
+#include "erl_version.h"
+
+/* #define HARDDEBUG */
+
+/*
+** Utility macros
+*/
+
+/* Flags to pd_get_hash */
+#define PD_GET_OTHER_PROCESS 1UL
+
+/* Hash constant macros */
+#define MAX_HASH 1342177280UL
+#define INITIAL_SIZE 10
+
+/* Hash utility macros */
+#define HASH_RANGE(PDict) ((PDict)->homeSize + (PDict)->splitPosition)
+
+#define MAKE_HASH(Term) \
+((is_small(Term)) ? unsigned_val(Term) : \
+ ((is_atom(Term)) ? \
+ (atom_tab(atom_val(Term))->slot.bucket.hvalue) : \
+ make_hash2(Term)))
+
+#define PD_SZ2BYTES(Sz) (sizeof(ProcDict) + ((Sz) - 1)*sizeof(Eterm))
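+/*
+ * ProcDict is allocated with the "struct hack": it ends in
+ * "Eterm data[1]" and is over-allocated, so a dictionary with Sz
+ * data slots needs sizeof(ProcDict) plus (Sz - 1) extra Eterm
+ * words. For example (illustration only):
+ *
+ *   pd = PD_ALLOC(PD_SZ2BYTES(10));   -- room for data[0] .. data[9]
+ */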
+
+/* Memory allocation macros */
+#define PD_ALLOC(Sz) \
+ erts_alloc(ERTS_ALC_T_PROC_DICT, (Sz))
+#define PD_FREE(P, Sz) \
+ erts_free(ERTS_ALC_T_PROC_DICT, (P))
+#define PD_REALLOC(P, OSz, NSz) \
+ erts_realloc(ERTS_ALC_T_PROC_DICT, (P), (NSz))
+
+
+#define TCAR(Term) CAR(list_val(Term))
+#define TCDR(Term) CDR(list_val(Term))
+
+/* Array access macro */
+#define ARRAY_GET(PDict, Index) (((PDict)->size > (Index)) ? \
+ (PDict)->data[Index] : NIL)
+
+/*
+ * Forward declarations
+ */
+static void pd_hash_erase(Process *p, Eterm id, Eterm *ret);
+static void pd_hash_erase_all(Process *p);
+static Eterm pd_hash_get_keys(Process *p, Eterm value);
+static Eterm pd_hash_get_all(Process *p, ProcDict *pd);
+static Eterm pd_hash_put(Process *p, Eterm id, Eterm value);
+
+static void shrink(Process *p, Eterm* ret);
+static void grow(Process *p);
+
+static void array_shrink(ProcDict **ppd, unsigned int need);
+static Eterm array_put(ProcDict **ppdict, unsigned int ndx, Eterm term);
+
+static unsigned int pd_hash_value(ProcDict *pdict, Eterm term);
+static unsigned int next_array_size(unsigned int need);
+
+/*
+** Debugging prototypes and macros
+*/
+#ifdef HARDDEBUG
+
+#include <stdarg.h>
+
+static int hdebugf(char *format, ...);
+static char *hdebugf_file = "";
+static int hdebugf_line;
+#define HDEBUGF(X) ((int) hdebugf_file = __FILE__, hdebugf_line = __LINE__, \
+ hdebugf X)
+#ifndef DEBUG
+#define DEBUG 1
+#endif
+
+#else /* !HARDDEBUG */
+
+#define HDEBUGF(X) /* Nothing */
+
+#endif /* HARDDEBUG (else) */
+
+#ifdef DEBUG
+
+static void pd_check(ProcDict *pd);
+
+#define PD_CHECK(PD) pd_check(PD)
+
+#else /* !DEBUG */
+
+#define PD_CHECK(PD) /* Nothing */
+
+#endif /* DEBUG (else) */
+
+/*
+** External interface
+*/
+
+/*
+ * Called from break handler
+ */
+void
+erts_dictionary_dump(int to, void *to_arg, ProcDict *pd)
+{
+ unsigned int i;
+#ifdef DEBUG
+
+ /*PD_CHECK(pd);*/
+ if (pd == NULL)
+ return;
+ erts_print(to, to_arg, "(size = %d, used = %d, homeSize = %d, "
+ "splitPosition = %d, numElements = %d)\n",
+ pd->size, pd->used, pd->homeSize,
+ pd->splitPosition, (unsigned int) pd->numElements);
+ for (i = 0; i < HASH_RANGE(pd); ++i) {
+ erts_print(to, to_arg, "%d: %T\n", i, ARRAY_GET(pd, i));
+ }
+
+#else /* !DEBUG */
+
+ int written = 0;
+ Eterm t;
+
+ erts_print(to, to_arg, "[");
+ if (pd != NULL) {
+ for (i = 0; i < HASH_RANGE(pd); ++i) {
+ t = ARRAY_GET(pd, i);
+ if (is_list(t)) {
+ for (; t != NIL; t = TCDR(t)) {
+ erts_print(to, to_arg, written++ ? ",%T" : "%T", TCAR(t));
+ }
+ } else if (is_tuple(t)) {
+ erts_print(to, to_arg, written++ ? ",%T" : "%T", t);
+ }
+ }
+ }
+ erts_print(to, to_arg, "]");
+
+#endif /* DEBUG (else) */
+}
+
+void
+erts_deep_dictionary_dump(int to, void *to_arg,
+ ProcDict* pd, void (*cb)(int, void *, Eterm))
+{
+ unsigned int i;
+ Eterm t;
+
+ if (pd != NULL) {
+ for (i = 0; i < HASH_RANGE(pd); ++i) {
+ t = ARRAY_GET(pd, i);
+ if (is_list(t)) {
+ for (; t != NIL; t = TCDR(t)) {
+ (*cb)(to, to_arg, TCAR(t));
+ }
+ } else if (is_tuple(t)) {
+ (*cb)(to, to_arg, t);
+ }
+ }
+ }
+}
+
+Uint
+erts_dicts_mem_size(Process *p)
+{
+ Uint size = 0;
+ if (p->dictionary)
+ size += PD_SZ2BYTES(p->dictionary->size);
+ return size;
+}
+
+void
+erts_erase_dicts(Process *p)
+{
+ if (p->dictionary) {
+ pd_hash_erase_all(p);
+ p->dictionary = NULL;
+ }
+}
+
+/*
+ * Called from process_info/1,2.
+ */
+Eterm erts_dictionary_copy(Process *p, ProcDict *pd)
+{
+ Eterm* hp;
+ Eterm* heap_start;
+ Eterm res = NIL;
+ Eterm tmp, tmp2;
+ unsigned int i, num;
+
+ if (pd == NULL) {
+ return res;
+ }
+
+ PD_CHECK(pd);
+ num = HASH_RANGE(pd);
+ heap_start = hp = (Eterm *) erts_alloc(ERTS_ALC_T_TMP,
+ sizeof(Eterm) * pd->numElements * 2);
+ for (i = 0; i < num; ++i) {
+ tmp = ARRAY_GET(pd, i);
+ if (is_boxed(tmp)) {
+ ASSERT(is_tuple(tmp));
+ res = CONS(hp, tmp, res);
+ hp += 2;
+ } else if (is_list(tmp)) {
+ while (tmp != NIL) {
+ tmp2 = TCAR(tmp);
+ res = CONS(hp, tmp2, res);
+ hp += 2;
+ tmp = TCDR(tmp);
+ }
+ }
+ }
+ res = copy_object(res, p);
+ erts_free(ERTS_ALC_T_TMP, (void *) heap_start);
+ return res;
+}
+
+
+/*
+** BIF interface
+*/
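+/*
+ * These BIFs back the process dictionary functions in the erlang
+ * module: get/0, get/1, get_keys/1, put/2, erase/0 and erase/1.
+ * Erlang-level behaviour, for illustration:
+ *
+ *   put(foo, 1),   %% -> undefined (no previous value)
+ *   put(foo, 2),   %% -> 1 (the old value)
+ *   get(foo),      %% -> 2
+ *   erase(foo).    %% -> 2, and the key is removed
+ */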
+BIF_RETTYPE get_0(BIF_ALIST_0)
+{
+ Eterm ret;
+ PD_CHECK(BIF_P->dictionary);
+ ret = pd_hash_get_all(BIF_P, BIF_P->dictionary);
+ PD_CHECK(BIF_P->dictionary);
+ BIF_RET(ret);
+}
+
+BIF_RETTYPE get_1(BIF_ALIST_1)
+{
+ Eterm ret;
+ PD_CHECK(BIF_P->dictionary);
+ ret = erts_pd_hash_get(BIF_P, BIF_ARG_1);
+ PD_CHECK(BIF_P->dictionary);
+ BIF_RET(ret);
+}
+
+BIF_RETTYPE get_keys_1(BIF_ALIST_1)
+{
+ Eterm ret;
+
+ PD_CHECK(BIF_P->dictionary);
+ ret = pd_hash_get_keys(BIF_P, BIF_ARG_1);
+ PD_CHECK(BIF_P->dictionary);
+ BIF_RET(ret);
+}
+
+BIF_RETTYPE put_2(BIF_ALIST_2)
+{
+ Eterm ret;
+
+ PD_CHECK(BIF_P->dictionary);
+ ret = pd_hash_put(BIF_P, BIF_ARG_1, BIF_ARG_2);
+ PD_CHECK(BIF_P->dictionary);
+ BIF_RET(ret);
+}
+
+BIF_RETTYPE erase_0(BIF_ALIST_0)
+{
+ Eterm ret;
+ PD_CHECK(BIF_P->dictionary);
+ ret = pd_hash_get_all(BIF_P, BIF_P->dictionary);
+ pd_hash_erase_all(BIF_P);
+ PD_CHECK(BIF_P->dictionary);
+ BIF_RET(ret);
+}
+
+BIF_RETTYPE erase_1(BIF_ALIST_1)
+{
+ Eterm ret;
+ PD_CHECK(BIF_P->dictionary);
+ pd_hash_erase(BIF_P, BIF_ARG_1, &ret);
+ PD_CHECK(BIF_P->dictionary);
+ BIF_RET(ret);
+}
+
+/*
+ * BIF implementations
+ */
+static void pd_hash_erase(Process *p, Eterm id, Eterm *ret)
+{
+ unsigned int hval;
+ Eterm old;
+ Eterm tmp;
+ unsigned int range;
+
+ *ret = am_undefined;
+ if (p->dictionary == NULL) {
+ return;
+ }
+ hval = pd_hash_value(p->dictionary, id);
+ old = ARRAY_GET(p->dictionary, hval);
+ if (is_boxed(old)) { /* Tuple */
+ ASSERT(is_tuple(old));
+ if (EQ(tuple_val(old)[1], id)) {
+ array_put(&(p->dictionary), hval, NIL);
+ --(p->dictionary->numElements);
+ *ret = tuple_val(old)[2];
+ }
+ } else if (is_list(old)) {
+ /* Find the cons cell whose tuple has a matching key */
+ Eterm* prev = &p->dictionary->data[hval];
+
+ for (tmp = *prev; tmp != NIL; prev = &TCDR(tmp), tmp = *prev) {
+ if (EQ(tuple_val(TCAR(tmp))[1], id)) {
+ *prev = TCDR(tmp);
+ *ret = tuple_val(TCAR(tmp))[2];
+ --(p->dictionary->numElements);
+ }
+ }
+
+ /* If only one element is left in the list, replace the list with the element itself. */
+ old = ARRAY_GET(p->dictionary, hval);
+ ASSERT(is_list(old));
+ if (is_nil(TCDR(old))) {
+ array_put(&p->dictionary, hval, TCAR(old));
+ }
+ } else if (is_not_nil(old)) {
+#ifdef DEBUG
+ erts_fprintf(stderr,
+ "Process dictionary for process %T is broken, trying to "
+ "display term found in line %d:\n"
+ "%T\n", p->id, __LINE__, old);
+#endif
+ erl_exit(1, "Damaged process dictionary found during erase/1.");
+ }
+ if ((range = HASH_RANGE(p->dictionary)) > INITIAL_SIZE &&
+ range / 2 > (p->dictionary->numElements)) {
+ shrink(p, ret);
+ }
+}
+
+static void pd_hash_erase_all(Process *p)
+{
+ if (p->dictionary != NULL) {
+ PD_FREE(p->dictionary, PD_SZ2BYTES(p->dictionary->size));
+ p->dictionary = NULL;
+ }
+}
+
+Eterm erts_pd_hash_get(Process *p, Eterm id)
+{
+ unsigned int hval;
+ Eterm tmp;
+ ProcDict *pd = p->dictionary;
+
+ if (pd == NULL)
+ return am_undefined;
+ hval = pd_hash_value(pd, id);
+ tmp = ARRAY_GET(pd, hval);
+ if (is_boxed(tmp)) { /* Tuple */
+ ASSERT(is_tuple(tmp));
+ if (EQ(tuple_val(tmp)[1], id)) {
+ return tuple_val(tmp)[2];
+ }
+ } else if (is_list(tmp)) {
+ for (; tmp != NIL && !EQ(tuple_val(TCAR(tmp))[1], id); tmp = TCDR(tmp)) {
+ ;
+ }
+ if (tmp != NIL) {
+ return tuple_val(TCAR(tmp))[2];
+ }
+ } else if (is_not_nil(tmp)) {
+#ifdef DEBUG
+ erts_fprintf(stderr,
+ "Process dictionary for process %T is broken, trying to "
+ "display term found in line %d:\n"
+ "%T\n", p->id, __LINE__, tmp);
+#endif
+ erl_exit(1, "Damaged process dictionary found during get/1.");
+ }
+ return am_undefined;
+}
+
+static Eterm pd_hash_get_keys(Process *p, Eterm value)
+{
+ Eterm *hp;
+ Eterm res = NIL;
+ ProcDict *pd = p->dictionary;
+ unsigned int i, num;
+ Eterm tmp, tmp2;
+
+ if (pd == NULL) {
+ return res;
+ }
+
+ num = HASH_RANGE(pd);
+ for (i = 0; i < num; ++i) {
+ tmp = ARRAY_GET(pd, i);
+ if (is_boxed(tmp)) {
+ ASSERT(is_tuple(tmp));
+ if (EQ(tuple_val(tmp)[2], value)) {
+ hp = HAlloc(p, 2);
+ res = CONS(hp, tuple_val(tmp)[1], res);
+ }
+ } else if (is_list(tmp)) {
+ while (tmp != NIL) {
+ tmp2 = TCAR(tmp);
+ if (EQ(tuple_val(tmp2)[2], value)) {
+ hp = HAlloc(p, 2);
+ res = CONS(hp, tuple_val(tmp2)[1], res);
+ }
+ tmp = TCDR(tmp);
+ }
+ }
+ }
+ return res;
+}
+
+
+static Eterm
+pd_hash_get_all(Process *p, ProcDict *pd)
+{
+ Eterm* hp;
+ Eterm res = NIL;
+ Eterm tmp, tmp2;
+ unsigned int i;
+ unsigned int num;
+
+ if (pd == NULL) {
+ return res;
+ }
+ num = HASH_RANGE(pd);
+ hp = HAlloc(p, pd->numElements * 2);
+
+ for (i = 0; i < num; ++i) {
+ tmp = ARRAY_GET(pd, i);
+ if (is_boxed(tmp)) {
+ ASSERT(is_tuple(tmp));
+ res = CONS(hp, tmp, res);
+ hp += 2;
+ } else if (is_list(tmp)) {
+ while (tmp != NIL) {
+ tmp2 = TCAR(tmp);
+ res = CONS(hp, tmp2, res);
+ hp += 2;
+ tmp = TCDR(tmp);
+ }
+ }
+ }
+ return res;
+}
+
+static Eterm pd_hash_put(Process *p, Eterm id, Eterm value)
+{
+ unsigned int hval;
+ Eterm *hp;
+ Eterm tpl;
+ Eterm old;
+ Eterm tmp;
+ int needed;
+ int i = 0;
+#ifdef DEBUG
+ Eterm *hp_limit;
+#endif
+
+ if (p->dictionary == NULL) {
+ /* Create it */
+ array_put(&(p->dictionary), INITIAL_SIZE - 1, NIL);
+ p->dictionary->homeSize = INITIAL_SIZE;
+ }
+ hval = pd_hash_value(p->dictionary, id);
+ old = ARRAY_GET(p->dictionary, hval);
+
+ /*
+ * Calculate the number of heap words needed and garbage
+ * collect if necessary. (Might be a slight overestimation.)
+ */
+ needed = 3; /* {Key,Value} tuple */
+ if (is_boxed(old)) {
+ /*
+ * We don't want to compare keys twice, so we'll always
+ * reserve the space for two CONS cells.
+ */
+ needed += 2+2;
+ } else if (is_list(old)) {
+ i = 0;
+ for (tmp = old; tmp != NIL && !EQ(tuple_val(TCAR(tmp))[1], id); tmp = TCDR(tmp)) {
+ ++i;
+ }
+ if (is_nil(tmp)) {
+ i = -1;
+ needed += 2;
+ } else {
+ needed += 2*(i+1);
+ }
+ }
+ if (HeapWordsLeft(p) < needed) {
+ Eterm root[3];
+ root[0] = id;
+ root[1] = value;
+ root[2] = old;
+ BUMP_REDS(p, erts_garbage_collect(p, needed, root, 3));
+ id = root[0];
+ value = root[1];
+ old = root[2];
+ }
+#ifdef DEBUG
+ hp_limit = p->htop + needed;
+#endif
+
+ /*
+ * Create the {Key,Value} tuple.
+ */
+ hp = HeapOnlyAlloc(p, 3);
+ tpl = TUPLE2(hp, id, value);
+
+ /*
+ * Update the dictionary.
+ */
+ if (is_nil(old)) {
+ array_put(&(p->dictionary), hval, tpl);
+ ++(p->dictionary->numElements);
+ } else if (is_boxed(old)) {
+ ASSERT(is_tuple(old));
+ if (EQ(tuple_val(old)[1],id)) {
+ array_put(&(p->dictionary), hval, tpl);
+ return tuple_val(old)[2];
+ } else {
+ hp = HeapOnlyAlloc(p, 4);
+ tmp = CONS(hp, old, NIL);
+ hp += 2;
+ ++(p->dictionary->numElements);
+ array_put(&(p->dictionary), hval, CONS(hp, tpl, tmp));
+ hp += 2;
+ ASSERT(hp <= hp_limit);
+ }
+ } else if (is_list(old)) {
+ if (i == -1) {
+ /*
+ * New key. Simply prepend the tuple to the beginning of the list.
+ */
+ hp = HeapOnlyAlloc(p, 2);
+ array_put(&(p->dictionary), hval, CONS(hp, tpl, old));
+ hp += 2;
+ ASSERT(hp <= hp_limit);
+ ++(p->dictionary->numElements);
+ } else {
+ /*
+ * i = Number of CDRs to skip to reach the changed element in the list.
+ *
+ * Replace old value in list. To avoid pointers from the old generation
+ * to the new, we must rebuild the list from the beginning up to and
+ * including the changed element.
+ */
+ Eterm nlist;
+ int j;
+
+ hp = HeapOnlyAlloc(p, (i+1)*2);
+
+ /* Find the list element to change. */
+ for (j = 0, nlist = old; j < i; j++, nlist = TCDR(nlist)) {
+ ;
+ }
+ ASSERT(EQ(tuple_val(TCAR(nlist))[1], id));
+ nlist = TCDR(nlist); /* Unchanged part of list. */
+
+ /* Rebuild list before the updated element. */
+ for (tmp = old; i-- > 0; tmp = TCDR(tmp)) {
+ nlist = CONS(hp, TCAR(tmp), nlist);
+ hp += 2;
+ }
+ ASSERT(EQ(tuple_val(TCAR(tmp))[1], id));
+
+ /* Put the updated element first in the new list. */
+ nlist = CONS(hp, tpl, nlist);
+ hp += 2;
+ ASSERT(hp <= hp_limit);
+ array_put(&(p->dictionary), hval, nlist);
+ return tuple_val(TCAR(tmp))[2];
+ }
+ } else {
+#ifdef DEBUG
+ erts_fprintf(stderr,
+ "Process dictionary for process %T is broken, trying to "
+ "display term found in line %d:\n"
+ "%T\n", p->id, __LINE__, old);
+#endif
+
+ erl_exit(1, "Damaged process dictionary found during put/2.");
+ }
+ if (HASH_RANGE(p->dictionary) <= p->dictionary->numElements) {
+ grow(p);
+ }
+ return am_undefined;
+}
+
+/*
+ * Hash table utilities, rehashing
+ */
+
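+/*
+ * The dictionary uses linear hashing: the table grows and shrinks
+ * one bucket at a time. HASH_RANGE = homeSize + splitPosition is
+ * the number of active buckets. grow() rehashes the bucket at
+ * splitPosition into buckets (splitPosition) and
+ * (splitPosition + homeSize) and advances splitPosition; when
+ * splitPosition reaches homeSize, homeSize doubles and
+ * splitPosition restarts at 0. shrink() walks the same sequence
+ * backwards, merging each bucket pair. One grow step, with
+ * illustrative numbers:
+ *
+ *   homeSize = 10, splitPosition = 3   => HASH_RANGE = 13
+ *   grow: split bucket 3 into buckets 3 and 13, splitPosition = 4
+ */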
+static void shrink(Process *p, Eterm* ret)
+{
+ unsigned int range = HASH_RANGE(p->dictionary);
+ unsigned int steps = (range*3) / 10;
+ Eterm hi, lo, tmp;
+ unsigned int i;
+ Eterm *hp;
+#ifdef DEBUG
+ Eterm *hp_limit;
+#endif
+
+ if (range - steps < INITIAL_SIZE) {
+ steps = range - INITIAL_SIZE;
+ }
+
+ for (i = 0; i < steps; ++i) {
+ ProcDict *pd = p->dictionary;
+ if (pd->splitPosition == 0) {
+ pd->homeSize /= 2;
+ pd->splitPosition = pd->homeSize;
+ }
+ --(pd->splitPosition);
+ hi = ARRAY_GET(pd, (pd->splitPosition + pd->homeSize));
+ lo = ARRAY_GET(pd, pd->splitPosition);
+ if (hi != NIL) {
+ if (lo == NIL) {
+ array_put(&(p->dictionary), pd->splitPosition, hi);
+ } else {
+ int needed = 4;
+ if (is_list(hi) && is_list(lo)) {
+ needed = 2*list_length(hi);
+ }
+ if (HeapWordsLeft(p) < needed) {
+ BUMP_REDS(p, erts_garbage_collect(p, needed, ret, 1));
+ hi = pd->data[(pd->splitPosition + pd->homeSize)];
+ lo = pd->data[pd->splitPosition];
+ }
+#ifdef DEBUG
+ hp_limit = p->htop + needed;
+#endif
+ if (is_tuple(lo)) {
+ if (is_tuple(hi)) {
+ hp = HeapOnlyAlloc(p, 4);
+ tmp = CONS(hp, hi, NIL);
+ hp += 2;
+ array_put(&(p->dictionary), pd->splitPosition,
+ CONS(hp,lo,tmp));
+ hp += 2;
+ ASSERT(hp <= hp_limit);
+ } else { /* hi is a list */
+ hp = HeapOnlyAlloc(p, 2);
+ array_put(&(p->dictionary), pd->splitPosition,
+ CONS(hp, lo, hi));
+ hp += 2;
+ ASSERT(hp <= hp_limit);
+ }
+ } else { /* lo is a list */
+ if (is_tuple(hi)) {
+ hp = HeapOnlyAlloc(p, 2);
+ array_put(&(p->dictionary), pd->splitPosition,
+ CONS(hp, hi, lo));
+ hp += 2;
+ ASSERT(hp <= hp_limit);
+
+ } else { /* Two lists */
+ hp = HeapOnlyAlloc(p, needed);
+ for (tmp = hi; tmp != NIL; tmp = TCDR(tmp)) {
+ lo = CONS(hp, TCAR(tmp), lo);
+ hp += 2;
+ }
+ ASSERT(hp <= hp_limit);
+ array_put(&(p->dictionary), pd->splitPosition, lo);
+ }
+ }
+ }
+ }
+ array_put(&(p->dictionary), (pd->splitPosition + pd->homeSize), NIL);
+ }
+ if (HASH_RANGE(p->dictionary) <= (p->dictionary->size / 4)) {
+ array_shrink(&(p->dictionary), (HASH_RANGE(p->dictionary) * 3) / 2);
+ }
+}
+
+static void grow(Process *p)
+{
+ unsigned int i,j;
+ unsigned int steps = p->dictionary->homeSize / 5;
+ Eterm l1,l2;
+ Eterm l;
+ Eterm *hp;
+ unsigned int pos;
+ unsigned int homeSize;
+ int needed = 0;
+ ProcDict *pd;
+#ifdef DEBUG
+ Eterm *hp_limit;
+#endif
+
+ HDEBUGF(("grow: steps = %d", steps));
+ if (steps == 0)
+ steps = 1;
+ /* Don't grow beyond MAX_HASH */
+ if ((MAX_HASH - steps) <= HASH_RANGE(p->dictionary)) {
+ return;
+ }
+
+ /*
+ * Calculate total number of heap words needed, and garbage collect
+ * if necessary.
+ */
+
+ pd = p->dictionary;
+ pos = pd->splitPosition;
+ homeSize = pd->homeSize;
+ for (i = 0; i < steps; ++i) {
+ if (pos == homeSize) {
+ homeSize *= 2;
+ pos = 0;
+ }
+ l = ARRAY_GET(pd, pos);
+ pos++;
+ if (is_not_tuple(l)) {
+ while (l != NIL) {
+ needed += 2;
+ l = TCDR(l);
+ }
+ }
+ }
+ if (HeapWordsLeft(p) < needed) {
+ BUMP_REDS(p, erts_garbage_collect(p, needed, 0, 0));
+ }
+#ifdef DEBUG
+ hp_limit = p->htop + needed;
+#endif
+
+ /*
+ * Now grow.
+ */
+
+ for (i = 0; i < steps; ++i) {
+ ProcDict *pd = p->dictionary;
+ if (pd->splitPosition == pd->homeSize) {
+ pd->homeSize *= 2;
+ pd->splitPosition = 0;
+ }
+ pos = pd->splitPosition;
+ ++pd->splitPosition; /* For the hashes */
+ l = ARRAY_GET(pd, pos);
+ if (is_tuple(l)) {
+ if (pd_hash_value(pd, tuple_val(l)[1]) != pos) {
+ array_put(&(p->dictionary), pos +
+ p->dictionary->homeSize, l);
+ array_put(&(p->dictionary), pos, NIL);
+ }
+ } else {
+ l2 = NIL;
+ l1 = l;
+ for (j = 0; l1 != NIL; l1 = TCDR(l1))
+ j += 2;
+ hp = HeapOnlyAlloc(p, j);
+
+ while (l != NIL) {
+ if (pd_hash_value(pd, tuple_val(TCAR(l))[1]) == pos)
+ l1 = CONS(hp, TCAR(l), l1);
+ else
+ l2 = CONS(hp, TCAR(l), l2);
+ hp += 2;
+ l = TCDR(l);
+ }
+ if (l1 != NIL && TCDR(l1) == NIL)
+ l1 = TCAR(l1);
+ if (l2 != NIL && TCDR(l2) == NIL)
+ l2 = TCAR(l2);
+ ASSERT(hp <= hp_limit);
+ /* After array_put pd is no longer valid */
+ array_put(&(p->dictionary), pos, l1);
+ array_put(&(p->dictionary), pos +
+ p->dictionary->homeSize, l2);
+ }
+ }
+
+#ifdef HARDDEBUG
+ dictionary_dump(p->dictionary,CERR);
+#endif
+}
+
+/*
+** Array oriented operations
+*/
+
+static void array_shrink(ProcDict **ppd, unsigned int need)
+{
+ unsigned int siz = next_array_size(need);
+
+ HDEBUGF(("array_shrink: size = %d, used = %d, need = %d",
+ (*ppd)->size, (*ppd)->used, need));
+
+ if (siz > (*ppd)->size)
+ return; /* Only shrink */
+
+ *ppd = PD_REALLOC(((void *) *ppd),
+ PD_SZ2BYTES((*ppd)->size),
+ PD_SZ2BYTES(siz));
+
+ (*ppd)->size = siz;
+ if ((*ppd)->size < (*ppd)->used)
+ (*ppd)->used = (*ppd)->size;
+}
+
+
+static Eterm array_put(ProcDict **ppdict, unsigned int ndx, Eterm term)
+{
+ unsigned int i;
+ Eterm ret;
+ if (*ppdict == NULL) {
+ Uint siz = next_array_size(ndx+1);
+ ProcDict *p;
+
+ p = PD_ALLOC(PD_SZ2BYTES(siz));
+ for (i = 0; i < siz; ++i)
+ p->data[i] = NIL;
+ p->size = siz;
+ p->homeSize = p->splitPosition = p->numElements = p->used = 0;
+ *ppdict = p;
+ } else if (ndx >= (*ppdict)->size) {
+ Uint osize = (*ppdict)->size;
+ Uint nsize = next_array_size(ndx+1);
+ *ppdict = PD_REALLOC(((void *) *ppdict),
+ PD_SZ2BYTES(osize),
+ PD_SZ2BYTES(nsize));
+ for (i = osize; i < nsize; ++i)
+ (*ppdict)->data[i] = NIL;
+ (*ppdict)->size = nsize;
+ }
+ ret = (*ppdict)->data[ndx];
+ (*ppdict)->data[ndx] = term;
+ if ((ndx + 1) > (*ppdict)->used)
+ (*ppdict)->used = ndx + 1;
+#ifdef HARDDEBUG
+ HDEBUGF(("array_put: (*ppdict)->size = %d, (*ppdict)->used = %d, ndx = %d",
+ (*ppdict)->size, (*ppdict)->used, ndx));
+ erts_fprintf(stderr, "%T", term);
+#endif /* HARDDEBUG */
+ return ret;
+}
+
+/*
+** Basic utilities
+*/
+
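+/*
+ * Classic linear-hashing address computation: the hash modulo twice
+ * the home size selects a logical bucket; if that bucket has not
+ * been created yet (it lies beyond HASH_RANGE), fall back to the
+ * hash modulo homeSize. Worked example (illustrative numbers):
+ *
+ *   homeSize = 10, splitPosition = 3, HASH_RANGE = 13
+ *   hash = 57:  57 % 20 = 17 >= 13  ->  use 57 % 10 = 7
+ *   hash = 52:  52 % 20 = 12 <  13  ->  use 12
+ */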
+static unsigned int pd_hash_value(ProcDict *pdict, Eterm term)
+{
+ Uint hash, high;
+
+ hash = MAKE_HASH(term);
+ high = hash % (pdict->homeSize*2);
+ if (high >= HASH_RANGE(pdict))
+ return hash % pdict->homeSize;
+ return high;
+}
+
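+/*
+ * Binary search for the smallest table entry >= need; the table is
+ * roughly geometric (each entry doubles). E.g. next_array_size(25)
+ * probes tab[4] = 160, then tab[2] = 40 with tab[1] = 20 < 25, and
+ * returns 40. Requests beyond the largest entry fall through and
+ * return need itself.
+ */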
+static unsigned int next_array_size(unsigned int need)
+{
+ static unsigned int tab[] =
+ {
+ 10UL,
+ 20UL,
+ 40UL,
+ 80UL,
+ 160UL,
+ 320UL,
+ 640UL,
+ 1280UL,
+ 2560UL,
+ 5120UL,
+ 10240UL,
+ 20480UL,
+ 40960UL,
+ 81920UL,
+ 163840UL,
+ 327680UL,
+ 655360UL,
+ 1310720UL,
+ 2621440UL,
+ 5242880UL,
+ 10485760UL,
+ 20971520UL,
+ 41943040UL,
+ 83886080UL,
+ 167772160UL,
+ 335544320UL,
+ 671088640UL,
+ 1342177280UL,
+ 2684354560UL
+ };
+ int hi = sizeof(tab) / sizeof(Uint) - 1;
+ int lo = 1;
+ int cur = 4;
+
+ while (hi >= lo) {
+ if (tab[cur] >= need && tab[cur - 1] < need)
+ return tab[cur];
+ if (tab[cur] > need)
+ hi = cur - 1;
+ else
+ lo = cur + 1;
+ cur = (hi + lo) / 2;
+ }
+ return need;
+}
+
+
+/*
+** Debug functions
+*/
+#ifdef DEBUG
+
+static void pd_check(ProcDict *pd)
+{
+ unsigned int i;
+ Uint num;
+ if (pd == NULL)
+ return;
+ ASSERT(pd->size >= pd->used);
+ ASSERT(HASH_RANGE(pd) <= MAX_HASH);
+ for (i = 0, num = 0; i < pd->used; ++i) {
+ Eterm t = pd->data[i];
+ if (is_nil(t)) {
+ continue;
+ } else if (is_tuple(t)) {
+ ++num;
+ ASSERT(arityval(*tuple_val(t)) == 2);
+ continue;
+ } else if (is_list(t)) {
+ while (t != NIL) {
+ ++num;
+ ASSERT(is_tuple(TCAR(t)));
+ ASSERT(arityval(*(tuple_val(TCAR(t)))) == 2);
+ t = TCDR(t);
+ }
+ continue;
+ } else {
+ erl_exit(1,
+ "Found tag 0x%08x in process dictionary at position %d",
+ (unsigned long) t, (int) i);
+ }
+ }
+ ASSERT(num == pd->numElements);
+ ASSERT(pd->splitPosition <= pd->homeSize);
+}
+
+#endif /* DEBUG */
+
+
+#ifdef HARDDEBUG
+
+static int hdebugf(char *format, ...)
+{
+ va_list ap;
+
+ erts_fprintf(stderr, "DEBUG: %s:%d :", hdebugf_file, hdebugf_line);
+ va_start(ap, format);
+ erts_vfprintf(stderr, format, ap);
+ va_end(ap);
+ erts_fprintf(stderr, "\n");
+ return 0;
+}
+
+#endif /* HARDDEBUG */
+
diff --git a/erts/emulator/beam/erl_process_dict.h b/erts/emulator/beam/erl_process_dict.h
new file mode 100644
index 0000000000..8fad2a67ab
--- /dev/null
+++ b/erts/emulator/beam/erl_process_dict.h
@@ -0,0 +1,42 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 1999-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+#ifndef _ERL_PROCESS_DICT_H
+#define _ERL_PROCESS_DICT_H
+#include "sys.h"
+
+typedef struct proc_dict {
+ unsigned int size;
+ unsigned int used;
+ unsigned int homeSize;
+ unsigned int splitPosition;
+ Uint numElements;
+ Eterm data[1]; /* The beginning of an array of Erlang terms */
+} ProcDict;
+
+Uint erts_dicts_mem_size(struct process *p);
+void erts_erase_dicts(struct process *p);
+void erts_dictionary_dump(int to, void *to_arg, ProcDict *pd);
+void erts_deep_dictionary_dump(int to, void *to_arg,
+ ProcDict* pd, void (*cb)(int, void *, Eterm obj));
+Eterm erts_dictionary_copy(struct process *p, ProcDict *pd);
+
+Eterm erts_pd_hash_get(struct process *p, Eterm id);
+
+#endif
diff --git a/erts/emulator/beam/erl_process_dump.c b/erts/emulator/beam/erl_process_dump.c
new file mode 100644
index 0000000000..1666509c72
--- /dev/null
+++ b/erts/emulator/beam/erl_process_dump.c
@@ -0,0 +1,454 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2003-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+
+#include "sys.h"
+#include "erl_vm.h"
+#include "global.h"
+#include "erl_process.h"
+#include "error.h"
+#include "bif.h"
+#include "erl_db.h"
+#include "dist.h"
+#include "beam_catches.h"
+#include "erl_binary.h"
+#define ERTS_WANT_EXTERNAL_TAGS
+#include "external.h"
+
+#define WORD_FMT "%X"
+#define ADDR_FMT "%X"
+
+#define OUR_NIL _make_header(0,_TAG_HEADER_FLOAT)
+
+static void dump_process_info(int to, void *to_arg, Process *p);
+static void dump_element(int to, void *to_arg, Eterm x);
+static void dump_dist_ext(int to, void *to_arg, ErtsDistExternal *edep);
+static void dump_element_nl(int to, void *to_arg, Eterm x);
+static int stack_element_dump(int to, void *to_arg, Process* p, Eterm* sp,
+ int yreg);
+static void print_function_from_pc(int to, void *to_arg, Eterm* x);
+static void heap_dump(int to, void *to_arg, Eterm x);
+static void dump_binaries(int to, void *to_arg, Binary* root);
+static void dump_externally(int to, void *to_arg, Eterm term);
+
+static Binary* all_binaries;
+
+extern Eterm beam_apply[];
+extern Eterm beam_exit[];
+extern Eterm beam_continue_exit[];
+
+
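+/*
+ * Writes the process sections of a crash dump: for every process
+ * that is neither exiting nor being garbage collected, the message
+ * queue, dictionary, stack and reachable heap terms are emitted
+ * (sections such as =proc_messages, =proc_stack, =proc_heap),
+ * followed by one =binary section per off-heap binary encountered.
+ * This output is what tools like crashdump_viewer parse.
+ */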
+void
+erts_deep_process_dump(int to, void *to_arg)
+{
+ int i;
+
+ all_binaries = NULL;
+
+ for (i = 0; i < erts_max_processes; i++) {
+ if ((process_tab[i] != NULL) && (process_tab[i]->i != ENULL)) {
+ if (process_tab[i]->status != P_EXITING) {
+ Process* p = process_tab[i];
+
+ if (p->status != P_GARBING) {
+ dump_process_info(to, to_arg, p);
+ }
+ }
+ }
+ }
+
+ dump_binaries(to, to_arg, all_binaries);
+}
+
+static void
+dump_process_info(int to, void *to_arg, Process *p)
+{
+ Eterm* sp;
+ ErlMessage* mp;
+ int yreg = -1;
+
+ ERTS_SMP_MSGQ_MV_INQ2PRIVQ(p);
+
+ if ((p->trace_flags & F_SENSITIVE) == 0 && p->msg.first) {
+ erts_print(to, to_arg, "=proc_messages:%T\n", p->id);
+ for (mp = p->msg.first; mp != NULL; mp = mp->next) {
+ Eterm mesg = ERL_MESSAGE_TERM(mp);
+ if (is_value(mesg))
+ dump_element(to, to_arg, mesg);
+ else
+ dump_dist_ext(to, to_arg, mp->data.dist_ext);
+ mesg = ERL_MESSAGE_TOKEN(mp);
+ erts_print(to, to_arg, ":");
+ dump_element(to, to_arg, mesg);
+ erts_print(to, to_arg, "\n");
+ }
+ }
+
+ if ((p->trace_flags & F_SENSITIVE) == 0) {
+ if (p->dictionary) {
+ erts_print(to, to_arg, "=proc_dictionary:%T\n", p->id);
+ erts_deep_dictionary_dump(to, to_arg,
+ p->dictionary, dump_element_nl);
+ }
+ }
+
+ if ((p->trace_flags & F_SENSITIVE) == 0) {
+ erts_print(to, to_arg, "=proc_stack:%T\n", p->id);
+ for (sp = p->stop; sp < STACK_START(p); sp++) {
+ yreg = stack_element_dump(to, to_arg, p, sp, yreg);
+ }
+
+ erts_print(to, to_arg, "=proc_heap:%T\n", p->id);
+ for (sp = p->stop; sp < STACK_START(p); sp++) {
+ Eterm term = *sp;
+
+ if (!is_catch(term) && !is_CP(term)) {
+ heap_dump(to, to_arg, term);
+ }
+ }
+ for (mp = p->msg.first; mp != NULL; mp = mp->next) {
+ Eterm mesg = ERL_MESSAGE_TERM(mp);
+ if (is_value(mesg))
+ heap_dump(to, to_arg, mesg);
+ mesg = ERL_MESSAGE_TOKEN(mp);
+ heap_dump(to, to_arg, mesg);
+ }
+ if (p->dictionary) {
+ erts_deep_dictionary_dump(to, to_arg, p->dictionary, heap_dump);
+ }
+ }
+}
+
+static void
+dump_dist_ext(int to, void *to_arg, ErtsDistExternal *edep)
+{
+ if (!edep)
+ erts_print(to, to_arg, "D0:E0:");
+ else {
+ byte *e;
+ size_t sz;
+ if (!(edep->flags & ERTS_DIST_EXT_ATOM_TRANS_TAB))
+ erts_print(to, to_arg, "D0:");
+ else {
+ int i;
+ erts_print(to, to_arg, "D%X:", edep->attab.size);
+ for (i = 0; i < edep->attab.size; i++)
+ dump_element(to, to_arg, edep->attab.atom[i]);
+ }
+ sz = edep->ext_endp - edep->extp;
+ e = edep->extp;
+ if (edep->flags & ERTS_DIST_EXT_DFLAG_HDR) {
+ ASSERT(*e != VERSION_MAGIC);
+ sz++;
+ }
+ else {
+ ASSERT(*e == VERSION_MAGIC);
+ }
+
+ erts_print(to, to_arg, "E%X:", sz);
+ if (edep->flags & ERTS_DIST_EXT_DFLAG_HDR)
+ erts_print(to, to_arg, "%02X", VERSION_MAGIC);
+ while (e < edep->ext_endp)
+ erts_print(to, to_arg, "%02X", *e++);
+ }
+}
+
+static void
+dump_element(int to, void *to_arg, Eterm x)
+{
+ if (is_list(x)) {
+ erts_print(to, to_arg, "H" WORD_FMT, list_val(x));
+ } else if (is_boxed(x)) {
+ erts_print(to, to_arg, "H" WORD_FMT, boxed_val(x));
+ } else if (is_immed(x)) {
+ if (is_atom(x)) {
+ unsigned char* s = atom_tab(atom_val(x))->name;
+ int len = atom_tab(atom_val(x))->len;
+ int i;
+
+ erts_print(to, to_arg, "A%X:", atom_tab(atom_val(x))->len);
+ for (i = 0; i < len; i++) {
+ erts_putc(to, to_arg, *s++);
+ }
+ } else if (is_small(x)) {
+ erts_print(to, to_arg, "I%T", x);
+ } else if (is_pid(x)) {
+ erts_print(to, to_arg, "P%T", x);
+ } else if (is_port(x)) {
+ erts_print(to, to_arg, "p<%bpu.%bpu>",
+ port_channel_no(x), port_number(x));
+ } else if (is_nil(x)) {
+ erts_putc(to, to_arg, 'N');
+ }
+ }
+}
+
+static void
+dump_element_nl(int to, void *to_arg, Eterm x)
+{
+ dump_element(to, to_arg, x);
+ erts_putc(to, to_arg, '\n');
+}
+
+
+static int
+stack_element_dump(int to, void *to_arg, Process* p, Eterm* sp, int yreg)
+{
+ Eterm x = *sp;
+
+ if (yreg < 0 || is_CP(x)) {
+ erts_print(to, to_arg, "%p:", sp);
+ } else {
+ erts_print(to, to_arg, "y%d:", yreg);
+ yreg++;
+ }
+
+ if (is_CP(x)) {
+ erts_print(to, to_arg, "SReturn addr 0x%X (", (Eterm *) x);
+ print_function_from_pc(to, to_arg, cp_val(x));
+ erts_print(to, to_arg, ")\n");
+ yreg = 0;
+ } else if (is_catch(x)) {
+ erts_print(to, to_arg, "SCatch 0x%X (", catch_pc(x));
+ print_function_from_pc(to, to_arg, catch_pc(x));
+ erts_print(to, to_arg, ")\n");
+ } else {
+ dump_element(to, to_arg, x);
+ erts_putc(to, to_arg, '\n');
+ }
+ return yreg;
+}
+
+static void
+print_function_from_pc(int to, void *to_arg, Eterm* x)
+{
+ Eterm* addr = find_function_from_pc(x);
+ if (addr == NULL) {
+ if (x == beam_exit) {
+ erts_print(to, to_arg, "<terminate process>");
+ } else if (x == beam_continue_exit) {
+ erts_print(to, to_arg, "<continue terminate process>");
+ } else if (x == beam_apply+1) {
+ erts_print(to, to_arg, "<terminate process normally>");
+ } else {
+ erts_print(to, to_arg, "unknown function");
+ }
+ } else {
+ erts_print(to, to_arg, "%T:%T/%bpu + %bpu",
+ addr[0], addr[1], addr[2], ((x-addr)-2) * sizeof(Eterm));
+ }
+}
+
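+/*
+ * Dumps everything reachable from x. The traversal is destructive
+ * and uses pointer reversal (in the style of Deutsch-Schorr-Waite):
+ * instead of a recursion stack, the back link is stored in the heap
+ * cell just visited, and visited cells are marked with OUR_NIL so
+ * already-dumped terms are skipped. Arbitrarily deep structures thus
+ * need no extra memory, but the heap is unusable afterwards, which
+ * is acceptable only because the emulator is about to terminate.
+ */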
+static void
+heap_dump(int to, void *to_arg, Eterm x)
+{
+ Eterm* ptr;
+ Eterm last = OUR_NIL;
+ Eterm* next = &last;
+
+ if (is_immed(x) || is_CP(x)) {
+ return;
+ }
+
+ again:
+ if (x == OUR_NIL) { /* We are done. */
+ return;
+ } else if (is_CP(x)) {
+ next = (Eterm *) x;
+ } else if (is_list(x)) {
+ ptr = list_val(x);
+ if (ptr[0] != OUR_NIL) {
+ erts_print(to, to_arg, ADDR_FMT ":l", ptr);
+ dump_element(to, to_arg, ptr[0]);
+ erts_putc(to, to_arg, '|');
+ dump_element(to, to_arg, ptr[1]);
+ erts_putc(to, to_arg, '\n');
+ if (is_immed(ptr[1])) {
+ ptr[1] = make_small(0);
+ }
+ x = ptr[0];
+ ptr[0] = (Eterm) next;
+ next = ptr + 1;
+ goto again;
+ }
+ } else if (is_boxed(x)) {
+ Eterm hdr;
+
+ ptr = boxed_val(x);
+ hdr = *ptr;
+ if (hdr != OUR_NIL) { /* If not visited */
+ erts_print(to, to_arg, ADDR_FMT ":", ptr);
+ if (is_arity_value(hdr)) {
+ Uint i;
+ Uint arity = arityval(hdr);
+
+ erts_print(to, to_arg, "t" WORD_FMT ":", arity);
+ for (i = 1; i <= arity; i++) {
+ dump_element(to, to_arg, ptr[i]);
+ if (is_immed(ptr[i])) {
+ ptr[i] = make_small(0);
+ }
+ if (i < arity) {
+ erts_putc(to, to_arg, ',');
+ }
+ }
+ erts_putc(to, to_arg, '\n');
+ if (arity == 0) {
+ ptr[0] = OUR_NIL;
+ } else {
+ x = ptr[arity];
+ ptr[0] = (Eterm) next;
+ next = ptr + arity - 1;
+ goto again;
+ }
+ } else if (hdr == HEADER_FLONUM) {
+ FloatDef f;
+ char sbuf[31];
+ int i;
+
+ GET_DOUBLE_DATA((ptr+1), f);
+ i = sys_double_to_chars(f.fd, (char*) sbuf);
+ sys_memset(sbuf+i, 0, 31-i);
+ erts_print(to, to_arg, "F%X:%s\n", i, sbuf);
+ *ptr = OUR_NIL;
+ } else if (_is_bignum_header(hdr)) {
+ erts_print(to, to_arg, "B%T\n", x);
+ *ptr = OUR_NIL;
+ } else if (is_binary_header(hdr)) {
+ Uint tag = thing_subtag(hdr);
+ Uint size = binary_size(x);
+ Uint i;
+
+ if (tag == HEAP_BINARY_SUBTAG) {
+ byte* p;
+
+ erts_print(to, to_arg, "Yh%X:", size);
+ p = binary_bytes(x);
+ for (i = 0; i < size; i++) {
+ erts_print(to, to_arg, "%02X", p[i]);
+ }
+ } else if (tag == REFC_BINARY_SUBTAG) {
+ ProcBin* pb = (ProcBin *) binary_val(x);
+ Binary* val = pb->val;
+
+ if (erts_smp_atomic_xchg(&val->refc, 0) != 0) {
+ val->flags = (Uint) all_binaries;
+ all_binaries = val;
+ }
+ erts_print(to, to_arg, "Yc%X:%X:%X", val,
+ pb->bytes - (byte *)val->orig_bytes,
+ size);
+ } else if (tag == SUB_BINARY_SUBTAG) {
+ ErlSubBin* Sb = (ErlSubBin *) binary_val(x);
+ Eterm* real_bin = binary_val(Sb->orig);
+ void* val;
+
+ if (thing_subtag(*real_bin) == REFC_BINARY_SUBTAG) {
+ ProcBin* pb = (ProcBin *) real_bin;
+ val = pb->val;
+ } else { /* Heap binary */
+ val = real_bin;
+ }
+ erts_print(to, to_arg, "Ys%X:%X:%X", val, Sb->offs, size);
+ }
+ erts_putc(to, to_arg, '\n');
+ *ptr = OUR_NIL;
+ } else if (is_external_pid_header(hdr)) {
+ erts_print(to, to_arg, "P%T\n", x);
+ *ptr = OUR_NIL;
+ } else if (is_external_port_header(hdr)) {
+ erts_print(to, to_arg, "p<%bpu.%bpu>\n",
+ port_channel_no(x), port_number(x));
+ *ptr = OUR_NIL;
+ } else {
+ /*
+ * Everything else is dumped in the external term format.
+ */
+ dump_externally(to, to_arg, x);
+ erts_putc(to, to_arg, '\n');
+ *ptr = OUR_NIL;
+ }
+ }
+ }
+
+ x = *next;
+ *next = OUR_NIL;
+ next--;
+ goto again;
+}
+
+static void
+dump_binaries(int to, void *to_arg, Binary* current)
+{
+ while (current) {
+ long i;
+ long size = current->orig_size;
+ byte* bytes = (byte*) current->orig_bytes;
+
+ erts_print(to, to_arg, "=binary:%X\n", current);
+ erts_print(to, to_arg, "%X:", size);
+ for (i = 0; i < size; i++) {
+ erts_print(to, to_arg, "%02X", bytes[i]);
+ }
+ erts_putc(to, to_arg, '\n');
+ current = (Binary *) current->flags;
+ }
+}
+
+static void
+dump_externally(int to, void *to_arg, Eterm term)
+{
+ byte sbuf[1024]; /* encode and hope for the best ... */
+ byte* s;
+ byte* p;
+
+ if (is_fun(term)) {
+ /*
+ * The fun's environment used to cause trouble. There were
+ * two kinds of problems:
+ *
+ * 1. A term used in the environment could already have been
+ * dumped and thus destroyed (since dumping is destructive).
+ *
+ * 2. A term in the environment could be too big, so that
+ * the buffer for external format overflowed (allocating
+ * memory is not really a solution, as it could be exhausted).
+ *
+ * Simple solution: Set all variables in the environment to NIL.
+ * The crashdump_viewer does not allow inspection of them anyway.
+ */
+ ErlFunThing* funp = (ErlFunThing *) fun_val(term);
+ Uint num_free = funp->num_free;
+ Uint i;
+
+ for (i = 0; i < num_free; i++) {
+ funp->env[i] = NIL;
+ }
+ }
+
+ s = p = sbuf;
+ erts_encode_ext(term, &p);
+ erts_print(to, to_arg, "E%X:", p-s);
+ while (s < p) {
+ erts_print(to, to_arg, "%02X", *s++);
+ }
+}
diff --git a/erts/emulator/beam/erl_process_lock.c b/erts/emulator/beam/erl_process_lock.c
new file mode 100644
index 0000000000..52440fb635
--- /dev/null
+++ b/erts/emulator/beam/erl_process_lock.c
@@ -0,0 +1,1431 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2007-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+
+/*
+ * Description: Implementation of Erlang process locks.
+ *
+ * Author: Rickard Green
+ */
+
+/*
+ * A short explanation of the process lock implementation:
+ * Each process has a lock bitfield and a number of lock wait
+ * queues.
+ * The bit field consists of a number of lock flags (L1, L2, ...)
+ * and a number of wait flags (W1, W2, ...). Each lock flag has a
+ * corresponding wait flag. The bit field isn't guaranteed to be
+ * larger than 32 bits, which sets a maximum of 16 different locks
+ * per process. Currently, only 4 locks per process are used. The
+ * bit field is operated on by use of atomic operations (custom
+ * made bitwise atomic operations). When a lock is locked the
+ * corresponding lock bit is set. When a thread is waiting on a
+ * lock the wait flag for the lock is set.
+ * The process table is protected by pix (process index) locks,
+ * which are spinlocks that each protect a number of process indices
+ * in the process table. The pix locks also protect the lock queues
+ * and modifications of wait flags.
+ * When acquiring a process lock we first try to set the lock
+ * flag. If we are able to set the lock flag and the wait flag
+ * isn't set we are done. If the lock flag was already set we
+ * have to acquire the pix lock, set the wait flag, and put
+ * ourselves in the wait queue.
+ * Process locks are always acquired in FIFO order.
+ * When releasing a process lock we first unset all lock flags
+ * whose corresponding wait flag is clear (which will succeed).
+ * If wait flags were set for the locks being released, we acquire
+ * the pix lock, and transfer the lock to the first thread
+ * in the wait queue.
+ * Note that wait flags may be read without the pix lock, but
+ * it is important that wait flags only are modified when the pix
+ * lock is held.
+ * This implementation assumes that erts_smp_atomic_or_retold()
+ * provides the necessary memory barriers for a lock operation, and
+ * that erts_smp_atomic_and_retold() provides the necessary memory
+ * barriers for an unlock operation.
+ */
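+
+/*
+ * Sketch of the flag layout with the four locks currently in use
+ * (bit positions are illustrative; see erl_process_lock.h for the
+ * real values):
+ *
+ *   bit:   7  6  5  4   3  2  1  0
+ *          W4 W3 W2 W1  L4 L3 L2 L1
+ *
+ * The wait flag of a lock is its lock flag shifted left by
+ * ERTS_PROC_LOCK_WAITER_SHIFT. Roughly: locking L2 atomically ORs
+ * in L2 and succeeds if L2 was not already set; unlocking L2
+ * atomically ANDs out L2, and if W2 is set the lock is instead
+ * transferred to the first queued waiter.
+ */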
+
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+
+#include "erl_process.h"
+
+const Process erts_proc_lock_busy;
+
+#ifdef ERTS_SMP
+
+/*#define ERTS_PROC_LOCK_SPIN_ON_GATE*/
+#define ERTS_PROC_LOCK_SPIN_COUNT_MAX 16000
+#define ERTS_PROC_LOCK_SPIN_COUNT_BASE 1000
+
+#ifdef ERTS_PROC_LOCK_DEBUG
+#define ERTS_PROC_LOCK_HARD_DEBUG
+#endif
+
+#ifdef ERTS_PROC_LOCK_HARD_DEBUG
+static void check_queue(erts_proc_lock_t *lck);
+#endif
+
+
+typedef struct erts_proc_lock_waiter_t_ erts_proc_lock_waiter_t;
+struct erts_proc_lock_waiter_t_ {
+ erts_proc_lock_waiter_t *next;
+ erts_proc_lock_waiter_t *prev;
+ ErtsProcLocks wait_locks;
+ erts_smp_gate_t gate;
+ erts_proc_lock_queues_t *queues;
+};
+
+struct erts_proc_lock_queues_t_ {
+ erts_proc_lock_queues_t *next;
+ erts_proc_lock_waiter_t *queue[ERTS_PROC_LOCK_MAX_BIT+1];
+};
+
+struct erts_proc_lock_thr_spec_data_t_ {
+ erts_proc_lock_queues_t *qs;
+ erts_proc_lock_waiter_t *wtr;
+};
+
+static erts_proc_lock_queues_t zeroqs = {0};
+
+static erts_smp_spinlock_t wtr_lock;
+static erts_proc_lock_waiter_t *waiter_free_list;
+static erts_proc_lock_queues_t *queue_free_list;
+static erts_tsd_key_t waiter_key;
+
+#ifdef ERTS_ENABLE_LOCK_CHECK
+static struct {
+ Sint16 proc_lock_main;
+ Sint16 proc_lock_link;
+ Sint16 proc_lock_msgq;
+ Sint16 proc_lock_status;
+} lc_id;
+#endif
+
+erts_pix_lock_t erts_pix_locks[ERTS_NO_OF_PIX_LOCKS];
+
+static int proc_lock_spin_count;
+static int proc_lock_trans_spin_cost;
+
+static void cleanup_waiter(void);
+
+void
+erts_init_proc_lock(void)
+{
+ int i;
+ int cpus;
+ erts_smp_spinlock_init(&wtr_lock, "proc_lck_wtr_alloc");
+ for (i = 0; i < ERTS_NO_OF_PIX_LOCKS; i++) {
+#if ERTS_PROC_LOCK_MUTEX_IMPL
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ erts_smp_mtx_init_x(&erts_pix_locks[i].u.mtx, "pix_lock", make_small(i));
+#else
+ erts_smp_mtx_init(&erts_pix_locks[i].u.mtx, "pix_lock");
+#endif
+#else
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ erts_smp_spinlock_init_x(&erts_pix_locks[i].u.spnlck, "pix_lock", make_small(i));
+#else
+ erts_smp_spinlock_init(&erts_pix_locks[i].u.spnlck, "pix_lock");
+#endif
+#endif
+ }
+ waiter_free_list = NULL;
+ queue_free_list = NULL;
+ erts_tsd_key_create(&waiter_key);
+ erts_thr_install_exit_handler(cleanup_waiter);
+#ifdef ERTS_ENABLE_LOCK_CHECK
+ lc_id.proc_lock_main = erts_lc_get_lock_order_id("proc_main");
+ lc_id.proc_lock_link = erts_lc_get_lock_order_id("proc_link");
+ lc_id.proc_lock_msgq = erts_lc_get_lock_order_id("proc_msgq");
+ lc_id.proc_lock_status = erts_lc_get_lock_order_id("proc_status");
+#endif
+ cpus = erts_get_cpu_configured(erts_cpuinfo);
+ if (cpus > 1)
+ proc_lock_spin_count = (ERTS_PROC_LOCK_SPIN_COUNT_BASE
+ * ((int) erts_no_schedulers));
+ else if (cpus == 1)
+ proc_lock_spin_count = 0;
+ else /* No of cpus unknown. Assume multi proc, but be conservative. */
+ proc_lock_spin_count = ERTS_PROC_LOCK_SPIN_COUNT_BASE;
+ if (proc_lock_spin_count > ERTS_PROC_LOCK_SPIN_COUNT_MAX)
+ proc_lock_spin_count = ERTS_PROC_LOCK_SPIN_COUNT_MAX;
+ proc_lock_trans_spin_cost = proc_lock_spin_count/20;
+}
+
+static ERTS_INLINE erts_proc_lock_waiter_t *
+alloc_wtr(void)
+{
+ erts_proc_lock_waiter_t *wtr;
+ erts_smp_spin_lock(&wtr_lock);
+ wtr = waiter_free_list;
+ if (wtr) {
+ waiter_free_list = wtr->next;
+ ERTS_LC_ASSERT(queue_free_list);
+ wtr->queues = queue_free_list;
+ queue_free_list = wtr->queues->next;
+ erts_smp_spin_unlock(&wtr_lock);
+ }
+ else {
+ erts_smp_spin_unlock(&wtr_lock);
+ wtr = erts_alloc(ERTS_ALC_T_PROC_LCK_WTR,
+ sizeof(erts_proc_lock_waiter_t));
+ erts_smp_gate_init(&wtr->gate);
+ wtr->wait_locks = (ErtsProcLocks) 0;
+ wtr->queues = erts_alloc(ERTS_ALC_T_PROC_LCK_QS,
+ sizeof(erts_proc_lock_queues_t));
+ sys_memcpy((void *) wtr->queues,
+ (void *) &zeroqs,
+ sizeof(erts_proc_lock_queues_t));
+ }
+ return wtr;
+}
+
+#ifdef ERTS_ENABLE_LOCK_CHECK
+static void
+check_unused_waiter(erts_proc_lock_waiter_t *wtr)
+{
+ int i;
+ ERTS_LC_ASSERT(wtr->wait_locks == 0);
+ for (i = 0; i <= ERTS_PROC_LOCK_MAX_BIT; i++)
+ ERTS_LC_ASSERT(!wtr->queues->queue[i]);
+}
+#define CHECK_UNUSED_WAITER(W) check_unused_waiter((W))
+#else
+#define CHECK_UNUSED_WAITER(W)
+#endif
+
+
+static ERTS_INLINE void
+free_wtr(erts_proc_lock_waiter_t *wtr)
+{
+ CHECK_UNUSED_WAITER(wtr);
+ erts_smp_spin_lock(&wtr_lock);
+ wtr->next = waiter_free_list;
+ waiter_free_list = wtr;
+ wtr->queues->next = queue_free_list;
+ queue_free_list = wtr->queues;
+ erts_smp_spin_unlock(&wtr_lock);
+}
+
+void
+erts_proc_lock_prepare_proc_lock_waiter(void)
+{
+ erts_tsd_set(waiter_key, (void *) alloc_wtr());
+}
+
+
+static void
+cleanup_waiter(void)
+{
+ erts_proc_lock_waiter_t *wtr = erts_tsd_get(waiter_key);
+ if (wtr)
+ free_wtr(wtr);
+}
+
+
+/*
+ * Waiters are queued in a circular doubly linked list,
+ * where qs->queue[lock_ix] is the first waiter in the queue, and
+ * qs->queue[lock_ix]->prev is the last waiter in the queue.
+ */
+
+static ERTS_INLINE void
+enqueue_waiter(erts_proc_lock_queues_t *qs,
+ int ix,
+ erts_proc_lock_waiter_t *wtr)
+{
+ if (!qs->queue[ix]) {
+ qs->queue[ix] = wtr;
+ wtr->next = wtr;
+ wtr->prev = wtr;
+ }
+ else {
+ ERTS_LC_ASSERT(qs->queue[ix]->next && qs->queue[ix]->prev);
+ wtr->next = qs->queue[ix];
+ wtr->prev = qs->queue[ix]->prev;
+ wtr->prev->next = wtr;
+ qs->queue[ix]->prev = wtr;
+ }
+}
+
+static erts_proc_lock_waiter_t *
+dequeue_waiter(erts_proc_lock_queues_t *qs, int ix)
+{
+ erts_proc_lock_waiter_t *wtr = qs->queue[ix];
+ ERTS_LC_ASSERT(qs->queue[ix]);
+ if (wtr->next == wtr) {
+ ERTS_LC_ASSERT(qs->queue[ix]->prev == wtr);
+ qs->queue[ix] = NULL;
+ }
+ else {
+ ERTS_LC_ASSERT(wtr->next != wtr);
+ ERTS_LC_ASSERT(wtr->prev != wtr);
+ wtr->next->prev = wtr->prev;
+ wtr->prev->next = wtr->next;
+ qs->queue[ix] = wtr->next;
+ }
+ return wtr;
+}
+
+/*
+ * Tries to acquire as many locks as possible in lock order,
+ * and sets the wait flag on the first lock that cannot be
+ * acquired.
+ *
+ * Note: We need the pix lock during this operation. Wait
+ * flags are only allowed to be manipulated under pix
+ * lock.
+ */
+static ERTS_INLINE void
+try_aquire(erts_proc_lock_t *lck, erts_proc_lock_waiter_t *wtr)
+{
+ ErtsProcLocks got_locks = (ErtsProcLocks) 0;
+ ErtsProcLocks locks = wtr->wait_locks;
+ int lock_no;
+
+ ERTS_LC_ASSERT(lck->queues);
+ ERTS_LC_ASSERT(got_locks != locks);
+
+ for (lock_no = 0; lock_no <= ERTS_PROC_LOCK_MAX_BIT; lock_no++) {
+ ErtsProcLocks lock = ((ErtsProcLocks) 1) << lock_no;
+ if (locks & lock) {
+ ErtsProcLocks wflg, old_lflgs;
+ if (lck->queues->queue[lock_no]) {
+ /* Others already waiting */
+ enqueue:
+ ERTS_LC_ASSERT(ERTS_PROC_LOCK_FLGS_READ_(lck)
+ & (lock << ERTS_PROC_LOCK_WAITER_SHIFT));
+ enqueue_waiter(lck->queues, lock_no, wtr);
+ break;
+ }
+ wflg = lock << ERTS_PROC_LOCK_WAITER_SHIFT;
+ old_lflgs = ERTS_PROC_LOCK_FLGS_BOR_(lck, wflg | lock);
+ if (old_lflgs & lock) {
+ /* Didn't get the lock */
+ goto enqueue;
+ }
+ else {
+ /* Got the lock */
+ got_locks |= lock;
+ ERTS_LC_ASSERT(!(old_lflgs & wflg));
+ /* No one else can be waiting for the lock; remove wait flag */
+ (void) ERTS_PROC_LOCK_FLGS_BAND_(lck, ~wflg);
+ if (got_locks == locks)
+ break;
+ }
+ }
+ }
+
+ wtr->wait_locks &= ~got_locks;
+}
+
+/*
+ * Transfer 'trnsfr_lcks' held by this executing thread to other
+ * threads waiting for the locks. When a lock has been transferred
+ * we also have to try to acquire as many locks as possible for the
+ * other thread.
+ */
+static int
+transfer_locks(Process *p,
+ ErtsProcLocks trnsfr_lcks,
+ erts_pix_lock_t *pix_lock,
+ int unlock)
+{
+ int transferred = 0;
+ erts_proc_lock_waiter_t *wake = NULL;
+ erts_proc_lock_waiter_t *wtr;
+ ErtsProcLocks unset_waiter = 0;
+ ErtsProcLocks tlocks = trnsfr_lcks;
+ int lock_no;
+
+ ERTS_LC_ASSERT(erts_lc_pix_lock_is_locked(pix_lock));
+
+#ifdef ERTS_PROC_LOCK_HARD_DEBUG
+ check_queue(&p->lock);
+#endif
+
+ for (lock_no = 0; tlocks && lock_no <= ERTS_PROC_LOCK_MAX_BIT; lock_no++) {
+ ErtsProcLocks lock = ((ErtsProcLocks) 1) << lock_no;
+ if (tlocks & lock) {
+ erts_proc_lock_queues_t *qs = p->lock.queues;
+ /* Transfer lock */
+#ifdef ERTS_ENABLE_LOCK_CHECK
+ tlocks &= ~lock;
+#endif
+ ERTS_LC_ASSERT(ERTS_PROC_LOCK_FLGS_READ_(&p->lock)
+ & (lock << ERTS_PROC_LOCK_WAITER_SHIFT));
+ transferred++;
+ wtr = dequeue_waiter(qs, lock_no);
+ ERTS_LC_ASSERT(wtr);
+ if (!qs->queue[lock_no])
+ unset_waiter |= lock;
+ ERTS_LC_ASSERT(wtr->wait_locks & lock);
+ wtr->wait_locks &= ~lock;
+ if (wtr->wait_locks)
+ try_aquire(&p->lock, wtr);
+ if (!wtr->wait_locks) {
+ /*
+ * The other thread got all locks it needs;
+ * need to wake it up.
+ */
+ wtr->next = wake;
+ wake = wtr;
+ }
+ }
+
+ }
+
+ if (unset_waiter) {
+ unset_waiter <<= ERTS_PROC_LOCK_WAITER_SHIFT;
+ (void) ERTS_PROC_LOCK_FLGS_BAND_(&p->lock, ~unset_waiter);
+ }
+
+#ifdef ERTS_PROC_LOCK_HARD_DEBUG
+ check_queue(&p->lock);
+#endif
+
+ ERTS_LC_ASSERT(tlocks == 0); /* We should have transferred all of them */
+
+ if (!wake) {
+ if (unlock)
+ erts_pix_unlock(pix_lock);
+ }
+ else {
+ erts_pix_unlock(pix_lock);
+
+ do {
+ erts_proc_lock_waiter_t *tmp = wake;
+ wake = wake->next;
+ erts_smp_gate_let_through(&tmp->gate, 1);
+ } while (wake);
+
+ if (!unlock)
+ erts_pix_lock(pix_lock);
+ }
+ return transferred;
+}
+
+/*
+ * Determine which locks in 'need_locks' are not currently locked in
+ * 'in_use', but do not return any locks "above" some lock we need,
+ * so we do not attempt to grab locks out of order.
+ *
+ * For example, if we want to lock 10111, and 00100 was already locked, this
+ * would return 00011, indicating we should not try for 10000 yet because
+ * that would be a lock-ordering violation.
+ */
+static ERTS_INLINE ErtsProcLocks
+in_order_locks(ErtsProcLocks in_use, ErtsProcLocks need_locks)
+{
+ /* All locks we want that are already locked by someone else. */
+ ErtsProcLocks busy = in_use & need_locks;
+
+ /* Just the lowest numbered lock we want that's in use; 0 if none. */
+ ErtsProcLocks lowest_busy = busy & -busy;
+
+ /* All locks below the lowest one we want that's in use already. */
+ return need_locks & (lowest_busy - 1);
+}
+
+/*
+ * Try to grab locks one at a time in lock order and wait on the lowest
+ * lock we fail to grab, if any.
+ *
+ * If successful, this returns 0 and all locks in 'need_locks' are held.
+ *
+ * On entry, the pix lock is held iff !ERTS_PROC_LOCK_ATOMIC_IMPL.
+ * On exit it is not held.
+ */
+static void
+wait_for_locks(Process *p,
+ erts_pix_lock_t *pixlck,
+ ErtsProcLocks locks,
+ ErtsProcLocks need_locks,
+ ErtsProcLocks olflgs)
+{
+ erts_pix_lock_t *pix_lock = pixlck ? pixlck : ERTS_PID2PIXLOCK(p->id);
+ int tsd;
+ erts_proc_lock_waiter_t *wtr;
+
+ /* Acquire a waiter object on which this thread can wait. */
+ wtr = erts_tsd_get(waiter_key);
+ if (wtr)
+ tsd = 1;
+ else {
+#if ERTS_PROC_LOCK_SPINLOCK_IMPL && !ERTS_PROC_LOCK_ATOMIC_IMPL
+ erts_pix_unlock(pix_lock);
+#endif
+ wtr = alloc_wtr();
+ tsd = 0;
+#if ERTS_PROC_LOCK_SPINLOCK_IMPL && !ERTS_PROC_LOCK_ATOMIC_IMPL
+ erts_pix_lock(pix_lock);
+#endif
+ }
+
+ /* Record which locks this waiter needs. */
+ wtr->wait_locks = need_locks;
+
+#if ERTS_PROC_LOCK_ATOMIC_IMPL
+ erts_pix_lock(pix_lock);
+#endif
+
+ ERTS_LC_ASSERT(erts_lc_pix_lock_is_locked(pix_lock));
+
+ /* Provide the process with waiter queues, if it doesn't have one. */
+ if (!p->lock.queues) {
+ wtr->queues->next = NULL;
+ p->lock.queues = wtr->queues;
+ }
+ else {
+ wtr->queues->next = p->lock.queues->next;
+ p->lock.queues->next = wtr->queues;
+ }
+
+#ifdef ERTS_PROC_LOCK_HARD_DEBUG
+ check_queue(&p->lock);
+#endif
+
+ /* Try to acquire locks one at a time in lock order and set the wait flag */
+ try_aquire(&p->lock, wtr);
+
+#ifdef ERTS_PROC_LOCK_HARD_DEBUG
+ check_queue(&p->lock);
+#endif
+
+ if (wtr->wait_locks) { /* We didn't get them all; need to wait... */
+ /* Got to wait for locks... */
+ erts_pix_unlock(pix_lock);
+
+ /*
+ * Wait for needed locks. When we return, all needed locks
+ * have been acquired by other threads and transferred to us.
+ */
+#ifdef ERTS_PROC_LOCK_SPIN_ON_GATE
+ erts_smp_gate_swait(&wtr->gate, proc_lock_spin_count);
+#else
+ erts_smp_gate_wait(&wtr->gate);
+#endif
+
+ erts_pix_lock(pix_lock);
+ }
+
+ /* Recover some queues to store in the waiter. */
+ ERTS_LC_ASSERT(p->lock.queues);
+ if (p->lock.queues->next) {
+ wtr->queues = p->lock.queues->next;
+ p->lock.queues->next = wtr->queues->next;
+ }
+ else {
+ wtr->queues = p->lock.queues;
+ p->lock.queues = NULL;
+ }
+
+ erts_pix_unlock(pix_lock);
+
+ ERTS_LC_ASSERT(locks == (ERTS_PROC_LOCK_FLGS_READ_(&p->lock) & locks));
+
+ if (tsd)
+ CHECK_UNUSED_WAITER(wtr);
+ else
+ free_wtr(wtr);
+}
+
+/*
+ * erts_proc_lock_failed() is called when erts_smp_proc_lock()
+ * wasn't able to lock all locks. We may need to transfer locks
+ * to waiters and wait for our turn on locks.
+ *
+ * Iff !ERTS_PROC_LOCK_ATOMIC_IMPL, the pix lock is locked on entry.
+ *
+ * This always returns with the pix lock unlocked.
+ */
+void
+erts_proc_lock_failed(Process *p,
+ erts_pix_lock_t *pixlck,
+ ErtsProcLocks locks,
+ ErtsProcLocks old_lflgs)
+{
+#ifdef ERTS_PROC_LOCK_SPIN_ON_GATE
+ int spin_count = 0;
+#else
+ int spin_count = proc_lock_spin_count;
+#endif
+
+ ErtsProcLocks need_locks = locks;
+ ErtsProcLocks olflgs = old_lflgs;
+
+ while (need_locks != 0)
+ {
+ ErtsProcLocks can_grab = in_order_locks(olflgs, need_locks);
+
+ if (can_grab == 0)
+ {
+ /* Someone already has the lowest-numbered lock we want. */
+
+ if (spin_count-- <= 0)
+ {
+ /* Too many retries, give up and sleep for the lock. */
+ wait_for_locks(p, pixlck, locks, need_locks, olflgs);
+ return;
+ }
+
+ olflgs = ERTS_PROC_LOCK_FLGS_READ_(&p->lock);
+ }
+ else
+ {
+ /* Try to grab all of the grabbable locks at once with cmpxchg. */
+ ErtsProcLocks grabbed = olflgs | can_grab;
+ ErtsProcLocks nflgs =
+ ERTS_PROC_LOCK_FLGS_CMPXCHG_(&p->lock, grabbed, olflgs);
+
+ if (nflgs == olflgs)
+ {
+ /* Success! We grabbed the 'can_grab' locks. */
+ olflgs = grabbed;
+ need_locks &= ~can_grab;
+
+#ifndef ERTS_PROC_LOCK_SPIN_ON_GATE
+ /* Since we made progress, reset the spin count. */
+ spin_count = proc_lock_spin_count;
+#endif
+ }
+ else
+ {
+ /* Compare-and-exchange failed, try again. */
+ olflgs = nflgs;
+ }
+ }
+ }
+
+ /* Now we have all of the locks we wanted. */
+
+#if !ERTS_PROC_LOCK_ATOMIC_IMPL
+ erts_pix_unlock(pixlck);
+#endif
+}
+
+/*
+ * erts_proc_unlock_failed() is called when erts_smp_proc_unlock()
+ * wasn't able to unlock all locks. We may need to transfer locks
+ * to waiters.
+ */
+void
+erts_proc_unlock_failed(Process *p,
+ erts_pix_lock_t *pixlck,
+ ErtsProcLocks wait_locks)
+{
+ erts_pix_lock_t *pix_lock = pixlck ? pixlck : ERTS_PID2PIXLOCK(p->id);
+
+#if ERTS_PROC_LOCK_ATOMIC_IMPL
+ erts_pix_lock(pix_lock);
+#endif
+
+ transfer_locks(p, wait_locks, pix_lock, 1); /* unlocks pix_lock */
+}
+
+/*
+ * proc_safelock() locks process locks on two processes. In order
+ * to avoid a deadlock, proc_safelock() unlocks those locks that
+ * need to be unlocked, and then acquires locks in lock order
+ * (including the previously unlocked ones).
+ */
+
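+/*
+ * Illustration (locks numbered 1..4, lowest first in lock order):
+ * suppose we hold lock 3 on a process and also need lock 2 and
+ * lock 4. Lock 2 orders below lock 3, so lock 3 is released first,
+ * after which locks 2, 3 and 4 are (re)acquired in ascending order,
+ * interleaving the two processes by lock number where both are
+ * involved.
+ */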
+static void
+proc_safelock(Process *a_proc,
+ erts_pix_lock_t *a_pix_lck,
+ ErtsProcLocks a_have_locks,
+ ErtsProcLocks a_need_locks,
+ Process *b_proc,
+ erts_pix_lock_t *b_pix_lck,
+ ErtsProcLocks b_have_locks,
+ ErtsProcLocks b_need_locks)
+{
+ Process *p1, *p2;
+ Eterm pid1, pid2;
+ erts_pix_lock_t *pix_lck1, *pix_lck2;
+ ErtsProcLocks need_locks1, have_locks1, need_locks2, have_locks2;
+ ErtsProcLocks unlock_mask;
+ int lock_no, refc1 = 0, refc2 = 0;
+
+ ERTS_LC_ASSERT(b_proc);
+
+
+ /* Determine inter process lock order...
+ * Locks with the same lock order should be locked on p1 before p2.
+ */
+ if (a_proc) {
+ if (a_proc->id < b_proc->id) {
+ p1 = a_proc;
+ pid1 = a_proc->id;
+ pix_lck1 = a_pix_lck;
+ need_locks1 = a_need_locks;
+ have_locks1 = a_have_locks;
+ p2 = b_proc;
+ pid2 = b_proc->id;
+ pix_lck2 = b_pix_lck;
+ need_locks2 = b_need_locks;
+ have_locks2 = b_have_locks;
+ }
+ else if (a_proc->id > b_proc->id) {
+ p1 = b_proc;
+ pid1 = b_proc->id;
+ pix_lck1 = b_pix_lck;
+ need_locks1 = b_need_locks;
+ have_locks1 = b_have_locks;
+ p2 = a_proc;
+ pid2 = a_proc->id;
+ pix_lck2 = a_pix_lck;
+ need_locks2 = a_need_locks;
+ have_locks2 = a_have_locks;
+ }
+ else {
+ ERTS_LC_ASSERT(a_proc == b_proc);
+ ERTS_LC_ASSERT(a_proc->id == b_proc->id);
+ p1 = a_proc;
+ pid1 = a_proc->id;
+ pix_lck1 = a_pix_lck;
+ need_locks1 = a_need_locks | b_need_locks;
+ have_locks1 = a_have_locks | b_have_locks;
+ p2 = NULL;
+ pid2 = 0;
+ pix_lck2 = NULL;
+ need_locks2 = 0;
+ have_locks2 = 0;
+ }
+ }
+ else {
+ p1 = b_proc;
+ pid1 = b_proc->id;
+ pix_lck1 = b_pix_lck;
+ need_locks1 = b_need_locks;
+ have_locks1 = b_have_locks;
+ p2 = NULL;
+ pid2 = 0;
+ pix_lck2 = NULL;
+ need_locks2 = 0;
+ have_locks2 = 0;
+#ifdef ERTS_ENABLE_LOCK_CHECK
+ a_need_locks = 0;
+ a_have_locks = 0;
+#endif
+ }
+
+#ifdef ERTS_ENABLE_LOCK_CHECK
+ if (p1)
+ erts_proc_lc_chk_proc_locks(p1, have_locks1);
+ if (p2)
+ erts_proc_lc_chk_proc_locks(p2, have_locks2);
+
+ if ((need_locks1 & have_locks1) != have_locks1)
+ erts_lc_fail("Thread tries to release process lock(s) "
+ "on %T via erts_proc_safelock().", pid1);
+ if ((need_locks2 & have_locks2) != have_locks2)
+ erts_lc_fail("Thread tries to release process lock(s) "
+ "on %T via erts_proc_safelock().",
+ pid2);
+#endif
+
+
+ need_locks1 &= ~have_locks1;
+ need_locks2 &= ~have_locks2;
+
+ /* Figure out the range of locks that need to be unlocked... */
+ unlock_mask = ERTS_PROC_LOCKS_ALL;
+ for (lock_no = 0;
+ lock_no <= ERTS_PROC_LOCK_MAX_BIT;
+ lock_no++) {
+ ErtsProcLocks lock = (1 << lock_no);
+ if (lock & need_locks1)
+ break;
+ unlock_mask &= ~lock;
+ if (lock & need_locks2)
+ break;
+ }
+
+ /* ... and unlock locks in that range... */
+ if (have_locks1 || have_locks2) {
+ ErtsProcLocks unlock_locks;
+ unlock_locks = unlock_mask & have_locks1;
+ if (unlock_locks) {
+ have_locks1 &= ~unlock_locks;
+ need_locks1 |= unlock_locks;
+ if (!have_locks1) {
+ refc1 = 1;
+ erts_smp_proc_inc_refc(p1);
+ }
+ erts_smp_proc_unlock__(p1, pix_lck1, unlock_locks);
+ }
+ unlock_locks = unlock_mask & have_locks2;
+ if (unlock_locks) {
+ have_locks2 &= ~unlock_locks;
+ need_locks2 |= unlock_locks;
+ if (!have_locks2) {
+ refc2 = 1;
+ erts_smp_proc_inc_refc(p2);
+ }
+ erts_smp_proc_unlock__(p2, pix_lck2, unlock_locks);
+ }
+ }
+
+ /*
+ * lock_no equals the number of the first lock to lock on
+ * either p1 *or* p2.
+ */
+
+
+#ifdef ERTS_ENABLE_LOCK_CHECK
+ if (p1)
+ erts_proc_lc_chk_proc_locks(p1, have_locks1);
+ if (p2)
+ erts_proc_lc_chk_proc_locks(p2, have_locks2);
+#endif
+
+ /* Lock locks in lock order... */
+ while (lock_no <= ERTS_PROC_LOCK_MAX_BIT) {
+ ErtsProcLocks locks;
+ ErtsProcLocks lock = (1 << lock_no);
+ ErtsProcLocks lock_mask = 0;
+ if (need_locks1 & lock) {
+ do {
+ lock = (1 << lock_no++);
+ lock_mask |= lock;
+ } while (lock_no <= ERTS_PROC_LOCK_MAX_BIT
+ && !(need_locks2 & lock));
+ if (need_locks2 & lock)
+ lock_no--;
+ locks = need_locks1 & lock_mask;
+ erts_smp_proc_lock__(p1, pix_lck1, locks);
+ have_locks1 |= locks;
+ need_locks1 &= ~locks;
+ }
+ else if (need_locks2 & lock) {
+ while (lock_no <= ERTS_PROC_LOCK_MAX_BIT
+ && !(need_locks1 & lock)) {
+ lock_mask |= lock;
+ lock = (1 << ++lock_no);
+ }
+ locks = need_locks2 & lock_mask;
+ erts_smp_proc_lock__(p2, pix_lck2, locks);
+ have_locks2 |= locks;
+ need_locks2 &= ~locks;
+ }
+ else
+ lock_no++;
+ }
+
+#ifdef ERTS_ENABLE_LOCK_CHECK
+ if (p1)
+ erts_proc_lc_chk_proc_locks(p1, have_locks1);
+ if (p2)
+ erts_proc_lc_chk_proc_locks(p2, have_locks2);
+
+ if (p1 && p2) {
+ if (p1 == a_proc) {
+ ERTS_LC_ASSERT(a_need_locks == have_locks1);
+ ERTS_LC_ASSERT(b_need_locks == have_locks2);
+ }
+ else {
+ ERTS_LC_ASSERT(a_need_locks == have_locks2);
+ ERTS_LC_ASSERT(b_need_locks == have_locks1);
+ }
+ }
+ else {
+ ERTS_LC_ASSERT(p1);
+ if (a_proc) {
+ ERTS_LC_ASSERT(have_locks1 == (a_need_locks | b_need_locks));
+ }
+ else {
+ ERTS_LC_ASSERT(have_locks1 == b_need_locks);
+ }
+ }
+#endif
+
+ if (refc1)
+ erts_smp_proc_dec_refc(p1);
+ if (refc2)
+ erts_smp_proc_dec_refc(p2);
+}
+
+void
+erts_proc_safelock(Process *a_proc,
+ ErtsProcLocks a_have_locks,
+ ErtsProcLocks a_need_locks,
+ Process *b_proc,
+ ErtsProcLocks b_have_locks,
+ ErtsProcLocks b_need_locks)
+{
+ proc_safelock(a_proc,
+ a_proc ? ERTS_PID2PIXLOCK(a_proc->id) : NULL,
+ a_have_locks,
+ a_need_locks,
+ b_proc,
+ b_proc ? ERTS_PID2PIXLOCK(b_proc->id) : NULL,
+ b_have_locks,
+ b_need_locks);
+}
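+
+/*
+ * Illustrative usage sketch (editorial addition, not part of the
+ * original source; the function name is hypothetical): a caller that
+ * already holds the main lock on c_p and also wants the status lock
+ * on c_p plus the main and status locks on another process lets
+ * erts_proc_safelock() release and reacquire locks in lock order
+ * instead of locking directly, which could deadlock.
+ */
+#if 0
+static void example_safelock_two(Process *c_p, Process *other)
+{
+    erts_proc_safelock(c_p,
+                       ERTS_PROC_LOCK_MAIN,
+                       ERTS_PROC_LOCK_MAIN | ERTS_PROC_LOCK_STATUS,
+                       other,
+                       0,
+                       ERTS_PROC_LOCK_MAIN | ERTS_PROC_LOCK_STATUS);
+    /* ... both processes are now locked as requested ... */
+    erts_smp_proc_unlock(other,
+                         ERTS_PROC_LOCK_MAIN | ERTS_PROC_LOCK_STATUS);
+    erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_STATUS);
+}
+#endif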
+
+/*
+ * erts_pid2proc_safelock() is called from erts_pid2proc_opt() when
+ * it wasn't possible to trylock all locks needed.
+ * c_p - current process
+ * c_p_have_locks - locks held on c_p
+ * proc - process struct of the process we are looking
+ * up (both an in and an out argument)
+ * need_locks - all locks we need (including have_locks)
+ * pix_lock - pix lock for process we are looking up
+ * flags - option flags
+ */
+void
+erts_pid2proc_safelock(Process *c_p,
+ ErtsProcLocks c_p_have_locks,
+ Process **proc,
+ ErtsProcLocks need_locks,
+ erts_pix_lock_t *pix_lock,
+ int flags)
+{
+ Process *p = *proc;
+ ERTS_LC_ASSERT(p->lock.refc > 0);
+ ERTS_LC_ASSERT(process_tab[internal_pid_index(p->id)] == p);
+ p->lock.refc++;
+ erts_pix_unlock(pix_lock);
+
+ proc_safelock(c_p,
+ c_p ? ERTS_PID2PIXLOCK(c_p->id) : NULL,
+ c_p_have_locks,
+ c_p_have_locks,
+ p,
+ pix_lock,
+ 0,
+ need_locks);
+
+ erts_pix_lock(pix_lock);
+
+ if (!p->is_exiting
+ || ((flags & ERTS_P2P_FLG_ALLOW_OTHER_X)
+ && process_tab[internal_pid_index(p->id)] == p)) {
+ ERTS_LC_ASSERT(p->lock.refc > 1);
+ p->lock.refc--;
+ }
+ else {
+ /* No proc. Note that we must keep refc until after the process unlock. */
+ erts_pix_unlock(pix_lock);
+ erts_smp_proc_unlock__(p, pix_lock, need_locks);
+ *proc = NULL;
+ erts_pix_lock(pix_lock);
+ ERTS_LC_ASSERT(p->lock.refc > 0);
+ if (--p->lock.refc == 0) {
+ erts_pix_unlock(pix_lock);
+ erts_free_proc(p);
+ erts_pix_lock(pix_lock);
+ }
+ }
+}
+
+void
+erts_proc_lock_init(Process *p)
+{
+ /* We always start with all locks locked */
+#if ERTS_PROC_LOCK_ATOMIC_IMPL
+ erts_smp_atomic_init(&p->lock.flags, (long) ERTS_PROC_LOCKS_ALL);
+#else
+ p->lock.flags = ERTS_PROC_LOCKS_ALL;
+#endif
+ p->lock.queues = NULL;
+ p->lock.refc = 1;
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ erts_lcnt_proc_lock_init(p);
+ erts_lcnt_proc_lock(&(p->lock), ERTS_PROC_LOCKS_ALL);
+ erts_lcnt_proc_lock_post_x(&(p->lock), ERTS_PROC_LOCKS_ALL, __FILE__, __LINE__);
+#endif
+
+#ifdef ERTS_ENABLE_LOCK_CHECK
+ erts_proc_lc_trylock(p, ERTS_PROC_LOCKS_ALL, 1);
+#endif
+#ifdef ERTS_PROC_LOCK_DEBUG
+ {
+ int i;
+ for (i = 0; i <= ERTS_PROC_LOCK_MAX_BIT; i++)
+ erts_smp_atomic_init(&p->lock.locked[i], (long) 1);
+ }
+#endif
+}
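+
+/*
+ * Note (editorial): a newly initialized process lock starts out with
+ * all locks held and refc == 1, so the creating thread is expected to
+ * release them, e.g. via erts_smp_proc_unlock(p, ERTS_PROC_LOCKS_ALL),
+ * once the new process has been fully set up. The exact call site is
+ * outside this file.
+ */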
+
+/* --- Process lock counting ----------------------------------------------- */
+
+#ifdef ERTS_ENABLE_LOCK_COUNT
+void erts_lcnt_proc_lock_init(Process *p) {
+
+ if (p->id != ERTS_INVALID_PID) {
+ erts_lcnt_init_lock_x(&(p->lock.lcnt_main), "proc_main", ERTS_LCNT_LT_PROCLOCK, p->id);
+ erts_lcnt_init_lock_x(&(p->lock.lcnt_msgq), "proc_msgq", ERTS_LCNT_LT_PROCLOCK, p->id);
+ erts_lcnt_init_lock_x(&(p->lock.lcnt_link), "proc_link", ERTS_LCNT_LT_PROCLOCK, p->id);
+ erts_lcnt_init_lock_x(&(p->lock.lcnt_status), "proc_status", ERTS_LCNT_LT_PROCLOCK, p->id);
+ } else {
+ erts_lcnt_init_lock(&(p->lock.lcnt_main), "proc_main", ERTS_LCNT_LT_PROCLOCK);
+ erts_lcnt_init_lock(&(p->lock.lcnt_msgq), "proc_msgq", ERTS_LCNT_LT_PROCLOCK);
+ erts_lcnt_init_lock(&(p->lock.lcnt_link), "proc_link", ERTS_LCNT_LT_PROCLOCK);
+ erts_lcnt_init_lock(&(p->lock.lcnt_status), "proc_status", ERTS_LCNT_LT_PROCLOCK);
+ }
+}
+
+
+void erts_lcnt_proc_lock_destroy(Process *p) {
+ erts_lcnt_destroy_lock(&(p->lock.lcnt_main));
+ erts_lcnt_destroy_lock(&(p->lock.lcnt_msgq));
+ erts_lcnt_destroy_lock(&(p->lock.lcnt_link));
+ erts_lcnt_destroy_lock(&(p->lock.lcnt_status));
+}
+
+void erts_lcnt_proc_lock(erts_proc_lock_t *lock, ErtsProcLocks locks) {
+ if (erts_lcnt_rt_options & ERTS_LCNT_OPT_PROCLOCK) {
+ if (locks & ERTS_PROC_LOCK_MAIN) {
+ erts_lcnt_lock(&(lock->lcnt_main));
+ }
+ if (locks & ERTS_PROC_LOCK_MSGQ) {
+ erts_lcnt_lock(&(lock->lcnt_msgq));
+ }
+ if (locks & ERTS_PROC_LOCK_LINK) {
+ erts_lcnt_lock(&(lock->lcnt_link));
+ }
+ if (locks & ERTS_PROC_LOCK_STATUS) {
+ erts_lcnt_lock(&(lock->lcnt_status));
+ }
+ }
+}
+
+void erts_lcnt_proc_lock_post_x(erts_proc_lock_t *lock, ErtsProcLocks locks, char *file, unsigned int line) {
+ if (erts_lcnt_rt_options & ERTS_LCNT_OPT_PROCLOCK) {
+ if (locks & ERTS_PROC_LOCK_MAIN) {
+ erts_lcnt_lock_post_x(&(lock->lcnt_main), file, line);
+ }
+ if (locks & ERTS_PROC_LOCK_MSGQ) {
+ erts_lcnt_lock_post_x(&(lock->lcnt_msgq), file, line);
+ }
+ if (locks & ERTS_PROC_LOCK_LINK) {
+ erts_lcnt_lock_post_x(&(lock->lcnt_link), file, line);
+ }
+ if (locks & ERTS_PROC_LOCK_STATUS) {
+ erts_lcnt_lock_post_x(&(lock->lcnt_status), file, line);
+ }
+ }
+}
+
+void erts_lcnt_proc_lock_unaquire(erts_proc_lock_t *lock, ErtsProcLocks locks) {
+ if (erts_lcnt_rt_options & ERTS_LCNT_OPT_PROCLOCK) {
+ if (locks & ERTS_PROC_LOCK_MAIN) {
+ erts_lcnt_lock_unaquire(&(lock->lcnt_main));
+ }
+ if (locks & ERTS_PROC_LOCK_MSGQ) {
+ erts_lcnt_lock_unaquire(&(lock->lcnt_msgq));
+ }
+ if (locks & ERTS_PROC_LOCK_LINK) {
+ erts_lcnt_lock_unaquire(&(lock->lcnt_link));
+ }
+ if (locks & ERTS_PROC_LOCK_STATUS) {
+ erts_lcnt_lock_unaquire(&(lock->lcnt_status));
+ }
+ }
+}
+
+void erts_lcnt_proc_unlock(erts_proc_lock_t *lock, ErtsProcLocks locks) {
+ if (erts_lcnt_rt_options & ERTS_LCNT_OPT_PROCLOCK) {
+ if (locks & ERTS_PROC_LOCK_MAIN) {
+ erts_lcnt_unlock(&(lock->lcnt_main));
+ }
+ if (locks & ERTS_PROC_LOCK_MSGQ) {
+ erts_lcnt_unlock(&(lock->lcnt_msgq));
+ }
+ if (locks & ERTS_PROC_LOCK_LINK) {
+ erts_lcnt_unlock(&(lock->lcnt_link));
+ }
+ if (locks & ERTS_PROC_LOCK_STATUS) {
+ erts_lcnt_unlock(&(lock->lcnt_status));
+ }
+ }
+}
+void erts_lcnt_proc_trylock(erts_proc_lock_t *lock, ErtsProcLocks locks, int res) {
+ if (erts_lcnt_rt_options & ERTS_LCNT_OPT_PROCLOCK) {
+ if (locks & ERTS_PROC_LOCK_MAIN) {
+ erts_lcnt_trylock(&(lock->lcnt_main), res);
+ }
+ if (locks & ERTS_PROC_LOCK_MSGQ) {
+ erts_lcnt_trylock(&(lock->lcnt_msgq), res);
+ }
+ if (locks & ERTS_PROC_LOCK_LINK) {
+ erts_lcnt_trylock(&(lock->lcnt_link), res);
+ }
+ if (locks & ERTS_PROC_LOCK_STATUS) {
+ erts_lcnt_trylock(&(lock->lcnt_status), res);
+ }
+ }
+}
+
+#endif /* ifdef ERTS_ENABLE_LOCK_COUNT */
+
+
+/* --- Process lock checking ----------------------------------------------- */
+
+#ifdef ERTS_ENABLE_LOCK_CHECK
+
+void
+erts_proc_lc_lock(Process *p, ErtsProcLocks locks)
+{
+ erts_lc_lock_t lck = ERTS_LC_LOCK_INIT(-1,
+ p->id,
+ ERTS_LC_FLG_LT_PROCLOCK);
+ if (locks & ERTS_PROC_LOCK_MAIN) {
+ lck.id = lc_id.proc_lock_main;
+ erts_lc_lock(&lck);
+ }
+ if (locks & ERTS_PROC_LOCK_LINK) {
+ lck.id = lc_id.proc_lock_link;
+ erts_lc_lock(&lck);
+ }
+ if (locks & ERTS_PROC_LOCK_MSGQ) {
+ lck.id = lc_id.proc_lock_msgq;
+ erts_lc_lock(&lck);
+ }
+ if (locks & ERTS_PROC_LOCK_STATUS) {
+ lck.id = lc_id.proc_lock_status;
+ erts_lc_lock(&lck);
+ }
+}
+
+void
+erts_proc_lc_trylock(Process *p, ErtsProcLocks locks, int locked)
+{
+ erts_lc_lock_t lck = ERTS_LC_LOCK_INIT(-1,
+ p->id,
+ ERTS_LC_FLG_LT_PROCLOCK);
+ if (locks & ERTS_PROC_LOCK_MAIN) {
+ lck.id = lc_id.proc_lock_main;
+ erts_lc_trylock(locked, &lck);
+ }
+ if (locks & ERTS_PROC_LOCK_LINK) {
+ lck.id = lc_id.proc_lock_link;
+ erts_lc_trylock(locked, &lck);
+ }
+ if (locks & ERTS_PROC_LOCK_MSGQ) {
+ lck.id = lc_id.proc_lock_msgq;
+ erts_lc_trylock(locked, &lck);
+ }
+ if (locks & ERTS_PROC_LOCK_STATUS) {
+ lck.id = lc_id.proc_lock_status;
+ erts_lc_trylock(locked, &lck);
+ }
+}
+
+void
+erts_proc_lc_unlock(Process *p, ErtsProcLocks locks)
+{
+ erts_lc_lock_t lck = ERTS_LC_LOCK_INIT(-1,
+ p->id,
+ ERTS_LC_FLG_LT_PROCLOCK);
+ if (locks & ERTS_PROC_LOCK_STATUS) {
+ lck.id = lc_id.proc_lock_status;
+ erts_lc_unlock(&lck);
+ }
+ if (locks & ERTS_PROC_LOCK_MSGQ) {
+ lck.id = lc_id.proc_lock_msgq;
+ erts_lc_unlock(&lck);
+ }
+ if (locks & ERTS_PROC_LOCK_LINK) {
+ lck.id = lc_id.proc_lock_link;
+ erts_lc_unlock(&lck);
+ }
+ if (locks & ERTS_PROC_LOCK_MAIN) {
+ lck.id = lc_id.proc_lock_main;
+ erts_lc_unlock(&lck);
+ }
+}
+
+void
+erts_proc_lc_might_unlock(Process *p, ErtsProcLocks locks)
+{
+ erts_lc_lock_t lck = ERTS_LC_LOCK_INIT(-1,
+ p->id,
+ ERTS_LC_FLG_LT_PROCLOCK);
+ if (locks & ERTS_PROC_LOCK_STATUS) {
+ lck.id = lc_id.proc_lock_status;
+ erts_lc_might_unlock(&lck);
+ }
+ if (locks & ERTS_PROC_LOCK_MSGQ) {
+ lck.id = lc_id.proc_lock_msgq;
+ erts_lc_might_unlock(&lck);
+ }
+ if (locks & ERTS_PROC_LOCK_LINK) {
+ lck.id = lc_id.proc_lock_link;
+ erts_lc_might_unlock(&lck);
+ }
+ if (locks & ERTS_PROC_LOCK_MAIN) {
+ lck.id = lc_id.proc_lock_main;
+ erts_lc_might_unlock(&lck);
+ }
+}
+
+void
+erts_proc_lc_require_lock(Process *p, ErtsProcLocks locks)
+{
+ erts_lc_lock_t lck = ERTS_LC_LOCK_INIT(-1,
+ p->id,
+ ERTS_LC_FLG_LT_PROCLOCK);
+ if (locks & ERTS_PROC_LOCK_MAIN) {
+ lck.id = lc_id.proc_lock_main;
+ erts_lc_require_lock(&lck);
+ }
+ if (locks & ERTS_PROC_LOCK_LINK) {
+ lck.id = lc_id.proc_lock_link;
+ erts_lc_require_lock(&lck);
+ }
+ if (locks & ERTS_PROC_LOCK_MSGQ) {
+ lck.id = lc_id.proc_lock_msgq;
+ erts_lc_require_lock(&lck);
+ }
+ if (locks & ERTS_PROC_LOCK_STATUS) {
+ lck.id = lc_id.proc_lock_status;
+ erts_lc_require_lock(&lck);
+ }
+}
+
+void
+erts_proc_lc_unrequire_lock(Process *p, ErtsProcLocks locks)
+{
+ erts_lc_lock_t lck = ERTS_LC_LOCK_INIT(-1,
+ p->id,
+ ERTS_LC_FLG_LT_PROCLOCK);
+ if (locks & ERTS_PROC_LOCK_STATUS) {
+ lck.id = lc_id.proc_lock_status;
+ erts_lc_unrequire_lock(&lck);
+ }
+ if (locks & ERTS_PROC_LOCK_MSGQ) {
+ lck.id = lc_id.proc_lock_msgq;
+ erts_lc_unrequire_lock(&lck);
+ }
+ if (locks & ERTS_PROC_LOCK_LINK) {
+ lck.id = lc_id.proc_lock_link;
+ erts_lc_unrequire_lock(&lck);
+ }
+ if (locks & ERTS_PROC_LOCK_MAIN) {
+ lck.id = lc_id.proc_lock_main;
+ erts_lc_unrequire_lock(&lck);
+ }
+}
+
+
+int
+erts_proc_lc_trylock_force_busy(Process *p, ErtsProcLocks locks)
+{
+ if (locks & ERTS_PROC_LOCKS_ALL) {
+ erts_lc_lock_t lck = ERTS_LC_LOCK_INIT(-1,
+ p->id,
+ ERTS_LC_FLG_LT_PROCLOCK);
+
+ if (locks & ERTS_PROC_LOCK_MAIN)
+ lck.id = lc_id.proc_lock_main;
+ else if (locks & ERTS_PROC_LOCK_LINK)
+ lck.id = lc_id.proc_lock_link;
+ else if (locks & ERTS_PROC_LOCK_MSGQ)
+ lck.id = lc_id.proc_lock_msgq;
+ else if (locks & ERTS_PROC_LOCK_STATUS)
+ lck.id = lc_id.proc_lock_status;
+ else
+ erts_lc_fail("Unknown proc lock found");
+
+ return erts_lc_trylock_force_busy(&lck);
+ }
+ return 0;
+}
+
+void erts_proc_lc_chk_only_proc_main(Process *p)
+{
+ erts_lc_lock_t proc_main = ERTS_LC_LOCK_INIT(lc_id.proc_lock_main,
+ p->id,
+ ERTS_LC_FLG_LT_PROCLOCK);
+ erts_lc_check_exact(&proc_main, 1);
+}
+
+#define ERTS_PROC_LC_EMPTY_LOCK_INIT \
+ ERTS_LC_LOCK_INIT(-1, THE_NON_VALUE, ERTS_LC_FLG_LT_PROCLOCK)
+
+void
+erts_proc_lc_chk_have_proc_locks(Process *p, ErtsProcLocks locks)
+{
+ int have_locks_len = 0;
+ erts_lc_lock_t have_locks[4] = {ERTS_PROC_LC_EMPTY_LOCK_INIT,
+ ERTS_PROC_LC_EMPTY_LOCK_INIT,
+ ERTS_PROC_LC_EMPTY_LOCK_INIT,
+ ERTS_PROC_LC_EMPTY_LOCK_INIT};
+ if (locks & ERTS_PROC_LOCK_MAIN) {
+ have_locks[have_locks_len].id = lc_id.proc_lock_main;
+ have_locks[have_locks_len++].extra = p->id;
+ }
+ if (locks & ERTS_PROC_LOCK_LINK) {
+ have_locks[have_locks_len].id = lc_id.proc_lock_link;
+ have_locks[have_locks_len++].extra = p->id;
+ }
+ if (locks & ERTS_PROC_LOCK_MSGQ) {
+ have_locks[have_locks_len].id = lc_id.proc_lock_msgq;
+ have_locks[have_locks_len++].extra = p->id;
+ }
+ if (locks & ERTS_PROC_LOCK_STATUS) {
+ have_locks[have_locks_len].id = lc_id.proc_lock_status;
+ have_locks[have_locks_len++].extra = p->id;
+ }
+
+ erts_lc_check(have_locks, have_locks_len, NULL, 0);
+}
+
+void
+erts_proc_lc_chk_proc_locks(Process *p, ErtsProcLocks locks)
+{
+ int have_locks_len = 0;
+ int have_not_locks_len = 0;
+ erts_lc_lock_t have_locks[4] = {ERTS_PROC_LC_EMPTY_LOCK_INIT,
+ ERTS_PROC_LC_EMPTY_LOCK_INIT,
+ ERTS_PROC_LC_EMPTY_LOCK_INIT,
+ ERTS_PROC_LC_EMPTY_LOCK_INIT};
+ erts_lc_lock_t have_not_locks[4] = {ERTS_PROC_LC_EMPTY_LOCK_INIT,
+ ERTS_PROC_LC_EMPTY_LOCK_INIT,
+ ERTS_PROC_LC_EMPTY_LOCK_INIT,
+ ERTS_PROC_LC_EMPTY_LOCK_INIT};
+
+ if (locks & ERTS_PROC_LOCK_MAIN) {
+ have_locks[have_locks_len].id = lc_id.proc_lock_main;
+ have_locks[have_locks_len++].extra = p->id;
+ }
+ else {
+ have_not_locks[have_not_locks_len].id = lc_id.proc_lock_main;
+ have_not_locks[have_not_locks_len++].extra = p->id;
+ }
+ if (locks & ERTS_PROC_LOCK_LINK) {
+ have_locks[have_locks_len].id = lc_id.proc_lock_link;
+ have_locks[have_locks_len++].extra = p->id;
+ }
+ else {
+ have_not_locks[have_not_locks_len].id = lc_id.proc_lock_link;
+ have_not_locks[have_not_locks_len++].extra = p->id;
+ }
+ if (locks & ERTS_PROC_LOCK_MSGQ) {
+ have_locks[have_locks_len].id = lc_id.proc_lock_msgq;
+ have_locks[have_locks_len++].extra = p->id;
+ }
+ else {
+ have_not_locks[have_not_locks_len].id = lc_id.proc_lock_msgq;
+ have_not_locks[have_not_locks_len++].extra = p->id;
+ }
+ if (locks & ERTS_PROC_LOCK_STATUS) {
+ have_locks[have_locks_len].id = lc_id.proc_lock_status;
+ have_locks[have_locks_len++].extra = p->id;
+ }
+ else {
+ have_not_locks[have_not_locks_len].id = lc_id.proc_lock_status;
+ have_not_locks[have_not_locks_len++].extra = p->id;
+ }
+
+ erts_lc_check(have_locks, have_locks_len,
+ have_not_locks, have_not_locks_len);
+}
+
+ErtsProcLocks
+erts_proc_lc_my_proc_locks(Process *p)
+{
+ int resv[4];
+ erts_lc_lock_t locks[4] = {ERTS_LC_LOCK_INIT(lc_id.proc_lock_main,
+ p->id,
+ ERTS_LC_FLG_LT_PROCLOCK),
+ ERTS_LC_LOCK_INIT(lc_id.proc_lock_link,
+ p->id,
+ ERTS_LC_FLG_LT_PROCLOCK),
+ ERTS_LC_LOCK_INIT(lc_id.proc_lock_msgq,
+ p->id,
+ ERTS_LC_FLG_LT_PROCLOCK),
+ ERTS_LC_LOCK_INIT(lc_id.proc_lock_status,
+ p->id,
+ ERTS_LC_FLG_LT_PROCLOCK)};
+
+ ErtsProcLocks res = 0;
+
+ erts_lc_have_locks(resv, locks, 4);
+ if (resv[0])
+ res |= ERTS_PROC_LOCK_MAIN;
+ if (resv[1])
+ res |= ERTS_PROC_LOCK_LINK;
+ if (resv[2])
+ res |= ERTS_PROC_LOCK_MSGQ;
+ if (resv[3])
+ res |= ERTS_PROC_LOCK_STATUS;
+
+ return res;
+}
+
+void
+erts_proc_lc_chk_no_proc_locks(char *file, int line)
+{
+ int resv[4];
+ int ids[4] = {lc_id.proc_lock_main,
+ lc_id.proc_lock_link,
+ lc_id.proc_lock_msgq,
+ lc_id.proc_lock_status};
+ erts_lc_have_lock_ids(resv, ids, 4);
+ if (resv[0] || resv[1] || resv[2] || resv[3]) {
+ erts_lc_fail("%s:%d: Thread has process locks locked when expected "
+ "not to have any process locks locked",
+ file, line);
+ }
+}
+
+#endif /* #ifdef ERTS_ENABLE_LOCK_CHECK */
+
+#ifdef ERTS_PROC_LOCK_HARD_DEBUG
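+/*
+ * Sanity check (editorial summary): for every lock whose waiter flag
+ * is set, verify that the corresponding waiter queue exists and forms
+ * a consistent circular doubly-linked list by walking it once via
+ * 'next' and once via 'prev'.
+ */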
+void
+check_queue(erts_proc_lock_t *lck)
+{
+ int lock_no;
+ ErtsProcLocks lflgs = ERTS_PROC_LOCK_FLGS_READ_(lck);
+
+ for (lock_no = 0; lock_no <= ERTS_PROC_LOCK_MAX_BIT; lock_no++) {
+ ErtsProcLocks wtr_bit;
+ wtr_bit = (((ErtsProcLocks) 1) << lock_no) << ERTS_PROC_LOCK_WAITER_SHIFT;
+ if (lflgs & wtr_bit) {
+ int n;
+ erts_proc_lock_waiter_t *wtr;
+ ERTS_LC_ASSERT(lck->queues && lck->queues->queue[lock_no]);
+ wtr = lck->queues->queue[lock_no];
+ n = 0;
+ do {
+ wtr = wtr->next;
+ n++;
+ } while (wtr != lck->queues->queue[lock_no]);
+ do {
+ wtr = wtr->prev;
+ n--;
+ } while (wtr != lck->queues->queue[lock_no]);
+ ERTS_LC_ASSERT(n == 0);
+ }
+ else {
+ ERTS_LC_ASSERT(!lck->queues || !lck->queues->queue[lock_no]);
+ }
+ }
+}
+#endif
+
+#endif /* ERTS_SMP (the whole file) */
diff --git a/erts/emulator/beam/erl_process_lock.h b/erts/emulator/beam/erl_process_lock.h
new file mode 100644
index 0000000000..d71e5a0a6e
--- /dev/null
+++ b/erts/emulator/beam/erl_process_lock.h
@@ -0,0 +1,990 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2007-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+
+/*
+ * Description: Implementation of Erlang process locks.
+ *
+ * Author: Rickard Green
+ */
+
+#ifndef ERTS_PROC_LOCK_TYPE__
+#define ERTS_PROC_LOCK_TYPE__
+
+#ifdef ERTS_ENABLE_LOCK_CHECK
+#define ERTS_PROC_LOCK_DEBUG
+#endif
+
+#ifdef ERTS_ENABLE_LOCK_COUNT
+#include "erl_lock_count.h"
+#endif
+
+#include "erl_smp.h"
+
+#define ERTS_PROC_LOCK_ATOMIC_IMPL 0
+#define ERTS_PROC_LOCK_SPINLOCK_IMPL 0
+#define ERTS_PROC_LOCK_MUTEX_IMPL 0
+
+#if defined(ETHR_HAVE_OPTIMIZED_ATOMIC_OPS)
+# undef ERTS_PROC_LOCK_ATOMIC_IMPL
+# define ERTS_PROC_LOCK_ATOMIC_IMPL 1
+#elif defined(ETHR_HAVE_OPTIMIZED_SPINLOCK)
+# undef ERTS_PROC_LOCK_SPINLOCK_IMPL
+# define ERTS_PROC_LOCK_SPINLOCK_IMPL 1
+#else
+# undef ERTS_PROC_LOCK_MUTEX_IMPL
+# define ERTS_PROC_LOCK_MUTEX_IMPL 1
+#endif
+
+#define ERTS_PROC_LOCK_MAX_BIT 3
+
+typedef Uint32 ErtsProcLocks;
+
+typedef struct erts_proc_lock_queues_t_ erts_proc_lock_queues_t;
+
+typedef struct erts_proc_lock_t_ {
+#if ERTS_PROC_LOCK_ATOMIC_IMPL
+ erts_smp_atomic_t flags;
+#else
+ ErtsProcLocks flags;
+#endif
+ erts_proc_lock_queues_t *queues;
+ long refc;
+#ifdef ERTS_PROC_LOCK_DEBUG
+ erts_smp_atomic_t locked[ERTS_PROC_LOCK_MAX_BIT+1];
+#endif
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ erts_lcnt_lock_t lcnt_main;
+ erts_lcnt_lock_t lcnt_link;
+ erts_lcnt_lock_t lcnt_msgq;
+ erts_lcnt_lock_t lcnt_status;
+#endif
+} erts_proc_lock_t;
+
+/* Process lock flags */
+
+/*
+ * Main lock:
+ * The main lock is held by the scheduler running a process. It
+ * is used to protect all fields in the process structure except
+ * for those fields protected by other process locks (follows).
+ */
+#define ERTS_PROC_LOCK_MAIN (((ErtsProcLocks) 1) << 0)
+
+/*
+ * Link lock:
+ * Protects the following fields in the process structure:
+ * * nlinks
+ * * monitors
+ * * suspend_monitors
+ */
+#define ERTS_PROC_LOCK_LINK (((ErtsProcLocks) 1) << 1)
+
+/*
+ * Message queue lock:
+ * Protects the following fields in the process structure:
+ * * msg_inq
+ * * bif_timers
+ */
+#define ERTS_PROC_LOCK_MSGQ (((ErtsProcLocks) 1) << 2)
+
+/*
+ * Status lock:
+ * Protects the following fields in the process structure:
+ * * status
+ * * rstatus
+ * * status_flags
+ * * pending_suspenders
+ * * suspendee
+ */
+#define ERTS_PROC_LOCK_STATUS (((ErtsProcLocks) 1) << ERTS_PROC_LOCK_MAX_BIT)
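+
+/*
+ * Note (editorial): the lock bits above can be or:ed together into a
+ * single ErtsProcLocks value; e.g.
+ * (ERTS_PROC_LOCK_LINK | ERTS_PROC_LOCK_STATUS) requests both the
+ * link lock and the status lock in one operation.
+ */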
+
+/*
+ * Special fields:
+ *
+ * The following fields are read-only and can be read if at
+ * least one process lock (whichever one doesn't matter)
+ * is held, or if the process structure is guaranteed not to
+ * disappear by other means (e.g. pix lock is held):
+ * * id
+ *
+ * The following fields are only allowed to be written if
+ * all process locks are held, and are allowed to be read if
+ * at least one process lock (whichever one doesn't matter)
+ * is held:
+ * * tracer_proc
+ * * tracer_flags
+ *
+ * The following fields are only allowed to be accessed if
+ * both the schedule queue lock and at least one process lock
+ * (whichever one doesn't matter) are held:
+ * * prio
+ * * next
+ * * scheduler_flags
+ */
+
+/*
+ * Other rules regarding process locking:
+ *
+ * Exiting processes:
+ * When changing status to P_EXITING on a process, you are required
+ * to take all process locks (ERTS_PROC_LOCKS_ALL). Thus, by holding
+ * at least one process lock (whichever one doesn't matter) you
+ * are guaranteed that the process won't exit until the lock you are
+ * holding has been released. Apart from all process locks, the
+ * pix lock corresponding to the process also has to be held.
+ * At the same time as status is changed to P_EXITING, also the
+ * field 'is_exiting' in the process structure is set to a value != 0.
+ *
+ * Lock order:
+ * Process locks with low numeric values have to be locked before
+ * process locks with high numeric values. E.g., main locks have
+ * to be locked before message queue locks.
+ *
+ * When process locks with the same numeric value are to be locked
+ * on multiple processes, locks on processes with low process ids
+ * have to be locked before locks on processes with high process
+ * ids. E.g., if the main and the message queue locks are to be
+ * locked on processes p1 and p2 and p1->id < p2->id, then locks
+ * should be locked in the following order:
+ * 1. main lock on p1
+ * 2. main lock on p2
+ * 3. message queue lock on p1
+ * 4. message queue lock on p2
+ */
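+
+/*
+ * Illustrative sketch (editorial addition, not part of the original
+ * source; the function name is hypothetical): taking the main and
+ * message queue locks on two processes p1 and p2, p1->id < p2->id,
+ * in exactly the order mandated above.
+ */
+#if 0
+static void example_lock_two_in_order(Process *p1, Process *p2)
+{
+    ASSERT(p1->id < p2->id);
+    erts_smp_proc_lock(p1, ERTS_PROC_LOCK_MAIN); /* 1. main lock on p1 */
+    erts_smp_proc_lock(p2, ERTS_PROC_LOCK_MAIN); /* 2. main lock on p2 */
+    erts_smp_proc_lock(p1, ERTS_PROC_LOCK_MSGQ); /* 3. msgq lock on p1 */
+    erts_smp_proc_lock(p2, ERTS_PROC_LOCK_MSGQ); /* 4. msgq lock on p2 */
+}
+#endif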
+
+/* Other lock flags */
+#define ERTS_PROC_LOCK_WAITER_SHIFT (ERTS_PROC_LOCK_MAX_BIT + 1)
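+
+/*
+ * Note (editorial): the waiter flag of a lock is the lock's own bit
+ * shifted left by ERTS_PROC_LOCK_WAITER_SHIFT. E.g., a waiter on the
+ * main lock is indicated by
+ * (ERTS_PROC_LOCK_MAIN << ERTS_PROC_LOCK_WAITER_SHIFT) being set in
+ * the lock flags.
+ */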
+
+
+/* ERTS_PROC_LOCKS_* are combinations of process locks */
+
+#define ERTS_PROC_LOCKS_MSG_RECEIVE (ERTS_PROC_LOCK_MSGQ \
+ | ERTS_PROC_LOCK_STATUS)
+#define ERTS_PROC_LOCKS_MSG_SEND (ERTS_PROC_LOCK_MSGQ \
+ | ERTS_PROC_LOCK_STATUS)
+#define ERTS_PROC_LOCKS_XSIG_SEND ERTS_PROC_LOCK_STATUS
+
+#define ERTS_PROC_LOCKS_ALL \
+ ((((ErtsProcLocks) 1) << (ERTS_PROC_LOCK_MAX_BIT + 1)) - 1)
+
+#define ERTS_PROC_LOCKS_ALL_MINOR (ERTS_PROC_LOCKS_ALL \
+ & ~ERTS_PROC_LOCK_MAIN)
+
+
+#define ERTS_PIX_LOCKS_BITS 8
+#define ERTS_NO_OF_PIX_LOCKS (1 << ERTS_PIX_LOCKS_BITS)
+
+
+#endif /* #ifndef ERTS_PROC_LOCK_TYPE__ */
+
+#ifndef ERTS_PROCESS_LOCK_ONLY_PROC_LOCK_TYPE__
+#ifndef ERTS_PROC_LOCK_LOCK_CHECK__
+#define ERTS_PROC_LOCK_LOCK_CHECK__
+
+/* Lock counter implementation */
+
+#ifdef ERTS_ENABLE_LOCK_COUNT
+#define erts_smp_proc_lock__(P,I,L) erts_smp_proc_lock_x__(P,I,L,__FILE__,__LINE__)
+#define erts_smp_proc_lock(P,L) erts_smp_proc_lock_x(P,L,__FILE__,__LINE__)
+#endif
+
+#if defined(ERTS_SMP) && defined (ERTS_ENABLE_LOCK_COUNT)
+
+void erts_lcnt_proc_lock_init(Process *p);
+void erts_lcnt_proc_lock_destroy(Process *p);
+void erts_lcnt_proc_lock(erts_proc_lock_t *lock, ErtsProcLocks locks);
+void erts_lcnt_proc_lock_post_x(erts_proc_lock_t *lock, ErtsProcLocks locks, char *file, unsigned int line);
+void erts_lcnt_proc_lock_unaquire(erts_proc_lock_t *lock, ErtsProcLocks locks);
+void erts_lcnt_proc_unlock(erts_proc_lock_t *lock, ErtsProcLocks locks);
+void erts_lcnt_proc_trylock(erts_proc_lock_t *lock, ErtsProcLocks locks, int res);
+
+#endif /* ERTS_ENABLE_LOCK_COUNT */
+
+
+
+/* --- Process lock checking ----------------------------------------------- */
+
+#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK)
+#define ERTS_SMP_CHK_NO_PROC_LOCKS \
+ erts_proc_lc_chk_no_proc_locks(__FILE__, __LINE__)
+#define ERTS_SMP_CHK_HAVE_ONLY_MAIN_PROC_LOCK(P) \
+ erts_proc_lc_chk_only_proc_main((P))
+void erts_proc_lc_lock(Process *p, ErtsProcLocks locks);
+void erts_proc_lc_trylock(Process *p, ErtsProcLocks locks, int locked);
+void erts_proc_lc_unlock(Process *p, ErtsProcLocks locks);
+void erts_proc_lc_might_unlock(Process *p, ErtsProcLocks locks);
+void erts_proc_lc_chk_have_proc_locks(Process *p, ErtsProcLocks locks);
+void erts_proc_lc_chk_proc_locks(Process *p, ErtsProcLocks locks);
+void erts_proc_lc_chk_only_proc_main(Process *p);
+void erts_proc_lc_chk_no_proc_locks(char *file, int line);
+ErtsProcLocks erts_proc_lc_my_proc_locks(Process *p);
+int erts_proc_lc_trylock_force_busy(Process *p, ErtsProcLocks locks);
+void erts_proc_lc_require_lock(Process *p, ErtsProcLocks locks);
+void erts_proc_lc_unrequire_lock(Process *p, ErtsProcLocks locks);
+#else
+#define ERTS_SMP_CHK_NO_PROC_LOCKS
+#define ERTS_SMP_CHK_HAVE_ONLY_MAIN_PROC_LOCK(P)
+#endif
+
+#endif /* #ifndef ERTS_PROC_LOCK_LOCK_CHECK__ */
+#endif /* #ifndef ERTS_PROCESS_LOCK_ONLY_PROC_LOCK_TYPE__ */
+
+#if !defined(ERTS_PROCESS_LOCK_ONLY_PROC_LOCK_TYPE__) \
+ && !defined(ERTS_PROCESS_LOCK_ONLY_LOCK_CHECK_PROTO__)
+#ifndef ERTS_PROCESS_LOCK_H__
+#define ERTS_PROCESS_LOCK_H__
+
+#ifdef ERTS_SMP
+
+typedef struct {
+ union {
+#if ERTS_PROC_LOCK_MUTEX_IMPL
+ erts_smp_mtx_t mtx;
+#else
+ erts_smp_spinlock_t spnlck;
+#endif
+ char buf[64]; /* Try to get locks in different cache lines */
+ } u;
+} erts_pix_lock_t;
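+
+/*
+ * Note (editorial): the 64-byte union above pads each pix lock to a
+ * full (assumed) cache line so that neighboring locks in the
+ * erts_pix_locks array do not cause false sharing between processors;
+ * 64 bytes matches common cache-line sizes but is not guaranteed.
+ */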
+
+#define ERTS_PIX2PIXLOCKIX(PIX) \
+ ((PIX) & ((1 << ERTS_PIX_LOCKS_BITS) - 1))
+#define ERTS_PIX2PIXLOCK(PIX) \
+ (&erts_pix_locks[ERTS_PIX2PIXLOCKIX((PIX))])
+#define ERTS_PID2PIXLOCK(PID) \
+ ERTS_PIX2PIXLOCK(internal_pid_data((PID)))
+
+#if ERTS_PROC_LOCK_ATOMIC_IMPL
+
+#define ERTS_PROC_LOCK_FLGS_BAND_(L, MSK) \
+ ((ErtsProcLocks) erts_smp_atomic_band(&(L)->flags, (long) (MSK)))
+#define ERTS_PROC_LOCK_FLGS_BOR_(L, MSK) \
+ ((ErtsProcLocks) erts_smp_atomic_bor(&(L)->flags, (long) (MSK)))
+#define ERTS_PROC_LOCK_FLGS_CMPXCHG_(L, NEW, EXPECTED) \
+ ((ErtsProcLocks) erts_smp_atomic_cmpxchg(&(L)->flags, \
+ (long) (NEW), (long) (EXPECTED)))
+#define ERTS_PROC_LOCK_FLGS_READ_(L) \
+ ((ErtsProcLocks) erts_smp_atomic_read(&(L)->flags))
+
+#else /* no opt atomic ops */
+
+ERTS_GLB_INLINE ErtsProcLocks erts_proc_lock_flags_band(erts_proc_lock_t *,
+ ErtsProcLocks);
+ERTS_GLB_INLINE ErtsProcLocks erts_proc_lock_flags_bor(erts_proc_lock_t *,
+ ErtsProcLocks);
+
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+
+ERTS_GLB_INLINE ErtsProcLocks
+erts_proc_lock_flags_band(erts_proc_lock_t *lck, ErtsProcLocks mask)
+{
+ ErtsProcLocks res = lck->flags;
+ lck->flags &= mask;
+ return res;
+}
+
+ERTS_GLB_INLINE ErtsProcLocks
+erts_proc_lock_flags_bor(erts_proc_lock_t *lck, ErtsProcLocks mask)
+{
+ ErtsProcLocks res = lck->flags;
+ lck->flags |= mask;
+ return res;
+}
+
+ERTS_GLB_INLINE ErtsProcLocks
+erts_proc_lock_flags_cmpxchg(erts_proc_lock_t *lck, ErtsProcLocks new,
+ ErtsProcLocks expected)
+{
+ ErtsProcLocks res = lck->flags;
+ if (res == expected)
+ lck->flags = new;
+ return res;
+}
+
+#endif
+
+#define ERTS_PROC_LOCK_FLGS_BAND_(L, MSK) erts_proc_lock_flags_band((L), (MSK))
+#define ERTS_PROC_LOCK_FLGS_BOR_(L, MSK) erts_proc_lock_flags_bor((L), (MSK))
+#define ERTS_PROC_LOCK_FLGS_CMPXCHG_(L, NEW, EXPECTED) \
+ erts_proc_lock_flags_cmpxchg((L), (NEW), (EXPECTED))
+#define ERTS_PROC_LOCK_FLGS_READ_(L) ((L)->flags)
+
+#endif /* end no opt atomic ops */
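+
+/*
+ * Note (editorial): the plain, non-atomic flag operations above are
+ * safe only because, when ERTS_PROC_LOCK_ATOMIC_IMPL is 0, the lock
+ * flags are always accessed with the pix lock of the process held
+ * (see erts_smp_proc_lock__() and friends below).
+ */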
+
+extern erts_pix_lock_t erts_pix_locks[ERTS_NO_OF_PIX_LOCKS];
+
+void erts_init_proc_lock(void);
+void erts_proc_lock_prepare_proc_lock_waiter(void);
+void erts_proc_lock_failed(Process *,
+ erts_pix_lock_t *,
+ ErtsProcLocks,
+ ErtsProcLocks);
+void erts_proc_unlock_failed(Process *,
+ erts_pix_lock_t *,
+ ErtsProcLocks);
+
+ERTS_GLB_INLINE void erts_pix_lock(erts_pix_lock_t *);
+ERTS_GLB_INLINE void erts_pix_unlock(erts_pix_lock_t *);
+ERTS_GLB_INLINE int erts_lc_pix_lock_is_locked(erts_pix_lock_t *);
+
+ERTS_GLB_INLINE ErtsProcLocks erts_smp_proc_raw_trylock__(Process *p,
+ ErtsProcLocks locks);
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ERTS_GLB_INLINE void erts_smp_proc_lock_x__(Process *,
+ erts_pix_lock_t *,
+ ErtsProcLocks,
+ char *file, unsigned int line);
+#else
+ERTS_GLB_INLINE void erts_smp_proc_lock__(Process *,
+ erts_pix_lock_t *,
+ ErtsProcLocks);
+#endif
+ERTS_GLB_INLINE void erts_smp_proc_unlock__(Process *,
+ erts_pix_lock_t *,
+ ErtsProcLocks);
+ERTS_GLB_INLINE int erts_smp_proc_trylock__(Process *,
+ erts_pix_lock_t *,
+ ErtsProcLocks);
+
+#ifdef ERTS_PROC_LOCK_DEBUG
+ERTS_GLB_INLINE void erts_proc_lock_op_debug(Process *, ErtsProcLocks, int);
+#endif
+
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+
+ERTS_GLB_INLINE void erts_pix_lock(erts_pix_lock_t *pixlck)
+{
+ ERTS_LC_ASSERT(pixlck);
+#if ERTS_PROC_LOCK_MUTEX_IMPL
+ erts_smp_mtx_lock(&pixlck->u.mtx);
+#else
+ erts_smp_spin_lock(&pixlck->u.spnlck);
+#endif
+}
+
+ERTS_GLB_INLINE void erts_pix_unlock(erts_pix_lock_t *pixlck)
+{
+ ERTS_LC_ASSERT(pixlck);
+#if ERTS_PROC_LOCK_MUTEX_IMPL
+ erts_smp_mtx_unlock(&pixlck->u.mtx);
+#else
+ erts_smp_spin_unlock(&pixlck->u.spnlck);
+#endif
+}
+
+ERTS_GLB_INLINE int erts_lc_pix_lock_is_locked(erts_pix_lock_t *pixlck)
+{
+#if ERTS_PROC_LOCK_MUTEX_IMPL
+ return erts_smp_lc_mtx_is_locked(&pixlck->u.mtx);
+#else
+ return erts_smp_lc_spinlock_is_locked(&pixlck->u.spnlck);
+#endif
+}
+
+/*
+ * Helper function for erts_smp_proc_lock__ and erts_smp_proc_trylock__.
+ *
+ * Attempts to grab all of 'locks' simultaneously.
+ *
+ * On success, returns zero.
+ *
+ * On failure, returns the lock flags of p->lock at the moment it tried
+ * to grab them; at least some of these intersect with 'locks', so the
+ * return value is nonzero.
+ *
+ * This assumes p's pix lock is held on entry if !ERTS_PROC_LOCK_ATOMIC_IMPL.
+ * Does not release the pix lock.
+ */
+ERTS_GLB_INLINE ErtsProcLocks
+erts_smp_proc_raw_trylock__(Process *p, ErtsProcLocks locks)
+{
+ ErtsProcLocks expct_lflgs = 0;
+
+ while (1) {
+ ErtsProcLocks lflgs = ERTS_PROC_LOCK_FLGS_CMPXCHG_(&p->lock,
+ expct_lflgs | locks,
+ expct_lflgs);
+ if (ERTS_LIKELY(lflgs == expct_lflgs)) {
+ /* We successfully grabbed all locks. */
+ return 0;
+ }
+
+ if (lflgs & locks) {
+ /* Some locks we need are locked, give up. */
+ return lflgs;
+ }
+
+ /* cmpxchg failed, try again (should be rare). */
+ expct_lflgs = lflgs;
+ }
+}
+
+
+ERTS_GLB_INLINE void
+#ifdef ERTS_ENABLE_LOCK_COUNT
+erts_smp_proc_lock_x__(Process *p,
+ erts_pix_lock_t *pix_lck,
+ ErtsProcLocks locks,
+ char *file, unsigned int line)
+#else
+erts_smp_proc_lock__(Process *p,
+ erts_pix_lock_t *pix_lck,
+ ErtsProcLocks locks)
+#endif
+{
+ ErtsProcLocks old_lflgs;
+#if !ERTS_PROC_LOCK_ATOMIC_IMPL
+ erts_pix_lock(pix_lck);
+#endif
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ erts_lcnt_proc_lock(&(p->lock), locks);
+#endif
+
+ ERTS_LC_ASSERT((locks & ~ERTS_PROC_LOCKS_ALL) == 0);
+
+ old_lflgs = erts_smp_proc_raw_trylock__(p, locks);
+
+ if (old_lflgs != 0) {
+ /*
+ * There is lock contention, so let erts_proc_lock_failed() deal
+ * with it. Note that erts_proc_lock_failed() returns with
+ * pix_lck unlocked.
+ */
+ erts_proc_lock_failed(p, pix_lck, locks, old_lflgs);
+ }
+
+#if !ERTS_PROC_LOCK_ATOMIC_IMPL
+ else {
+ ERTS_LC_ASSERT(locks == (ERTS_PROC_LOCK_FLGS_READ_(&p->lock) & locks));
+ erts_pix_unlock(pix_lck);
+ }
+#endif
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ erts_lcnt_proc_lock_post_x(&(p->lock), locks, file, line);
+#endif
+#ifdef ERTS_ENABLE_LOCK_CHECK
+ erts_proc_lc_lock(p, locks);
+#endif
+#ifdef ERTS_PROC_LOCK_DEBUG
+ erts_proc_lock_op_debug(p, locks, 1);
+#endif
+
+#if ERTS_PROC_LOCK_ATOMIC_IMPL
+ ETHR_COMPILER_BARRIER;
+#endif
+}
+
+ERTS_GLB_INLINE void
+erts_smp_proc_unlock__(Process *p,
+ erts_pix_lock_t *pix_lck,
+ ErtsProcLocks locks)
+{
+ ErtsProcLocks old_lflgs;
+
+#if ERTS_PROC_LOCK_ATOMIC_IMPL
+ ETHR_COMPILER_BARRIER;
+#endif
+
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ erts_lcnt_proc_unlock(&(p->lock), locks);
+#endif
+
+#ifdef ERTS_ENABLE_LOCK_CHECK
+ erts_proc_lc_unlock(p, locks);
+#endif
+#ifdef ERTS_PROC_LOCK_DEBUG
+ erts_proc_lock_op_debug(p, locks, 0);
+#endif
+
+#if !ERTS_PROC_LOCK_ATOMIC_IMPL
+ erts_pix_lock(pix_lck);
+#endif
+
+ old_lflgs = ERTS_PROC_LOCK_FLGS_READ_(&p->lock);
+
+ ERTS_LC_ASSERT((locks & ~ERTS_PROC_LOCKS_ALL) == 0);
+ ERTS_LC_ASSERT(locks == (old_lflgs & locks));
+
+ while (1) {
+ /*
+ * We'll atomically unlock every lock that has no waiter.
+ * If any locks with waiters remain we'll let
+ * erts_proc_unlock_failed() deal with them.
+ */
+ ErtsProcLocks wait_locks =
+ (old_lflgs >> ERTS_PROC_LOCK_WAITER_SHIFT) & locks;
+
+ /* What p->lock will look like with all non-waited locks released. */
+ ErtsProcLocks want_lflgs = old_lflgs & (wait_locks | ~locks);
+
+ if (want_lflgs != old_lflgs) {
+ ErtsProcLocks new_lflgs =
+ ERTS_PROC_LOCK_FLGS_CMPXCHG_(&p->lock, want_lflgs, old_lflgs);
+
+ if (new_lflgs != old_lflgs) {
+ /* cmpxchg failed, try again. */
+ old_lflgs = new_lflgs;
+ continue;
+ }
+ }
+
+ /* We have successfully unlocked every lock with no waiter. */
+
+ if (want_lflgs & locks) {
+ /* Locks with waiters remain. */
+ /* erts_proc_unlock_failed() returns with pix_lck unlocked. */
+ erts_proc_unlock_failed(p, pix_lck, want_lflgs & locks);
+ }
+ else {
+#if !ERTS_PROC_LOCK_ATOMIC_IMPL
+ erts_pix_unlock(pix_lck);
+#endif
+ }
+
+ break;
+ }
+}
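+
+/*
+ * Worked example (editorial): with ERTS_PROC_LOCK_WAITER_SHIFT == 4,
+ * suppose locks == (MAIN | MSGQ) == 0x5 and old_lflgs == 0x15, i.e.
+ * main and msgq locked plus a waiter on main. Then
+ * wait_locks == (0x15 >> 4) & 0x5 == 0x1 (main has a waiter), and
+ * want_lflgs == 0x15 & (0x1 | ~0x5) == 0x11, so msgq is released
+ * directly while main is handed to erts_proc_unlock_failed() for
+ * transfer to the waiter.
+ */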
+
+ERTS_GLB_INLINE int
+erts_smp_proc_trylock__(Process *p,
+ erts_pix_lock_t *pix_lck,
+ ErtsProcLocks locks)
+{
+ int res;
+
+#ifdef ERTS_ENABLE_LOCK_CHECK
+ ERTS_LC_ASSERT((locks & ~ERTS_PROC_LOCKS_ALL) == 0);
+ if (erts_proc_lc_trylock_force_busy(p, locks)) {
+ res = EBUSY; /* Make sure the caller can handle the situation
+ without causing a lock order violation. */
+ }
+ else
+#endif
+ {
+#if !ERTS_PROC_LOCK_ATOMIC_IMPL
+ erts_pix_lock(pix_lck);
+#endif
+
+ if (erts_smp_proc_raw_trylock__(p, locks) != 0) {
+ /* Didn't get all locks... */
+ res = EBUSY;
+
+#if !ERTS_PROC_LOCK_ATOMIC_IMPL
+ erts_pix_unlock(pix_lck);
+#endif
+ }
+ else {
+ res = 0;
+
+ ERTS_LC_ASSERT(locks
+ == (ERTS_PROC_LOCK_FLGS_READ_(&p->lock) & locks));
+
+#if !ERTS_PROC_LOCK_ATOMIC_IMPL
+ erts_pix_unlock(pix_lck);
+#endif
+
+#ifdef ERTS_PROC_LOCK_DEBUG
+ erts_proc_lock_op_debug(p, locks, 1);
+#endif
+ }
+ }
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ erts_lcnt_proc_trylock(&(p->lock), locks, res);
+#endif
+
+#ifdef ERTS_ENABLE_LOCK_CHECK
+ erts_proc_lc_trylock(p, locks, res == 0);
+#endif
+
+#if ERTS_PROC_LOCK_ATOMIC_IMPL
+ ETHR_COMPILER_BARRIER;
+#endif
+
+ return res;
+}
+
+#ifdef ERTS_PROC_LOCK_DEBUG
+ERTS_GLB_INLINE void
+erts_proc_lock_op_debug(Process *p, ErtsProcLocks locks, int locked)
+{
+ int i;
+ for (i = 0; i <= ERTS_PROC_LOCK_MAX_BIT; i++) {
+ ErtsProcLocks lock = ((ErtsProcLocks) 1) << i;
+ if (locks & lock) {
+ long lock_count;
+ if (locked) {
+ lock_count = erts_smp_atomic_inctest(&p->lock.locked[i]);
+ ERTS_LC_ASSERT(lock_count == 1);
+ }
+ else {
+ lock_count = erts_smp_atomic_dectest(&p->lock.locked[i]);
+ ERTS_LC_ASSERT(lock_count == 0);
+ }
+ }
+ }
+}
+#endif
+
+#endif /* #if ERTS_GLB_INLINE_INCL_FUNC_DEF */
+
+#endif /* ERTS_SMP */
+
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ERTS_GLB_INLINE void erts_smp_proc_lock_x(Process *, ErtsProcLocks, char *file, unsigned int line);
+#else
+ERTS_GLB_INLINE void erts_smp_proc_lock(Process *, ErtsProcLocks);
+#endif
+ERTS_GLB_INLINE void erts_smp_proc_unlock(Process *, ErtsProcLocks);
+ERTS_GLB_INLINE int erts_smp_proc_trylock(Process *, ErtsProcLocks);
+
+ERTS_GLB_INLINE void erts_smp_proc_inc_refc(Process *);
+ERTS_GLB_INLINE void erts_smp_proc_dec_refc(Process *);
+
+
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+
+ERTS_GLB_INLINE void
+#ifdef ERTS_ENABLE_LOCK_COUNT
+erts_smp_proc_lock_x(Process *p, ErtsProcLocks locks, char *file, unsigned int line)
+#else
+erts_smp_proc_lock(Process *p, ErtsProcLocks locks)
+#endif
+{
+#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_COUNT)
+ erts_smp_proc_lock_x__(p,
+#if ERTS_PROC_LOCK_ATOMIC_IMPL
+ NULL,
+#else
+ ERTS_PID2PIXLOCK(p->id),
+#endif /*ERTS_PROC_LOCK_ATOMIC_IMPL*/
+ locks, file, line);
+#elif defined(ERTS_SMP)
+ erts_smp_proc_lock__(p,
+#if ERTS_PROC_LOCK_ATOMIC_IMPL
+ NULL,
+#else
+ ERTS_PID2PIXLOCK(p->id),
+#endif /*ERTS_PROC_LOCK_ATOMIC_IMPL*/
+ locks);
+#endif /*ERTS_SMP*/
+}
+
+ERTS_GLB_INLINE void
+erts_smp_proc_unlock(Process *p, ErtsProcLocks locks)
+{
+#ifdef ERTS_SMP
+ erts_smp_proc_unlock__(p,
+#if ERTS_PROC_LOCK_ATOMIC_IMPL
+ NULL,
+#else
+ ERTS_PID2PIXLOCK(p->id),
+#endif
+ locks);
+#endif
+}
+
+ERTS_GLB_INLINE int
+erts_smp_proc_trylock(Process *p, ErtsProcLocks locks)
+{
+#ifndef ERTS_SMP
+ return 0;
+#else
+ return erts_smp_proc_trylock__(p,
+#if ERTS_PROC_LOCK_ATOMIC_IMPL
+ NULL,
+#else
+ ERTS_PID2PIXLOCK(p->id),
+#endif
+ locks);
+#endif
+}
+
+
+ERTS_GLB_INLINE void erts_smp_proc_inc_refc(Process *p)
+{
+#ifdef ERTS_SMP
+ erts_pix_lock_t *pixlck = ERTS_PID2PIXLOCK(p->id);
+ erts_pix_lock(pixlck);
+ ERTS_LC_ASSERT(p->lock.refc > 0);
+ p->lock.refc++;
+ erts_pix_unlock(pixlck);
+#endif
+}
+
+ERTS_GLB_INLINE void erts_smp_proc_dec_refc(Process *p)
+{
+#ifdef ERTS_SMP
+ Process *fp;
+ erts_pix_lock_t *pixlck = ERTS_PID2PIXLOCK(p->id);
+ erts_pix_lock(pixlck);
+ ERTS_LC_ASSERT(p->lock.refc > 0);
+ fp = --p->lock.refc == 0 ? p : NULL;
+ erts_pix_unlock(pixlck);
+ if (fp)
+ erts_free_proc(fp);
+#endif
+}
+
+#endif /* #if ERTS_GLB_INLINE_INCL_FUNC_DEF */
+
+#ifdef ERTS_SMP
+void erts_proc_lock_init(Process *);
+void erts_proc_safelock(Process *a_proc,
+ ErtsProcLocks a_have_locks,
+ ErtsProcLocks a_need_locks,
+ Process *b_proc,
+ ErtsProcLocks b_have_locks,
+ ErtsProcLocks b_need_locks);
+#endif
+
+/*
+ * --- Process table lookup ------------------------------------------------
+ *
+ * erts_pid2proc() and friends look up the process structure of a pid
+ * and at the same time acquire process locks in the SMP case. Locks
+ * on the currently executing process and on the looked-up process are
+ * taken according to the lock order, i.e., locks on the currently
+ * executing process may have been temporarily released and reacquired.
+ *
+ * erts_pid2proc_opt() currently accepts the following flags:
+ * ERTS_P2P_FLG_ALLOW_OTHER_X Look up the process even if it
+ * currently is exiting.
+ * ERTS_P2P_FLG_TRY_LOCK Only trylock the needed locks; return
+ * ERTS_PROC_LOCK_BUSY if they cannot be
+ * taken immediately.
+ * ERTS_P2P_FLG_SMP_INC_REFC Increment the lock refc of the
+ * looked-up process (SMP only).
+ */
+
+#define ERTS_P2P_FLG_ALLOW_OTHER_X (1 << 0)
+#define ERTS_P2P_FLG_TRY_LOCK (1 << 1)
+#define ERTS_P2P_FLG_SMP_INC_REFC (1 << 2)
+
+#define ERTS_PROC_LOCK_BUSY ((Process *) &erts_proc_lock_busy)
+extern const Process erts_proc_lock_busy;
+
+#define erts_pid2proc(PROC, HL, PID, NL) \
+ erts_pid2proc_opt((PROC), (HL), (PID), (NL), 0)
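+
+/*
+ * Illustrative usage sketch (editorial addition, not part of the
+ * original source; the function name is hypothetical): looking up a
+ * process with ERTS_P2P_FLG_TRY_LOCK and handling the busy case.
+ */
+#if 0
+static Process *example_try_lookup(Process *c_p, Eterm pid)
+{
+    Process *rp = erts_pid2proc_opt(c_p, ERTS_PROC_LOCK_MAIN,
+                                    pid, ERTS_PROC_LOCK_STATUS,
+                                    ERTS_P2P_FLG_TRY_LOCK);
+    if (rp == ERTS_PROC_LOCK_BUSY) {
+        /* The needed locks were contended; retry later instead of
+         * blocking. */
+        return NULL;
+    }
+    /* rp is NULL (no such process) or locked with the status lock. */
+    return rp;
+}
+#endif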
+
+ERTS_GLB_INLINE Process *
+erts_pid2proc_opt(Process *, ErtsProcLocks, Eterm, ErtsProcLocks, int);
+
+#ifdef ERTS_SMP
+void
+erts_pid2proc_safelock(Process *c_p,
+ ErtsProcLocks c_p_have_locks,
+ Process **proc,
+ ErtsProcLocks need_locks,
+ erts_pix_lock_t *pix_lock,
+ int flags);
+ERTS_GLB_INLINE Process *erts_pid2proc_unlocked_opt(Eterm pid, int flags);
+#define erts_pid2proc_unlocked(PID) erts_pid2proc_unlocked_opt((PID), 0)
+#else
+#define erts_pid2proc_unlocked_opt(PID, FLGS) \
+ erts_pid2proc_opt(NULL, 0, (PID), 0, FLGS)
+#define erts_pid2proc_unlocked(PID) erts_pid2proc_opt(NULL, 0, (PID), 0, 0)
+#endif
+
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+
+ERTS_GLB_INLINE Process *
+#ifdef ERTS_SMP
+erts_pid2proc_unlocked_opt(Eterm pid, int flags)
+#else
+erts_pid2proc_opt(Process *c_p_unused,
+ ErtsProcLocks c_p_have_locks_unused,
+ Eterm pid,
+ ErtsProcLocks pid_need_locks_unused,
+ int flags)
+#endif
+{
+ Uint pix;
+ Process *proc;
+
+ if (is_not_internal_pid(pid))
+ return NULL;
+ pix = internal_pid_index(pid);
+ if (pix >= erts_max_processes)
+ return NULL;
+ proc = process_tab[pix];
+ if (proc) {
+ if (proc->id != pid
+ || (!(flags & ERTS_P2P_FLG_ALLOW_OTHER_X)
+ && proc->status == P_EXITING))
+ proc = NULL;
+ }
+ return proc;
+}
+
+#ifdef ERTS_SMP
+
+ERTS_GLB_INLINE Process *
+erts_pid2proc_opt(Process *c_p,
+ ErtsProcLocks c_p_have_locks,
+ Eterm pid,
+ ErtsProcLocks pid_need_locks,
+ int flags)
+{
+ erts_pix_lock_t *pix_lock;
+ ErtsProcLocks need_locks;
+ Uint pix;
+ Process *proc;
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ ErtsProcLocks lcnt_locks;
+#endif
+
+#ifdef ERTS_ENABLE_LOCK_CHECK
+ if (c_p) {
+ ErtsProcLocks might_unlock = c_p_have_locks & pid_need_locks;
+ if (might_unlock)
+ erts_proc_lc_might_unlock(c_p, might_unlock);
+ }
+#endif
+ if (is_not_internal_pid(pid)) {
+ proc = NULL;
+ goto done;
+ }
+ pix = internal_pid_index(pid);
+ if (pix >= erts_max_processes) {
+ proc = NULL;
+ goto done;
+ }
+
+ ERTS_LC_ASSERT((pid_need_locks & ERTS_PROC_LOCKS_ALL) == pid_need_locks);
+ need_locks = pid_need_locks;
+
+ pix_lock = ERTS_PIX2PIXLOCK(pix);
+
+ if (c_p && c_p->id == pid) {
+ ASSERT(c_p->id != ERTS_INVALID_PID);
+ ASSERT(c_p == process_tab[pix]);
+ if (!(flags & ERTS_P2P_FLG_ALLOW_OTHER_X) && c_p->is_exiting) {
+ proc = NULL;
+ goto done;
+ }
+ need_locks &= ~c_p_have_locks;
+ if (!need_locks) {
+ proc = c_p;
+ erts_pix_lock(pix_lock);
+ if (flags & ERTS_P2P_FLG_SMP_INC_REFC)
+ proc->lock.refc++;
+ erts_pix_unlock(pix_lock);
+ goto done;
+ }
+ }
+
+ erts_pix_lock(pix_lock);
+
+ proc = process_tab[pix];
+ if (proc) {
+ if (proc->id != pid || (!(flags & ERTS_P2P_FLG_ALLOW_OTHER_X)
+ && ERTS_PROC_IS_EXITING(proc))) {
+ proc = NULL;
+ }
+ else if (!need_locks) {
+ if (flags & ERTS_P2P_FLG_SMP_INC_REFC)
+ proc->lock.refc++;
+ }
+ else {
+ int busy;
+
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ lcnt_locks = need_locks;
+ if (!(flags & ERTS_P2P_FLG_TRY_LOCK)) {
+ erts_lcnt_proc_lock(&proc->lock, need_locks);
+ }
+#endif
+
+#ifdef ERTS_ENABLE_LOCK_CHECK
+ /* Make sure erts_pid2proc_safelock() is enough to handle
+ a potential lock order violation situation... */
+ busy = erts_proc_lc_trylock_force_busy(proc, need_locks);
+ if (!busy)
+#endif
+ {
+ /* Try a quick trylock to grab all the locks we need. */
+ busy = (int) erts_smp_proc_raw_trylock__(proc, need_locks);
+#ifdef ERTS_ENABLE_LOCK_CHECK
+ erts_proc_lc_trylock(proc, need_locks, !busy);
+#endif
+#ifdef ERTS_PROC_LOCK_DEBUG
+ if (!busy)
+ erts_proc_lock_op_debug(proc, need_locks, 1);
+#endif
+ }
+
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ if (flags & ERTS_P2P_FLG_TRY_LOCK) {
+ if (busy) {
+ erts_lcnt_proc_trylock(&proc->lock, need_locks, EBUSY);
+ } else {
+ erts_lcnt_proc_trylock(&proc->lock, need_locks, 0);
+ }
+ }
+#endif
+ if (!busy) {
+ if (flags & ERTS_P2P_FLG_SMP_INC_REFC)
+ proc->lock.refc++;
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ /* all is great */
+ if (!(flags & ERTS_P2P_FLG_TRY_LOCK)) {
+ erts_lcnt_proc_lock_post_x(&proc->lock, lcnt_locks, __FILE__, __LINE__);
+ }
+#endif
+ }
+ else {
+ if (flags & ERTS_P2P_FLG_TRY_LOCK)
+ proc = ERTS_PROC_LOCK_BUSY;
+ else {
+ if (flags & ERTS_P2P_FLG_SMP_INC_REFC)
+ proc->lock.refc++;
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ erts_lcnt_proc_lock_unaquire(&proc->lock, lcnt_locks);
+#endif
+ erts_pid2proc_safelock(c_p,
+ c_p_have_locks,
+ &proc,
+ pid_need_locks,
+ pix_lock,
+ flags);
+ }
+ }
+ }
+ }
+
+ erts_pix_unlock(pix_lock);
+#ifdef ERTS_PROC_LOCK_DEBUG
+ ERTS_LC_ASSERT(!proc
+ || proc == ERTS_PROC_LOCK_BUSY
+ || (pid_need_locks ==
+ (ERTS_PROC_LOCK_FLGS_READ_(&proc->lock)
+ & pid_need_locks)));
+#endif
+
+
+ done:
+
+#if ERTS_PROC_LOCK_ATOMIC_IMPL
+ ETHR_COMPILER_BARRIER;
+#endif
+
+ return proc;
+}
+#endif /* ERTS_SMP */
+
+#endif /* #if ERTS_GLB_INLINE_INCL_FUNC_DEF */
+
+#endif /* #ifndef ERTS_PROCESS_LOCK_H__ */
+#endif /* #if !defined(ERTS_PROCESS_LOCK_ONLY_PROC_LOCK_TYPE__)
+ && !defined(ERTS_PROCESS_LOCK_ONLY_LOCK_CHECK_PROTO__) */
diff --git a/erts/emulator/beam/erl_resolv_dns.c b/erts/emulator/beam/erl_resolv_dns.c
new file mode 100644
index 0000000000..9d76fa89f8
--- /dev/null
+++ b/erts/emulator/beam/erl_resolv_dns.c
@@ -0,0 +1,23 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 1997-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Set this to non-zero value if DNS should be used.
+ */
+int erl_use_resolver = 1;
diff --git a/erts/emulator/beam/erl_resolv_nodns.c b/erts/emulator/beam/erl_resolv_nodns.c
new file mode 100644
index 0000000000..f14ab68e27
--- /dev/null
+++ b/erts/emulator/beam/erl_resolv_nodns.c
@@ -0,0 +1,23 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 1997-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Set this to non-zero value if DNS should be used.
+ */
+int erl_use_resolver = 0;
diff --git a/erts/emulator/beam/erl_smp.h b/erts/emulator/beam/erl_smp.h
new file mode 100644
index 0000000000..03d2a586e3
--- /dev/null
+++ b/erts/emulator/beam/erl_smp.h
@@ -0,0 +1,993 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2005-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+/*
+ * SMP interface to ethread library.
+ * This is essentially "sed s/erts_/erts_smp_/g < erl_threads.h > erl_smp.h",
+ * plus turning the operations into no-ops when ERTS_SMP is disabled.
+ * Author: Mikael Pettersson
+ */
+#ifndef ERL_SMP_H
+#define ERL_SMP_H
+#include "erl_threads.h"
+
+#ifdef ERTS_ENABLE_LOCK_COUNT
+#define erts_smp_mtx_lock(L) erts_smp_mtx_lock_x(L, __FILE__, __LINE__)
+#define erts_smp_spin_lock(L) erts_smp_spin_lock_x(L, __FILE__, __LINE__)
+#define erts_smp_rwmtx_rlock(L) erts_smp_rwmtx_rlock_x(L, __FILE__, __LINE__)
+#define erts_smp_rwmtx_rwlock(L) erts_smp_rwmtx_rwlock_x(L, __FILE__, __LINE__)
+#define erts_smp_read_lock(L) erts_smp_read_lock_x(L, __FILE__, __LINE__)
+#define erts_smp_write_lock(L) erts_smp_write_lock_x(L, __FILE__, __LINE__)
+#endif
+
+
+#ifdef ERTS_SMP
+#define ERTS_SMP_THR_OPTS_DEFAULT_INITER ERTS_THR_OPTS_DEFAULT_INITER
+typedef erts_thr_opts_t erts_smp_thr_opts_t;
+typedef erts_thr_init_data_t erts_smp_thr_init_data_t;
+typedef erts_tid_t erts_smp_tid_t;
+typedef erts_mtx_t erts_smp_mtx_t;
+typedef erts_cnd_t erts_smp_cnd_t;
+typedef erts_rwmtx_t erts_smp_rwmtx_t;
+typedef erts_tsd_key_t erts_smp_tsd_key_t;
+typedef erts_gate_t erts_smp_gate_t;
+typedef ethr_atomic_t erts_smp_atomic_t;
+typedef erts_spinlock_t erts_smp_spinlock_t;
+typedef erts_rwlock_t erts_smp_rwlock_t;
+typedef erts_thr_timeval_t erts_smp_thr_timeval_t;
+void erts_thr_fatal_error(int, char *); /* implemented in erl_init.c */
+
+#else /* #ifdef ERTS_SMP */
+
+#define ERTS_SMP_THR_OPTS_DEFAULT_INITER 0
+typedef int erts_smp_thr_opts_t;
+typedef int erts_smp_thr_init_data_t;
+typedef int erts_smp_tid_t;
+typedef int erts_smp_mtx_t;
+typedef int erts_smp_cnd_t;
+typedef int erts_smp_rwmtx_t;
+typedef int erts_smp_tsd_key_t;
+typedef int erts_smp_gate_t;
+typedef long erts_smp_atomic_t;
+#if __GNUC__ > 2
+typedef struct { } erts_smp_spinlock_t;
+typedef struct { } erts_smp_rwlock_t;
+#else
+typedef struct { int gcc_is_buggy; } erts_smp_spinlock_t;
+typedef struct { int gcc_is_buggy; } erts_smp_rwlock_t;
+#endif
+
+typedef struct {
+ long tv_sec;
+ long tv_nsec;
+} erts_smp_thr_timeval_t;
+
+#endif /* #ifdef ERTS_SMP */
+
+ERTS_GLB_INLINE void erts_smp_thr_init(erts_smp_thr_init_data_t *id);
+ERTS_GLB_INLINE void erts_smp_thr_create(erts_smp_tid_t *tid,
+ void * (*func)(void *),
+ void *arg,
+ erts_smp_thr_opts_t *opts);
+ERTS_GLB_INLINE void erts_smp_thr_join(erts_smp_tid_t tid, void **thr_res);
+ERTS_GLB_INLINE void erts_smp_thr_detach(erts_smp_tid_t tid);
+ERTS_GLB_INLINE void erts_smp_thr_exit(void *res);
+ERTS_GLB_INLINE void erts_smp_install_exit_handler(void (*exit_handler)(void));
+ERTS_GLB_INLINE erts_smp_tid_t erts_smp_thr_self(void);
+ERTS_GLB_INLINE int erts_smp_equal_tids(erts_smp_tid_t x, erts_smp_tid_t y);
+#ifdef ERTS_HAVE_REC_MTX_INIT
+#define ERTS_SMP_HAVE_REC_MTX_INIT 1
+ERTS_GLB_INLINE void erts_smp_rec_mtx_init(erts_smp_mtx_t *mtx);
+#endif
+ERTS_GLB_INLINE void erts_smp_mtx_init_x(erts_smp_mtx_t *mtx,
+ char *name,
+ Eterm extra);
+ERTS_GLB_INLINE void erts_smp_mtx_init_locked_x(erts_smp_mtx_t *mtx,
+ char *name,
+ Eterm extra);
+ERTS_GLB_INLINE void erts_smp_mtx_init(erts_smp_mtx_t *mtx, char *name);
+ERTS_GLB_INLINE void erts_smp_mtx_init_locked(erts_smp_mtx_t *mtx, char *name);
+ERTS_GLB_INLINE void erts_smp_mtx_destroy(erts_smp_mtx_t *mtx);
+ERTS_GLB_INLINE void erts_smp_mtx_set_forksafe(erts_smp_mtx_t *mtx);
+ERTS_GLB_INLINE void erts_smp_mtx_unset_forksafe(erts_smp_mtx_t *mtx);
+ERTS_GLB_INLINE int erts_smp_mtx_trylock(erts_smp_mtx_t *mtx);
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ERTS_GLB_INLINE void erts_smp_mtx_lock_x(erts_smp_mtx_t *mtx, char *file, int line);
+#else
+ERTS_GLB_INLINE void erts_smp_mtx_lock(erts_smp_mtx_t *mtx);
+#endif
+ERTS_GLB_INLINE void erts_smp_mtx_unlock(erts_smp_mtx_t *mtx);
+ERTS_GLB_INLINE int erts_smp_lc_mtx_is_locked(erts_smp_mtx_t *mtx);
+ERTS_GLB_INLINE void erts_smp_cnd_init(erts_smp_cnd_t *cnd);
+ERTS_GLB_INLINE void erts_smp_cnd_destroy(erts_smp_cnd_t *cnd);
+ERTS_GLB_INLINE void erts_smp_cnd_wait(erts_smp_cnd_t *cnd,
+ erts_smp_mtx_t *mtx);
+ERTS_GLB_INLINE void erts_smp_cnd_signal(erts_smp_cnd_t *cnd);
+ERTS_GLB_INLINE void erts_smp_cnd_broadcast(erts_smp_cnd_t *cnd);
+ERTS_GLB_INLINE void erts_smp_rwmtx_init_x(erts_smp_rwmtx_t *rwmtx,
+ char *name,
+ Eterm extra);
+ERTS_GLB_INLINE void erts_smp_rwmtx_init(erts_smp_rwmtx_t *rwmtx,
+ char *name);
+ERTS_GLB_INLINE void erts_smp_rwmtx_destroy(erts_smp_rwmtx_t *rwmtx);
+ERTS_GLB_INLINE int erts_smp_rwmtx_tryrlock(erts_smp_rwmtx_t *rwmtx);
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ERTS_GLB_INLINE void erts_smp_rwmtx_rlock_x(erts_smp_rwmtx_t *rwmtx, char *file, unsigned int line);
+ERTS_GLB_INLINE void erts_smp_rwmtx_rwlock_x(erts_smp_rwmtx_t *rwmtx, char *file, unsigned int line);
+#else
+ERTS_GLB_INLINE void erts_smp_rwmtx_rlock(erts_smp_rwmtx_t *rwmtx);
+ERTS_GLB_INLINE void erts_smp_rwmtx_rwlock(erts_smp_rwmtx_t *rwmtx);
+#endif
+ERTS_GLB_INLINE void erts_smp_rwmtx_runlock(erts_smp_rwmtx_t *rwmtx);
+ERTS_GLB_INLINE int erts_smp_rwmtx_tryrwlock(erts_smp_rwmtx_t *rwmtx);
+ERTS_GLB_INLINE void erts_smp_rwmtx_rwunlock(erts_smp_rwmtx_t *rwmtx);
+ERTS_GLB_INLINE int erts_smp_lc_rwmtx_is_rlocked(erts_smp_rwmtx_t *mtx);
+ERTS_GLB_INLINE int erts_smp_lc_rwmtx_is_rwlocked(erts_smp_rwmtx_t *mtx);
+ERTS_GLB_INLINE void erts_smp_atomic_init(erts_smp_atomic_t *var, long i);
+ERTS_GLB_INLINE void erts_smp_atomic_set(erts_smp_atomic_t *var, long i);
+ERTS_GLB_INLINE long erts_smp_atomic_read(erts_smp_atomic_t *var);
+ERTS_GLB_INLINE long erts_smp_atomic_inctest(erts_smp_atomic_t *incp);
+ERTS_GLB_INLINE long erts_smp_atomic_dectest(erts_smp_atomic_t *decp);
+ERTS_GLB_INLINE void erts_smp_atomic_inc(erts_smp_atomic_t *incp);
+ERTS_GLB_INLINE void erts_smp_atomic_dec(erts_smp_atomic_t *decp);
+ERTS_GLB_INLINE long erts_smp_atomic_addtest(erts_smp_atomic_t *addp,
+ long i);
+ERTS_GLB_INLINE void erts_smp_atomic_add(erts_smp_atomic_t *addp, long i);
+ERTS_GLB_INLINE long erts_smp_atomic_xchg(erts_smp_atomic_t *xchgp,
+ long new);
+ERTS_GLB_INLINE long erts_smp_atomic_cmpxchg(erts_smp_atomic_t *xchgp,
+ long new,
+ long expected);
+ERTS_GLB_INLINE long erts_smp_atomic_bor(erts_smp_atomic_t *var, long mask);
+ERTS_GLB_INLINE long erts_smp_atomic_band(erts_smp_atomic_t *var, long mask);
+ERTS_GLB_INLINE void erts_smp_spinlock_init_x(erts_smp_spinlock_t *lock,
+ char *name,
+ Eterm extra);
+ERTS_GLB_INLINE void erts_smp_spinlock_init(erts_smp_spinlock_t *lock,
+ char *name);
+ERTS_GLB_INLINE void erts_smp_spinlock_destroy(erts_smp_spinlock_t *lock);
+ERTS_GLB_INLINE void erts_smp_spin_unlock(erts_smp_spinlock_t *lock);
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ERTS_GLB_INLINE void erts_smp_spin_lock_x(erts_smp_spinlock_t *lock, char *file, unsigned int line);
+#else
+ERTS_GLB_INLINE void erts_smp_spin_lock(erts_smp_spinlock_t *lock);
+#endif
+ERTS_GLB_INLINE int erts_smp_lc_spinlock_is_locked(erts_smp_spinlock_t *lock);
+ERTS_GLB_INLINE void erts_smp_rwlock_init_x(erts_smp_rwlock_t *lock,
+ char *name,
+ Eterm extra);
+ERTS_GLB_INLINE void erts_smp_rwlock_init(erts_smp_rwlock_t *lock,
+ char *name);
+ERTS_GLB_INLINE void erts_smp_rwlock_destroy(erts_smp_rwlock_t *lock);
+ERTS_GLB_INLINE void erts_smp_read_unlock(erts_smp_rwlock_t *lock);
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ERTS_GLB_INLINE void erts_smp_read_lock_x(erts_smp_rwlock_t *lock, char *file, unsigned int line);
+ERTS_GLB_INLINE void erts_smp_write_lock_x(erts_smp_rwlock_t *lock, char *file, unsigned int line);
+#else
+ERTS_GLB_INLINE void erts_smp_read_lock(erts_smp_rwlock_t *lock);
+ERTS_GLB_INLINE void erts_smp_write_lock(erts_smp_rwlock_t *lock);
+#endif
+ERTS_GLB_INLINE void erts_smp_write_unlock(erts_smp_rwlock_t *lock);
+ERTS_GLB_INLINE int erts_smp_lc_rwlock_is_rlocked(erts_smp_rwlock_t *lock);
+ERTS_GLB_INLINE int erts_smp_lc_rwlock_is_rwlocked(erts_smp_rwlock_t *lock);
+ERTS_GLB_INLINE void erts_smp_thr_time_now(erts_smp_thr_timeval_t *time);
+ERTS_GLB_INLINE void erts_smp_tsd_key_create(erts_smp_tsd_key_t *keyp);
+ERTS_GLB_INLINE void erts_smp_tsd_key_delete(erts_smp_tsd_key_t key);
+ERTS_GLB_INLINE void erts_smp_tsd_set(erts_smp_tsd_key_t key, void *value);
+ERTS_GLB_INLINE void * erts_smp_tsd_get(erts_smp_tsd_key_t key);
+ERTS_GLB_INLINE void erts_smp_gate_init(erts_smp_gate_t *gp);
+ERTS_GLB_INLINE void erts_smp_gate_destroy(erts_smp_gate_t *gp);
+ERTS_GLB_INLINE void erts_smp_gate_close(erts_smp_gate_t *gp);
+ERTS_GLB_INLINE void erts_smp_gate_let_through(erts_smp_gate_t *gp, unsigned no);
+ERTS_GLB_INLINE void erts_smp_gate_wait(erts_smp_gate_t *gp);
+ERTS_GLB_INLINE void erts_smp_gate_swait(erts_smp_gate_t *gp, int spincount);
+
+#ifdef ERTS_THR_HAVE_SIG_FUNCS
+#define ERTS_SMP_THR_HAVE_SIG_FUNCS 1
+ERTS_GLB_INLINE void erts_smp_thr_sigmask(int how,
+ const sigset_t *set,
+ sigset_t *oset);
+ERTS_GLB_INLINE void erts_smp_thr_sigwait(const sigset_t *set, int *sig);
+#endif /* #ifdef ERTS_THR_HAVE_SIG_FUNCS */
+
+
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+
+ERTS_GLB_INLINE void
+erts_smp_thr_init(erts_smp_thr_init_data_t *id)
+{
+#ifdef ERTS_SMP
+ erts_thr_init(id);
+#endif
+}
+
+ERTS_GLB_INLINE void
+erts_smp_thr_create(erts_smp_tid_t *tid, void * (*func)(void *), void *arg,
+ erts_smp_thr_opts_t *opts)
+{
+#ifdef ERTS_SMP
+ erts_thr_create(tid, func, arg, opts);
+#endif
+}
+
+ERTS_GLB_INLINE void
+erts_smp_thr_join(erts_smp_tid_t tid, void **thr_res)
+{
+#ifdef ERTS_SMP
+ erts_thr_join(tid, thr_res);
+#endif
+}
+
+
+ERTS_GLB_INLINE void
+erts_smp_thr_detach(erts_smp_tid_t tid)
+{
+#ifdef ERTS_SMP
+ erts_thr_detach(tid);
+#endif
+}
+
+
+ERTS_GLB_INLINE void
+erts_smp_thr_exit(void *res)
+{
+#ifdef ERTS_SMP
+ erts_thr_exit(res);
+#endif
+}
+
+ERTS_GLB_INLINE void
+erts_smp_install_exit_handler(void (*exit_handler)(void))
+{
+#ifdef ERTS_SMP
+ erts_thr_install_exit_handler(exit_handler);
+#endif
+}
+
+ERTS_GLB_INLINE erts_smp_tid_t
+erts_smp_thr_self(void)
+{
+#ifdef ERTS_SMP
+ return erts_thr_self();
+#else
+ return 0;
+#endif
+}
+
+
+ERTS_GLB_INLINE int
+erts_smp_equal_tids(erts_smp_tid_t x, erts_smp_tid_t y)
+{
+#ifdef ERTS_SMP
+ return erts_equal_tids(x, y);
+#else
+ return 1;
+#endif
+}
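+
+/*
+ * Each erts_smp_* wrapper above forwards to the corresponding erts_*
+ * threading primitive when ERTS_SMP is defined and compiles away to a
+ * no-op (or a trivial constant) otherwise, so callers need no
+ * conditional compilation of their own. Illustrative caller, with
+ * hypothetical worker/arg names:
+ *
+ *   erts_smp_tid_t tid;
+ *   erts_smp_thr_create(&tid, worker, arg, NULL);
+ *   ...
+ *   erts_smp_thr_join(tid, NULL);
+ */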
+
+
+#ifdef ERTS_HAVE_REC_MTX_INIT
+ERTS_GLB_INLINE void
+erts_smp_rec_mtx_init(erts_smp_mtx_t *mtx)
+{
+#ifdef ERTS_SMP
+ erts_rec_mtx_init(mtx);
+#endif
+}
+#endif
+
+ERTS_GLB_INLINE void
+erts_smp_mtx_init_x(erts_smp_mtx_t *mtx, char *name, Eterm extra)
+{
+#ifdef ERTS_SMP
+ erts_mtx_init_x(mtx, name, extra);
+#endif
+}
+
+ERTS_GLB_INLINE void
+erts_smp_mtx_init_locked_x(erts_smp_mtx_t *mtx, char *name, Eterm extra)
+{
+#ifdef ERTS_SMP
+ erts_mtx_init_locked_x(mtx, name, extra);
+#endif
+}
+
+ERTS_GLB_INLINE void
+erts_smp_mtx_init(erts_smp_mtx_t *mtx, char *name)
+{
+#ifdef ERTS_SMP
+ erts_mtx_init(mtx, name);
+#endif
+}
+
+ERTS_GLB_INLINE void
+erts_smp_mtx_init_locked(erts_smp_mtx_t *mtx, char *name)
+{
+#ifdef ERTS_SMP
+ erts_mtx_init_locked(mtx, name);
+#endif
+}
+
+ERTS_GLB_INLINE void
+erts_smp_mtx_destroy(erts_smp_mtx_t *mtx)
+{
+#ifdef ERTS_SMP
+ erts_mtx_destroy(mtx);
+#endif
+}
+
+ERTS_GLB_INLINE void
+erts_smp_mtx_set_forksafe(erts_smp_mtx_t *mtx)
+{
+#ifdef ERTS_SMP
+ erts_mtx_set_forksafe(mtx);
+#endif
+}
+
+ERTS_GLB_INLINE void
+erts_smp_mtx_unset_forksafe(erts_smp_mtx_t *mtx)
+{
+#ifdef ERTS_SMP
+ erts_mtx_unset_forksafe(mtx);
+#endif
+}
+
+ERTS_GLB_INLINE int
+erts_smp_mtx_trylock(erts_smp_mtx_t *mtx)
+{
+#ifdef ERTS_SMP
+ return erts_mtx_trylock(mtx);
+#else
+ return 0;
+#endif
+}
+
+
+ERTS_GLB_INLINE void
+#ifdef ERTS_ENABLE_LOCK_COUNT
+erts_smp_mtx_lock_x(erts_smp_mtx_t *mtx, char *file, int line)
+#else
+erts_smp_mtx_lock(erts_smp_mtx_t *mtx)
+#endif
+{
+#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_COUNT)
+ erts_mtx_lock_x(mtx, file, line);
+#elif defined(ERTS_SMP)
+ erts_mtx_lock(mtx);
+#endif
+}
+
+ERTS_GLB_INLINE void
+erts_smp_mtx_unlock(erts_smp_mtx_t *mtx)
+{
+#ifdef ERTS_SMP
+ erts_mtx_unlock(mtx);
+#endif
+}
+
+ERTS_GLB_INLINE int
+erts_smp_lc_mtx_is_locked(erts_smp_mtx_t *mtx)
+{
+#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK)
+ return erts_lc_mtx_is_locked(mtx);
+#else
+ return 0;
+#endif
+}
+
+ERTS_GLB_INLINE void
+erts_smp_cnd_init(erts_smp_cnd_t *cnd)
+{
+#ifdef ERTS_SMP
+ erts_cnd_init(cnd);
+#endif
+}
+
+ERTS_GLB_INLINE void
+erts_smp_cnd_destroy(erts_smp_cnd_t *cnd)
+{
+#ifdef ERTS_SMP
+ erts_cnd_destroy(cnd);
+#endif
+}
+
+ERTS_GLB_INLINE void
+erts_smp_cnd_wait(erts_smp_cnd_t *cnd, erts_smp_mtx_t *mtx)
+{
+#ifdef ERTS_SMP
+ erts_cnd_wait(cnd, mtx);
+#endif
+}
+
+ERTS_GLB_INLINE void
+erts_smp_cnd_signal(erts_smp_cnd_t *cnd)
+{
+#ifdef ERTS_SMP
+ erts_cnd_signal(cnd);
+#endif
+}
+
+
+ERTS_GLB_INLINE void
+erts_smp_cnd_broadcast(erts_smp_cnd_t *cnd)
+{
+#ifdef ERTS_SMP
+ erts_cnd_broadcast(cnd);
+#endif
+}
+
+ERTS_GLB_INLINE void
+erts_smp_rwmtx_init_x(erts_smp_rwmtx_t *rwmtx, char *name, Eterm extra)
+{
+#ifdef ERTS_SMP
+ erts_rwmtx_init_x(rwmtx, name, extra);
+#endif
+}
+
+ERTS_GLB_INLINE void
+erts_smp_rwmtx_init(erts_smp_rwmtx_t *rwmtx, char *name)
+{
+#ifdef ERTS_SMP
+ erts_rwmtx_init(rwmtx, name);
+#endif
+}
+
+ERTS_GLB_INLINE void
+erts_smp_rwmtx_destroy(erts_smp_rwmtx_t *rwmtx)
+{
+#ifdef ERTS_SMP
+ erts_rwmtx_destroy(rwmtx);
+#endif
+}
+
+ERTS_GLB_INLINE int
+erts_smp_rwmtx_tryrlock(erts_smp_rwmtx_t *rwmtx)
+{
+#ifdef ERTS_SMP
+ return erts_rwmtx_tryrlock(rwmtx);
+#else
+ return 0;
+#endif
+}
+
+ERTS_GLB_INLINE void
+#ifdef ERTS_ENABLE_LOCK_COUNT
+erts_smp_rwmtx_rlock_x(erts_smp_rwmtx_t *rwmtx, char *file, unsigned int line)
+#else
+erts_smp_rwmtx_rlock(erts_smp_rwmtx_t *rwmtx)
+#endif
+{
+#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_COUNT)
+ erts_rwmtx_rlock_x(rwmtx, file, line);
+#elif defined(ERTS_SMP)
+ erts_rwmtx_rlock(rwmtx);
+#endif
+}
+
+ERTS_GLB_INLINE void
+erts_smp_rwmtx_runlock(erts_smp_rwmtx_t *rwmtx)
+{
+#ifdef ERTS_SMP
+ erts_rwmtx_runlock(rwmtx);
+#endif
+}
+
+
+ERTS_GLB_INLINE int
+erts_smp_rwmtx_tryrwlock(erts_smp_rwmtx_t *rwmtx)
+{
+#ifdef ERTS_SMP
+ return erts_rwmtx_tryrwlock(rwmtx);
+#else
+ return 0;
+#endif
+}
+
+ERTS_GLB_INLINE void
+#ifdef ERTS_ENABLE_LOCK_COUNT
+erts_smp_rwmtx_rwlock_x(erts_smp_rwmtx_t *rwmtx, char *file, unsigned int line)
+#else
+erts_smp_rwmtx_rwlock(erts_smp_rwmtx_t *rwmtx)
+#endif
+{
+#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_COUNT)
+ erts_rwmtx_rwlock_x(rwmtx, file, line);
+#elif defined(ERTS_SMP)
+ erts_rwmtx_rwlock(rwmtx);
+#endif
+}
+
+ERTS_GLB_INLINE void
+erts_smp_rwmtx_rwunlock(erts_smp_rwmtx_t *rwmtx)
+{
+#ifdef ERTS_SMP
+ erts_rwmtx_rwunlock(rwmtx);
+#endif
+}
+
+#if 0 /* The following rwmtx function names are
+ reserved for potential future use. */
+
+/* Try upgrade from r-locked state to rw-locked state */
+ERTS_GLB_INLINE int
+erts_smp_rwmtx_trywlock(erts_smp_rwmtx_t *rwmtx)
+{
+ return 0;
+}
+
+/* Upgrade from r-locked state to rw-locked state */
+ERTS_GLB_INLINE void
+erts_smp_rwmtx_wlock(erts_smp_rwmtx_t *rwmtx)
+{
+
+}
+
+/* Downgrade from rw-locked state to r-locked state */
+ERTS_GLB_INLINE void
+erts_smp_rwmtx_wunlock(erts_smp_rwmtx_t *rwmtx)
+{
+
+}
+
+#endif
+
+ERTS_GLB_INLINE int
+erts_smp_lc_rwmtx_is_rlocked(erts_smp_rwmtx_t *mtx)
+{
+#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK)
+ return erts_lc_rwmtx_is_rlocked(mtx);
+#else
+ return 0;
+#endif
+}
+
+ERTS_GLB_INLINE int
+erts_smp_lc_rwmtx_is_rwlocked(erts_smp_rwmtx_t *mtx)
+{
+#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK)
+ return erts_lc_rwmtx_is_rwlocked(mtx);
+#else
+ return 0;
+#endif
+}
+
+ERTS_GLB_INLINE void
+erts_smp_atomic_init(erts_smp_atomic_t *var, long i)
+{
+#ifdef ERTS_SMP
+ erts_atomic_init(var, i);
+#else
+ *var = i;
+#endif
+}
+
+ERTS_GLB_INLINE void
+erts_smp_atomic_set(erts_smp_atomic_t *var, long i)
+{
+#ifdef ERTS_SMP
+ erts_atomic_set(var, i);
+#else
+ *var = i;
+#endif
+}
+
+ERTS_GLB_INLINE long
+erts_smp_atomic_read(erts_smp_atomic_t *var)
+{
+#ifdef ERTS_SMP
+ return erts_atomic_read(var);
+#else
+ return *var;
+#endif
+}
+
+ERTS_GLB_INLINE long
+erts_smp_atomic_inctest(erts_smp_atomic_t *incp)
+{
+#ifdef ERTS_SMP
+ return erts_atomic_inctest(incp);
+#else
+ return ++(*incp);
+#endif
+}
+
+ERTS_GLB_INLINE long
+erts_smp_atomic_dectest(erts_smp_atomic_t *decp)
+{
+#ifdef ERTS_SMP
+ return erts_atomic_dectest(decp);
+#else
+ return --(*decp);
+#endif
+}
+
+ERTS_GLB_INLINE void
+erts_smp_atomic_inc(erts_smp_atomic_t *incp)
+{
+#ifdef ERTS_SMP
+ erts_atomic_inc(incp);
+#else
+ ++(*incp);
+#endif
+}
+
+ERTS_GLB_INLINE void
+erts_smp_atomic_dec(erts_smp_atomic_t *decp)
+{
+#ifdef ERTS_SMP
+ erts_atomic_dec(decp);
+#else
+ --(*decp);
+#endif
+}
+
+ERTS_GLB_INLINE long
+erts_smp_atomic_addtest(erts_smp_atomic_t *addp, long i)
+{
+#ifdef ERTS_SMP
+ return erts_atomic_addtest(addp, i);
+#else
+ return *addp += i;
+#endif
+}
+
+ERTS_GLB_INLINE void
+erts_smp_atomic_add(erts_smp_atomic_t *addp, long i)
+{
+#ifdef ERTS_SMP
+ erts_atomic_add(addp, i);
+#else
+ *addp += i;
+#endif
+}
+
+ERTS_GLB_INLINE long
+erts_smp_atomic_xchg(erts_smp_atomic_t *xchgp, long new)
+{
+#ifdef ERTS_SMP
+ return erts_atomic_xchg(xchgp, new);
+#else
+ long old;
+ old = *xchgp;
+ *xchgp = new;
+ return old;
+#endif
+}
+
+ERTS_GLB_INLINE long
+erts_smp_atomic_cmpxchg(erts_smp_atomic_t *xchgp, long new, long expected)
+{
+#ifdef ERTS_SMP
+ return erts_atomic_cmpxchg(xchgp, new, expected);
+#else
+ long old = *xchgp;
+ if (old == expected)
+ *xchgp = new;
+ return old;
+#endif
+}
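+
+/*
+ * The cmpxchg above returns the value found in *xchgp; the swap took
+ * effect iff that value equals 'expected'. Illustrative retry loop
+ * (counter and SOME_FLAG are hypothetical names):
+ *
+ *   long exp, new;
+ *   do {
+ *       exp = erts_smp_atomic_read(&counter);
+ *       new = exp | SOME_FLAG;
+ *   } while (erts_smp_atomic_cmpxchg(&counter, new, exp) != exp);
+ */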
+
+ERTS_GLB_INLINE long
+erts_smp_atomic_bor(erts_smp_atomic_t *var, long mask)
+{
+#ifdef ERTS_SMP
+ return erts_atomic_bor(var, mask);
+#else
+ long old;
+ old = *var;
+ *var |= mask;
+ return old;
+#endif
+}
+
+ERTS_GLB_INLINE long
+erts_smp_atomic_band(erts_smp_atomic_t *var, long mask)
+{
+#ifdef ERTS_SMP
+ return erts_atomic_band(var, mask);
+#else
+ long old;
+ old = *var;
+ *var &= mask;
+ return old;
+#endif
+}
+
+ERTS_GLB_INLINE void
+erts_smp_spinlock_init_x(erts_smp_spinlock_t *lock, char *name, Eterm extra)
+{
+#ifdef ERTS_SMP
+ erts_spinlock_init_x(lock, name, extra);
+#else
+ (void)lock;
+#endif
+}
+
+ERTS_GLB_INLINE void
+erts_smp_spinlock_init(erts_smp_spinlock_t *lock, char *name)
+{
+#ifdef ERTS_SMP
+ erts_spinlock_init(lock, name);
+#else
+ (void)lock;
+#endif
+}
+
+ERTS_GLB_INLINE void
+erts_smp_spinlock_destroy(erts_smp_spinlock_t *lock)
+{
+#ifdef ERTS_SMP
+ erts_spinlock_destroy(lock);
+#else
+ (void)lock;
+#endif
+}
+
+ERTS_GLB_INLINE void
+erts_smp_spin_unlock(erts_smp_spinlock_t *lock)
+{
+#ifdef ERTS_SMP
+ erts_spin_unlock(lock);
+#else
+ (void)lock;
+#endif
+}
+
+ERTS_GLB_INLINE void
+#ifdef ERTS_ENABLE_LOCK_COUNT
+erts_smp_spin_lock_x(erts_smp_spinlock_t *lock, char *file, unsigned int line)
+#else
+erts_smp_spin_lock(erts_smp_spinlock_t *lock)
+#endif
+{
+#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_COUNT)
+ erts_spin_lock_x(lock, file, line);
+#elif defined(ERTS_SMP)
+ erts_spin_lock(lock);
+#else
+ (void)lock;
+#endif
+}
+
+ERTS_GLB_INLINE int
+erts_smp_lc_spinlock_is_locked(erts_smp_spinlock_t *lock)
+{
+#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK)
+ return erts_lc_spinlock_is_locked(lock);
+#else
+ return 0;
+#endif
+}
+
+ERTS_GLB_INLINE void
+erts_smp_rwlock_init_x(erts_smp_rwlock_t *lock, char *name, Eterm extra)
+{
+#ifdef ERTS_SMP
+ erts_rwlock_init_x(lock, name, extra);
+#else
+ (void)lock;
+#endif
+}
+
+ERTS_GLB_INLINE void
+erts_smp_rwlock_init(erts_smp_rwlock_t *lock, char *name)
+{
+#ifdef ERTS_SMP
+ erts_rwlock_init(lock, name);
+#else
+ (void)lock;
+#endif
+}
+
+ERTS_GLB_INLINE void
+erts_smp_rwlock_destroy(erts_smp_rwlock_t *lock)
+{
+#ifdef ERTS_SMP
+ erts_rwlock_destroy(lock);
+#else
+ (void)lock;
+#endif
+}
+
+ERTS_GLB_INLINE void
+erts_smp_read_unlock(erts_smp_rwlock_t *lock)
+{
+#ifdef ERTS_SMP
+ erts_read_unlock(lock);
+#else
+ (void)lock;
+#endif
+}
+
+ERTS_GLB_INLINE void
+#ifdef ERTS_ENABLE_LOCK_COUNT
+erts_smp_read_lock_x(erts_smp_rwlock_t *lock, char *file, unsigned int line)
+#else
+erts_smp_read_lock(erts_smp_rwlock_t *lock)
+#endif
+{
+#if defined(ERTS_ENABLE_LOCK_COUNT) && defined(ERTS_SMP)
+ erts_read_lock_x(lock, file, line);
+#elif defined(ERTS_SMP)
+ erts_read_lock(lock);
+#else
+ (void)lock;
+#endif
+}
+
+ERTS_GLB_INLINE void
+erts_smp_write_unlock(erts_smp_rwlock_t *lock)
+{
+#ifdef ERTS_SMP
+ erts_write_unlock(lock);
+#else
+ (void)lock;
+#endif
+}
+
+ERTS_GLB_INLINE void
+#ifdef ERTS_ENABLE_LOCK_COUNT
+erts_smp_write_lock_x(erts_smp_rwlock_t *lock, char *file, unsigned int line)
+#else
+erts_smp_write_lock(erts_smp_rwlock_t *lock)
+#endif
+{
+#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_COUNT)
+ erts_write_lock_x(lock, file, line);
+#elif defined(ERTS_SMP)
+ erts_write_lock(lock);
+#else
+ (void)lock;
+#endif
+}
+
+ERTS_GLB_INLINE int
+erts_smp_lc_rwlock_is_rlocked(erts_smp_rwlock_t *lock)
+{
+#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK)
+ return erts_lc_rwlock_is_rlocked(lock);
+#else
+ return 0;
+#endif
+}
+
+ERTS_GLB_INLINE int
+erts_smp_lc_rwlock_is_rwlocked(erts_smp_rwlock_t *lock)
+{
+#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK)
+ return erts_lc_rwlock_is_rwlocked(lock);
+#else
+ return 0;
+#endif
+}
+
+ERTS_GLB_INLINE void
+erts_smp_thr_time_now(erts_smp_thr_timeval_t *time)
+{
+#ifdef ERTS_SMP
+ erts_thr_time_now(time);
+#endif
+}
+
+ERTS_GLB_INLINE void
+erts_smp_tsd_key_create(erts_smp_tsd_key_t *keyp)
+{
+#ifdef ERTS_SMP
+ erts_tsd_key_create(keyp);
+#endif
+}
+
+ERTS_GLB_INLINE void
+erts_smp_tsd_key_delete(erts_smp_tsd_key_t key)
+{
+#ifdef ERTS_SMP
+ erts_tsd_key_delete(key);
+#endif
+}
+
+ERTS_GLB_INLINE void
+erts_smp_tsd_set(erts_smp_tsd_key_t key, void *value)
+{
+#ifdef ERTS_SMP
+ erts_tsd_set(key, value);
+#endif
+}
+
+ERTS_GLB_INLINE void *
+erts_smp_tsd_get(erts_smp_tsd_key_t key)
+{
+#ifdef ERTS_SMP
+ return erts_tsd_get(key);
+#else
+ return NULL;
+#endif
+}
+
+ERTS_GLB_INLINE void
+erts_smp_gate_init(erts_smp_gate_t *gp)
+{
+#ifdef ERTS_SMP
+ erts_gate_init((erts_gate_t *) gp);
+#endif
+}
+
+ERTS_GLB_INLINE void
+erts_smp_gate_destroy(erts_smp_gate_t *gp)
+{
+#ifdef ERTS_SMP
+ erts_gate_destroy((erts_gate_t *) gp);
+#endif
+}
+
+ERTS_GLB_INLINE void
+erts_smp_gate_close(erts_smp_gate_t *gp)
+{
+#ifdef ERTS_SMP
+ erts_gate_close((erts_gate_t *) gp);
+#endif
+}
+
+ERTS_GLB_INLINE void
+erts_smp_gate_let_through(erts_smp_gate_t *gp, unsigned no)
+{
+#ifdef ERTS_SMP
+ erts_gate_let_through((erts_gate_t *) gp, no);
+#endif
+}
+
+ERTS_GLB_INLINE void
+erts_smp_gate_wait(erts_smp_gate_t *gp)
+{
+#ifdef ERTS_SMP
+ erts_gate_wait((erts_gate_t *) gp);
+#endif
+}
+
+ERTS_GLB_INLINE void
+erts_smp_gate_swait(erts_smp_gate_t *gp, int spincount)
+{
+#ifdef ERTS_SMP
+ erts_gate_swait((erts_gate_t *) gp, spincount);
+#endif
+}
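+
+/*
+ * Illustrative gate usage (a sketch; the let-through semantics are
+ * inferred from the function names): a controller closes the gate,
+ * waiting threads block in erts_smp_gate_wait(), and the controller
+ * later releases a given number of them.
+ *
+ *   erts_smp_gate_t gate;
+ *   erts_smp_gate_init(&gate);
+ *   erts_smp_gate_close(&gate);
+ *   ... waiters call erts_smp_gate_wait(&gate) ...
+ *   erts_smp_gate_let_through(&gate, 2);
+ */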
+
+#ifdef ERTS_THR_HAVE_SIG_FUNCS
+#define ERTS_SMP_THR_HAVE_SIG_FUNCS 1
+
+ERTS_GLB_INLINE void
+erts_smp_thr_sigmask(int how, const sigset_t *set, sigset_t *oset)
+{
+#ifdef ERTS_SMP
+ erts_thr_sigmask(how, set, oset);
+#endif
+}
+
+ERTS_GLB_INLINE void
+erts_smp_thr_sigwait(const sigset_t *set, int *sig)
+{
+#ifdef ERTS_SMP
+ erts_thr_sigwait(set, sig);
+#endif
+}
+
+#endif /* #ifdef ERTS_THR_HAVE_SIG_FUNCS */
+
+#endif /* #if ERTS_GLB_INLINE_INCL_FUNC_DEF */
+
+#endif /* ERL_SMP_H */
diff --git a/erts/emulator/beam/erl_sock.h b/erts/emulator/beam/erl_sock.h
new file mode 100644
index 0000000000..7ae6116dc5
--- /dev/null
+++ b/erts/emulator/beam/erl_sock.h
@@ -0,0 +1,44 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2003-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * A *very* limited socket interface exported by inet_drv.c.
+ * Used by erl_mtrace.c.
+ */
+
+#ifndef ERL_SOCK_H_
+#define ERL_SOCK_H_
+
+#ifdef __WIN32__
+#include <winsock2.h>
+typedef SOCKET erts_sock_t;
+#else
+typedef int erts_sock_t;
+#endif
+
+#define ERTS_SOCK_INVALID_SOCKET -1
+
+erts_sock_t erts_sock_open(void);
+void erts_sock_close(erts_sock_t);
+int erts_sock_connect(erts_sock_t, byte *, int, Uint16);
+Sint erts_sock_send(erts_sock_t, const void *, Sint);
+int erts_sock_gethostname(char *, int);
+int erts_sock_errno(void);
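+
+/*
+ * Illustrative call sequence (editor's sketch; that erts_sock_connect()
+ * takes an IP address buffer, its length, and a port number is an
+ * assumption based on the prototypes above):
+ *
+ *   erts_sock_t s = erts_sock_open();
+ *   if (s != ERTS_SOCK_INVALID_SOCKET
+ *       && erts_sock_connect(s, ip, 4, port)) {
+ *       erts_sock_send(s, buf, len);
+ *       erts_sock_close(s);
+ *   }
+ */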
+
+#endif
diff --git a/erts/emulator/beam/erl_sys_driver.h b/erts/emulator/beam/erl_sys_driver.h
new file mode 100644
index 0000000000..d429d0ce96
--- /dev/null
+++ b/erts/emulator/beam/erl_sys_driver.h
@@ -0,0 +1,44 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2001-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Include file for erlang driver writers.
+ */
+
+#ifndef __ERL_SYS_DRIVER_H__
+#define __ERL_SYS_DRIVER_H__
+
+#ifdef __ERL_DRIVER_H__
+#error erl_sys_driver.h cannot be included after erl_driver.h
+#endif
+
+#define ERL_SYS_DRV
+
+typedef long ErlDrvEvent; /* An event to be selected on. */
+typedef long ErlDrvPort; /* A port descriptor. */
+
+/* typedef struct _SysDriverOpts SysDriverOpts; defined in sys.h */
+
+#include "erl_driver.h"
+
+#endif
+
+
+
+
diff --git a/erts/emulator/beam/erl_term.c b/erts/emulator/beam/erl_term.c
new file mode 100644
index 0000000000..2924abbd51
--- /dev/null
+++ b/erts/emulator/beam/erl_term.c
@@ -0,0 +1,174 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2000-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+#if HAVE_CONFIG_H
+#include "config.h"
+#endif
+#include "sys.h"
+#include "erl_vm.h"
+#include "global.h"
+#include <stdlib.h>
+#include <stdio.h>
+
+__decl_noreturn static void __noreturn
+et_abort(const char *expr, const char *file, unsigned line)
+{
+#ifdef EXIT_ON_ET_ABORT
+ static int have_been_called = 0;
+
+ if (have_been_called) {
+ abort();
+ } else {
+ /*
+ * Prevent infinite loop.
+ */
+ have_been_called = 1;
+ erl_exit(1, "TYPE ASSERTION FAILED, file %s, line %u: %s\n", file, line, expr);
+ }
+#else
+ erts_fprintf(stderr, "TYPE ASSERTION FAILED, file %s, line %u: %s\n", file, line, expr);
+ abort();
+#endif
+}
+
+#if ET_DEBUG
+#define ET_ASSERT(expr,file,line) \
+do { \
+ if (!(expr)) \
+ et_abort(#expr, file, line); \
+} while(0)
+#else
+#define ET_ASSERT(expr,file,line) do { } while(0)
+#endif
+
+#if ET_DEBUG
+unsigned tag_val_def_debug(Eterm x, const char *file, unsigned line)
+#else
+unsigned tag_val_def(Eterm x)
+#define file __FILE__
+#define line __LINE__
+#endif
+{
+ static char msg[32];
+
+ switch (x & _TAG_PRIMARY_MASK) {
+ case TAG_PRIMARY_LIST: return LIST_DEF;
+ case TAG_PRIMARY_BOXED: {
+ Eterm hdr = *boxed_val(x);
+ ET_ASSERT(is_header(hdr),file,line);
+ switch ((hdr & _TAG_HEADER_MASK) >> _TAG_PRIMARY_SIZE) {
+ case (_TAG_HEADER_ARITYVAL >> _TAG_PRIMARY_SIZE): return TUPLE_DEF;
+ case (_TAG_HEADER_POS_BIG >> _TAG_PRIMARY_SIZE): return BIG_DEF;
+ case (_TAG_HEADER_NEG_BIG >> _TAG_PRIMARY_SIZE): return BIG_DEF;
+ case (_TAG_HEADER_REF >> _TAG_PRIMARY_SIZE): return REF_DEF;
+ case (_TAG_HEADER_FLOAT >> _TAG_PRIMARY_SIZE): return FLOAT_DEF;
+ case (_TAG_HEADER_EXPORT >> _TAG_PRIMARY_SIZE): return EXPORT_DEF;
+ case (_TAG_HEADER_FUN >> _TAG_PRIMARY_SIZE): return FUN_DEF;
+ case (_TAG_HEADER_EXTERNAL_PID >> _TAG_PRIMARY_SIZE): return EXTERNAL_PID_DEF;
+ case (_TAG_HEADER_EXTERNAL_PORT >> _TAG_PRIMARY_SIZE): return EXTERNAL_PORT_DEF;
+ case (_TAG_HEADER_EXTERNAL_REF >> _TAG_PRIMARY_SIZE): return EXTERNAL_REF_DEF;
+ default: return BINARY_DEF;
+ }
+ break;
+ }
+ case TAG_PRIMARY_IMMED1: {
+ switch ((x & _TAG_IMMED1_MASK) >> _TAG_PRIMARY_SIZE) {
+ case (_TAG_IMMED1_PID >> _TAG_PRIMARY_SIZE): return PID_DEF;
+ case (_TAG_IMMED1_PORT >> _TAG_PRIMARY_SIZE): return PORT_DEF;
+ case (_TAG_IMMED1_IMMED2 >> _TAG_PRIMARY_SIZE): {
+ switch ((x & _TAG_IMMED2_MASK) >> _TAG_IMMED1_SIZE) {
+ case (_TAG_IMMED2_ATOM >> _TAG_IMMED1_SIZE): return ATOM_DEF;
+ case (_TAG_IMMED2_NIL >> _TAG_IMMED1_SIZE): return NIL_DEF;
+ }
+ break;
+ }
+ case (_TAG_IMMED1_SMALL >> _TAG_PRIMARY_SIZE): return SMALL_DEF;
+ }
+ break;
+ }
+ }
+ sprintf(msg, "tag_val_def: %#lx", x);
+ et_abort(msg, file, line);
+#undef file
+#undef line
+}
+
+/*
+ * XXX: define NUMBER_CODE() here when new representation is used
+ */
+
+#if ET_DEBUG
+#define ET_DEFINE_CHECKED(FUNTY,FUN,ARGTY,PRECOND) \
+FUNTY checked_##FUN(ARGTY x, const char *file, unsigned line) \
+{ \
+ ET_ASSERT(PRECOND(x),file,line); \
+ return _unchecked_##FUN(x); \
+}
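+
+/*
+ * For example, the instantiation ET_DEFINE_CHECKED(Eterm*,boxed_val,
+ * Eterm,is_boxed) below expands to:
+ *
+ *   Eterm* checked_boxed_val(Eterm x, const char *file, unsigned line)
+ *   {
+ *       ET_ASSERT(is_boxed(x),file,line);
+ *       return _unchecked_boxed_val(x);
+ *   }
+ */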
+
+ET_DEFINE_CHECKED(Eterm,make_boxed,Eterm*,_is_aligned);
+ET_DEFINE_CHECKED(int,is_boxed,Eterm,!is_header);
+ET_DEFINE_CHECKED(Eterm*,boxed_val,Eterm,is_boxed);
+ET_DEFINE_CHECKED(Eterm,make_list,Eterm*,_is_aligned);
+ET_DEFINE_CHECKED(int,is_not_list,Eterm,!is_header);
+ET_DEFINE_CHECKED(Eterm*,list_val,Eterm,is_list);
+ET_DEFINE_CHECKED(Uint,unsigned_val,Eterm,is_small);
+ET_DEFINE_CHECKED(Sint,signed_val,Eterm,is_small);
+ET_DEFINE_CHECKED(Uint,atom_val,Eterm,is_atom);
+ET_DEFINE_CHECKED(Uint,header_arity,Eterm,is_header);
+ET_DEFINE_CHECKED(Uint,arityval,Eterm,is_arity_value);
+ET_DEFINE_CHECKED(Uint,thing_arityval,Eterm,is_thing);
+ET_DEFINE_CHECKED(Uint,thing_subtag,Eterm,is_thing);
+ET_DEFINE_CHECKED(Eterm*,binary_val,Eterm,is_binary);
+ET_DEFINE_CHECKED(Eterm*,fun_val,Eterm,is_fun);
+ET_DEFINE_CHECKED(int,bignum_header_is_neg,Eterm,_is_bignum_header);
+ET_DEFINE_CHECKED(Eterm,bignum_header_neg,Eterm,_is_bignum_header);
+ET_DEFINE_CHECKED(Uint,bignum_header_arity,Eterm,_is_bignum_header);
+ET_DEFINE_CHECKED(Eterm*,big_val,Eterm,is_big);
+ET_DEFINE_CHECKED(Eterm*,float_val,Eterm,is_float);
+ET_DEFINE_CHECKED(Eterm*,tuple_val,Eterm,is_tuple);
+ET_DEFINE_CHECKED(Uint,internal_pid_data,Eterm,is_internal_pid);
+ET_DEFINE_CHECKED(struct erl_node_*,internal_pid_node,Eterm,is_internal_pid);
+ET_DEFINE_CHECKED(Uint,internal_port_data,Eterm,is_internal_port);
+ET_DEFINE_CHECKED(struct erl_node_*,internal_port_node,Eterm,is_internal_port);
+ET_DEFINE_CHECKED(Eterm*,internal_ref_val,Eterm,is_internal_ref);
+ET_DEFINE_CHECKED(Uint,internal_ref_data_words,Eterm,is_internal_ref);
+ET_DEFINE_CHECKED(Uint32*,internal_ref_data,Eterm,is_internal_ref);
+ET_DEFINE_CHECKED(struct erl_node_*,internal_ref_node,Eterm,is_internal_ref);
+ET_DEFINE_CHECKED(Eterm*,external_val,Eterm,is_external);
+ET_DEFINE_CHECKED(Uint,external_data_words,Eterm,is_external);
+ET_DEFINE_CHECKED(Uint,external_pid_data_words,Eterm,is_external_pid);
+ET_DEFINE_CHECKED(Uint,external_pid_data,Eterm,is_external_pid);
+ET_DEFINE_CHECKED(struct erl_node_*,external_pid_node,Eterm,is_external_pid);
+ET_DEFINE_CHECKED(Uint,external_port_data_words,Eterm,is_external_port);
+ET_DEFINE_CHECKED(Uint,external_port_data,Eterm,is_external_port);
+ET_DEFINE_CHECKED(struct erl_node_*,external_port_node,Eterm,is_external_port);
+ET_DEFINE_CHECKED(Uint,external_ref_data_words,Eterm,is_external_ref);
+ET_DEFINE_CHECKED(Uint32*,external_ref_data,Eterm,is_external_ref);
+ET_DEFINE_CHECKED(struct erl_node_*,external_ref_node,Eterm,is_external_ref);
+ET_DEFINE_CHECKED(Eterm*,export_val,Eterm,is_export);
+
+ET_DEFINE_CHECKED(Eterm,make_cp,Uint*,_is_aligned);
+ET_DEFINE_CHECKED(Uint*,cp_val,Eterm,is_CP);
+ET_DEFINE_CHECKED(Uint,catch_val,Eterm,is_catch);
+ET_DEFINE_CHECKED(Uint,x_reg_offset,Uint,_is_xreg);
+ET_DEFINE_CHECKED(Uint,y_reg_offset,Uint,_is_yreg);
+ET_DEFINE_CHECKED(Uint,x_reg_index,Uint,_is_xreg);
+ET_DEFINE_CHECKED(Uint,y_reg_index,Uint,_is_yreg);
+
+#endif /* ET_DEBUG */
diff --git a/erts/emulator/beam/erl_term.h b/erts/emulator/beam/erl_term.h
new file mode 100644
index 0000000000..b0a57a3ebe
--- /dev/null
+++ b/erts/emulator/beam/erl_term.h
@@ -0,0 +1,1056 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2000-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+#ifndef __ERL_TERM_H
+#define __ERL_TERM_H
+
+struct erl_node_; /* Declared in erl_node_tables.h */
+
+/*
+ * Defining ET_DEBUG to 1 causes all type-specific data access
+ * macros to perform runtime type checking. This is very useful
+ * during development but reduces performance, so ET_DEBUG should
+ * be disabled during benchmarking or release.
+ */
+/* #define ET_DEBUG 1 */
+#ifndef ET_DEBUG
+# ifdef DEBUG
+# define ET_DEBUG 1
+# else
+# define ET_DEBUG 0
+# endif
+#endif
+
+#if ET_DEBUG
+#define _ET_DECLARE_CHECKED(TF,F,TX) extern TF checked_##F(TX,const char*,unsigned)
+#define _ET_APPLY(F,X) checked_##F(X,__FILE__,__LINE__)
+#else
+#define _ET_DECLARE_CHECKED(TF,F,TX)
+#define _ET_APPLY(F,X) _unchecked_##F(X)
+#endif
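+
+/*
+ * Illustrative dispatch: an accessor such as boxed_val(x) (declared
+ * below) resolves to checked_boxed_val(x,__FILE__,__LINE__), a runtime
+ * type-checked function in erl_term.c, when ET_DEBUG is set, and to the
+ * plain macro _unchecked_boxed_val(x) otherwise.
+ */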
+
+#define _TAG_PRIMARY_SIZE 2
+#define _TAG_PRIMARY_MASK 0x3
+#define TAG_PRIMARY_HEADER 0x0
+#define TAG_PRIMARY_LIST 0x1
+#define TAG_PRIMARY_BOXED 0x2
+#define TAG_PRIMARY_IMMED1 0x3
+
+#define primary_tag(x) ((x) & _TAG_PRIMARY_MASK)
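+
+/*
+ * Example (illustrative): primary_tag(t) is TAG_PRIMARY_LIST for a cons
+ * term, TAG_PRIMARY_BOXED for tuples, bignums, floats, binaries etc.,
+ * and TAG_PRIMARY_IMMED1 for smalls, atoms, NIL, pids and ports.
+ */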
+
+#define _TAG_IMMED1_SIZE 4
+#define _TAG_IMMED1_MASK 0xF
+#define _TAG_IMMED1_PID ((0x0 << _TAG_PRIMARY_SIZE) | TAG_PRIMARY_IMMED1)
+#define _TAG_IMMED1_PORT ((0x1 << _TAG_PRIMARY_SIZE) | TAG_PRIMARY_IMMED1)
+#define _TAG_IMMED1_IMMED2 ((0x2 << _TAG_PRIMARY_SIZE) | TAG_PRIMARY_IMMED1)
+#define _TAG_IMMED1_SMALL ((0x3 << _TAG_PRIMARY_SIZE) | TAG_PRIMARY_IMMED1)
+
+#define _TAG_IMMED2_SIZE 6
+#define _TAG_IMMED2_MASK 0x3F
+#define _TAG_IMMED2_ATOM ((0x0 << _TAG_IMMED1_SIZE) | _TAG_IMMED1_IMMED2)
+#define _TAG_IMMED2_CATCH ((0x1 << _TAG_IMMED1_SIZE) | _TAG_IMMED1_IMMED2)
+#define _TAG_IMMED2_NIL ((0x3 << _TAG_IMMED1_SIZE) | _TAG_IMMED1_IMMED2)
+
+/*
+ * HEADER representation:
+ *
+ * aaaaaaaaaaaaaaaaaaaaaaaaaatttt00 arity:26, tag:4
+ *
+ * HEADER tags:
+ *
+ * 0000 ARITYVAL
+ * 0001 BINARY_AGGREGATE |
+ * 001x BIGNUM with sign bit |
+ * 0100 REF |
+ * 0101 FUN | THINGS
+ * 0110 FLONUM |
+ * 0111 EXPORT |
+ * 1000 REFC_BINARY | |
+ * 1001 HEAP_BINARY | BINARIES |
+ * 1010 SUB_BINARY | |
+ * 1011 Not used
+ * 1100 EXTERNAL_PID | |
+ * 1101 EXTERNAL_PORT | EXTERNAL THINGS |
+ * 1110 EXTERNAL_REF | |
+ * 1111 Not used
+ *
+ * COMMENTS:
+ *
+ * - The tag is zero for arityval and non-zero for thing headers.
+ * - A single bit differentiates between positive and negative bignums.
+ * - If more tags are needed, the REF and EXTERNAL_REF tags could probably
+ *   be combined into one tag.
+ *
+ * XXX: globally replace XXX_SUBTAG with TAG_HEADER_XXX
+ */
+#define ARITYVAL_SUBTAG (0x0 << _TAG_PRIMARY_SIZE) /* TUPLE */
+#define BIN_MATCHSTATE_SUBTAG (0x1 << _TAG_PRIMARY_SIZE)
+#define POS_BIG_SUBTAG (0x2 << _TAG_PRIMARY_SIZE) /* BIG: tags 2&3 */
+#define NEG_BIG_SUBTAG (0x3 << _TAG_PRIMARY_SIZE) /* BIG: tags 2&3 */
+#define _BIG_SIGN_BIT (0x1 << _TAG_PRIMARY_SIZE)
+#define REF_SUBTAG (0x4 << _TAG_PRIMARY_SIZE) /* REF */
+#define FUN_SUBTAG (0x5 << _TAG_PRIMARY_SIZE) /* FUN */
+#define FLOAT_SUBTAG (0x6 << _TAG_PRIMARY_SIZE) /* FLOAT */
+#define EXPORT_SUBTAG (0x7 << _TAG_PRIMARY_SIZE) /* EXPORT */
+#define _BINARY_XXX_MASK (0x3 << _TAG_PRIMARY_SIZE)
+#define REFC_BINARY_SUBTAG (0x8 << _TAG_PRIMARY_SIZE) /* BINARY */
+#define HEAP_BINARY_SUBTAG (0x9 << _TAG_PRIMARY_SIZE) /* BINARY */
+#define SUB_BINARY_SUBTAG (0xA << _TAG_PRIMARY_SIZE) /* BINARY */
+#define EXTERNAL_PID_SUBTAG (0xC << _TAG_PRIMARY_SIZE) /* EXTERNAL_PID */
+#define EXTERNAL_PORT_SUBTAG (0xD << _TAG_PRIMARY_SIZE) /* EXTERNAL_PORT */
+#define EXTERNAL_REF_SUBTAG (0xE << _TAG_PRIMARY_SIZE) /* EXTERNAL_REF */
+
+
+#define _TAG_HEADER_ARITYVAL (TAG_PRIMARY_HEADER|ARITYVAL_SUBTAG)
+#define _TAG_HEADER_FUN (TAG_PRIMARY_HEADER|FUN_SUBTAG)
+#define _TAG_HEADER_POS_BIG (TAG_PRIMARY_HEADER|POS_BIG_SUBTAG)
+#define _TAG_HEADER_NEG_BIG (TAG_PRIMARY_HEADER|NEG_BIG_SUBTAG)
+#define _TAG_HEADER_FLOAT (TAG_PRIMARY_HEADER|FLOAT_SUBTAG)
+#define _TAG_HEADER_EXPORT (TAG_PRIMARY_HEADER|EXPORT_SUBTAG)
+#define _TAG_HEADER_REF (TAG_PRIMARY_HEADER|REF_SUBTAG)
+#define _TAG_HEADER_REFC_BIN (TAG_PRIMARY_HEADER|REFC_BINARY_SUBTAG)
+#define _TAG_HEADER_HEAP_BIN (TAG_PRIMARY_HEADER|HEAP_BINARY_SUBTAG)
+#define _TAG_HEADER_SUB_BIN (TAG_PRIMARY_HEADER|SUB_BINARY_SUBTAG)
+#define _TAG_HEADER_EXTERNAL_PID (TAG_PRIMARY_HEADER|EXTERNAL_PID_SUBTAG)
+#define _TAG_HEADER_EXTERNAL_PORT (TAG_PRIMARY_HEADER|EXTERNAL_PORT_SUBTAG)
+#define _TAG_HEADER_EXTERNAL_REF (TAG_PRIMARY_HEADER|EXTERNAL_REF_SUBTAG)
+#define _TAG_HEADER_BIN_MATCHSTATE (TAG_PRIMARY_HEADER|BIN_MATCHSTATE_SUBTAG)
+
+
+#define _TAG_HEADER_MASK 0x3F
+#define _HEADER_SUBTAG_MASK 0x3C /* 4 bits for subtag */
+#define _HEADER_ARITY_OFFS 6
+
+#define header_is_transparent(x) \
+ (((x) & (_HEADER_SUBTAG_MASK)) == ARITYVAL_SUBTAG)
+#define header_is_arityval(x) (((x) & _HEADER_SUBTAG_MASK) == ARITYVAL_SUBTAG)
+#define header_is_thing(x) (!header_is_transparent((x)))
+#define header_is_bin_matchstate(x) ((((x) & (_HEADER_SUBTAG_MASK)) == BIN_MATCHSTATE_SUBTAG))
+
+#define _CPMASK 0x3
+
+/* immediate object access methods */
+#define is_immed(x) (((x) & _TAG_PRIMARY_MASK) == TAG_PRIMARY_IMMED1)
+#define is_not_immed(x) (!is_immed((x)))
+#define IS_CONST(x) is_immed((x))
+#if TAG_PRIMARY_IMMED1 == _TAG_PRIMARY_MASK
+#define is_both_immed(x,y) is_immed(((x)&(y)))
+#else
+#define is_both_immed(x,y) (is_immed((x)) && is_immed((y)))
+#endif
+#define is_not_both_immed(x,y) (!is_both_immed((x),(y)))
+
+
+/* boxed object access methods */
+#define _is_aligned(x) (((Uint)(x) & 0x3) == 0)
+#define _unchecked_make_boxed(x) ((Uint)(x) + TAG_PRIMARY_BOXED)
+_ET_DECLARE_CHECKED(Eterm,make_boxed,Eterm*);
+#define make_boxed(x) _ET_APPLY(make_boxed,(x))
+#if 1
+#define _is_not_boxed(x) ((x) & (_TAG_PRIMARY_MASK-TAG_PRIMARY_BOXED))
+#define _unchecked_is_boxed(x) (!_is_not_boxed((x)))
+_ET_DECLARE_CHECKED(int,is_boxed,Eterm);
+#define is_boxed(x) _ET_APPLY(is_boxed,(x))
+#else
+#define is_boxed(x) (((x) & _TAG_PRIMARY_MASK) == TAG_PRIMARY_BOXED)
+#endif
+#define _unchecked_boxed_val(x) ((Eterm*)((x) - TAG_PRIMARY_BOXED))
+_ET_DECLARE_CHECKED(Eterm*,boxed_val,Eterm);
+#define boxed_val(x) _ET_APPLY(boxed_val,(x))
+
+/* cons cell ("list") access methods */
+#define _unchecked_make_list(x) ((Uint)(x) + TAG_PRIMARY_LIST)
+_ET_DECLARE_CHECKED(Eterm,make_list,Eterm*);
+#define make_list(x) _ET_APPLY(make_list,(x))
+#if 1
+#define _unchecked_is_not_list(x) ((x) & (_TAG_PRIMARY_MASK-TAG_PRIMARY_LIST))
+_ET_DECLARE_CHECKED(int,is_not_list,Eterm);
+#define is_not_list(x) _ET_APPLY(is_not_list,(x))
+#define is_list(x) (!is_not_list((x)))
+#else
+#define is_list(x) (((x) & _TAG_PRIMARY_MASK) == TAG_PRIMARY_LIST)
+#define is_not_list(x) (!is_list((x)))
+#endif
+#define _unchecked_list_val(x) ((Eterm*)((x) - TAG_PRIMARY_LIST))
+_ET_DECLARE_CHECKED(Eterm*,list_val,Eterm);
+#define list_val(x) _ET_APPLY(list_val,(x))
+
+#define CONS(hp, car, cdr) \
+ (CAR(hp)=(car), CDR(hp)=(cdr), make_list(hp))
+
+#define CAR(x) ((x)[0])
+#define CDR(x) ((x)[1])
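+
+/*
+ * Illustrative: building the list [1,2] on heap pointer hp (each cons
+ * cell is two words, so hp must have four words available):
+ *
+ *   Eterm tail = CONS(hp, make_small(2), NIL);
+ *   Eterm list = CONS(hp + 2, make_small(1), tail);
+ */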
+
+/* generic tagged pointer (boxed or list) access methods */
+#define _unchecked_ptr_val(x) ((Eterm*)((x) & ~((Uint) 0x3)))
+#define ptr_val(x) _unchecked_ptr_val((x)) /*XXX*/
+#define _unchecked_offset_ptr(x,offs) ((x)+((offs)*sizeof(Eterm)))
+#define offset_ptr(x,offs) _unchecked_offset_ptr(x,offs) /*XXX*/
+
+/* fixnum ("small") access methods */
+#if defined(ARCH_64)
+#define SMALL_BITS (64-4)
+#define SMALL_DIGITS (17)
+#else
+#define SMALL_BITS (28)
+#define SMALL_DIGITS (8)
+#endif
+#define MAX_SMALL ((1L << (SMALL_BITS-1))-1)
+#define MIN_SMALL (-(1L << (SMALL_BITS-1)))
+#define make_small(x) (((Uint)(x) << _TAG_IMMED1_SIZE) + _TAG_IMMED1_SMALL)
+#define is_small(x) (((x) & _TAG_IMMED1_MASK) == _TAG_IMMED1_SMALL)
+#define is_not_small(x) (!is_small((x)))
+#define is_byte(x) (((x) & ((~(Uint)0 << (_TAG_IMMED1_SIZE+8)) + _TAG_IMMED1_MASK)) == _TAG_IMMED1_SMALL)
+#define is_valid_bit_size(x) (((Sint)(x)) >= 0 && ((x) & 0x7F) == _TAG_IMMED1_SMALL)
+#define is_not_valid_bit_size(x) (!is_valid_bit_size((x)))
+#define MY_IS_SSMALL(x) (((Uint) (((x) >> (SMALL_BITS-1)) + 1)) < 2)
+#define _unchecked_unsigned_val(x) ((x) >> _TAG_IMMED1_SIZE)
+_ET_DECLARE_CHECKED(Uint,unsigned_val,Eterm);
+#define unsigned_val(x) _ET_APPLY(unsigned_val,(x))
+#define _unchecked_signed_val(x) ((Sint)(x) >> _TAG_IMMED1_SIZE)
+_ET_DECLARE_CHECKED(Sint,signed_val,Eterm);
+#define signed_val(x) _ET_APPLY(signed_val,(x))
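+
+/*
+ * Illustrative round-trip: for any Sint i with MY_IS_SSMALL(i), i.e.
+ * MIN_SMALL <= i <= MAX_SMALL, signed_val(make_small(i)) == i.
+ */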
+
+#if _TAG_IMMED1_SMALL == 0x0F
+#define is_both_small(x,y) (((x) & (y) & _TAG_IMMED1_MASK) == _TAG_IMMED1_SMALL)
+#elif _TAG_IMMED1_SMALL == 0x00
+#define is_both_small(x,y) ((((x)|(y)) & _TAG_IMMED1_MASK) == _TAG_IMMED1_SMALL)
+#else
+#define is_both_small(x,y) (is_small(x) && is_small(y))
+#endif
+
+/* NIL access methods */
+#define NIL ((~((Uint) 0) << _TAG_IMMED2_SIZE) | _TAG_IMMED2_NIL)
+#define is_nil(x) ((x) == NIL)
+#define is_not_nil(x) ((x) != NIL)
+
+#define MAX_ATOM_INDEX (~(~((Uint) 0) << (sizeof(Uint)*8 - _TAG_IMMED2_SIZE)))
+
+/* atom access methods */
+#define make_atom(x) ((Eterm)(((x) << _TAG_IMMED2_SIZE) + _TAG_IMMED2_ATOM))
+#define is_atom(x) (((x) & _TAG_IMMED2_MASK) == _TAG_IMMED2_ATOM)
+#define is_not_atom(x) (!is_atom(x))
+#define _unchecked_atom_val(x) ((x) >> _TAG_IMMED2_SIZE)
+_ET_DECLARE_CHECKED(Uint,atom_val,Eterm);
+#define atom_val(x) _ET_APPLY(atom_val,(x))
+
+/* header (arityval or thing) access methods */
+#define _make_header(sz,tag) ((Uint)(((sz) << _HEADER_ARITY_OFFS) + (tag)))
+#define is_header(x) (((x) & _TAG_PRIMARY_MASK) == TAG_PRIMARY_HEADER)
+#define _unchecked_header_arity(x) ((x) >> _HEADER_ARITY_OFFS)
+_ET_DECLARE_CHECKED(Uint,header_arity,Eterm);
+#define header_arity(x) _ET_APPLY(header_arity,(x))
+
+/* arityval access methods */
+#define make_arityval(sz) _make_header((sz),_TAG_HEADER_ARITYVAL)
+#define is_arity_value(x) (((x) & _TAG_HEADER_MASK) == _TAG_HEADER_ARITYVAL)
+#define is_not_arity_value(x) (!is_arity_value((x)))
+#define _unchecked_arityval(x) _unchecked_header_arity((x))
+_ET_DECLARE_CHECKED(Uint,arityval,Eterm);
+#define arityval(x) _ET_APPLY(arityval,(x))
+
+/* thing access methods */
+#define is_thing(x) (is_header((x)) && header_is_thing((x)))
+#define _unchecked_thing_arityval(x) _unchecked_header_arity((x))
+_ET_DECLARE_CHECKED(Uint,thing_arityval,Eterm);
+#define thing_arityval(x) _ET_APPLY(thing_arityval,(x))
+#define _unchecked_thing_subtag(x) ((x) & _HEADER_SUBTAG_MASK)
+_ET_DECLARE_CHECKED(Uint,thing_subtag,Eterm);
+#define thing_subtag(x) _ET_APPLY(thing_subtag,(x))
+
+/*
+ * Magic non-value object.
+ * Used as function return error and "absent value" indicator
+ * in the original runtime system. The new runtime system also
+ * uses it as forwarding marker for CONS cells.
+ *
+ * This value is 0 in the original runtime system, which unfortunately
+ * promotes sloppy programming practices. It also prevents some useful
+ * tag assignment schemes, e.g. using a 2-bit tag 00 for FIXNUM.
+ *
+ * To help find code which makes unwarranted assumptions about zero,
+ * we now use a non-zero bit-pattern in debug mode.
+ */
+#if ET_DEBUG
+#define THE_NON_VALUE _make_header(0,_TAG_HEADER_FLOAT)
+#else
+#define THE_NON_VALUE (0)
+#endif
+#define is_non_value(x) ((x) == THE_NON_VALUE)
+#define is_value(x) ((x) != THE_NON_VALUE)
+
+/* binary object access methods */
+#define is_binary_header(x) (((x) & (_TAG_HEADER_MASK-_BINARY_XXX_MASK)) == _TAG_HEADER_REFC_BIN)
+#define make_binary(x) make_boxed((Eterm*)(x))
+#define is_binary(x) (is_boxed((x)) && is_binary_header(*boxed_val((x))))
+#define is_not_binary(x) (!is_binary((x)))
+#define _unchecked_binary_val(x) _unchecked_boxed_val((x))
+_ET_DECLARE_CHECKED(Eterm*,binary_val,Eterm);
+#define binary_val(x) _ET_APPLY(binary_val,(x))
+
+/* process binaries stuff (special case of binaries) */
+#define HEADER_PROC_BIN _make_header(PROC_BIN_SIZE-1,_TAG_HEADER_REFC_BIN)
+
+/* fun & export objects */
+#define is_any_fun(x) (is_fun((x)) || is_export((x)))
+#define is_not_any_fun(x) (!is_any_fun((x)))
+
+/* fun objects */
+#define HEADER_FUN _make_header(ERL_FUN_SIZE-2,_TAG_HEADER_FUN)
+#define is_fun_header(x) ((x) == HEADER_FUN)
+#define make_fun(x) make_boxed((Eterm*)(x))
+#define is_fun(x) (is_boxed((x)) && is_fun_header(*boxed_val((x))))
+#define is_not_fun(x) (!is_fun((x)))
+#define _unchecked_fun_val(x) _unchecked_boxed_val((x))
+_ET_DECLARE_CHECKED(Eterm*,fun_val,Eterm);
+#define fun_val(x) _ET_APPLY(fun_val,(x))
+
+/* export access methods */
+#define make_export(x) make_boxed((x))
+#define is_export(x) (is_boxed((x)) && is_export_header(*boxed_val((x))))
+#define is_not_export(x) (!is_export((x)))
+#define _unchecked_export_val(x) _unchecked_boxed_val(x)
+_ET_DECLARE_CHECKED(Eterm*,export_val,Eterm);
+#define export_val(x) _ET_APPLY(export_val,(x))
+#define is_export_header(x) ((x) == HEADER_EXPORT)
+#define HEADER_EXPORT _make_header(1,_TAG_HEADER_EXPORT)
+
+/* bignum access methods */
+#define make_pos_bignum_header(sz) _make_header((sz),_TAG_HEADER_POS_BIG)
+#define make_neg_bignum_header(sz) _make_header((sz),_TAG_HEADER_NEG_BIG)
+#define _is_bignum_header(x) (((x) & (_TAG_HEADER_MASK-_BIG_SIGN_BIT)) == _TAG_HEADER_POS_BIG)
+#define _unchecked_bignum_header_is_neg(x) ((x) & _BIG_SIGN_BIT)
+_ET_DECLARE_CHECKED(int,bignum_header_is_neg,Eterm);
+#define bignum_header_is_neg(x) _ET_APPLY(bignum_header_is_neg,(x))
+#define _unchecked_bignum_header_neg(x) ((x) | _BIG_SIGN_BIT)
+_ET_DECLARE_CHECKED(Eterm,bignum_header_neg,Eterm);
+#define bignum_header_neg(x) _ET_APPLY(bignum_header_neg,(x))
+#define _unchecked_bignum_header_arity(x) _unchecked_header_arity((x))
+_ET_DECLARE_CHECKED(Uint,bignum_header_arity,Eterm);
+#define bignum_header_arity(x) _ET_APPLY(bignum_header_arity,(x))
+#define BIG_ARITY_MAX ((1 << 19)-1)
+#define make_big(x) make_boxed((x))
+#define is_big(x) (is_boxed((x)) && _is_bignum_header(*boxed_val((x))))
+#define is_not_big(x) (!is_big((x)))
+#define _unchecked_big_val(x) _unchecked_boxed_val((x))
+_ET_DECLARE_CHECKED(Eterm*,big_val,Eterm);
+#define big_val(x) _ET_APPLY(big_val,(x))
+
+/* flonum ("float") access methods */
+#ifdef ARCH_64
+#define HEADER_FLONUM _make_header(1,_TAG_HEADER_FLOAT)
+#else
+#define HEADER_FLONUM _make_header(2,_TAG_HEADER_FLOAT)
+#endif
+#define make_float(x) make_boxed((x))
+#define is_float(x) (is_boxed((x)) && *boxed_val((x)) == HEADER_FLONUM)
+#define is_not_float(x) (!is_float(x))
+#define _unchecked_float_val(x) _unchecked_boxed_val((x))
+_ET_DECLARE_CHECKED(Eterm*,float_val,Eterm);
+#define float_val(x) _ET_APPLY(float_val,(x))
+
+/* Float definition for byte and word access */
+typedef double ieee754_8;
+
+typedef union float_def
+{
+ ieee754_8 fd;
+ byte fb[sizeof(ieee754_8)];
+ Uint16 fs[sizeof(ieee754_8) / sizeof(Uint16)];
+ Uint32 fw[sizeof(ieee754_8) / sizeof(Uint32)];
+#ifdef ARCH_64
+ Uint fdw;
+#endif
+} FloatDef;
+
+#ifdef ARCH_64
+#define GET_DOUBLE(x, f) (f).fdw = *(float_val(x)+1)
+
+#define PUT_DOUBLE(f, x) *(x) = HEADER_FLONUM, \
+ *((x)+1) = (f).fdw
+#define GET_DOUBLE_DATA(p, f) (f).fdw = *((Uint *) (p))
+#define PUT_DOUBLE_DATA(f,p) *((Uint *) (p)) = (f).fdw
+#else
+#define GET_DOUBLE(x, f) (f).fw[0] = *(float_val(x)+1), \
+ (f).fw[1] = *(float_val(x)+2)
+
+#define PUT_DOUBLE(f, x) *(x) = HEADER_FLONUM, \
+ *((x)+1) = (f).fw[0], \
+ *((x)+2) = (f).fw[1]
+#define GET_DOUBLE_DATA(p, f) (f).fw[0] = *((Uint *) (p)),\
+ (f).fw[1] = *(((Uint *) (p))+1)
+#define PUT_DOUBLE_DATA(f,p) *((Uint *) (p)) = (f).fw[0],\
+ *(((Uint *) (p))+1) = (f).fw[1]
+#endif
+#define DOUBLE_DATA_WORDS (sizeof(ieee754_8)/sizeof(Eterm))
+#define FLOAT_SIZE_OBJECT (DOUBLE_DATA_WORDS+1)
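+
+/*
+ * Illustrative: reading a float term f_term into a C double via the
+ * union above:
+ *
+ *   FloatDef fd;
+ *   GET_DOUBLE(f_term, fd);
+ *   ... fd.fd now holds the double value ...
+ */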
+
+/* tuple access methods */
+#define make_tuple(x) make_boxed((x))
+#define is_tuple(x) (is_boxed((x)) && is_arity_value(*boxed_val((x))))
+#define is_not_tuple(x) (!is_tuple((x)))
+#define is_tuple_arity(x, a) \
+ (is_boxed((x)) && *boxed_val((x)) == make_arityval((a)))
+#define is_not_tuple_arity(x, a) (!is_tuple_arity((x),(a)))
+#define _unchecked_tuple_val(x) _unchecked_boxed_val(x)
+_ET_DECLARE_CHECKED(Eterm*,tuple_val,Eterm);
+#define tuple_val(x) _ET_APPLY(tuple_val,(x))
+
+#define TUPLE0(t) \
+ ((t)[0] = make_arityval(0), \
+ make_tuple(t))
+#define TUPLE1(t,e1) \
+ ((t)[0] = make_arityval(1), \
+ (t)[1] = (e1), \
+ make_tuple(t))
+#define TUPLE2(t,e1,e2) \
+ ((t)[0] = make_arityval(2), \
+ (t)[1] = (e1), \
+ (t)[2] = (e2), \
+ make_tuple(t))
+#define TUPLE3(t,e1,e2,e3) \
+ ((t)[0] = make_arityval(3), \
+ (t)[1] = (e1), \
+ (t)[2] = (e2), \
+ (t)[3] = (e3), \
+ make_tuple(t))
+#define TUPLE4(t,e1,e2,e3,e4) \
+ ((t)[0] = make_arityval(4), \
+ (t)[1] = (e1), \
+ (t)[2] = (e2), \
+ (t)[3] = (e3), \
+ (t)[4] = (e4), \
+ make_tuple(t))
+#define TUPLE5(t,e1,e2,e3,e4,e5) \
+ ((t)[0] = make_arityval(5), \
+ (t)[1] = (e1), \
+ (t)[2] = (e2), \
+ (t)[3] = (e3), \
+ (t)[4] = (e4), \
+ (t)[5] = (e5), \
+ make_tuple(t))
+#define TUPLE6(t,e1,e2,e3,e4,e5,e6) \
+ ((t)[0] = make_arityval(6), \
+ (t)[1] = (e1), \
+ (t)[2] = (e2), \
+ (t)[3] = (e3), \
+ (t)[4] = (e4), \
+ (t)[5] = (e5), \
+ (t)[6] = (e6), \
+ make_tuple(t))
+
+#define TUPLE7(t,e1,e2,e3,e4,e5,e6,e7) \
+ ((t)[0] = make_arityval(7), \
+ (t)[1] = (e1), \
+ (t)[2] = (e2), \
+ (t)[3] = (e3), \
+ (t)[4] = (e4), \
+ (t)[5] = (e5), \
+ (t)[6] = (e6), \
+ (t)[7] = (e7), \
+ make_tuple(t))
+
+#define TUPLE8(t,e1,e2,e3,e4,e5,e6,e7,e8) \
+ ((t)[0] = make_arityval(8), \
+ (t)[1] = (e1), \
+ (t)[2] = (e2), \
+ (t)[3] = (e3), \
+ (t)[4] = (e4), \
+ (t)[5] = (e5), \
+ (t)[6] = (e6), \
+ (t)[7] = (e7), \
+ (t)[8] = (e8), \
+ make_tuple(t))
+
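+/*
+ * Illustrative: building the 2-tuple {ok, Value} on heap pointer hp
+ * (three words needed; am_ok denotes the atom 'ok' and is assumed to
+ * be in scope):
+ *
+ *   Eterm res = TUPLE2(hp, am_ok, value);
+ */
+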
+/* This macro gets Size bits starting at low-order position Pos and
+   shifts them down to the least significant position.
+   Bits are numbered from 0 to (sizeof(Uint)*8 - 1). */
+
+#define _GETBITS(X,Pos,Size) (((X) >> (Pos)) & ~(~((Uint) 0) << (Size)))
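+/* For example, _GETBITS(0xF0, 4, 4) == 0xF: bits 4..7, shifted down. */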
+
+/*
+ * Observe! New layout for pids, ports and references in R9 (see also note
+ * in erl_node_container_utils.h).
+ */
+
+
+/*
+ * Creation in node specific data (pids, ports, refs)
+ */
+
+#define _CRE_SIZE 2
+
+/* MAX value for the creation field in pid, port and reference */
+#define MAX_CREATION (1 << _CRE_SIZE)
+
+/*
+ * PID layout (internal pids):
+ *
+ * |3 3 2 2 2 2 2 2|2 2 2 2 1 1 1 1|1 1 1 1 1 1 | |
+ * |1 0 9 8 7 6 5 4|3 2 1 0 9 8 7 6|5 4 3 2 1 0 9 8|7 6 5 4 3 2 1 0|
+ * | | | | |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |n n n n n n n n n n n n n n n n n n n n n n n n n n n n|0 0|1 1|
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *
+ * n : number
+ *
+ * Old pid layout:
+ *
+ * |3 3 2 2 2 2 2 2|2 2 2 2 1 1 1 1|1 1 1 1 1 1 | |
+ * |1 0 9 8 7 6 5 4|3 2 1 0 9 8 7 6|5 4 3 2 1 0 9 8|7 6 5 4 3 2 1 0|
+ * | | | | |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |s s s|n n n n n n n n n n n n n n n|N N N N N N N N|c c|0 0|1 1|
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *
+ * s : serial
+ * n : number
+ * c : creation
+ * N : node number
+ *
+ */
+
+#define _PID_R9_SER_SIZE 3
+#define _PID_SER_SIZE (_PID_DATA_SIZE - _PID_NUM_SIZE)
+#define _PID_NUM_SIZE 15
+
+#define _PID_DATA_SIZE 28
+#define _PID_DATA_SHIFT (_TAG_IMMED1_SIZE)
+
+#define _GET_PID_DATA(X) _GETBITS((X),_PID_DATA_SHIFT,_PID_DATA_SIZE)
+#define _GET_PID_NUM(X) _GETBITS((X),0,_PID_NUM_SIZE)
+#define _GET_PID_SER(X) _GETBITS((X),_PID_NUM_SIZE,_PID_SER_SIZE)
+
+#define make_pid_data(Ser, Num) \
+ ((Uint) ((Ser) << _PID_NUM_SIZE | (Num)))
+
+#define make_internal_pid(X) \
+ ((Eterm) (((X) << _PID_DATA_SHIFT) | _TAG_IMMED1_PID))
+
+#define is_internal_pid(x) (((x) & _TAG_IMMED1_MASK) == _TAG_IMMED1_PID)
+#define is_not_internal_pid(x) (!is_internal_pid((x)))
+
+#define _unchecked_internal_pid_data(x) _GET_PID_DATA((x))
+_ET_DECLARE_CHECKED(Uint,internal_pid_data,Eterm);
+#define internal_pid_data(x) _ET_APPLY(internal_pid_data,(x))
+
+#define _unchecked_internal_pid_node(x) erts_this_node
+_ET_DECLARE_CHECKED(struct erl_node_*,internal_pid_node,Eterm);
+#define internal_pid_node(x) _ET_APPLY(internal_pid_node,(x))
+
+#define internal_pid_number(x) _GET_PID_NUM(internal_pid_data((x)))
+#define internal_pid_serial(x) _GET_PID_SER(internal_pid_data((x)))
+
+#define internal_pid_data_words(x) (1)
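+
+/*
+ * Illustrative composition and decomposition, for ser and num within
+ * the field sizes above:
+ *
+ *   Eterm pid = make_internal_pid(make_pid_data(ser, num));
+ *   ... internal_pid_number(pid) == num,
+ *       internal_pid_serial(pid) == ser ...
+ */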
+
+/*
+ * PORT layout (internal ports):
+ *
+ * |3 3 2 2 2 2 2 2|2 2 2 2 1 1 1 1|1 1 1 1 1 1 | |
+ * |1 0 9 8 7 6 5 4|3 2 1 0 9 8 7 6|5 4 3 2 1 0 9 8|7 6 5 4 3 2 1 0|
+ * | | | | |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |n n n n n n n n n n n n n n n n n n n n n n n n n n n n|0 1|1 1|
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *
+ * n : number
+ *
+ * Old port layout:
+ *
+ * |3 3 2 2 2 2 2 2|2 2 2 2 1 1 1 1|1 1 1 1 1 1 | |
+ * |1 0 9 8 7 6 5 4|3 2 1 0 9 8 7 6|5 4 3 2 1 0 9 8|7 6 5 4 3 2 1 0|
+ * | | | | |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |N N N N N N N N|n n n n n n n n n n n n n n n n n n|c c|0 1|1 1|
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *
+ * n : number
+ * c : creation
+ * N : node number
+ *
+ */
+#define _PORT_R9_NUM_SIZE 18
+#define _PORT_NUM_SIZE _PORT_DATA_SIZE
+
+#define _PORT_DATA_SIZE 28
+#define _PORT_DATA_SHIFT (_TAG_IMMED1_SIZE)
+
+#define _GET_PORT_DATA(X) _GETBITS((X),_PORT_DATA_SHIFT,_PORT_DATA_SIZE)
+#define _GET_PORT_NUM(X) _GETBITS((X), 0, _PORT_NUM_SIZE)
+
+
+#define make_internal_port(X) \
+ ((Eterm) (((X) << _PORT_DATA_SHIFT) | _TAG_IMMED1_PORT))
+
+#define is_internal_port(x) (((x) & _TAG_IMMED1_MASK) == _TAG_IMMED1_PORT)
+#define is_not_internal_port(x) (!is_internal_port(x))
+
+#define _unchecked_internal_port_data(x) _GET_PORT_DATA((x))
+_ET_DECLARE_CHECKED(Uint,internal_port_data,Eterm);
+#define internal_port_data(x) _ET_APPLY(internal_port_data,(x))
+
+#define internal_port_number(x) _GET_PORT_NUM(internal_port_data((x)))
+
+#define _unchecked_internal_port_node(x) erts_this_node
+_ET_DECLARE_CHECKED(struct erl_node_*,internal_port_node,Eterm);
+#define internal_port_node(x) _ET_APPLY(internal_port_node,(x))
+
+#define internal_port_data_words(x) (1)
+/*
+ * Ref layout (internal references):
+ *
+ * |3 3 2 2 2 2 2 2|2 2 2 2 1 1 1 1|1 1 1 1 1 1 | |
+ * |1 0 9 8 7 6 5 4|3 2 1 0 9 8 7 6|5 4 3 2 1 0 9 8|7 6 5 4 3 2 1 0|
+ * | | | | |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1|0 1 0 0|0 0| Thing
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |0 0 0 0 0 0 0 0 0 0 0 0 0 0|r r r r r r r r r r r r r r r r r r| Data 0
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |r r r r r r r r r r r r r r r r r r r r r r r r r r r r r r r r| Data 1
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |r r r r r r r r r r r r r r r r r r r r r r r r r r r r r r r r| Data 2
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *
+ *
+ * r : reference number
+ * c : creation
+ *
+ *
+ * Old "heap ref" layout:
+ *
+ *
+ * |3 3 2 2 2 2 2 2|2 2 2 2 1 1 1 1|1 1 1 1 1 1 | |
+ * |1 0 9 8 7 6 5 4|3 2 1 0 9 8 7 6|5 4 3 2 1 0 9 8|7 6 5 4 3 2 1 0|
+ * | | | | |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1|0 1 0 0|0 0| Thing
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |N N N N N N N N|0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0|c c|0 1 1 1| Head
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |0 0 0 0 0 0 0 0 0 0 0 0 0 0|r r r r r r r r r r r r r r r r r r| Word 0
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |r r r r r r r r r r r r r r r r r r r r r r r r r r r r r r r r| Word 1
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |r r r r r r r r r r r r r r r r r r r r r r r r r r r r r r r r| Word 2
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *
+ * r : reference number
+ * c : creation
+ * N : node index
+ *
+ * Old "one-word ref" layout:
+ *
+ * |3 3 2 2 2 2 2 2|2 2 2 2 1 1 1 1|1 1 1 1 1 1 | |
+ * |1 0 9 8 7 6 5 4|3 2 1 0 9 8 7 6|5 4 3 2 1 0 9 8|7 6 5 4 3 2 1 0|
+ * | | | | |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |N N N N N N N N|r r r r r r r r r r r r r r r r r r|c c|T T T T|
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *
+ * r : reference number
+ * c : creation
+ * N : node index
+ *
+ */
+#define _REF_NUM_SIZE 18
+
+/* Old maximum number of references in the system */
+#define MAX_REFERENCE (1 << _REF_NUM_SIZE)
+#define REF_MASK (~(~((Uint)0) << _REF_NUM_SIZE))
+#define ERTS_MAX_REF_NUMBERS 3
+#define ERTS_REF_NUMBERS ERTS_MAX_REF_NUMBERS
+
+#ifdef ARCH_64
+# define ERTS_REF_WORDS (ERTS_REF_NUMBERS/2 + 1)
+# define ERTS_REF_32BIT_WORDS (ERTS_REF_NUMBERS+1)
+#else
+# define ERTS_REF_WORDS ERTS_REF_NUMBERS
+# define ERTS_REF_32BIT_WORDS ERTS_REF_NUMBERS
+#endif
+
+typedef struct {
+ Eterm header;
+ union {
+ Uint32 ui32[ERTS_REF_32BIT_WORDS];
+ Uint ui[ERTS_REF_WORDS];
+ } data;
+} RefThing;
+
+#define REF_THING_SIZE (sizeof(RefThing)/sizeof(Uint))
+#define REF_THING_HEAD_SIZE (sizeof(Eterm)/sizeof(Uint))
+
+#define make_ref_thing_header(DW) \
+ _make_header((DW)+REF_THING_HEAD_SIZE-1,_TAG_HEADER_REF)
+
+#ifdef ARCH_64
+
+/*
+ * Ref layout on a 64-bit little endian machine:
+ *
+ * 63 31 0
+ * +--------------+--------------+
+ * | Thing word |
+ * +--------------+--------------+
+ * | Data 0 | 32-bit arity |
+ * +--------------+--------------+
+ * | Data 2 | Data 1 |
+ * +--------------+--------------+
+ *
+ * Data is stored as a Uint32 array with the 32-bit arity as its first element.
+ */
+
+#define write_ref_thing(Hp, R0, R1, R2) \
+do { \
+ ((RefThing *) (Hp))->header = make_ref_thing_header(ERTS_REF_WORDS); \
+ ((RefThing *) (Hp))->data.ui32[0] = ERTS_REF_NUMBERS; \
+ ((RefThing *) (Hp))->data.ui32[1] = (R0); \
+ ((RefThing *) (Hp))->data.ui32[2] = (R1); \
+ ((RefThing *) (Hp))->data.ui32[3] = (R2); \
+} while (0)
+
+#else
+
+#define write_ref_thing(Hp, R0, R1, R2) \
+do { \
+ ((RefThing *) (Hp))->header = make_ref_thing_header(ERTS_REF_WORDS); \
+ ((RefThing *) (Hp))->data.ui32[0] = (R0); \
+ ((RefThing *) (Hp))->data.ui32[1] = (R1); \
+ ((RefThing *) (Hp))->data.ui32[2] = (R2); \
+} while (0)
+
+#endif
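+
+/*
+ * Illustrative: creating an internal ref on heap pointer hp (hp must
+ * have REF_THING_SIZE words available; r0..r2 are reference numbers;
+ * make_internal_ref is defined just below):
+ *
+ *   write_ref_thing(hp, r0, r1, r2);
+ *   Eterm ref = make_internal_ref(hp);
+ */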
+
+#define is_ref_thing_header(x) (((x) & _TAG_HEADER_MASK) == _TAG_HEADER_REF)
+#define make_internal_ref(x) make_boxed((Eterm*)(x))
+
+#define _unchecked_ref_thing_ptr(x) \
+ ((RefThing*) _unchecked_internal_ref_val(x))
+#define ref_thing_ptr(x) \
+ ((RefThing*) internal_ref_val(x))
+
+#define is_internal_ref(x) \
+ (_unchecked_is_boxed((x)) && is_ref_thing_header(*boxed_val((x))))
+#define is_not_internal_ref(x) \
+ (!is_internal_ref((x)))
+
+#define _unchecked_internal_ref_val(x) _unchecked_boxed_val((x))
+_ET_DECLARE_CHECKED(Eterm*,internal_ref_val,Eterm);
+#define internal_ref_val(x) _ET_APPLY(internal_ref_val,(x))
+
+#define _unchecked_internal_ref_data_words(x) \
+ (_unchecked_thing_arityval(*_unchecked_internal_ref_val(x)))
+_ET_DECLARE_CHECKED(Uint,internal_ref_data_words,Eterm);
+#define internal_ref_data_words(x) _ET_APPLY(internal_ref_data_words,(x))
+
+#define _unchecked_internal_ref_data(x) (_unchecked_ref_thing_ptr(x)->data.ui32)
+_ET_DECLARE_CHECKED(Uint32*,internal_ref_data,Eterm);
+#define internal_ref_data(x) _ET_APPLY(internal_ref_data,(x))
+
+#define _unchecked_internal_ref_node(x) erts_this_node
+_ET_DECLARE_CHECKED(struct erl_node_*,internal_ref_node,Eterm);
+#define internal_ref_node(x) _ET_APPLY(internal_ref_node,(x))
+
+/*
+ *
+ * External thing layout (external pids, ports, and refs):
+ *
+ * |3 3 2 2 2 2 2 2|2 2 2 2 1 1 1 1|1 1 1 1 1 1 | |
+ * |1 0 9 8 7 6 5 4|3 2 1 0 9 8 7 6|5 4 3 2 1 0 9 8|7 6 5 4 3 2 1 0|
+ * | | | | |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |A A A A A A A A A A A A A A A A A A A A A A A A A A|t t t t|0 0| Thing
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |N N N N N N N N N N N N N N N N N N N N N N N N N N N N N N N N| Next
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |E E E E E E E E E E E E E E E E E E E E E E E E E E E E E E E E| ErlNode
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |X X X X X X X X X X X X X X X X X X X X X X X X X X X X X X X X| Data 0
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * . . .
+ * . . .
+ * . . .
+ *
+ * A : Arity
+ * t : External pid thing tag (1100)
+ * t : External port thing tag (1101)
+ * t : External ref thing tag (1110)
+ * N : Next (external thing) pointer
+ * E : ErlNode pointer
+ * X : Type specific data
+ *
+ * External pid and port layout:
+ *    External pids and ports have only one data word (Data 0), which has
+ *    the same layout as internal pids and internal ports, respectively.
+ *
+ * External refs layout:
+ *    External refs have the same layout for the data words as internal
+ *    refs.
+ *
+ */
+
+typedef struct external_thing_ {
+ /* ----+ */
+ Eterm header; /* | */
+ struct external_thing_ *next; /* > External thing head */
+ struct erl_node_ *node; /* | */
+ /* ----+ */
+ union {
+ Uint32 ui32[1];
+ Uint ui[1];
+ } data;
+} ExternalThing;
+
+#define EXTERNAL_THING_HEAD_SIZE (sizeof(ExternalThing)/sizeof(Uint) - 1)
+
+#define make_external_pid_header(DW) \
+ _make_header((DW)+EXTERNAL_THING_HEAD_SIZE-1,_TAG_HEADER_EXTERNAL_PID)
+#define is_external_pid_header(x) \
+ (((x) & _TAG_HEADER_MASK) == _TAG_HEADER_EXTERNAL_PID)
+
+#define make_external_port_header(DW) \
+ _make_header((DW)+EXTERNAL_THING_HEAD_SIZE-1,_TAG_HEADER_EXTERNAL_PORT)
+#define is_external_port_header(x) \
+ (((x) & _TAG_HEADER_MASK) == _TAG_HEADER_EXTERNAL_PORT)
+
+#define make_external_ref_header(DW) \
+ _make_header((DW)+EXTERNAL_THING_HEAD_SIZE-1,_TAG_HEADER_EXTERNAL_REF)
+#define is_external_ref_header(x) \
+ (((x) & _TAG_HEADER_MASK) == _TAG_HEADER_EXTERNAL_REF)
+
+#define is_external_header(x) \
+ (((x) & (_TAG_HEADER_MASK-_BINARY_XXX_MASK)) == _TAG_HEADER_EXTERNAL_PID)
+
+#define is_external(x) \
+ (is_boxed((x)) && is_external_header(*boxed_val((x))))
+#define is_external_pid(x) \
+ (is_boxed((x)) && is_external_pid_header(*boxed_val((x))))
+#define is_external_port(x) \
+ (is_boxed((x)) && is_external_port_header(*boxed_val((x))))
+#define is_external_ref(x) \
+ (_unchecked_is_boxed((x)) && is_external_ref_header(*boxed_val((x))))
+
+#define _unchecked_is_external(x) \
+ (_unchecked_is_boxed((x)) && is_external_header(*_unchecked_boxed_val((x))))
+
+#define is_not_external(x) (!is_external((x)))
+#define is_not_external_pid(x) (!is_external_pid((x)))
+#define is_not_external_port(x) (!is_external_port((x)))
+#define is_not_external_ref(x) (!is_external_ref((x)))
+
+
+#define make_external(x) make_boxed((Eterm *) (x))
+
+#define make_external_pid make_external
+#define make_external_port make_external
+#define make_external_ref make_external
+
+#define _unchecked_external_val(x) _unchecked_boxed_val((x))
+_ET_DECLARE_CHECKED(Eterm*,external_val,Eterm);
+#define external_val(x) _ET_APPLY(external_val,(x))
+
+#define external_thing_ptr(x) ((ExternalThing *) external_val((x)))
+#define _unchecked_external_thing_ptr(x) \
+ ((ExternalThing *) _unchecked_external_val((x)))
+
+#define _unchecked_external_data_words(x) \
+ (_unchecked_thing_arityval(_unchecked_external_thing_ptr((x))->header) \
+ + (1 - EXTERNAL_THING_HEAD_SIZE))
+_ET_DECLARE_CHECKED(Uint,external_data_words,Eterm);
+#define external_data_words(x) _ET_APPLY(external_data_words,(x))
+
+#define _unchecked_external_data(x) (_unchecked_external_thing_ptr((x))->data.ui)
+#define _unchecked_external_node(x) (_unchecked_external_thing_ptr((x))->node)
+
+#define external_data(x) (external_thing_ptr((x))->data.ui)
+#define external_node(x) (external_thing_ptr((x))->node)
+
+#define _unchecked_external_pid_data_words(x) \
+ _unchecked_external_data_words((x))
+_ET_DECLARE_CHECKED(Uint,external_pid_data_words,Eterm);
+#define external_pid_data_words(x) _ET_APPLY(external_pid_data_words,(x))
+
+#define _unchecked_external_pid_data(x) _unchecked_external_data((x))[0]
+_ET_DECLARE_CHECKED(Uint,external_pid_data,Eterm);
+#define external_pid_data(x) _ET_APPLY(external_pid_data,(x))
+
+#define _unchecked_external_pid_node(x) _unchecked_external_node((x))
+_ET_DECLARE_CHECKED(struct erl_node_*,external_pid_node,Eterm);
+#define external_pid_node(x) _ET_APPLY(external_pid_node,(x))
+
+#define external_pid_number(x) _GET_PID_NUM(external_pid_data((x)))
+#define external_pid_serial(x) _GET_PID_SER(external_pid_data((x)))
+
+#define _unchecked_external_port_data_words(x) \
+ _unchecked_external_data_words((x))
+_ET_DECLARE_CHECKED(Uint,external_port_data_words,Eterm);
+#define external_port_data_words(x) _ET_APPLY(external_port_data_words,(x))
+
+#define _unchecked_external_port_data(x) _unchecked_external_data((x))[0]
+_ET_DECLARE_CHECKED(Uint,external_port_data,Eterm);
+#define external_port_data(x) _ET_APPLY(external_port_data,(x))
+
+#define _unchecked_external_port_node(x) _unchecked_external_node((x))
+_ET_DECLARE_CHECKED(struct erl_node_*,external_port_node,Eterm);
+#define external_port_node(x) _ET_APPLY(external_port_node,(x))
+
+#define external_port_number(x) _GET_PORT_NUM(external_port_data((x)))
+
+#define _unchecked_external_ref_data_words(x) \
+ _unchecked_external_data_words((x))
+_ET_DECLARE_CHECKED(Uint,external_ref_data_words,Eterm);
+#define external_ref_data_words(x) _ET_APPLY(external_ref_data_words,(x))
+
+#define _unchecked_external_ref_data(x) (_unchecked_external_thing_ptr((x))->data.ui32)
+_ET_DECLARE_CHECKED(Uint32*,external_ref_data,Eterm);
+#define external_ref_data(x) _ET_APPLY(external_ref_data,(x))
+
+#define _unchecked_external_ref_node(x) _unchecked_external_node((x))
+_ET_DECLARE_CHECKED(struct erl_node_*,external_ref_node,Eterm);
+#define external_ref_node(x) _ET_APPLY(external_ref_node,(x))
+
+/* number tests */
+
+#define is_integer(x) (is_small(x) || is_big(x))
+#define is_not_integer(x) (!is_integer(x))
+#define is_number(x) (is_integer(x) || is_float(x))
+
+#define SMALL_MINUS_ONE make_small(-1)
+#define SMALL_ZERO make_small(0)
+#define SMALL_ONE make_small(1)
+
+#define ENULL 0
+
+/* on some architectures CP contains labels which are not aligned */
+#ifdef NOT_ALIGNED
+#error "fix yer arch, like"
+#endif
+
+#define _unchecked_make_cp(x) ((Eterm)(x))
+_ET_DECLARE_CHECKED(Eterm,make_cp,Uint*);
+#define make_cp(x) _ET_APPLY(make_cp,(x))
+
+#define is_not_CP(x) ((x) & _CPMASK)
+#define is_CP(x) (!is_not_CP(x))
+
+#define _unchecked_cp_val(x) ((Uint*)(x))
+_ET_DECLARE_CHECKED(Uint*,cp_val,Eterm);
+#define cp_val(x) _ET_APPLY(cp_val,(x))
+
+#define make_catch(x) (((x) << _TAG_IMMED2_SIZE) | _TAG_IMMED2_CATCH)
+#define is_catch(x) (((x) & _TAG_IMMED2_MASK) == _TAG_IMMED2_CATCH)
+#define is_not_catch(x) (!is_catch(x))
+#define _unchecked_catch_val(x) ((x) >> _TAG_IMMED2_SIZE)
+_ET_DECLARE_CHECKED(Uint,catch_val,Eterm);
+#define catch_val(x) _ET_APPLY(catch_val,(x))
+
+#define make_blank(X) ((X) = NIL)
+
+/*
+ * Overloaded tags.
+ *
+ * SMALL = 15
+ * ATOM/NIL = 7
+ *
+ * Note that the two least significant bits in SMALL/ATOM/NIL are always 3;
+ * thus, we can distinguish registers from literals by looking at only these
+ * two bits.
+ */
+
+#define X_REG_DEF 0
+#define Y_REG_DEF 1
+#define R_REG_DEF 2
+
+#define beam_reg_tag(x) ((x) & 3)
+
+#define make_rreg() R_REG_DEF
+#define make_xreg(ix) (((ix) * sizeof(Eterm)) | X_REG_DEF)
+#define make_yreg(ix) (((ix) * sizeof(Eterm)) | Y_REG_DEF)
+
+#define _is_xreg(x) (beam_reg_tag(x) == X_REG_DEF)
+#define _is_yreg(x) (beam_reg_tag(x) == Y_REG_DEF)
+
+#define _unchecked_x_reg_offset(R) ((R) - X_REG_DEF)
+_ET_DECLARE_CHECKED(Uint,x_reg_offset,Uint);
+#define x_reg_offset(R) _ET_APPLY(x_reg_offset,(R))
+
+#define _unchecked_y_reg_offset(R) ((R) - Y_REG_DEF)
+_ET_DECLARE_CHECKED(Uint,y_reg_offset,Uint);
+#define y_reg_offset(R) _ET_APPLY(y_reg_offset,(R))
+
+#define reg_index(R) ((R) / sizeof(Eterm))
+
+#define _unchecked_x_reg_index(R) ((R) >> 2)
+_ET_DECLARE_CHECKED(Uint,x_reg_index,Uint);
+#define x_reg_index(R) _ET_APPLY(x_reg_index,(R))
+
+#define _unchecked_y_reg_index(R) ((R) >> 2)
+_ET_DECLARE_CHECKED(Uint,y_reg_index,Uint);
+#define y_reg_index(R) _ET_APPLY(y_reg_index,(R))
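+
+#if 0
+/* Illustrative sketch only: encoding a BEAM x register operand and
+ * recovering its index. make_xreg(5) tags the byte offset 5*sizeof(Eterm)
+ * with X_REG_DEF; reg_index() turns an offset back into an index (the
+ * >> 2 in the _unchecked_*_reg_index forms assumes 4-byte Eterms). */
+static void example_reg_roundtrip(void)
+{
+    Uint r = make_xreg(5);
+    ASSERT(_is_xreg(r));
+    ASSERT(reg_index(x_reg_offset(r)) == 5);
+}
+#endif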
+
+/*
+ * Backwards compatibility definitions:
+ * - #define virtual *_DEF constants with values that fit term order:
+ *     number < atom < ref < fun < port < pid < tuple < nil < cons < binary
+ * - the tag_val_def() function generates the virtual _DEF tag
+ * - not_eq_tags() and NUMBER_CODE() are defined in terms
+ *     of the tag_val_def() function
+ */
+
+#define BINARY_DEF 0x0
+#define LIST_DEF 0x1
+#define NIL_DEF 0x2
+#define TUPLE_DEF 0x3
+#define PID_DEF 0x4
+#define EXTERNAL_PID_DEF 0x5
+#define PORT_DEF 0x6
+#define EXTERNAL_PORT_DEF 0x7
+#define EXPORT_DEF 0x8
+#define FUN_DEF 0x9
+#define REF_DEF 0xa
+#define EXTERNAL_REF_DEF 0xb
+#define ATOM_DEF 0xc
+#define FLOAT_DEF 0xd
+#define BIG_DEF 0xe
+#define SMALL_DEF 0xf
+
+#if ET_DEBUG
+extern unsigned tag_val_def_debug(Eterm, const char*, unsigned);
+#define tag_val_def(x) tag_val_def_debug((x),__FILE__,__LINE__)
+#else
+extern unsigned tag_val_def(Eterm);
+#endif
+#define not_eq_tags(X,Y) (tag_val_def((X)) ^ tag_val_def((Y)))
+
+#define NUMBER_CODE(x,y) ((tag_val_def(x) << 4) | tag_val_def(y))
+#define _NUMBER_CODE(TX,TY) (((TX) << 4) | (TY))
+#define SMALL_SMALL _NUMBER_CODE(SMALL_DEF,SMALL_DEF)
+#define SMALL_BIG _NUMBER_CODE(SMALL_DEF,BIG_DEF)
+#define SMALL_FLOAT _NUMBER_CODE(SMALL_DEF,FLOAT_DEF)
+#define BIG_SMALL _NUMBER_CODE(BIG_DEF,SMALL_DEF)
+#define BIG_BIG _NUMBER_CODE(BIG_DEF,BIG_DEF)
+#define BIG_FLOAT _NUMBER_CODE(BIG_DEF,FLOAT_DEF)
+#define FLOAT_SMALL _NUMBER_CODE(FLOAT_DEF,SMALL_DEF)
+#define FLOAT_BIG _NUMBER_CODE(FLOAT_DEF,BIG_DEF)
+#define FLOAT_FLOAT _NUMBER_CODE(FLOAT_DEF,FLOAT_DEF)
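+
+#if 0
+/* Illustrative sketch only: NUMBER_CODE() packs the virtual tags of two
+ * numbers into one byte so that mixed-type arithmetic can dispatch on a
+ * single switch instead of nested type tests. */
+static int example_number_dispatch(Eterm x, Eterm y)
+{
+    switch (NUMBER_CODE(x, y)) {
+    case SMALL_SMALL: return 1; /* both are immediate integers */
+    case SMALL_FLOAT:
+    case FLOAT_SMALL: return 2; /* convert the small to a float */
+    case BIG_BIG:     return 3; /* full bignum path */
+    default:          return 0; /* some other combination */
+    }
+}
+#endif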
+
+#endif /* __ERL_TERM_H */
+
diff --git a/erts/emulator/beam/erl_threads.h b/erts/emulator/beam/erl_threads.h
new file mode 100644
index 0000000000..d635916dd8
--- /dev/null
+++ b/erts/emulator/beam/erl_threads.h
@@ -0,0 +1,1524 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2001-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/* Description: Error checking thread interface to the ethread library.
+ *              All functions terminate the emulator on failure.
+ * Author: Rickard Green
+ */
+
+#ifndef ERL_THREAD_H__
+#define ERL_THREAD_H__
+
+#include "sys.h"
+#ifdef USE_THREADS
+
+#define ETHR_TRY_INLINE_FUNCS
+#include "ethread.h"
+#include "erl_lock_check.h"
+#include "erl_lock_count.h"
+#include "erl_term.h"
+
+#ifdef ERTS_ENABLE_LOCK_COUNT
+#define erts_mtx_lock(L) erts_mtx_lock_x(L, __FILE__, __LINE__)
+#define erts_spin_lock(L) erts_spin_lock_x(L, __FILE__, __LINE__)
+#define erts_rwmtx_rlock(L) erts_rwmtx_rlock_x(L, __FILE__, __LINE__)
+#define erts_rwmtx_rwlock(L) erts_rwmtx_rwlock_x(L, __FILE__, __LINE__)
+#define erts_read_lock(L) erts_read_lock_x(L, __FILE__, __LINE__)
+#define erts_write_lock(L) erts_write_lock_x(L, __FILE__, __LINE__)
+#endif
+
+#define ERTS_THR_OPTS_DEFAULT_INITER ETHR_THR_OPTS_DEFAULT_INITER
+typedef ethr_thr_opts erts_thr_opts_t;
+typedef ethr_init_data erts_thr_init_data_t;
+typedef ethr_tid erts_tid_t;
+
+/* mutex */
+typedef struct {
+ ethr_mutex mtx;
+#ifdef ERTS_ENABLE_LOCK_CHECK
+ erts_lc_lock_t lc;
+#endif
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ erts_lcnt_lock_t lcnt;
+#endif
+} erts_mtx_t;
+typedef ethr_cond erts_cnd_t;
+
+/* rwmutex */
+typedef struct {
+ ethr_rwmutex rwmtx;
+#ifdef ERTS_ENABLE_LOCK_CHECK
+ erts_lc_lock_t lc;
+#endif
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ erts_lcnt_lock_t lcnt;
+#endif
+} erts_rwmtx_t;
+typedef ethr_tsd_key erts_tsd_key_t;
+typedef ethr_gate erts_gate_t;
+typedef ethr_atomic_t erts_atomic_t;
+
+/* spinlock */
+typedef struct {
+ ethr_spinlock_t slck;
+#ifdef ERTS_ENABLE_LOCK_CHECK
+ erts_lc_lock_t lc;
+#endif
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ erts_lcnt_lock_t lcnt;
+#endif
+} erts_spinlock_t;
+
+/* rwlock */
+typedef struct {
+ ethr_rwlock_t rwlck;
+#ifdef ERTS_ENABLE_LOCK_CHECK
+ erts_lc_lock_t lc;
+#endif
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ erts_lcnt_lock_t lcnt;
+#endif
+} erts_rwlock_t;
+
+typedef ethr_timeval erts_thr_timeval_t;
+__decl_noreturn void __noreturn erts_thr_fatal_error(int, char *);
+ /* implemented in erl_init.c */
+
+#ifdef ERTS_ENABLE_LOCK_CHECK
+#define ERTS_REC_MTX_INITER \
+ {ETHR_REC_MUTEX_INITER, \
+ ERTS_LC_LOCK_INIT(-1,THE_NON_VALUE,ERTS_LC_FLG_LT_MUTEX)}
+#define ERTS_MTX_INITER \
+ {ETHR_MUTEX_INITER, \
+ ERTS_LC_LOCK_INIT(-1, THE_NON_VALUE, ERTS_LC_FLG_LT_MUTEX)}
+#else
+#define ERTS_REC_MTX_INITER {ETHR_REC_MUTEX_INITER}
+#define ERTS_MTX_INITER {ETHR_MUTEX_INITER}
+#endif
+#define ERTS_CND_INITER ETHR_COND_INITER
+#define ERTS_THR_INIT_DATA_DEF_INITER ETHR_INIT_DATA_DEFAULT_INITER
+
+#ifdef ETHR_HAVE_ETHR_REC_MUTEX_INIT
+# define ERTS_HAVE_REC_MTX_INIT ETHR_HAVE_ETHR_REC_MUTEX_INIT
+#endif
+
+
+#else /* #ifdef USE_THREADS */
+
+#define ERTS_THR_OPTS_DEFAULT_INITER 0
+typedef int erts_thr_opts_t;
+typedef int erts_thr_init_data_t;
+typedef int erts_tid_t;
+typedef int erts_mtx_t;
+typedef int erts_cnd_t;
+typedef int erts_rwmtx_t;
+typedef int erts_tsd_key_t;
+typedef int erts_gate_t;
+typedef long erts_atomic_t;
+#if __GNUC__ > 2
+typedef struct { } erts_spinlock_t;
+typedef struct { } erts_rwlock_t;
+#else
+typedef struct { int gcc_is_buggy; } erts_spinlock_t;
+typedef struct { int gcc_is_buggy; } erts_rwlock_t;
+#endif
+typedef struct {
+ long tv_sec;
+ long tv_nsec;
+} erts_thr_timeval_t;
+
+#define ERTS_REC_MTX_INITER 0
+#define ERTS_MTX_INITER 0
+#define ERTS_CND_INITER 0
+#define ERTS_THR_INIT_DATA_DEF_INITER 0
+
+#define ERTS_HAVE_REC_MTX_INIT 1
+
+#endif /* #ifdef USE_THREADS */
+
+ERTS_GLB_INLINE void erts_thr_init(erts_thr_init_data_t *id);
+ERTS_GLB_INLINE void erts_thr_create(erts_tid_t *tid, void * (*func)(void *),
+ void *arg, erts_thr_opts_t *opts);
+ERTS_GLB_INLINE void erts_thr_join(erts_tid_t tid, void **thr_res);
+ERTS_GLB_INLINE void erts_thr_detach(erts_tid_t tid);
+ERTS_GLB_INLINE void erts_thr_exit(void *res);
+ERTS_GLB_INLINE void erts_thr_install_exit_handler(void (*exit_handler)(void));
+ERTS_GLB_INLINE erts_tid_t erts_thr_self(void);
+ERTS_GLB_INLINE int erts_equal_tids(erts_tid_t x, erts_tid_t y);
+#ifdef ERTS_HAVE_REC_MTX_INIT
+ERTS_GLB_INLINE void erts_rec_mtx_init(erts_mtx_t *mtx);
+#endif
+ERTS_GLB_INLINE void erts_mtx_init_x(erts_mtx_t *mtx, char *name, Eterm extra);
+ERTS_GLB_INLINE void erts_mtx_init_x_opt(erts_mtx_t *mtx, char *name, Eterm extra, Uint16 opt);
+ERTS_GLB_INLINE void erts_mtx_init_locked_x(erts_mtx_t *mtx,
+ char *name,
+ Eterm extra);
+ERTS_GLB_INLINE void erts_mtx_init(erts_mtx_t *mtx, char *name);
+ERTS_GLB_INLINE void erts_mtx_init_locked(erts_mtx_t *mtx, char *name);
+ERTS_GLB_INLINE void erts_mtx_destroy(erts_mtx_t *mtx);
+ERTS_GLB_INLINE void erts_mtx_set_forksafe(erts_mtx_t *mtx);
+ERTS_GLB_INLINE void erts_mtx_unset_forksafe(erts_mtx_t *mtx);
+ERTS_GLB_INLINE int erts_mtx_trylock(erts_mtx_t *mtx);
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ERTS_GLB_INLINE void erts_mtx_lock_x(erts_mtx_t *mtx, char *file, unsigned int line);
+#else
+ERTS_GLB_INLINE void erts_mtx_lock(erts_mtx_t *mtx);
+#endif
+ERTS_GLB_INLINE void erts_mtx_unlock(erts_mtx_t *mtx);
+ERTS_GLB_INLINE int erts_lc_mtx_is_locked(erts_mtx_t *mtx);
+ERTS_GLB_INLINE void erts_cnd_init(erts_cnd_t *cnd);
+ERTS_GLB_INLINE void erts_cnd_destroy(erts_cnd_t *cnd);
+ERTS_GLB_INLINE void erts_cnd_wait(erts_cnd_t *cnd, erts_mtx_t *mtx);
+ERTS_GLB_INLINE void erts_cnd_signal(erts_cnd_t *cnd);
+ERTS_GLB_INLINE void erts_cnd_broadcast(erts_cnd_t *cnd);
+ERTS_GLB_INLINE void erts_rwmtx_init_x(erts_rwmtx_t *rwmtx,
+ char *name,
+ Eterm extra);
+ERTS_GLB_INLINE void erts_rwmtx_init(erts_rwmtx_t *rwmtx,
+ char *name);
+ERTS_GLB_INLINE void erts_rwmtx_destroy(erts_rwmtx_t *rwmtx);
+ERTS_GLB_INLINE int erts_rwmtx_tryrlock(erts_rwmtx_t *rwmtx);
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ERTS_GLB_INLINE void erts_rwmtx_rlock_x(erts_rwmtx_t *rwmtx, char *file, unsigned int line);
+ERTS_GLB_INLINE void erts_rwmtx_rwlock_x(erts_rwmtx_t *rwmtx, char *file, unsigned int line);
+#else
+ERTS_GLB_INLINE void erts_rwmtx_rlock(erts_rwmtx_t *rwmtx);
+ERTS_GLB_INLINE void erts_rwmtx_rwlock(erts_rwmtx_t *rwmtx);
+#endif
+ERTS_GLB_INLINE void erts_rwmtx_runlock(erts_rwmtx_t *rwmtx);
+ERTS_GLB_INLINE int erts_rwmtx_tryrwlock(erts_rwmtx_t *rwmtx);
+ERTS_GLB_INLINE void erts_rwmtx_rwunlock(erts_rwmtx_t *rwmtx);
+ERTS_GLB_INLINE int erts_lc_rwmtx_is_rlocked(erts_rwmtx_t *mtx);
+ERTS_GLB_INLINE int erts_lc_rwmtx_is_rwlocked(erts_rwmtx_t *mtx);
+ERTS_GLB_INLINE void erts_atomic_init(erts_atomic_t *var, long i);
+ERTS_GLB_INLINE void erts_atomic_set(erts_atomic_t *var, long i);
+ERTS_GLB_INLINE long erts_atomic_read(erts_atomic_t *var);
+ERTS_GLB_INLINE long erts_atomic_inctest(erts_atomic_t *incp);
+ERTS_GLB_INLINE long erts_atomic_dectest(erts_atomic_t *decp);
+ERTS_GLB_INLINE void erts_atomic_inc(erts_atomic_t *incp);
+ERTS_GLB_INLINE void erts_atomic_dec(erts_atomic_t *decp);
+ERTS_GLB_INLINE long erts_atomic_addtest(erts_atomic_t *addp,
+ long i);
+ERTS_GLB_INLINE void erts_atomic_add(erts_atomic_t *addp, long i);
+ERTS_GLB_INLINE long erts_atomic_xchg(erts_atomic_t *xchgp,
+ long new);
+ERTS_GLB_INLINE long erts_atomic_cmpxchg(erts_atomic_t *xchgp,
+ long new,
+ long expected);
+ERTS_GLB_INLINE long erts_atomic_bor(erts_atomic_t *var, long mask);
+ERTS_GLB_INLINE long erts_atomic_band(erts_atomic_t *var, long mask);
+ERTS_GLB_INLINE void erts_spinlock_init_x(erts_spinlock_t *lock,
+ char *name,
+ Eterm extra);
+ERTS_GLB_INLINE void erts_spinlock_init(erts_spinlock_t *lock,
+ char *name);
+ERTS_GLB_INLINE void erts_spinlock_destroy(erts_spinlock_t *lock);
+ERTS_GLB_INLINE void erts_spin_unlock(erts_spinlock_t *lock);
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ERTS_GLB_INLINE void erts_spin_lock_x(erts_spinlock_t *lock, char *file, unsigned int line);
+#else
+ERTS_GLB_INLINE void erts_spin_lock(erts_spinlock_t *lock);
+#endif
+ERTS_GLB_INLINE int erts_lc_spinlock_is_locked(erts_spinlock_t *lock);
+ERTS_GLB_INLINE void erts_rwlock_init_x(erts_rwlock_t *lock,
+ char *name,
+ Eterm extra);
+ERTS_GLB_INLINE void erts_rwlock_init(erts_rwlock_t *lock,
+ char *name);
+ERTS_GLB_INLINE void erts_rwlock_destroy(erts_rwlock_t *lock);
+ERTS_GLB_INLINE void erts_read_unlock(erts_rwlock_t *lock);
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ERTS_GLB_INLINE void erts_read_lock_x(erts_rwlock_t *lock, char *file, unsigned int line);
+ERTS_GLB_INLINE void erts_write_lock_x(erts_rwlock_t *lock, char *file, unsigned int line);
+#else
+ERTS_GLB_INLINE void erts_read_lock(erts_rwlock_t *lock);
+ERTS_GLB_INLINE void erts_write_lock(erts_rwlock_t *lock);
+#endif
+ERTS_GLB_INLINE void erts_write_unlock(erts_rwlock_t *lock);
+ERTS_GLB_INLINE int erts_lc_rwlock_is_rlocked(erts_rwlock_t *lock);
+ERTS_GLB_INLINE int erts_lc_rwlock_is_rwlocked(erts_rwlock_t *lock);
+ERTS_GLB_INLINE void erts_thr_time_now(erts_thr_timeval_t *time);
+ERTS_GLB_INLINE void erts_tsd_key_create(erts_tsd_key_t *keyp);
+ERTS_GLB_INLINE void erts_tsd_key_delete(erts_tsd_key_t key);
+ERTS_GLB_INLINE void erts_tsd_set(erts_tsd_key_t key, void *value);
+ERTS_GLB_INLINE void * erts_tsd_get(erts_tsd_key_t key);
+ERTS_GLB_INLINE void erts_gate_init(erts_gate_t *gp);
+ERTS_GLB_INLINE void erts_gate_destroy(erts_gate_t *gp);
+ERTS_GLB_INLINE void erts_gate_close(erts_gate_t *gp);
+ERTS_GLB_INLINE void erts_gate_let_through(erts_gate_t *gp, unsigned no);
+ERTS_GLB_INLINE void erts_gate_wait(erts_gate_t *gp);
+ERTS_GLB_INLINE void erts_gate_swait(erts_gate_t *gp, int spincount);
+
+#ifdef ETHR_HAVE_ETHR_SIG_FUNCS
+#define ERTS_THR_HAVE_SIG_FUNCS 1
+ERTS_GLB_INLINE void erts_thr_sigmask(int how, const sigset_t *set,
+ sigset_t *oset);
+ERTS_GLB_INLINE void erts_thr_sigwait(const sigset_t *set, int *sig);
+#endif /* #ifdef ETHR_HAVE_ETHR_SIG_FUNCS */
+
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+
+ERTS_GLB_INLINE void
+erts_thr_init(erts_thr_init_data_t *id)
+{
+#ifdef USE_THREADS
+ int res = ethr_init(id);
+ if (res)
+ erts_thr_fatal_error(res, "initialize thread library");
+#endif
+}
+
+ERTS_GLB_INLINE void
+erts_thr_create(erts_tid_t *tid, void * (*func)(void *), void *arg,
+ erts_thr_opts_t *opts)
+{
+#ifdef USE_THREADS
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ int res = erts_lcnt_thr_create(tid, func, arg, opts);
+#else
+ int res = ethr_thr_create(tid, func, arg, opts);
+#endif
+ if (res)
+ erts_thr_fatal_error(res, "create thread");
+#endif
+}
+
+ERTS_GLB_INLINE void
+erts_thr_join(erts_tid_t tid, void **thr_res)
+{
+#ifdef USE_THREADS
+ int res = ethr_thr_join(tid, thr_res);
+ if (res)
+ erts_thr_fatal_error(res, "join thread");
+#endif
+}
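+
+#if 0
+/* Illustrative sketch only (hypothetical names): creating and joining a
+ * thread with the error checking wrappers; since every failure
+ * terminates the emulator, callers need no error handling of their own. */
+static void *example_thread_main(void *arg)
+{
+    return arg;
+}
+
+static void example_spawn_and_join(void)
+{
+    erts_thr_opts_t opts = ERTS_THR_OPTS_DEFAULT_INITER;
+    erts_tid_t tid;
+    void *res;
+    erts_thr_create(&tid, example_thread_main, NULL, &opts);
+    erts_thr_join(tid, &res);
+}
+#endif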
+
+
+ERTS_GLB_INLINE void
+erts_thr_detach(erts_tid_t tid)
+{
+#ifdef USE_THREADS
+ int res = ethr_thr_detach(tid);
+ if (res)
+ erts_thr_fatal_error(res, "detach thread");
+#endif
+}
+
+
+ERTS_GLB_INLINE void
+erts_thr_exit(void *res)
+{
+#ifdef USE_THREADS
+ ethr_thr_exit(res);
+ erts_thr_fatal_error(0, "terminate thread");
+#endif
+}
+
+ERTS_GLB_INLINE void
+erts_thr_install_exit_handler(void (*exit_handler)(void))
+{
+#ifdef USE_THREADS
+ int res = ethr_install_exit_handler(exit_handler);
+ if (res != 0)
+ erts_thr_fatal_error(res, "install thread exit handler");
+#endif
+}
+
+ERTS_GLB_INLINE erts_tid_t
+erts_thr_self(void)
+{
+#ifdef USE_THREADS
+ return ethr_self();
+#else
+ return 0;
+#endif
+}
+
+
+ERTS_GLB_INLINE int
+erts_equal_tids(erts_tid_t x, erts_tid_t y)
+{
+#ifdef USE_THREADS
+ return ethr_equal_tids(x, y);
+#else
+ return 1;
+#endif
+}
+
+
+#ifdef ERTS_HAVE_REC_MTX_INIT
+ERTS_GLB_INLINE void
+erts_rec_mtx_init(erts_mtx_t *mtx)
+{
+#ifdef USE_THREADS
+ int res = ethr_rec_mutex_init(&mtx->mtx);
+ if (res)
+ erts_thr_fatal_error(res, "initialize recursive mutex");
+#endif
+}
+#endif
+
+
+ERTS_GLB_INLINE void
+erts_mtx_init_x(erts_mtx_t *mtx, char *name, Eterm extra)
+{
+#ifdef USE_THREADS
+ int res = ethr_mutex_init(&mtx->mtx);
+ if (res)
+ erts_thr_fatal_error(res, "initialize mutex");
+#ifdef ERTS_ENABLE_LOCK_CHECK
+ erts_lc_init_lock_x(&mtx->lc, name, ERTS_LC_FLG_LT_MUTEX, extra);
+#endif
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ erts_lcnt_init_lock_x(&mtx->lcnt, name, ERTS_LCNT_LT_MUTEX, extra);
+#endif
+#endif
+}
+
+ERTS_GLB_INLINE void
+erts_mtx_init_x_opt(erts_mtx_t *mtx, char *name, Eterm extra, Uint16 opt)
+{
+#ifdef USE_THREADS
+ int res = ethr_mutex_init(&mtx->mtx);
+ if (res)
+ erts_thr_fatal_error(res, "initialize mutex");
+#ifdef ERTS_ENABLE_LOCK_CHECK
+ erts_lc_init_lock_x(&mtx->lc, name, ERTS_LC_FLG_LT_MUTEX, extra);
+#endif
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ erts_lcnt_init_lock_x(&mtx->lcnt, name, ERTS_LCNT_LT_MUTEX | opt, extra);
+#endif
+#endif
+}
+
+
+ERTS_GLB_INLINE void
+erts_mtx_init_locked_x(erts_mtx_t *mtx, char *name, Eterm extra)
+{
+#ifdef USE_THREADS
+ int res = ethr_mutex_init(&mtx->mtx);
+ if (res)
+ erts_thr_fatal_error(res, "initialize mutex");
+#ifdef ERTS_ENABLE_LOCK_CHECK
+ erts_lc_init_lock_x(&mtx->lc, name, ERTS_LC_FLG_LT_MUTEX, extra);
+#endif
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ erts_lcnt_init_lock_x(&mtx->lcnt, name, ERTS_LCNT_LT_MUTEX, extra);
+#endif
+ res = ethr_mutex_lock(&mtx->mtx);
+ if (res)
+ erts_thr_fatal_error(res, "lock mutex");
+#ifdef ERTS_ENABLE_LOCK_CHECK
+ erts_lc_trylock(1, &mtx->lc);
+#endif
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ erts_lcnt_trylock(&mtx->lcnt, 1);
+#endif
+#endif
+}
+
+ERTS_GLB_INLINE void
+erts_mtx_init(erts_mtx_t *mtx, char *name)
+{
+#ifdef USE_THREADS
+ int res = ethr_mutex_init(&mtx->mtx);
+ if (res)
+ erts_thr_fatal_error(res, "initialize mutex");
+#ifdef ERTS_ENABLE_LOCK_CHECK
+ erts_lc_init_lock(&mtx->lc, name, ERTS_LC_FLG_LT_MUTEX);
+#endif
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ erts_lcnt_init_lock(&mtx->lcnt, name, ERTS_LCNT_LT_MUTEX);
+#endif
+#endif
+}
+
+ERTS_GLB_INLINE void
+erts_mtx_init_locked(erts_mtx_t *mtx, char *name)
+{
+#ifdef USE_THREADS
+ int res = ethr_mutex_init(&mtx->mtx);
+ if (res)
+ erts_thr_fatal_error(res, "initialize mutex");
+#ifdef ERTS_ENABLE_LOCK_CHECK
+ erts_lc_init_lock(&mtx->lc, name, ERTS_LC_FLG_LT_MUTEX);
+#endif
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ erts_lcnt_init_lock(&mtx->lcnt, name, ERTS_LCNT_LT_MUTEX);
+#endif
+ res = ethr_mutex_lock(&mtx->mtx);
+ if (res)
+ erts_thr_fatal_error(res, "lock mutex");
+#ifdef ERTS_ENABLE_LOCK_CHECK
+ erts_lc_trylock(1, &mtx->lc);
+#endif
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ erts_lcnt_trylock(&mtx->lcnt, 1);
+#endif
+#endif
+}
+
+ERTS_GLB_INLINE void
+erts_mtx_destroy(erts_mtx_t *mtx)
+{
+#ifdef USE_THREADS
+ int res;
+#ifdef ERTS_ENABLE_LOCK_CHECK
+ erts_lc_destroy_lock(&mtx->lc);
+#endif
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ erts_lcnt_destroy_lock(&mtx->lcnt);
+#endif
+ res = ethr_mutex_destroy(&mtx->mtx);
+ if (res)
+ erts_thr_fatal_error(res, "destroy mutex");
+#endif
+}
+
+ERTS_GLB_INLINE void
+erts_mtx_set_forksafe(erts_mtx_t *mtx)
+{
+#ifdef USE_THREADS
+ int res = ethr_mutex_set_forksafe(&mtx->mtx);
+ if (res != 0 && res != ENOTSUP)
+ erts_thr_fatal_error(res, "set mutex forksafe");
+#endif
+}
+
+ERTS_GLB_INLINE void
+erts_mtx_unset_forksafe(erts_mtx_t *mtx)
+{
+#ifdef USE_THREADS
+ int res = ethr_mutex_unset_forksafe(&mtx->mtx);
+ if (res != 0 && res != ENOTSUP)
+ erts_thr_fatal_error(res, "unset mutex forksafe");
+#endif
+}
+
+ERTS_GLB_INLINE int
+erts_mtx_trylock(erts_mtx_t *mtx)
+{
+#ifdef USE_THREADS
+ int res;
+
+#ifdef ERTS_ENABLE_LOCK_CHECK
+ if (erts_lc_trylock_force_busy(&mtx->lc))
+ return EBUSY; /* Make sure caller can handle the situation without
+ causing a lock order violation */
+#endif
+
+ res = ethr_mutex_trylock(&mtx->mtx);
+
+#ifdef ERTS_ENABLE_LOCK_CHECK
+ erts_lc_trylock(res == 0, &mtx->lc);
+#endif
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ erts_lcnt_trylock(&mtx->lcnt, res);
+#endif
+
+ if (res != 0 && res != EBUSY)
+ erts_thr_fatal_error(res, "try lock mutex");
+
+ return res;
+#else
+ return 0;
+#endif
+
+}
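+
+#if 0
+/* Illustrative sketch only (hypothetical names): erts_mtx_trylock()
+ * returns 0 on success and EBUSY when the lock is busy (or when taking
+ * it here could hide a lock order violation), so EBUSY must be handled
+ * by the caller rather than treated as fatal. */
+static int example_try_bump(erts_mtx_t *mtx, int *counter)
+{
+    if (erts_mtx_trylock(mtx) == EBUSY)
+	return 0; /* busy; caller retries later */
+    (*counter)++;
+    erts_mtx_unlock(mtx);
+    return 1;
+}
+#endif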
+
+ERTS_GLB_INLINE void
+#ifdef ERTS_ENABLE_LOCK_COUNT
+erts_mtx_lock_x(erts_mtx_t *mtx, char *file, unsigned int line)
+#else
+erts_mtx_lock(erts_mtx_t *mtx)
+#endif
+{
+#ifdef USE_THREADS
+ int res;
+#ifdef ERTS_ENABLE_LOCK_CHECK
+ erts_lc_lock(&mtx->lc);
+#endif
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ erts_lcnt_lock(&mtx->lcnt);
+#endif
+ res = ethr_mutex_lock(&mtx->mtx);
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ erts_lcnt_lock_post_x(&mtx->lcnt, file, line);
+#endif
+ if (res)
+ erts_thr_fatal_error(res, "lock mutex");
+#endif
+}
+
+ERTS_GLB_INLINE void
+erts_mtx_unlock(erts_mtx_t *mtx)
+{
+#ifdef USE_THREADS
+ int res;
+#ifdef ERTS_ENABLE_LOCK_CHECK
+ erts_lc_unlock(&mtx->lc);
+#endif
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ erts_lcnt_unlock(&mtx->lcnt);
+#endif
+ res = ethr_mutex_unlock(&mtx->mtx);
+ if (res)
+ erts_thr_fatal_error(res, "unlock mutex");
+#endif
+}
+
+ERTS_GLB_INLINE int
+erts_lc_mtx_is_locked(erts_mtx_t *mtx)
+{
+#if defined(USE_THREADS) && defined(ERTS_ENABLE_LOCK_CHECK)
+ int res;
+ erts_lc_lock_t lc = mtx->lc;
+ lc.flags = 0;
+ erts_lc_have_locks(&res, &lc, 1);
+ return res;
+#else
+ return 0;
+#endif
+}
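+
+#if 0
+/* Illustrative sketch only (hypothetical names): typical use of the
+ * error checking mutex wrappers around a shared counter. The name given
+ * to erts_mtx_init() is what shows up in lock checking/counting. */
+static erts_mtx_t example_mtx;
+static int example_counter;
+
+static void example_init(void)
+{
+    erts_mtx_init(&example_mtx, "example_mtx");
+}
+
+static void example_bump(void)
+{
+    erts_mtx_lock(&example_mtx);
+    example_counter++;
+    erts_mtx_unlock(&example_mtx);
+}
+#endif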
+
+ERTS_GLB_INLINE void
+erts_cnd_init(erts_cnd_t *cnd)
+{
+#ifdef USE_THREADS
+ int res = ethr_cond_init(cnd);
+ if (res)
+ erts_thr_fatal_error(res, "initialize condition variable");
+#endif
+}
+
+ERTS_GLB_INLINE void
+erts_cnd_destroy(erts_cnd_t *cnd)
+{
+#ifdef USE_THREADS
+ int res = ethr_cond_destroy(cnd);
+ if (res)
+ erts_thr_fatal_error(res, "destroy condition variable");
+#endif
+}
+
+ERTS_GLB_INLINE void
+erts_cnd_wait(erts_cnd_t *cnd, erts_mtx_t *mtx)
+{
+#ifdef USE_THREADS
+ int res;
+#ifdef ERTS_ENABLE_LOCK_CHECK
+ erts_lc_unlock(&mtx->lc);
+#endif
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ erts_lcnt_unlock(&mtx->lcnt);
+#endif
+ res = ethr_cond_wait(cnd, &mtx->mtx);
+#ifdef ERTS_ENABLE_LOCK_CHECK
+ erts_lc_lock(&mtx->lc);
+#endif
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ erts_lcnt_lock(&mtx->lcnt);
+#endif
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ erts_lcnt_lock_post(&mtx->lcnt);
+#endif
+ if (res != 0 && res != EINTR)
+ erts_thr_fatal_error(res, "wait on condition variable");
+#endif
+}
+
+ERTS_GLB_INLINE void
+erts_cnd_signal(erts_cnd_t *cnd)
+{
+#ifdef USE_THREADS
+ int res = ethr_cond_signal(cnd);
+ if (res)
+ erts_thr_fatal_error(res, "signal on condition variable");
+#endif
+}
+
+
+ERTS_GLB_INLINE void
+erts_cnd_broadcast(erts_cnd_t *cnd)
+{
+#ifdef USE_THREADS
+ int res = ethr_cond_broadcast(cnd);
+ if (res)
+ erts_thr_fatal_error(res, "broadcast on condition variable");
+#endif
+}
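+
+#if 0
+/* Illustrative sketch only (hypothetical names): erts_cnd_wait() can
+ * return without the predicate being true (e.g. after EINTR), so the
+ * condition must be rechecked in a loop with the mutex held. */
+static erts_mtx_t example_cnd_mtx;
+static erts_cnd_t example_cnd;
+static int example_ready;
+
+static void example_wait_until_ready(void)
+{
+    erts_mtx_lock(&example_cnd_mtx);
+    while (!example_ready)
+	erts_cnd_wait(&example_cnd, &example_cnd_mtx);
+    erts_mtx_unlock(&example_cnd_mtx);
+}
+
+static void example_mark_ready(void)
+{
+    erts_mtx_lock(&example_cnd_mtx);
+    example_ready = 1;
+    erts_cnd_signal(&example_cnd);
+    erts_mtx_unlock(&example_cnd_mtx);
+}
+#endif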
+
+/* rwmutex */
+
+ERTS_GLB_INLINE void
+erts_rwmtx_init_x(erts_rwmtx_t *rwmtx, char *name, Eterm extra)
+{
+#ifdef USE_THREADS
+ int res = ethr_rwmutex_init(&rwmtx->rwmtx);
+ if (res != 0)
+ erts_thr_fatal_error(res, "initialize rwmutex");
+#ifdef ERTS_ENABLE_LOCK_CHECK
+ erts_lc_init_lock_x(&rwmtx->lc, name, ERTS_LC_FLG_LT_RWMUTEX, extra);
+#endif
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ erts_lcnt_init_lock_x(&rwmtx->lcnt, name, ERTS_LCNT_LT_RWMUTEX, extra);
+#endif
+#endif
+}
+
+ERTS_GLB_INLINE void
+erts_rwmtx_init(erts_rwmtx_t *rwmtx, char *name)
+{
+#ifdef USE_THREADS
+ int res = ethr_rwmutex_init(&rwmtx->rwmtx);
+ if (res != 0)
+ erts_thr_fatal_error(res, "initialize rwmutex");
+#ifdef ERTS_ENABLE_LOCK_CHECK
+ erts_lc_init_lock(&rwmtx->lc, name, ERTS_LC_FLG_LT_RWMUTEX);
+#endif
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ erts_lcnt_init_lock(&rwmtx->lcnt, name, ERTS_LCNT_LT_RWMUTEX);
+#endif
+#endif
+}
+
+ERTS_GLB_INLINE void
+erts_rwmtx_destroy(erts_rwmtx_t *rwmtx)
+{
+#ifdef USE_THREADS
+ int res;
+#ifdef ERTS_ENABLE_LOCK_CHECK
+ erts_lc_destroy_lock(&rwmtx->lc);
+#endif
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ erts_lcnt_destroy_lock(&rwmtx->lcnt);
+#endif
+ res = ethr_rwmutex_destroy(&rwmtx->rwmtx);
+ if (res != 0)
+ erts_thr_fatal_error(res, "destroy rwmutex");
+#endif
+}
+
+ERTS_GLB_INLINE int
+erts_rwmtx_tryrlock(erts_rwmtx_t *rwmtx)
+{
+#ifdef USE_THREADS
+ int res;
+
+#ifdef ERTS_ENABLE_LOCK_CHECK
+ if (erts_lc_trylock_force_busy_flg(&rwmtx->lc, ERTS_LC_FLG_LO_READ))
+ return EBUSY; /* Make sure caller can handle the situation without
+ causing a lock order violation */
+#endif
+
+ res = ethr_rwmutex_tryrlock(&rwmtx->rwmtx);
+
+#ifdef ERTS_ENABLE_LOCK_CHECK
+ erts_lc_trylock_flg(res == 0, &rwmtx->lc, ERTS_LC_FLG_LO_READ);
+#endif
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ erts_lcnt_trylock_opt(&rwmtx->lcnt, res, ERTS_LCNT_LO_READ);
+#endif
+
+ if (res != 0 && res != EBUSY)
+ erts_thr_fatal_error(res, "try read lock rwmutex");
+
+ return res;
+#else
+ return 0;
+#endif
+}
+
+ERTS_GLB_INLINE void
+#ifdef ERTS_ENABLE_LOCK_COUNT
+erts_rwmtx_rlock_x(erts_rwmtx_t *rwmtx, char *file, unsigned int line)
+#else
+erts_rwmtx_rlock(erts_rwmtx_t *rwmtx)
+#endif
+{
+#ifdef USE_THREADS
+ int res;
+#ifdef ERTS_ENABLE_LOCK_CHECK
+ erts_lc_lock_flg(&rwmtx->lc, ERTS_LC_FLG_LO_READ);
+#endif
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ erts_lcnt_lock_opt(&rwmtx->lcnt, ERTS_LCNT_LO_READ);
+#endif
+ res = ethr_rwmutex_rlock(&rwmtx->rwmtx);
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ erts_lcnt_lock_post_x(&rwmtx->lcnt, file, line);
+#endif
+ if (res != 0)
+ erts_thr_fatal_error(res, "read lock rwmutex");
+#endif
+}
+
+ERTS_GLB_INLINE void
+erts_rwmtx_runlock(erts_rwmtx_t *rwmtx)
+{
+#ifdef USE_THREADS
+ int res;
+#ifdef ERTS_ENABLE_LOCK_CHECK
+ erts_lc_unlock_flg(&rwmtx->lc, ERTS_LC_FLG_LO_READ);
+#endif
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ erts_lcnt_unlock_opt(&rwmtx->lcnt, ERTS_LCNT_LO_READ);
+#endif
+ res = ethr_rwmutex_runlock(&rwmtx->rwmtx);
+ if (res != 0)
+ erts_thr_fatal_error(res, "read unlock rwmutex");
+#endif
+}
+
+
+ERTS_GLB_INLINE int
+erts_rwmtx_tryrwlock(erts_rwmtx_t *rwmtx)
+{
+#ifdef USE_THREADS
+ int res;
+
+#ifdef ERTS_ENABLE_LOCK_CHECK
+ if (erts_lc_trylock_force_busy_flg(&rwmtx->lc, ERTS_LC_FLG_LO_READ_WRITE))
+ return EBUSY; /* Make sure caller can handle the situation without
+ causing a lock order violation */
+#endif
+
+ res = ethr_rwmutex_tryrwlock(&rwmtx->rwmtx);
+
+#ifdef ERTS_ENABLE_LOCK_CHECK
+ erts_lc_trylock_flg(res == 0, &rwmtx->lc, ERTS_LC_FLG_LO_READ_WRITE);
+#endif
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ erts_lcnt_trylock_opt(&rwmtx->lcnt, res, ERTS_LCNT_LO_READ_WRITE);
+#endif
+
+ if (res != 0 && res != EBUSY)
+ erts_thr_fatal_error(res, "try write lock rwmutex");
+
+ return res;
+#else
+ return 0;
+#endif
+}
+
+ERTS_GLB_INLINE void
+#ifdef ERTS_ENABLE_LOCK_COUNT
+erts_rwmtx_rwlock_x(erts_rwmtx_t *rwmtx, char *file, unsigned int line)
+#else
+erts_rwmtx_rwlock(erts_rwmtx_t *rwmtx)
+#endif
+{
+#ifdef USE_THREADS
+ int res;
+#ifdef ERTS_ENABLE_LOCK_CHECK
+ erts_lc_lock_flg(&rwmtx->lc, ERTS_LC_FLG_LO_READ_WRITE);
+#endif
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ erts_lcnt_lock_opt(&rwmtx->lcnt, ERTS_LCNT_LO_READ_WRITE);
+#endif
+ res = ethr_rwmutex_rwlock(&rwmtx->rwmtx);
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ erts_lcnt_lock_post_x(&rwmtx->lcnt, file, line);
+#endif
+ if (res != 0)
+ erts_thr_fatal_error(res, "write lock rwmutex");
+#endif
+}
+
+ERTS_GLB_INLINE void
+erts_rwmtx_rwunlock(erts_rwmtx_t *rwmtx)
+{
+#ifdef USE_THREADS
+ int res;
+#ifdef ERTS_ENABLE_LOCK_CHECK
+ erts_lc_unlock_flg(&rwmtx->lc, ERTS_LC_FLG_LO_READ_WRITE);
+#endif
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ erts_lcnt_unlock_opt(&rwmtx->lcnt, ERTS_LCNT_LO_READ_WRITE);
+#endif
+ res = ethr_rwmutex_rwunlock(&rwmtx->rwmtx);
+ if (res != 0)
+ erts_thr_fatal_error(res, "write unlock rwmutex");
+#endif
+}
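+
+#if 0
+/* Illustrative sketch only (hypothetical names): readers share the read
+ * lock concurrently, while the write lock is exclusive. */
+static erts_rwmtx_t example_rwmtx;
+static int example_value;
+
+static int example_read(void)
+{
+    int v;
+    erts_rwmtx_rlock(&example_rwmtx);
+    v = example_value;
+    erts_rwmtx_runlock(&example_rwmtx);
+    return v;
+}
+
+static void example_write(int v)
+{
+    erts_rwmtx_rwlock(&example_rwmtx);
+    example_value = v;
+    erts_rwmtx_rwunlock(&example_rwmtx);
+}
+#endif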
+
+#if 0 /* The following rwmtx function names are
+ reserved for potential future use. */
+
+/* Try upgrade from r-locked state to rw-locked state */
+ERTS_GLB_INLINE int
+erts_rwmtx_trywlock(erts_rwmtx_t *rwmtx)
+{
+ return 0;
+}
+
+/* Upgrade from r-locked state to rw-locked state */
+ERTS_GLB_INLINE void
+erts_rwmtx_wlock(erts_rwmtx_t *rwmtx)
+{
+
+}
+
+/* Downgrade from rw-locked state to r-locked state */
+ERTS_GLB_INLINE void
+erts_rwmtx_wunlock(erts_rwmtx_t *rwmtx)
+{
+
+}
+
+#endif
+
+ERTS_GLB_INLINE int
+erts_lc_rwmtx_is_rlocked(erts_rwmtx_t *mtx)
+{
+#if defined(USE_THREADS) && defined(ERTS_ENABLE_LOCK_CHECK)
+ int res;
+ erts_lc_lock_t lc = mtx->lc;
+ lc.flags = ERTS_LC_FLG_LO_READ;
+ erts_lc_have_locks(&res, &lc, 1);
+ return res;
+#else
+ return 0;
+#endif
+}
+
+ERTS_GLB_INLINE int
+erts_lc_rwmtx_is_rwlocked(erts_rwmtx_t *mtx)
+{
+#if defined(USE_THREADS) && defined(ERTS_ENABLE_LOCK_CHECK)
+ int res;
+ erts_lc_lock_t lc = mtx->lc;
+ lc.flags = ERTS_LC_FLG_LO_READ|ERTS_LC_FLG_LO_WRITE;
+ erts_lc_have_locks(&res, &lc, 1);
+ return res;
+#else
+ return 0;
+#endif
+}
+
+ERTS_GLB_INLINE void
+erts_atomic_init(erts_atomic_t *var, long i)
+{
+#ifdef USE_THREADS
+ int res = ethr_atomic_init(var, i);
+ if (res)
+ erts_thr_fatal_error(res, "perform atomic init");
+#else
+ *var = i;
+#endif
+}
+
+ERTS_GLB_INLINE void
+erts_atomic_set(erts_atomic_t *var, long i)
+{
+#ifdef USE_THREADS
+ int res = ethr_atomic_set(var, i);
+ if (res)
+ erts_thr_fatal_error(res, "perform atomic set");
+#else
+ *var = i;
+#endif
+}
+
+ERTS_GLB_INLINE long
+erts_atomic_read(erts_atomic_t *var)
+{
+#ifdef USE_THREADS
+ long i;
+ int res = ethr_atomic_read(var, &i);
+ if (res)
+ erts_thr_fatal_error(res, "perform atomic read");
+ return i;
+#else
+ return *var;
+#endif
+}
+
+ERTS_GLB_INLINE long
+erts_atomic_inctest(erts_atomic_t *incp)
+{
+#ifdef USE_THREADS
+ long test;
+ int res = ethr_atomic_inctest(incp, &test);
+ if (res)
+ erts_thr_fatal_error(res, "perform atomic increment and test");
+ return test;
+#else
+ return ++(*incp);
+#endif
+}
+
+ERTS_GLB_INLINE long
+erts_atomic_dectest(erts_atomic_t *decp)
+{
+#ifdef USE_THREADS
+ long test;
+ int res = ethr_atomic_dectest(decp, &test);
+ if (res)
+ erts_thr_fatal_error(res, "perform atomic decrement and test");
+ return test;
+#else
+ return --(*decp);
+#endif
+}
+
+ERTS_GLB_INLINE void
+erts_atomic_inc(erts_atomic_t *incp)
+{
+#ifdef USE_THREADS
+ int res = ethr_atomic_inc(incp);
+ if (res)
+ erts_thr_fatal_error(res, "perform atomic increment");
+#else
+ ++(*incp);
+#endif
+}
+
+ERTS_GLB_INLINE void
+erts_atomic_dec(erts_atomic_t *decp)
+{
+#ifdef USE_THREADS
+ int res = ethr_atomic_dec(decp);
+ if (res)
+ erts_thr_fatal_error(res, "perform atomic decrement");
+#else
+ --(*decp);
+#endif
+}
+
+ERTS_GLB_INLINE long
+erts_atomic_addtest(erts_atomic_t *addp, long i)
+{
+#ifdef USE_THREADS
+ long test;
+ int res = ethr_atomic_addtest(addp, i, &test);
+ if (res)
+ erts_thr_fatal_error(res, "perform atomic addition and test");
+ return test;
+#else
+ return *addp += i;
+#endif
+}
+
+ERTS_GLB_INLINE void
+erts_atomic_add(erts_atomic_t *addp, long i)
+{
+#ifdef USE_THREADS
+ int res = ethr_atomic_add(addp, i);
+ if (res)
+ erts_thr_fatal_error(res, "perform atomic addition");
+#else
+ *addp += i;
+#endif
+}
+
+ERTS_GLB_INLINE long
+erts_atomic_xchg(erts_atomic_t *xchgp, long new)
+{
+ long old;
+#ifdef USE_THREADS
+ int res = ethr_atomic_xchg(xchgp, new, &old);
+ if (res)
+ erts_thr_fatal_error(res, "perform atomic exchange");
+#else
+ old = *xchgp;
+ *xchgp = new;
+#endif
+ return old;
+}
+
+ERTS_GLB_INLINE long
+erts_atomic_cmpxchg(erts_atomic_t *xchgp, long new, long expected)
+{
+#ifdef USE_THREADS
+ long old;
+ int res = ethr_atomic_cmpxchg(xchgp, new, expected, &old);
+    if (res)
+ erts_thr_fatal_error(res, "perform atomic exchange");
+ return old;
+#else
+ long old = *xchgp;
+ if (old == expected)
+ *xchgp = new;
+ return old;
+#endif
+}
+
+ERTS_GLB_INLINE long
+erts_atomic_bor(erts_atomic_t *var, long mask)
+{
+ long old;
+#ifdef USE_THREADS
+ int res = ethr_atomic_or_old(var, mask, &old);
+ if (res != 0)
+ erts_thr_fatal_error(res, "perform atomic bitwise or");
+#else
+ old = *var;
+ *var |= mask;
+#endif
+ return old;
+}
+
+ERTS_GLB_INLINE long
+erts_atomic_band(erts_atomic_t *var, long mask)
+{
+ long old;
+#ifdef USE_THREADS
+ int res = ethr_atomic_and_old(var, mask, &old);
+ if (res != 0)
+ erts_thr_fatal_error(res, "perform atomic bitwise and");
+#else
+ old = *var;
+ *var &= mask;
+#endif
+ return old;
+}
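+
+#if 0
+/* Illustrative sketch only (hypothetical names): a reference count built
+ * on the atomic wrappers; in the non-threaded build these degenerate to
+ * plain long operations on the same code path. */
+static erts_atomic_t example_refc;
+
+static void example_ref(void)
+{
+    erts_atomic_inc(&example_refc);
+}
+
+static int example_unref(void) /* nonzero when the last reference died */
+{
+    return erts_atomic_dectest(&example_refc) == 0;
+}
+#endif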
+
+/* spinlock */
+
+ERTS_GLB_INLINE void
+erts_spinlock_init_x(erts_spinlock_t *lock, char *name, Eterm extra)
+{
+#ifdef USE_THREADS
+ int res = ethr_spinlock_init(&lock->slck);
+ if (res)
+ erts_thr_fatal_error(res, "init spinlock");
+#ifdef ERTS_ENABLE_LOCK_CHECK
+ erts_lc_init_lock_x(&lock->lc, name, ERTS_LC_FLG_LT_SPINLOCK, extra);
+#endif
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ erts_lcnt_init_lock_x(&lock->lcnt, name, ERTS_LCNT_LT_SPINLOCK, extra);
+#endif
+#else
+ (void)lock;
+#endif
+}
+
+ERTS_GLB_INLINE void
+erts_spinlock_init(erts_spinlock_t *lock, char *name)
+{
+#ifdef USE_THREADS
+ int res = ethr_spinlock_init(&lock->slck);
+ if (res)
+ erts_thr_fatal_error(res, "init spinlock");
+#ifdef ERTS_ENABLE_LOCK_CHECK
+ erts_lc_init_lock(&lock->lc, name, ERTS_LC_FLG_LT_SPINLOCK);
+#endif
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ erts_lcnt_init_lock(&lock->lcnt, name, ERTS_LCNT_LT_SPINLOCK);
+#endif
+#else
+ (void)lock;
+#endif
+}
+
+ERTS_GLB_INLINE void
+erts_spinlock_destroy(erts_spinlock_t *lock)
+{
+#ifdef USE_THREADS
+ int res;
+#ifdef ERTS_ENABLE_LOCK_CHECK
+ erts_lc_destroy_lock(&lock->lc);
+#endif
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ erts_lcnt_destroy_lock(&lock->lcnt);
+#endif
+ res = ethr_spinlock_destroy(&lock->slck);
+ if (res)
+ erts_thr_fatal_error(res, "destroy spinlock");
+#else
+ (void)lock;
+#endif
+}
+
+ERTS_GLB_INLINE void
+erts_spin_unlock(erts_spinlock_t *lock)
+{
+#ifdef USE_THREADS
+ int res;
+#ifdef ERTS_ENABLE_LOCK_CHECK
+ erts_lc_unlock(&lock->lc);
+#endif
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ erts_lcnt_unlock(&lock->lcnt);
+#endif
+ res = ethr_spin_unlock(&lock->slck);
+ if (res)
+ erts_thr_fatal_error(res, "release spin lock");
+#else
+ (void)lock;
+#endif
+}
+
+ERTS_GLB_INLINE void
+#ifdef ERTS_ENABLE_LOCK_COUNT
+erts_spin_lock_x(erts_spinlock_t *lock, char *file, unsigned int line)
+#else
+erts_spin_lock(erts_spinlock_t *lock)
+#endif
+{
+#ifdef USE_THREADS
+ int res;
+#ifdef ERTS_ENABLE_LOCK_CHECK
+ erts_lc_lock(&lock->lc);
+#endif
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ erts_lcnt_lock(&lock->lcnt);
+#endif
+ res = ethr_spin_lock(&lock->slck);
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ erts_lcnt_lock_post_x(&lock->lcnt, file, line);
+#endif
+ if (res)
+ erts_thr_fatal_error(res, "take spin lock");
+#else
+ (void)lock;
+#endif
+}
+
+ERTS_GLB_INLINE int
+erts_lc_spinlock_is_locked(erts_spinlock_t *lock)
+{
+#if defined(USE_THREADS) && defined(ERTS_ENABLE_LOCK_CHECK)
+ int res;
+ erts_lc_lock_t lc = lock->lc;
+ lc.flags = 0;
+ erts_lc_have_locks(&res, &lc, 1);
+ return res;
+#else
+ return 0;
+#endif
+}
+
+/* rwspinlock */
+
+ERTS_GLB_INLINE void
+erts_rwlock_init_x(erts_rwlock_t *lock, char *name, Eterm extra)
+{
+#ifdef USE_THREADS
+ int res = ethr_rwlock_init(&lock->rwlck);
+ if (res)
+ erts_thr_fatal_error(res, "init rwlock");
+#ifdef ERTS_ENABLE_LOCK_CHECK
+ erts_lc_init_lock_x(&lock->lc, name, ERTS_LC_FLG_LT_RWSPINLOCK, extra);
+#endif
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ erts_lcnt_init_lock_x(&lock->lcnt, name, ERTS_LCNT_LT_RWSPINLOCK, extra);
+#endif
+#else
+ (void)lock;
+#endif
+}
+
+ERTS_GLB_INLINE void
+erts_rwlock_init(erts_rwlock_t *lock, char *name)
+{
+#ifdef USE_THREADS
+ int res = ethr_rwlock_init(&lock->rwlck);
+ if (res)
+ erts_thr_fatal_error(res, "init rwlock");
+#ifdef ERTS_ENABLE_LOCK_CHECK
+ erts_lc_init_lock(&lock->lc, name, ERTS_LC_FLG_LT_RWSPINLOCK);
+#endif
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ erts_lcnt_init_lock(&lock->lcnt, name, ERTS_LCNT_LT_RWSPINLOCK);
+#endif
+#else
+ (void)lock;
+#endif
+}
+
+ERTS_GLB_INLINE void
+erts_rwlock_destroy(erts_rwlock_t *lock)
+{
+#ifdef USE_THREADS
+ int res;
+#ifdef ERTS_ENABLE_LOCK_CHECK
+ erts_lc_destroy_lock(&lock->lc);
+#endif
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ erts_lcnt_destroy_lock(&lock->lcnt);
+#endif
+ res = ethr_rwlock_destroy(&lock->rwlck);
+ if (res)
+ erts_thr_fatal_error(res, "destroy rwlock");
+#else
+ (void)lock;
+#endif
+}
+
+ERTS_GLB_INLINE void
+erts_read_unlock(erts_rwlock_t *lock)
+{
+#ifdef USE_THREADS
+ int res;
+#ifdef ERTS_ENABLE_LOCK_CHECK
+ erts_lc_unlock_flg(&lock->lc, ERTS_LC_FLG_LO_READ);
+#endif
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ erts_lcnt_unlock_opt(&lock->lcnt, ERTS_LCNT_LO_READ);
+#endif
+ res = ethr_read_unlock(&lock->rwlck);
+ if (res)
+ erts_thr_fatal_error(res, "release read lock");
+#else
+ (void)lock;
+#endif
+}
+
+ERTS_GLB_INLINE void
+#ifdef ERTS_ENABLE_LOCK_COUNT
+erts_read_lock_x(erts_rwlock_t *lock, char *file, unsigned int line)
+#else
+erts_read_lock(erts_rwlock_t *lock)
+#endif
+{
+#ifdef USE_THREADS
+ int res;
+#ifdef ERTS_ENABLE_LOCK_CHECK
+ erts_lc_lock_flg(&lock->lc, ERTS_LC_FLG_LO_READ);
+#endif
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ erts_lcnt_lock_opt(&lock->lcnt, ERTS_LCNT_LO_READ);
+#endif
+ res = ethr_read_lock(&lock->rwlck);
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ erts_lcnt_lock_post_x(&lock->lcnt, file, line);
+#endif
+ if (res)
+ erts_thr_fatal_error(res, "take read lock");
+#else
+ (void)lock;
+#endif
+}
+
+ERTS_GLB_INLINE void
+erts_write_unlock(erts_rwlock_t *lock)
+{
+#ifdef USE_THREADS
+ int res;
+#ifdef ERTS_ENABLE_LOCK_CHECK
+ erts_lc_unlock_flg(&lock->lc, ERTS_LC_FLG_LO_READ_WRITE);
+#endif
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ erts_lcnt_unlock_opt(&lock->lcnt, ERTS_LCNT_LO_READ_WRITE);
+#endif
+ res = ethr_write_unlock(&lock->rwlck);
+ if (res)
+ erts_thr_fatal_error(res, "release write lock");
+#else
+ (void)lock;
+#endif
+}
+
+ERTS_GLB_INLINE void
+#ifdef ERTS_ENABLE_LOCK_COUNT
+erts_write_lock_x(erts_rwlock_t *lock, char *file, unsigned int line)
+#else
+erts_write_lock(erts_rwlock_t *lock)
+#endif
+{
+#ifdef USE_THREADS
+ int res;
+#ifdef ERTS_ENABLE_LOCK_CHECK
+ erts_lc_lock_flg(&lock->lc, ERTS_LC_FLG_LO_READ_WRITE);
+#endif
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ erts_lcnt_lock_opt(&lock->lcnt, ERTS_LCNT_LO_READ_WRITE);
+#endif
+ res = ethr_write_lock(&lock->rwlck);
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ erts_lcnt_lock_post_x(&lock->lcnt, file, line);
+#endif
+ if (res)
+ erts_thr_fatal_error(res, "take write lock");
+#else
+ (void)lock;
+#endif
+}
+
+ERTS_GLB_INLINE int
+erts_lc_rwlock_is_rlocked(erts_rwlock_t *lock)
+{
+#if defined(USE_THREADS) && defined(ERTS_ENABLE_LOCK_CHECK)
+ int res;
+ erts_lc_lock_t lc = lock->lc;
+ lc.flags = ERTS_LC_FLG_LO_READ;
+ erts_lc_have_locks(&res, &lc, 1);
+ return res;
+#else
+ return 0;
+#endif
+}
+
+ERTS_GLB_INLINE int
+erts_lc_rwlock_is_rwlocked(erts_rwlock_t *lock)
+{
+#if defined(USE_THREADS) && defined(ERTS_ENABLE_LOCK_CHECK)
+ int res;
+ erts_lc_lock_t lc = lock->lc;
+ lc.flags = ERTS_LC_FLG_LO_READ|ERTS_LC_FLG_LO_WRITE;
+ erts_lc_have_locks(&res, &lc, 1);
+ return res;
+#else
+ return 0;
+#endif
+}
+
+ERTS_GLB_INLINE void
+erts_thr_time_now(erts_thr_timeval_t *time)
+{
+#ifdef USE_THREADS
+ int res = ethr_time_now(time);
+ if (res)
+ erts_thr_fatal_error(res, "get current time");
+#endif
+}
+
+ERTS_GLB_INLINE void
+erts_tsd_key_create(erts_tsd_key_t *keyp)
+{
+#ifdef USE_THREADS
+ int res = ethr_tsd_key_create(keyp);
+ if (res)
+ erts_thr_fatal_error(res, "create thread specific data key");
+#endif
+}
+
+ERTS_GLB_INLINE void
+erts_tsd_key_delete(erts_tsd_key_t key)
+{
+#ifdef USE_THREADS
+ int res = ethr_tsd_key_delete(key);
+ if (res)
+ erts_thr_fatal_error(res, "delete thread specific data key");
+#endif
+}
+
+ERTS_GLB_INLINE void
+erts_tsd_set(erts_tsd_key_t key, void *value)
+{
+#ifdef USE_THREADS
+ int res = ethr_tsd_set(key, value);
+ if (res)
+ erts_thr_fatal_error(res, "set thread specific data");
+#endif
+}
+
+ERTS_GLB_INLINE void *
+erts_tsd_get(erts_tsd_key_t key)
+{
+#ifdef USE_THREADS
+ return ethr_tsd_get(key);
+#else
+ return NULL;
+#endif
+}
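+
+#if 0
+/* Illustrative sketch only (hypothetical names): thread specific data;
+ * erts_tsd_get() returns NULL until the calling thread has stored a
+ * value under the key. */
+static erts_tsd_key_t example_key;
+
+static void example_tsd(void)
+{
+    erts_tsd_key_create(&example_key);
+    erts_tsd_set(example_key, (void *) "per-thread value");
+    (void) erts_tsd_get(example_key); /* same pointer, this thread only */
+}
+#endif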
+
+ERTS_GLB_INLINE void
+erts_gate_init(erts_gate_t *gp)
+{
+#ifdef USE_THREADS
+ int res = ethr_gate_init((ethr_gate *) gp);
+ if (res != 0)
+ erts_thr_fatal_error(res, "initialize gate");
+#endif
+}
+
+ERTS_GLB_INLINE void
+erts_gate_destroy(erts_gate_t *gp)
+{
+#ifdef USE_THREADS
+ int res = ethr_gate_destroy((ethr_gate *) gp);
+ if (res != 0)
+ erts_thr_fatal_error(res, "destroy gate");
+#endif
+}
+
+ERTS_GLB_INLINE void
+erts_gate_close(erts_gate_t *gp)
+{
+#ifdef USE_THREADS
+ int res = ethr_gate_close((ethr_gate *) gp);
+ if (res != 0)
+ erts_thr_fatal_error(res, "close gate");
+#endif
+}
+
+ERTS_GLB_INLINE void
+erts_gate_let_through(erts_gate_t *gp, unsigned no)
+{
+#ifdef USE_THREADS
+ int res = ethr_gate_let_through((ethr_gate *) gp, no);
+ if (res != 0)
+ erts_thr_fatal_error(res, "let through gate");
+#endif
+}
+
+ERTS_GLB_INLINE void
+erts_gate_wait(erts_gate_t *gp)
+{
+#ifdef USE_THREADS
+ int res = ethr_gate_wait((ethr_gate *) gp);
+ if (res != 0)
+ erts_thr_fatal_error(res, "wait on gate");
+#endif
+}
+
+ERTS_GLB_INLINE void
+erts_gate_swait(erts_gate_t *gp, int spincount)
+{
+#ifdef USE_THREADS
+ int res = ethr_gate_swait((ethr_gate *) gp, spincount);
+ if (res != 0)
+ erts_thr_fatal_error(res, "swait on gate");
+#endif
+}
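+
+#if 0
+/* Illustrative sketch only (hypothetical names): a gate blocks waiting
+ * threads until some other thread lets a given number of them through. */
+static erts_gate_t example_gate;
+
+static void example_worker(void)
+{
+    erts_gate_wait(&example_gate); /* blocks while the gate is closed */
+}
+
+static void example_release_two(void)
+{
+    erts_gate_let_through(&example_gate, 2);
+}
+#endif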
+
+#ifdef ETHR_HAVE_ETHR_SIG_FUNCS
+
+ERTS_GLB_INLINE void
+erts_thr_sigmask(int how, const sigset_t *set, sigset_t *oset)
+{
+#ifdef USE_THREADS
+ int res = ethr_sigmask(how, set, oset);
+ if (res)
+ erts_thr_fatal_error(res, "get or set signal mask");
+#endif
+}
+
+ERTS_GLB_INLINE void
+erts_thr_sigwait(const sigset_t *set, int *sig)
+{
+#ifdef USE_THREADS
+ int res;
+ do {
+ res = ethr_sigwait(set, sig);
+ } while (res == EINTR);
+ if (res)
+ erts_thr_fatal_error(res, "to wait for signal");
+#endif
+}
+
+#endif /* #ifdef ETHR_HAVE_ETHR_SIG_FUNCS */
+
+#endif /* #if ERTS_GLB_INLINE_INCL_FUNC_DEF */
+
+#endif /* #ifndef ERL_THREAD_H__ */
diff --git a/erts/emulator/beam/erl_time.h b/erts/emulator/beam/erl_time.h
new file mode 100644
index 0000000000..6f6b971d34
--- /dev/null
+++ b/erts/emulator/beam/erl_time.h
@@ -0,0 +1,67 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2006-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+#ifndef ERL_TIME_H__
+#define ERL_TIME_H__
+
+/*
+** Timer entry:
+*/
+typedef struct erl_timer {
+ struct erl_timer* next; /* next entry in timer wheel slot or chain */
+ Uint slot; /* slot in timer wheel */
+ Uint count; /* number of loops remaining */
+ int active; /* 1=activated, 0=deactivated */
+ /* called on timeout */
+ void (*timeout)(void*);
+ /* called on cancel (may be NULL) */
+ void (*cancel)(void*);
+ void* arg; /* argument to timeout/cancel procs */
+} ErlTimer;
+
+typedef void (*ErlTimeoutProc)(void*);
+typedef void (*ErlCancelProc)(void*);
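+
+#if 0
+/* Illustrative sketch only: the shape of a callback matching
+ * ErlTimeoutProc above. How an ErlTimer is armed and cancelled is
+ * outside this header; the scheduling entry points live elsewhere in
+ * the emulator. */
+static void example_timeout(void *arg)
+{
+    /* arg is the ErlTimer's arg field, supplied when the timer was set */
+    (void) arg;
+}
+#endif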
+
+#ifdef ERTS_SMP
+
+/*
+ * Process and port timer
+ */
+typedef union ErtsSmpPTimer_ ErtsSmpPTimer;
+union ErtsSmpPTimer_ {
+ struct {
+ ErlTimer tm;
+ Eterm id;
+ void (*timeout_func)(void*);
+ ErtsSmpPTimer **timer_ref;
+ Uint32 flags;
+ } timer;
+ ErtsSmpPTimer *next;
+};
+
+
+void erts_create_smp_ptimer(ErtsSmpPTimer **timer_ref,
+ Eterm id,
+ ErlTimeoutProc timeout_func,
+ Uint timeout);
+void erts_cancel_smp_ptimer(ErtsSmpPTimer *ptimer);
+
+#endif
+
+#endif
diff --git a/erts/emulator/beam/erl_time_sup.c b/erts/emulator/beam/erl_time_sup.c
new file mode 100644
index 0000000000..76bfdecd9f
--- /dev/null
+++ b/erts/emulator/beam/erl_time_sup.c
@@ -0,0 +1,899 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 1999-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+** Support routines for the timer wheel.
+**
+** This code contains two strategies for dealing with
+** date/time changes in the system.
+** If the system has some kind of high resolution timer (HAVE_GETHRTIME),
+** the high resolution timer is used to correct the time-of-day and the
+** timeouts; the base source is the hrtimer, but at certain intervals the
+** OS time-of-day is checked, and if it is not within certain bounds, the
+** delivered time gets slowly adjusted on each call until
+** it corresponds to the system time (a built-in adjtime, so to speak).
+** The gethrtime() call is detected by autoconf on Unix, but other
+** platforms may define it in erl_*_sys.h and implement
+** their own high resolution timer. The high resolution timer
+** strategy is (probably) best on all systems where the timer has
+** a resolution higher than or equal to gettimeofday (or whatever
+** implements sys_gettimeofday()). The actual resolution is the
+** interesting thing, not the unit that's used (e.g. on VxWorks,
+** nanoseconds can be retrieved in terms of units, but the actual
+** resolution is the same as for the clock ticks).
+** If the system's best timer routine is kernel ticks returned from
+** sys_times(), and the actual resolution of sys_gettimeofday() is
+** better (like on most unixes without realtime extensions),
+** another strategy is used. The tolerant gettimeofday() corrects
+** the value with respect to uptime (the sys_times() return value) and
+** checks for correction both when delivering time ticks and when
+** delivering the current time. This strategy is slower, but accurate
+** on systems without better timer routines. The kernel tick resolution
+** is not enough to implement a gethrtime routine. On Linux and other
+** non-Solaris unix boxes the second strategy is used; on all other
+** platforms we use the first.
+**
+** The following is expected (from sys.[ch] and erl_*_sys.h):
+**
+** 64 bit integers. So it is, and so it will be.
+**
+** sys_init_time() will return the clock resolution in ms, and
+** that's about it; more could be added, of course.
+** If the clock rate is constant (i.e. 1 ms), one can define
+** SYS_CLOCK_RESOLUTION (to 1),
+** which makes erts_deliver_time/erts_time_remaining a bit faster.
+**
+** If HAVE_GETHRTIME is defined:
+** sys_gethrtime() will return a SysHrTime (long long) representing
+** nanoseconds, and sys_init_hrtime() will do any initialization.
+** Otherwise:
+** a long (64 bit) integer type called Sint64 should be defined.
+**
+** sys_times() will return clock ticks since start and
+** fill in a SysTimes structure (struct tms). Instead of CLK_TCK,
+** SYS_CLK_TCK is used to determine the resolution of kernel ticks.
+**
+** sys_gettimeofday() will take a SysTimeval (a struct timeval) as
+** parameter and fill it in as gettimeofday(X,NULL) would.
+*/
+
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+
+#include "sys.h"
+#include "erl_vm.h"
+#include "global.h"
+
+static erts_smp_mtx_t erts_timeofday_mtx;
+
+static SysTimeval inittv; /* Used everywhere, the initial time-of-day */
+
+static SysTimes t_start; /* Used in elapsed_time_both */
+static SysTimeval gtv; /* Used in wall_clock_elapsed_time_both */
+static SysTimeval then; /* Used in get_now */
+static SysTimeval last_emu_time; /* Used in erts_get_emu_time() */
+SysTimeval erts_first_emu_time; /* Used in erts_get_emu_time() */
+
+
+#ifdef HAVE_GETHRTIME
+
+int erts_disable_tolerant_timeofday;
+
+static SysHrTime hr_init_time, hr_last_correction_check,
+ hr_correction, hr_last_time;
+
+static void init_tolerant_timeofday(void)
+{
+ /* Should be in sys.c */
+#if defined(HAVE_SYSCONF) && defined(_SC_NPROCESSORS_CONF)
+ if (sysconf(_SC_NPROCESSORS_CONF) > 1) {
+ char b[1024];
+ int maj,min,build;
+ os_flavor(b,1024);
+ os_version(&maj,&min,&build);
+ if (!strcmp(b,"sunos") && maj <= 5 && min <= 7) {
+ erts_disable_tolerant_timeofday = 1;
+ }
+ }
+#endif
+ hr_init_time = sys_gethrtime();
+ hr_last_correction_check = hr_last_time = hr_init_time;
+ hr_correction = 0;
+}
+
+static void get_tolerant_timeofday(SysTimeval *tv)
+{
+ SysHrTime diff_time, curr;
+
+ if (erts_disable_tolerant_timeofday) {
+ sys_gettimeofday(tv);
+ return;
+ }
+ *tv = inittv;
+ diff_time = ((curr = sys_gethrtime()) + hr_correction - hr_init_time) / 1000;
+
+ if (curr < hr_init_time) {
+ erl_exit(1,"Unexpected behaviour from operating system high "
+ "resolution timer");
+ }
+
+ if ((curr - hr_last_correction_check) / 1000 > 1000000) {
+ /* Check the correction need */
+ SysHrTime tv_diff, diffdiff;
+ SysTimeval tmp;
+ int done = 0;
+
+ sys_gettimeofday(&tmp);
+ tv_diff = ((SysHrTime) tmp.tv_sec) * 1000000 + tmp.tv_usec;
+ tv_diff -= ((SysHrTime) inittv.tv_sec) * 1000000 + inittv.tv_usec;
+ diffdiff = diff_time - tv_diff;
+ if (diffdiff > 10000) {
+ SysHrTime corr = (curr - hr_last_time) / 100;
+ if (corr / 1000 >= diffdiff) {
+ ++done;
+ hr_correction -= ((SysHrTime)diffdiff) * 1000;
+ } else {
+ hr_correction -= corr;
+ }
+ diff_time = (curr + hr_correction - hr_init_time) / 1000;
+ } else if (diffdiff < -10000) {
+ SysHrTime corr = (curr - hr_last_time) / 100;
+ if (corr / 1000 >= -diffdiff) {
+ ++done;
+ hr_correction -= ((SysHrTime)diffdiff) * 1000;
+ } else {
+ hr_correction += corr;
+ }
+ diff_time = (curr + hr_correction - hr_init_time) / 1000;
+ } else {
+ ++done;
+ }
+ if (done) {
+ hr_last_correction_check = curr;
+ }
+ }
+ tv->tv_sec += (int) (diff_time / ((SysHrTime) 1000000));
+ tv->tv_usec += (int) (diff_time % ((SysHrTime) 1000000));
+ if (tv->tv_usec >= 1000000) {
+ tv->tv_usec -= 1000000;
+ tv->tv_sec += 1;
+ }
+ hr_last_time = curr;
+}
+
+#define correction (hr_correction/1000000)
+
+#else /* !HAVE_GETHRTIME */
+#if !defined(CORRECT_USING_TIMES)
+#define init_tolerant_timeofday()
+#define get_tolerant_timeofday(tvp) sys_gettimeofday(tvp)
+#else
+
+typedef Sint64 Milli;
+
+static clock_t init_ct;
+static Sint64 ct_wrap;
+static Milli init_tv_m;
+static Milli correction_supress;
+static Milli last_ct_diff;
+static Milli last_cc;
+static clock_t last_ct;
+
+/* sys_times() might need to be wrapped and the values shifted (right)
+   a bit to cope with newer Linux (2.5.*) kernels; this has to be taken
+   care of dynamically to start with. A special version that uses the
+   times() return value as a high resolution timer could be made to
+   fully utilize the faster ticks, like on Windows, but for now we'll
+   settle for this silly workaround. */
+#ifdef ERTS_WRAP_SYS_TIMES
+#define KERNEL_TICKS() (sys_times_wrap() & \
+ ((1UL << ((sizeof(clock_t) * 8) - 1)) - 1))
+#else
+SysTimes dummy_tms;
+
+#define KERNEL_TICKS() (sys_times(&dummy_tms) & \
+ ((1UL << ((sizeof(clock_t) * 8) - 1)) - 1))
+
+#endif
+
+static void init_tolerant_timeofday(void)
+{
+ last_ct = init_ct = KERNEL_TICKS();
+ last_cc = 0;
+ init_tv_m = (((Milli) inittv.tv_sec) * 1000) +
+ (inittv.tv_usec / 1000);
+ ct_wrap = 0;
+ correction_supress = 0;
+}
+
+
+static void get_tolerant_timeofday(SysTimeval *tvp)
+{
+ clock_t current_ct;
+ SysTimeval current_tv;
+ Milli ct_diff;
+ Milli tv_diff;
+ Milli current_correction;
+ Milli act_correction; /* long shown to be too small */
+ Milli max_adjust;
+
+ if (erts_disable_tolerant_timeofday) {
+ sys_gettimeofday(tvp);
+ return;
+ }
+
+#ifdef ERTS_WRAP_SYS_TIMES
+#define TICK_MS (1000 / SYS_CLK_TCK_WRAP)
+#else
+#define TICK_MS (1000 / SYS_CLK_TCK)
+#endif
+ current_ct = KERNEL_TICKS();
+ sys_gettimeofday(&current_tv);
+
+ /* I don't know if uptime can move some units backwards
+ on some systems, but I allow for small backward
+ jumps to avoid such problems if they exist... */
+ if (last_ct > 100 && current_ct < (last_ct - 100)) {
+ ct_wrap += ((Sint64) 1) << ((sizeof(clock_t) * 8) - 1);
+ }
+ last_ct = current_ct;
+ ct_diff = ((ct_wrap + current_ct) - init_ct) * TICK_MS;
+
+ /*
+ * We will adjust the time in milliseconds and we allow for 1%
+ * adjustments, but if this function is called more often than every 100
+ * milliseconds (which is obviously possible), we will never adjust, so
+ * we accumulate small times by setting last_ct_diff iff max_adjust > 0.
+ */
+ if ((max_adjust = (ct_diff - last_ct_diff)/100) > 0)
+ last_ct_diff = ct_diff;
+
+ tv_diff = ((((Milli) current_tv.tv_sec) * 1000) +
+ (current_tv.tv_usec / 1000)) - init_tv_m;
+
+ current_correction = ((ct_diff - tv_diff) / TICK_MS) * TICK_MS; /* trunc */
+
+ /*
+ * We allow the current_correction value to wobble a little, as it
+ * suffers from the low resolution of the kernel ticks.
+ * If it hasn't changed more than one tick in either direction,
+ * we keep the old value.
+ */
+ if ((last_cc > current_correction + TICK_MS) ||
+ (last_cc < current_correction - TICK_MS)) {
+ last_cc = current_correction;
+ } else {
+ current_correction = last_cc;
+ }
+
+ /*
+ * As time goes by, we try to get the actual correction to 0,
+ * that is, to make Erlang's time correspond to the system's.
+ * The actual correction is what we seem to need (current_correction)
+ * minus the correction suppression. The correction suppression
+ * will change slowly (max 1% of elapsed time), but in millisecond steps.
+ */
+ act_correction = current_correction - correction_supress;
+ if (max_adjust > 0) {
+ /*
+ * Here we slowly adjust Erlang's time to correspond with the
+ * system time by changing the correction_supress variable.
+ * It can change by max_adjust milliseconds, which is 1% of elapsed time.
+ */
+ if (act_correction > 0) {
+ if (current_correction - correction_supress > max_adjust) {
+ correction_supress += max_adjust;
+ } else {
+ correction_supress = current_correction;
+ }
+ act_correction = current_correction - correction_supress;
+ } else if (act_correction < 0) {
+ if (correction_supress - current_correction > max_adjust) {
+ correction_supress -= max_adjust;
+ } else {
+ correction_supress = current_correction;
+ }
+ act_correction = current_correction - correction_supress;
+ }
+ }
+ /*
+ * The actual correction will correct the timeval so that system
+ * time warps get smoothed out.
+ */
+ current_tv.tv_sec += act_correction / 1000;
+ current_tv.tv_usec += (act_correction % 1000) * 1000;
+
+ if (current_tv.tv_usec >= 1000000) {
+ ++current_tv.tv_sec ;
+ current_tv.tv_usec -= 1000000;
+ } else if (current_tv.tv_usec < 0) {
+ --current_tv.tv_sec;
+ current_tv.tv_usec += 1000000;
+ }
+ *tvp = current_tv;
+#undef TICK_MS
+}
+
+#endif /* CORRECT_USING_TIMES */
+#endif /* !HAVE_GETHRTIME */
+
+/*
+** Why this? Well, most platforms have a constant clock resolution of 1,
+** we dont want the deliver_time/time_remaining routines to waste
+** time dividing and multiplying by/with a variable that's always one.
+** so the return value of sys_init_time is ignored on those platforms.
+*/
+
+#ifndef SYS_CLOCK_RESOLUTION
+static int clock_resolution;
+#define CLOCK_RESOLUTION clock_resolution
+#else
+#define CLOCK_RESOLUTION SYS_CLOCK_RESOLUTION
+#endif
+
+/*
+** The clock resolution should really be the resolution of the
+** time function in use, which on most platforms
+** is 1. On VxWorks the resolution should be
+** the number of ticks per second (or 1, which would work nicely too).
+**
+** Setting lower resolutions is mostly interesting when timers are used
+** instead of something like select.
+*/
+
+#if defined(ERTS_TIMER_THREAD)
+static ERTS_INLINE void init_erts_deliver_time(const SysTimeval *inittv) { }
+static ERTS_INLINE void do_erts_deliver_time(const SysTimeval *current) { }
+#else
+static SysTimeval last_delivered;
+
+static void init_erts_deliver_time(const SysTimeval *inittv)
+{
+ /* We set the initial values for deliver_time here */
+ last_delivered = *inittv;
+ last_delivered.tv_usec = 1000 * (last_delivered.tv_usec / 1000);
+ /* ms resolution */
+}
+
+static void do_erts_deliver_time(const SysTimeval *current)
+{
+ SysTimeval cur_time;
+ long elapsed;
+
+ /* calculate and deliver appropriate number of ticks */
+ cur_time = *current;
+ cur_time.tv_usec = 1000 * (cur_time.tv_usec / 1000); /* ms resolution */
+ elapsed = (1000 * (cur_time.tv_sec - last_delivered.tv_sec) +
+ (cur_time.tv_usec - last_delivered.tv_usec) / 1000) /
+ CLOCK_RESOLUTION;
+
+    /* Sometimes the time jumps backwards,
+       resulting in a negative elapsed time. We compensate for
+       this by simply pretending that time stood still. :) */
+
+ if (elapsed > 0) {
+ do_time_add(elapsed);
+ last_delivered = cur_time;
+ }
+}
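+
+/*
+ * Example of the computation in do_erts_deliver_time(), assuming
+ * CLOCK_RESOLUTION == 1 (millisecond ticks): with last_delivered ==
+ * 10.000000 s and a current time of 10.023999 s, the microseconds are
+ * first truncated to 10.023000 s, so 23 ticks are delivered and
+ * last_delivered advances to 10.023000 s; the truncated 999 us are
+ * accounted for at the next delivery.
+ */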
+#endif
+
+int
+erts_init_time_sup(void)
+{
+ erts_smp_mtx_init(&erts_timeofday_mtx, "timeofday");
+
+ last_emu_time.tv_sec = 0;
+ last_emu_time.tv_usec = 0;
+
+#ifndef SYS_CLOCK_RESOLUTION
+ clock_resolution = sys_init_time();
+#else
+ (void) sys_init_time();
+#endif
+ sys_gettimeofday(&inittv);
+
+#ifdef HAVE_GETHRTIME
+ sys_init_hrtime();
+#endif
+ init_tolerant_timeofday();
+
+ init_erts_deliver_time(&inittv);
+ gtv = inittv;
+ then.tv_sec = then.tv_usec = 0;
+
+ erts_get_emu_time(&erts_first_emu_time);
+
+ return CLOCK_RESOLUTION;
+}
+/* info functions */
+
+void
+elapsed_time_both(unsigned long *ms_user, unsigned long *ms_sys,
+ unsigned long *ms_user_diff, unsigned long *ms_sys_diff)
+{
+ unsigned long prev_total_user, prev_total_sys;
+ unsigned long total_user, total_sys;
+ SysTimes now;
+
+ sys_times(&now);
+ total_user = (now.tms_utime * 1000) / SYS_CLK_TCK;
+ total_sys = (now.tms_stime * 1000) / SYS_CLK_TCK;
+
+ if (ms_user != NULL)
+ *ms_user = total_user;
+ if (ms_sys != NULL)
+ *ms_sys = total_sys;
+
+ erts_smp_mtx_lock(&erts_timeofday_mtx);
+
+ prev_total_user = (t_start.tms_utime * 1000) / SYS_CLK_TCK;
+ prev_total_sys = (t_start.tms_stime * 1000) / SYS_CLK_TCK;
+ t_start = now;
+
+ erts_smp_mtx_unlock(&erts_timeofday_mtx);
+
+ if (ms_user_diff != NULL)
+ *ms_user_diff = total_user - prev_total_user;
+
+ if (ms_sys_diff != NULL)
+ *ms_sys_diff = total_sys - prev_total_sys;
+}
+
+
+/* wall clock routines */
+
+void
+wall_clock_elapsed_time_both(unsigned long *ms_total, unsigned long *ms_diff)
+{
+ unsigned long prev_total;
+ SysTimeval tv;
+
+ erts_smp_mtx_lock(&erts_timeofday_mtx);
+
+ get_tolerant_timeofday(&tv);
+
+ *ms_total = 1000 * (tv.tv_sec - inittv.tv_sec) +
+ (tv.tv_usec - inittv.tv_usec) / 1000;
+
+ prev_total = 1000 * (gtv.tv_sec - inittv.tv_sec) +
+ (gtv.tv_usec - inittv.tv_usec) / 1000;
+ *ms_diff = *ms_total - prev_total;
+ gtv = tv;
+
+ /* must sync the machine's idea of time here */
+ do_erts_deliver_time(&tv);
+
+ erts_smp_mtx_unlock(&erts_timeofday_mtx);
+}
+
+/* get current time */
+void
+get_time(int *hour, int *minute, int *second)
+{
+ time_t the_clock;
+ struct tm *tm;
+#ifdef HAVE_LOCALTIME_R
+ struct tm tmbuf;
+#endif
+
+ the_clock = time((time_t *)0);
+#ifdef HAVE_LOCALTIME_R
+ localtime_r(&the_clock, (tm = &tmbuf));
+#else
+ tm = localtime(&the_clock);
+#endif
+ *hour = tm->tm_hour;
+ *minute = tm->tm_min;
+ *second = tm->tm_sec;
+}
+
+/* get current date */
+void
+get_date(int *year, int *month, int *day)
+{
+ time_t the_clock;
+ struct tm *tm;
+#ifdef HAVE_LOCALTIME_R
+ struct tm tmbuf;
+#endif
+
+
+ the_clock = time((time_t *)0);
+#ifdef HAVE_LOCALTIME_R
+ localtime_r(&the_clock, (tm = &tmbuf));
+#else
+ tm = localtime(&the_clock);
+#endif
+ *year = tm->tm_year + 1900;
+ *month = tm->tm_mon +1;
+ *day = tm->tm_mday;
+}
+
+/* get localtime */
+void
+get_localtime(int *year, int *month, int *day,
+ int *hour, int *minute, int *second)
+{
+ time_t the_clock;
+ struct tm *tm;
+#ifdef HAVE_LOCALTIME_R
+ struct tm tmbuf;
+#endif
+
+ the_clock = time((time_t *)0);
+#ifdef HAVE_LOCALTIME_R
+ localtime_r(&the_clock, (tm = &tmbuf));
+#else
+ tm = localtime(&the_clock);
+#endif
+ *year = tm->tm_year + 1900;
+ *month = tm->tm_mon +1;
+ *day = tm->tm_mday;
+ *hour = tm->tm_hour;
+ *minute = tm->tm_min;
+ *second = tm->tm_sec;
+}
+
+
+/* get universaltime */
+void
+get_universaltime(int *year, int *month, int *day,
+ int *hour, int *minute, int *second)
+{
+ time_t the_clock;
+ struct tm *tm;
+#ifdef HAVE_GMTIME_R
+ struct tm tmbuf;
+#endif
+
+ the_clock = time((time_t *)0);
+#ifdef HAVE_GMTIME_R
+ gmtime_r(&the_clock, (tm = &tmbuf));
+#else
+ tm = gmtime(&the_clock);
+#endif
+ *year = tm->tm_year + 1900;
+ *month = tm->tm_mon +1;
+ *day = tm->tm_mday;
+ *hour = tm->tm_hour;
+ *minute = tm->tm_min;
+ *second = tm->tm_sec;
+}
+
+
+/* number of days in month m, for m = 1, 2, ..., 12 */
+static const int mdays[14] = {0, 31, 28, 31, 30, 31, 30,
+ 31, 31, 30, 31, 30, 31};
+
+#define IN_RANGE(a,x,b) (((a) <= (x)) && ((x) <= (b)))
+#define is_leap_year(y) (((((y) % 4) == 0) && \
+ (((y) % 100) != 0)) || \
+ (((y) % 400) == 0))
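+
+/* For example, is_leap_year(2000) and is_leap_year(1996) hold, while
+   is_leap_year(1900) does not (divisible by 100 but not by 400). */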
+
+#define BASEYEAR 1970
+
+/*
+ * gregday
+ *
+ * Returns the number of days since Jan 1, 1970 (computed as the
+ * Gregorian day number relative to Jan 1, 1600, minus the epoch
+ * offset), for a year greater than or equal to 1600 and a month
+ * [1-12] and day [1-31] within range. The callers validate the
+ * ranges before calling.
+ */
+static long gregday(int year, int month, int day)
+{
+    long ndays = 0;
+ int gyear, pyear, m;
+
+ /* number of days in previous years */
+ gyear = year - 1600;
+ if (gyear > 0) {
+ pyear = gyear - 1;
+ ndays = (pyear/4) - (pyear/100) + (pyear/400) + pyear*365 + 366;
+ }
+    /* number of days in all months preceding month */
+ for (m = 1; m < month; m++)
+ ndays += mdays[m];
+ /* Extra day if leap year and March or later */
+ if (is_leap_year(year) && (month > 2))
+ ndays++;
+ ndays += day - 1;
+ return ndays - 135140; /* 135140 = Jan 1, 1970 */
+}
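+
+/*
+ * Sanity check of the epoch constant above: for gregday(1970, 1, 1),
+ * gyear == 370 and pyear == 369, so
+ *     ndays = 369/4 - 369/100 + 369/400 + 369*365 + 366
+ *           = 92 - 3 + 0 + 134685 + 366 = 135140,
+ * and the function returns 135140 - 135140 == 0, i.e. the Unix epoch.
+ */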
+
+
+
+int
+local_to_univ(Sint *year, Sint *month, Sint *day,
+ Sint *hour, Sint *minute, Sint *second, int isdst)
+{
+ time_t the_clock;
+ struct tm *tm, t;
+#ifdef HAVE_GMTIME_R
+ struct tm tmbuf;
+#endif
+
+ if (!(IN_RANGE(BASEYEAR, *year, INT_MAX - 1) &&
+ IN_RANGE(1, *month, 12) &&
+ IN_RANGE(1, *day, (mdays[*month] +
+ (*month == 2
+ && (*year % 4 == 0)
+ && (*year % 100 != 0 || *year % 400 == 0)))) &&
+ IN_RANGE(0, *hour, 23) &&
+ IN_RANGE(0, *minute, 59) &&
+ IN_RANGE(0, *second, 59))) {
+ return 0;
+ }
+
+ t.tm_year = *year - 1900;
+ t.tm_mon = *month - 1;
+ t.tm_mday = *day;
+ t.tm_hour = *hour;
+ t.tm_min = *minute;
+ t.tm_sec = *second;
+ t.tm_isdst = isdst;
+ the_clock = mktime(&t);
+#ifdef HAVE_GMTIME_R
+ gmtime_r(&the_clock, (tm = &tmbuf));
+#else
+ tm = gmtime(&the_clock);
+#endif
+ *year = tm->tm_year + 1900;
+ *month = tm->tm_mon +1;
+ *day = tm->tm_mday;
+ *hour = tm->tm_hour;
+ *minute = tm->tm_min;
+ *second = tm->tm_sec;
+ return 1;
+}
+
+int
+univ_to_local(Sint *year, Sint *month, Sint *day,
+ Sint *hour, Sint *minute, Sint *second)
+{
+ time_t the_clock;
+ struct tm *tm;
+#ifdef HAVE_LOCALTIME_R
+ struct tm tmbuf;
+#endif
+
+ if (!(IN_RANGE(BASEYEAR, *year, INT_MAX - 1) &&
+ IN_RANGE(1, *month, 12) &&
+ IN_RANGE(1, *day, (mdays[*month] +
+ (*month == 2
+ && (*year % 4 == 0)
+ && (*year % 100 != 0 || *year % 400 == 0)))) &&
+ IN_RANGE(0, *hour, 23) &&
+ IN_RANGE(0, *minute, 59) &&
+ IN_RANGE(0, *second, 59))) {
+ return 0;
+ }
+
+ the_clock = *second + 60 * (*minute + 60 * (*hour + 24 *
+ gregday(*year, *month, *day)));
+#ifdef HAVE_POSIX2TIME
+ /*
+ * Addition from OpenSource - affects FreeBSD.
+ * No valid test case /PaN
+ *
+ * leap-second correction performed
+ * if system is configured so;
+ * do nothing if not
+ * See FreeBSD 6.x and 7.x
+ * /usr/src/lib/libc/stdtime/localtime.c
+ * for the details
+ */
+ the_clock = posix2time(the_clock);
+#endif
+
+#ifdef HAVE_LOCALTIME_R
+ localtime_r(&the_clock, (tm = &tmbuf));
+#else
+ tm = localtime(&the_clock);
+#endif
+ *year = tm->tm_year + 1900;
+ *month = tm->tm_mon +1;
+ *day = tm->tm_mday;
+ *hour = tm->tm_hour;
+ *minute = tm->tm_min;
+ *second = tm->tm_sec;
+ return 1;
+}
+
+
+/* get a timestamp */
+void
+get_now(Uint* megasec, Uint* sec, Uint* microsec)
+{
+ SysTimeval now;
+
+ erts_smp_mtx_lock(&erts_timeofday_mtx);
+
+ get_tolerant_timeofday(&now);
+ do_erts_deliver_time(&now);
+
+ /* Make sure time is later than last */
+ if (then.tv_sec > now.tv_sec ||
+ (then.tv_sec == now.tv_sec && then.tv_usec >= now.tv_usec)) {
+ now = then;
+ now.tv_usec++;
+ }
+ /* Check for carry from above + general reasonability */
+ if (now.tv_usec >= 1000000) {
+ now.tv_usec = 0;
+ now.tv_sec++;
+ }
+ then = now;
+
+ erts_smp_mtx_unlock(&erts_timeofday_mtx);
+
+ *megasec = (Uint) (now.tv_sec / 1000000);
+ *sec = (Uint) (now.tv_sec % 1000000);
+ *microsec = (Uint) (now.tv_usec);
+}
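+
+/*
+ * Note on the monotonicity fixup above: if two calls land in the same
+ * microsecond (or time stepped backwards), the second call returns the
+ * previous timestamp plus one microsecond, so successive results are
+ * strictly increasing. E.g. (values assumed) two calls at 1234.000001 s
+ * yield {0, 1234, 1} and then {0, 1234, 2}.
+ */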
+
+void
+get_sys_now(Uint* megasec, Uint* sec, Uint* microsec)
+{
+ SysTimeval now;
+
+ erts_smp_mtx_lock(&erts_timeofday_mtx);
+
+ sys_gettimeofday(&now);
+
+ erts_smp_mtx_unlock(&erts_timeofday_mtx);
+
+ *megasec = (Uint) (now.tv_sec / 1000000);
+ *sec = (Uint) (now.tv_sec % 1000000);
+ *microsec = (Uint) (now.tv_usec);
+}
+
+
+/* Deliver elapsed *ticks* to the machine: reads the current (tolerant)
+   time and delivers the ticks that have elapsed since the last
+   delivery. */
+
+#if !defined(ERTS_TIMER_THREAD)
+void erts_deliver_time(void) {
+ SysTimeval now;
+
+ erts_smp_mtx_lock(&erts_timeofday_mtx);
+
+ get_tolerant_timeofday(&now);
+ do_erts_deliver_time(&now);
+
+ erts_smp_mtx_unlock(&erts_timeofday_mtx);
+}
+#endif
+
+/* get *real* time (not ticks) remaining until next timeout - if there
+   isn't one, give a "long" time that is guaranteed not to cause
+   overflow when we report elapsed time later on */
+
+void erts_time_remaining(SysTimeval *rem_time)
+{
+ int ticks;
+#if !defined(ERTS_TIMER_THREAD)
+ SysTimeval cur_time;
+#endif
+ long elapsed;
+
+ /* next_time() returns no of ticks to next timeout or -1 if none */
+
+ if ((ticks = next_time()) == -1) {
+ /* timer queue empty */
+ /* this will cause at most 100000000 ticks */
+ rem_time->tv_sec = 100000;
+ rem_time->tv_usec = 0;
+ } else {
+ /* next timeout after ticks ticks */
+ ticks *= CLOCK_RESOLUTION;
+
+#if defined(ERTS_TIMER_THREAD)
+ elapsed = 0;
+#else
+ erts_smp_mtx_lock(&erts_timeofday_mtx);
+
+ get_tolerant_timeofday(&cur_time);
+ cur_time.tv_usec = 1000 *
+ (cur_time.tv_usec / 1000);/* ms resolution*/
+ elapsed = 1000 * (cur_time.tv_sec - last_delivered.tv_sec) +
+ (cur_time.tv_usec - last_delivered.tv_usec) / 1000;
+
+ erts_smp_mtx_unlock(&erts_timeofday_mtx);
+
+ if (ticks <= elapsed) { /* Ooops, better hurry */
+ rem_time->tv_sec = rem_time->tv_usec = 0;
+ return;
+ }
+#endif
+ rem_time->tv_sec = (ticks - elapsed) / 1000;
+ rem_time->tv_usec = 1000 * ((ticks - elapsed) % 1000);
+ }
+}
+
+void erts_get_timeval(SysTimeval *tv)
+{
+ erts_smp_mtx_lock(&erts_timeofday_mtx);
+ get_tolerant_timeofday(tv);
+ erts_smp_mtx_unlock(&erts_timeofday_mtx);
+}
+
+long
+erts_get_time(void)
+{
+ SysTimeval sys_tv;
+
+ erts_smp_mtx_lock(&erts_timeofday_mtx);
+
+ get_tolerant_timeofday(&sys_tv);
+
+ erts_smp_mtx_unlock(&erts_timeofday_mtx);
+
+ return sys_tv.tv_sec;
+}
+
+#ifdef HAVE_ERTS_NOW_CPU
+void erts_get_now_cpu(Uint* megasec, Uint* sec, Uint* microsec) {
+ SysCpuTime t;
+ SysTimespec tp;
+
+ sys_get_proc_cputime(t, tp);
+ *microsec = (Uint)(tp.tv_nsec / 1000);
+ t = (tp.tv_sec / 1000000);
+ *megasec = (Uint)(t % 1000000);
+ *sec = (Uint)(tp.tv_sec % 1000000);
+}
+#endif
+
+
+/*
+ * erts_get_emu_time() is similar to get_now(). Successive calls to
+ * erts_get_emu_time() always return distinct (strictly increasing)
+ * times, but a returned time may coincide with one from get_now().
+ *
+ * erts_get_emu_time() is only used internally in the emulator in
+ * order to order emulator internal events.
+ */
+
+void
+erts_get_emu_time(SysTimeval *this_emu_time_p)
+{
+ erts_smp_mtx_lock(&erts_timeofday_mtx);
+
+ get_tolerant_timeofday(this_emu_time_p);
+
+ /* Make sure time is later than last */
+ if (last_emu_time.tv_sec > this_emu_time_p->tv_sec ||
+ (last_emu_time.tv_sec == this_emu_time_p->tv_sec
+ && last_emu_time.tv_usec >= this_emu_time_p->tv_usec)) {
+ *this_emu_time_p = last_emu_time;
+ this_emu_time_p->tv_usec++;
+ }
+ /* Check for carry from above + general reasonability */
+ if (this_emu_time_p->tv_usec >= 1000000) {
+ this_emu_time_p->tv_usec = 0;
+ this_emu_time_p->tv_sec++;
+ }
+
+ last_emu_time = *this_emu_time_p;
+
+ erts_smp_mtx_unlock(&erts_timeofday_mtx);
+}
diff --git a/erts/emulator/beam/erl_trace.c b/erts/emulator/beam/erl_trace.c
new file mode 100644
index 0000000000..2afb16fc52
--- /dev/null
+++ b/erts/emulator/beam/erl_trace.c
@@ -0,0 +1,3260 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 1999-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Support functions for tracing.
+ */
+
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+
+#include "sys.h"
+#include "erl_vm.h"
+#include "global.h"
+#include "erl_process.h"
+#include "big.h"
+#include "bif.h"
+#include "dist.h"
+#include "beam_bp.h"
+#include "error.h"
+#include "erl_binary.h"
+#include "erl_bits.h"
+
+#if 0
+#define DEBUG_PRINTOUTS
+#else
+#undef DEBUG_PRINTOUTS
+#endif
+
+extern Eterm beam_return_to_trace[1]; /* OpCode(i_return_to_trace) */
+extern Eterm beam_return_trace[1]; /* OpCode(i_return_trace) */
+
+/* Pseudo export entries. Never filled in with data, only used to
+ yield unique pointers of the correct type. */
+Export exp_send, exp_receive, exp_timeout;
+
+static Eterm system_seq_tracer;
+static Uint default_trace_flags;
+static Eterm default_tracer;
+
+static Eterm system_monitor;
+static Eterm system_profile;
+
+#ifdef HAVE_ERTS_NOW_CPU
+int erts_cpu_timestamp;
+#endif
+
+static erts_smp_mtx_t smq_mtx;
+static erts_smp_mtx_t sys_trace_mtx;
+
+enum ErtsSysMsgType {
+ SYS_MSG_TYPE_UNDEFINED,
+ SYS_MSG_TYPE_TRACE,
+ SYS_MSG_TYPE_SEQTRACE,
+ SYS_MSG_TYPE_SYSMON,
+ SYS_MSG_TYPE_ERRLGR,
+ SYS_MSG_TYPE_PROC_MSG,
+ SYS_MSG_TYPE_SYSPROF
+};
+
+#ifdef ERTS_SMP
+static void enqueue_sys_msg_unlocked(enum ErtsSysMsgType type,
+ Eterm from,
+ Eterm to,
+ Eterm msg,
+ ErlHeapFragment *bp);
+static void enqueue_sys_msg(enum ErtsSysMsgType type,
+ Eterm from,
+ Eterm to,
+ Eterm msg,
+ ErlHeapFragment *bp);
+static void init_sys_msg_dispatcher(void);
+#endif
+
+void erts_init_trace(void) {
+ erts_smp_mtx_init(&sys_trace_mtx, "sys_tracers");
+#ifdef HAVE_ERTS_NOW_CPU
+ erts_cpu_timestamp = 0;
+#endif
+ erts_bif_trace_init();
+ erts_system_monitor_clear(NULL);
+ erts_system_profile_clear(NULL);
+ default_trace_flags = F_INITIAL_TRACE_FLAGS;
+ default_tracer = NIL;
+ system_seq_tracer = am_false;
+#ifdef ERTS_SMP
+ init_sys_msg_dispatcher();
+#endif
+}
+
+
+#ifdef ERTS_SMP
+#define ERTS_ALLOC_SYSMSG_HEAP(SZ, BPP, OHPP, UNUSED) \
+ (*(BPP) = new_message_buffer((SZ)), \
+ *(OHPP) = &(*(BPP))->off_heap, \
+ (*(BPP))->mem)
+#else
+#define ERTS_ALLOC_SYSMSG_HEAP(SZ, BPP, OHPP, RPP) \
+ erts_alloc_message_heap((SZ), (BPP), (OHPP), (RPP), 0)
+#endif
+
+#ifdef ERTS_SMP
+#define ERTS_ENQ_TRACE_MSG(FPID, TPID, MSG, BP) \
+do { \
+ ERTS_LC_ASSERT(erts_smp_lc_mtx_is_locked(&smq_mtx)); \
+ enqueue_sys_msg_unlocked(SYS_MSG_TYPE_TRACE, (FPID), (TPID), (MSG), (BP)); \
+} while(0)
+#else
+#define ERTS_ENQ_TRACE_MSG(FPID, TPROC, MSG, BP) \
+ erts_queue_message((TPROC), NULL, (BP), (MSG), NIL)
+#endif
+
+/*
+ * NOTE that the ERTS_GET_TRACER_REF() returns from the function (!!!)
+ * using it, and resets the parameters used if the tracer is invalid, i.e.,
+ * use it with extreme care!
+ */
+#ifdef ERTS_SMP
+#define ERTS_NULL_TRACER_REF NIL
+#define ERTS_TRACER_REF_TYPE Eterm
+ /* In the smp case, we never find the tracer invalid here (the sys
+ message dispatcher thread takes care of that). */
+#define ERTS_GET_TRACER_REF(RES, TPID, TRACEE_FLGS) \
+do { (RES) = (TPID); } while(0)
+#else
+#define ERTS_NULL_TRACER_REF NULL
+#define ERTS_TRACER_REF_TYPE Process *
+#define ERTS_GET_TRACER_REF(RES, TPID, TRACEE_FLGS) \
+do { \
+ (RES) = process_tab[internal_pid_index((TPID))]; \
+ if (INVALID_PID((RES), (TPID)) || !((RES)->trace_flags & F_TRACER)) { \
+ (TPID) = NIL; \
+ (TRACEE_FLGS) &= ~TRACEE_FLAGS; \
+ return; \
+ } \
+} while (0)
+#endif
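+
+/*
+ * Minimal usage sketch (hypothetical caller):
+ *
+ *     ERTS_TRACER_REF_TYPE tracer_ref;
+ *     ERTS_GET_TRACER_REF(tracer_ref, p->tracer_proc, p->trace_flags);
+ *     ... use tracer_ref ...
+ *
+ * In the non-SMP case the last line is never reached if the tracer was
+ * invalid: the macro then clears the trace flags and returns.
+ */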
+
+static Uint active_sched;
+
+void
+erts_system_profile_setup_active_schedulers(void)
+{
+ ERTS_SMP_LC_ASSERT(erts_is_system_blocked(0));
+ active_sched = erts_active_schedulers();
+}
+
+void
+erts_trace_check_exiting(Eterm exiting)
+{
+ erts_smp_mtx_lock(&sys_trace_mtx);
+ if (exiting == default_tracer) {
+ default_tracer = NIL;
+ default_trace_flags &= TRACEE_FLAGS;
+#ifdef DEBUG
+ default_trace_flags |= F_INITIAL_TRACE_FLAGS;
+#endif
+ }
+ if (exiting == system_seq_tracer) {
+#ifdef DEBUG_PRINTOUTS
+ erts_fprintf(stderr, "seq tracer %T exited\n", exiting);
+#endif
+ system_seq_tracer = am_false;
+ }
+ if (exiting == system_monitor) {
+#ifdef ERTS_SMP
+ system_monitor = NIL;
+ /* Let the trace message dispatcher clear flags, etc */
+#else
+ erts_system_monitor_clear(NULL);
+#endif
+ }
+ if (exiting == system_profile) {
+#ifdef ERTS_SMP
+ system_profile = NIL;
+ /* Let the trace message dispatcher clear flags, etc */
+#else
+ erts_system_profile_clear(NULL);
+#endif
+ }
+ erts_smp_mtx_unlock(&sys_trace_mtx);
+}
+
+Eterm
+erts_set_system_seq_tracer(Process *c_p, ErtsProcLocks c_p_locks, Eterm new)
+{
+ Eterm old = THE_NON_VALUE;
+
+ if (new != am_false) {
+ if (!erts_pid2proc(c_p, c_p_locks, new, 0)
+ && !erts_is_valid_tracer_port(new)) {
+ return old;
+ }
+ }
+
+ erts_smp_mtx_lock(&sys_trace_mtx);
+ old = system_seq_tracer;
+ system_seq_tracer = new;
+
+#ifdef DEBUG_PRINTOUTS
+ erts_fprintf(stderr, "set seq tracer new=%T old=%T\n", new, old);
+#endif
+ erts_smp_mtx_unlock(&sys_trace_mtx);
+ return old;
+}
+
+Eterm
+erts_get_system_seq_tracer(void)
+{
+ Eterm st;
+ erts_smp_mtx_lock(&sys_trace_mtx);
+ st = system_seq_tracer;
+#ifdef DEBUG_PRINTOUTS
+ erts_fprintf(stderr, "get seq tracer %T\n", st);
+#endif
+ erts_smp_mtx_unlock(&sys_trace_mtx);
+ return st;
+}
+
+static ERTS_INLINE void
+get_default_tracing(Uint *flagsp, Eterm *tracerp)
+{
+ if (!(default_trace_flags & TRACEE_FLAGS))
+ default_tracer = NIL;
+
+ if (is_nil(default_tracer)) {
+ default_trace_flags &= ~TRACEE_FLAGS;
+ } else if (is_internal_pid(default_tracer)) {
+ if (!erts_pid2proc(NULL, 0, default_tracer, 0)) {
+ reset_tracer:
+ default_trace_flags &= ~TRACEE_FLAGS;
+ default_tracer = NIL;
+ }
+ } else {
+ ASSERT(is_internal_port(default_tracer));
+ if (!erts_is_valid_tracer_port(default_tracer))
+ goto reset_tracer;
+ }
+
+ if (flagsp)
+ *flagsp = default_trace_flags;
+ if (tracerp)
+ *tracerp = default_tracer;
+}
+
+void
+erts_change_default_tracing(int setflags, Uint *flagsp, Eterm *tracerp)
+{
+ erts_smp_mtx_lock(&sys_trace_mtx);
+ if (flagsp) {
+ if (setflags)
+ default_trace_flags |= *flagsp;
+ else
+ default_trace_flags &= ~(*flagsp);
+ }
+ if (tracerp)
+ default_tracer = *tracerp;
+ get_default_tracing(flagsp, tracerp);
+ erts_smp_mtx_unlock(&sys_trace_mtx);
+}
+
+void
+erts_get_default_tracing(Uint *flagsp, Eterm *tracerp)
+{
+ erts_smp_mtx_lock(&sys_trace_mtx);
+ get_default_tracing(flagsp, tracerp);
+ erts_smp_mtx_unlock(&sys_trace_mtx);
+}
+
+void
+erts_set_system_monitor(Eterm monitor)
+{
+ erts_smp_mtx_lock(&sys_trace_mtx);
+ system_monitor = monitor;
+ erts_smp_mtx_unlock(&sys_trace_mtx);
+}
+
+Eterm
+erts_get_system_monitor(void)
+{
+ Eterm monitor;
+ erts_smp_mtx_lock(&sys_trace_mtx);
+ monitor = system_monitor;
+ erts_smp_mtx_unlock(&sys_trace_mtx);
+ return monitor;
+}
+
+/* Performance monitoring */
+void erts_set_system_profile(Eterm profile) {
+ erts_smp_mtx_lock(&sys_trace_mtx);
+ system_profile = profile;
+ erts_smp_mtx_unlock(&sys_trace_mtx);
+}
+
+Eterm
+erts_get_system_profile(void) {
+ Eterm profile;
+ erts_smp_mtx_lock(&sys_trace_mtx);
+ profile = system_profile;
+ erts_smp_mtx_unlock(&sys_trace_mtx);
+ return profile;
+}
+
+
+#ifdef HAVE_ERTS_NOW_CPU
+# define GET_NOW(m, s, u) \
+do { \
+ if (erts_cpu_timestamp) \
+ erts_get_now_cpu(m, s, u); \
+ else \
+ get_now(m, s, u); \
+} while (0)
+#else
+# define GET_NOW(m, s, u) do {get_now(m, s, u);} while (0)
+#endif
+
+
+
+static Eterm* patch_ts(Eterm tuple4, Eterm* hp);
+
+#ifdef ERTS_SMP
+static void
+do_send_to_port(Eterm to,
+ Port* unused_port,
+ Eterm from,
+ enum ErtsSysMsgType type,
+ Eterm message)
+{
+ Uint sz = size_object(message);
+ ErlHeapFragment *bp = new_message_buffer(sz);
+ Uint *hp = bp->mem;
+ Eterm msg = copy_struct(message, sz, &hp, &bp->off_heap);
+
+ enqueue_sys_msg_unlocked(type, from, to, msg, bp);
+}
+
+#define WRITE_SYS_MSG_TO_PORT write_sys_msg_to_port
+#else
+#define WRITE_SYS_MSG_TO_PORT do_send_to_port
+#endif
+
+static void
+WRITE_SYS_MSG_TO_PORT(Eterm unused_to,
+ Port* trace_port,
+ Eterm unused_from,
+ enum ErtsSysMsgType unused_type,
+ Eterm message) {
+ byte *buffer;
+ byte *ptr;
+ unsigned size;
+
+ size = erts_encode_ext_size(message);
+ buffer = (byte *) erts_alloc(ERTS_ALC_T_TMP, size);
+
+ ptr = buffer;
+
+ erts_encode_ext(message, &ptr);
+ if (!(ptr <= buffer+size)) {
+ erl_exit(1, "Internal error in do_send_to_port: %d\n", ptr-buffer);
+ }
+
+#ifndef ERTS_SMP
+ if (!INVALID_TRACER_PORT(trace_port, trace_port->id)) {
+#endif
+ erts_raw_port_command(trace_port, buffer, ptr-buffer);
+#ifndef ERTS_SMP
+ erts_port_release(trace_port);
+ }
+#endif
+
+ erts_free(ERTS_ALC_T_TMP, (void *) buffer);
+}
+
+
+#ifndef ERTS_SMP
+/* Send {trace_ts, Pid, out, 0, Timestamp}
+ * followed by {trace_ts, Pid, in, 0, NewTimestamp}
+ *
+ * 'NewTimestamp' is fetched from GET_NOW() through patch_ts().
+ */
+static void
+do_send_schedfix_to_port(Port *trace_port, Eterm pid, Eterm timestamp) {
+ Eterm local_heap[4+5+5];
+ Eterm message;
+ Eterm *hp;
+ Eterm mfarity;
+
+ ASSERT(is_pid(pid));
+ ASSERT(is_tuple(timestamp));
+ ASSERT(*tuple_val(timestamp) == make_arityval(3));
+
+ hp = local_heap;
+ mfarity = make_small(0);
+ message = TUPLE5(hp, am_trace_ts, pid, am_out, mfarity, timestamp);
+ /* Note, hp is deliberately NOT incremented since it will be reused */
+
+ do_send_to_port(trace_port->id,
+ trace_port,
+ pid,
+ SYS_MSG_TYPE_UNDEFINED,
+ message);
+
+ message = TUPLE4(hp, am_trace_ts, pid, am_in, mfarity);
+ hp += 5;
+ hp = patch_ts(message, hp);
+
+ do_send_to_port(trace_port->id,
+ trace_port,
+ pid,
+ SYS_MSG_TYPE_UNDEFINED,
+ message);
+}
+#endif
+
+/* If (c_p != NULL), a fake schedule out/in message pair will be sent,
+ * if the driver so requests.
+ * It is assumed that 'message' is not an 'out' message.
+ *
+ * 'c_p' is the currently executing process, "tracee" is the traced process
+ * which 'message' concerns => if (*tracee_flags & F_TIMESTAMP),
+ * 'message' must contain a timestamp.
+ */
+static void
+send_to_port(Process *c_p, Eterm message,
+ Eterm *tracer_pid, Uint *tracee_flags) {
+ Port* trace_port;
+#ifndef ERTS_SMP
+ Eterm ts, local_heap[4], *hp;
+#endif
+
+ ASSERT(is_internal_port(*tracer_pid));
+#ifdef ERTS_SMP
+ if (is_not_internal_port(*tracer_pid))
+ return;
+
+ trace_port = NULL;
+#else
+ if (is_not_internal_port(*tracer_pid))
+ goto invalid_tracer_port;
+
+ trace_port = &erts_port[internal_port_index(*tracer_pid)];
+
+ if (INVALID_TRACER_PORT(trace_port, *tracer_pid)) {
+ invalid_tracer_port:
+ *tracee_flags &= ~TRACEE_FLAGS;
+ *tracer_pid = NIL;
+ return;
+ }
+
+ /*
+ * Make a fake schedule only if the current process is traced
+ * with 'running' and 'timestamp'.
+ */
+
+ if ( c_p == NULL ||
+ (! IS_TRACED_FL(c_p, F_TRACE_SCHED | F_TIMESTAMP))) {
+#endif
+ do_send_to_port(*tracer_pid,
+ trace_port,
+ c_p ? c_p->id : NIL,
+ SYS_MSG_TYPE_TRACE,
+ message);
+#ifndef ERTS_SMP
+ return;
+ }
+
+ /*
+     * Note that the process being traced for some types of trace messages
+ * (e.g. getting_linked) need not be the current process. That other
+ * process might not have timestamps enabled.
+ */
+ if (*tracee_flags & F_TIMESTAMP) {
+ ASSERT(is_tuple(message));
+ hp = tuple_val(message);
+ ts = hp[arityval(hp[0])];
+ } else {
+ /* A fake schedule might be needed,
+ * but this message does not contain a timestamp.
+ * Create a dummy trace message with timestamp to be
+ * passed to do_send_schedfix_to_port().
+ */
+ Uint ms,s,us;
+ GET_NOW(&ms, &s, &us);
+ hp = local_heap;
+ ts = TUPLE3(hp, make_small(ms), make_small(s), make_small(us));
+ hp += 4;
+ }
+
+ trace_port->control_flags &= ~PORT_CONTROL_FLAG_HEAVY;
+ do_send_to_port(*tracer_pid,
+ trace_port,
+ c_p ? c_p->id : NIL,
+ SYS_MSG_TYPE_TRACE,
+ message);
+
+ if (trace_port->control_flags & PORT_CONTROL_FLAG_HEAVY) {
+        /* The driver has just informed us that the last write took a
+         * non-negligible amount of time.
+         *
+         * We need to fake some trace messages to compensate for the time the
+         * current process had to sacrifice for the writing of the previous
+         * trace message. We pretend that the process got scheduled out
+         * just after writing the real trace message, and now gets scheduled
+         * in again.
+         */
+ do_send_schedfix_to_port(trace_port, c_p->id, ts);
+ }
+#endif
+}
+
+#ifndef ERTS_SMP
+/* Profile send.
+ * Checks whether the profiler is a port or a process.
+ * The Eterm msg is local and needs copying.
+ */
+
+static void
+profile_send(Eterm message) {
+ Uint sz = 0;
+ ErlHeapFragment *bp = NULL;
+ Uint *hp = NULL;
+ Eterm msg = NIL;
+ Process *profile_p = NULL;
+ ErlOffHeap *off_heap = NULL;
+
+ Eterm profiler = erts_get_system_profile();
+
+ if (is_internal_port(profiler)) {
+ Port *profiler_port = NULL;
+
+ /* not smp */
+
+
+ profiler_port = &erts_port[internal_port_index(profiler)];
+
+ do_send_to_port(profiler,
+ profiler_port,
+ NIL, /* or current process->id */
+ SYS_MSG_TYPE_SYSPROF,
+ message);
+
+ } else {
+ ASSERT(is_internal_pid(profiler)
+ && internal_pid_index(profiler) < erts_max_processes);
+
+ profile_p = process_tab[internal_pid_index(profiler)];
+
+ if (INVALID_PID(profile_p, profiler)) return;
+
+ sz = size_object(message);
+ hp = erts_alloc_message_heap(sz, &bp, &off_heap, profile_p, 0);
+        msg = copy_struct(message, sz, &hp, off_heap);
+
+ erts_queue_message(profile_p, NULL, bp, msg, NIL);
+ }
+}
+
+#endif
+
+
+/* A fake schedule out/in message pair will be sent,
+ * if the driver so requests.
+ * If (timestamp == NIL), one is fetched from GET_NOW().
+ *
+ * 'c_p' is the currently executing process, may be NULL.
+ */
+static void
+seq_trace_send_to_port(Process *c_p,
+ Eterm seq_tracer,
+ Eterm message,
+ Eterm timestamp)
+{
+ Port* trace_port;
+#ifndef ERTS_SMP
+ Eterm ts, local_heap[4], *hp;
+#endif
+
+ ASSERT(is_internal_port(seq_tracer));
+#ifdef ERTS_SMP
+ if (is_not_internal_port(seq_tracer))
+ return;
+
+ trace_port = NULL;
+#else
+ if (is_not_internal_port(seq_tracer))
+ goto invalid_tracer_port;
+
+ trace_port = &erts_port[internal_port_index(seq_tracer)];
+
+ if (INVALID_TRACER_PORT(trace_port, seq_tracer)) {
+ invalid_tracer_port:
+ system_seq_tracer = am_false;
+ return;
+ }
+
+ if (c_p == NULL
+ || (! IS_TRACED_FL(c_p, F_TRACE_SCHED | F_TIMESTAMP))) {
+#endif
+ do_send_to_port(seq_tracer,
+ trace_port,
+ c_p ? c_p->id : NIL,
+ SYS_MSG_TYPE_SEQTRACE,
+ message);
+
+#ifndef ERTS_SMP
+ return;
+ }
+ /* Make a fake schedule only if the current process is traced
+ * with 'running' and 'timestamp'.
+ */
+
+ if (timestamp != NIL) {
+ ts = timestamp;
+ } else {
+ /* A fake schedule might be needed,
+ * but this message does not contain a timestamp.
+ * Create a dummy trace message with timestamp to be
+ * passed to do_send_schedfix_to_port().
+ */
+ Uint ms,s,us;
+ GET_NOW(&ms, &s, &us);
+ hp = local_heap;
+ ts = TUPLE3(hp, make_small(ms), make_small(s), make_small(us));
+ hp += 4;
+ }
+
+ trace_port->control_flags &= ~PORT_CONTROL_FLAG_HEAVY;
+ do_send_to_port(seq_tracer,
+ trace_port,
+ c_p ? c_p->id : NIL,
+ SYS_MSG_TYPE_SEQTRACE,
+ message);
+
+ if (trace_port->control_flags & PORT_CONTROL_FLAG_HEAVY) {
+ /* The driver has just informed us that the last write took a
+         * non-negligible amount of time.
+ *
+ * We need to fake some trace messages to compensate for the time the
+ * current process had to sacrifice for the writing of the previous
+ * trace message. We pretend that the process got scheduled out
+ * just after writing the real trace message, and now gets scheduled
+ * in again.
+ */
+ do_send_schedfix_to_port(trace_port, c_p->id, ts);
+ }
+#endif
+}
+
+#define TS_HEAP_WORDS 5
+#define TS_SIZE(p) (((p)->trace_flags & F_TIMESTAMP) ? TS_HEAP_WORDS : 0)
+
+/*
+ * Patch a timestamp into a tuple. The tuple must be the last thing
+ * built on the heap.
+ *
+ * Returns the new hp pointer.
+*/
+static Eterm*
+patch_ts(Eterm tuple, Eterm* hp)
+{
+ Uint ms, s, us;
+ Eterm* ptr = tuple_val(tuple);
+ int arity = arityval(*ptr);
+
+ ASSERT((ptr+arity+1) == hp);
+ ptr[0] = make_arityval(arity+1);
+ ptr[1] = am_trace_ts;
+ GET_NOW(&ms, &s, &us);
+ *hp = TUPLE3(hp+1, make_small(ms), make_small(s), make_small(us));
+ return hp+5;
+}
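+
+/*
+ * Example: a caller builds {trace, Pid, receive, Msg} and leaves hp
+ * pointing just past the tuple; patch_ts() then rewrites it in place to
+ * {trace_ts, Pid, receive, Msg, {MegaSecs, Secs, MicroSecs}}. This is
+ * why callers reserve TS_HEAP_WORDS (5) extra heap words whenever
+ * F_TIMESTAMP is set.
+ */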
+
+static ERTS_INLINE void
+send_to_tracer(Process *tracee,
+ ERTS_TRACER_REF_TYPE tracer_ref,
+ Eterm msg,
+ Eterm **hpp,
+ ErlHeapFragment *bp,
+ int no_fake_sched)
+{
+ ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks(tracee));
+
+ erts_smp_mtx_lock(&smq_mtx);
+
+ if (tracee->trace_flags & F_TIMESTAMP)
+ *hpp = patch_ts(msg, *hpp);
+
+ if (is_internal_pid(tracee->tracer_proc))
+ ERTS_ENQ_TRACE_MSG(tracee->id, tracer_ref, msg, bp);
+ else {
+ ASSERT(is_internal_port(tracee->tracer_proc));
+ send_to_port(no_fake_sched ? NULL : tracee,
+ msg,
+ &tracee->tracer_proc,
+ &tracee->trace_flags);
+ }
+
+ erts_smp_mtx_unlock(&smq_mtx);
+
+}
+
+static void
+trace_sched_aux(Process *p, Eterm what, int never_fake_sched)
+{
+ Eterm local_heap[5+4+1+TS_HEAP_WORDS];
+ Eterm tmp, mess, *hp;
+ ErlHeapFragment *bp = NULL;
+ ErlOffHeap *off_heap;
+ ERTS_TRACER_REF_TYPE tracer_ref = ERTS_NULL_TRACER_REF;
+ int sched_no, curr_func, to_port, no_fake_sched;
+
+ if (is_nil(p->tracer_proc))
+ return;
+
+ no_fake_sched = never_fake_sched;
+
+ switch (what) {
+ case am_out:
+ case am_out_exiting:
+ case am_out_exited:
+ no_fake_sched = 1;
+ break;
+ case am_in:
+ case am_in_exiting:
+ break;
+ default:
+ ASSERT(0);
+ break;
+ }
+
+ sched_no = IS_TRACED_FL(p, F_TRACE_SCHED_NO);
+ to_port = is_internal_port(p->tracer_proc);
+
+ if (!to_port) {
+ ASSERT(is_internal_pid(p->tracer_proc)
+ && internal_pid_index(p->tracer_proc) < erts_max_processes);
+
+ ERTS_GET_TRACER_REF(tracer_ref, p->tracer_proc, p->trace_flags);
+ }
+
+ if (ERTS_PROC_IS_EXITING(p)
+#ifndef ERTS_SMP
+ || p->status == P_FREE
+#endif
+ ) {
+ curr_func = 0;
+ }
+ else {
+ if (!p->current)
+ p->current = find_function_from_pc(p->i);
+ curr_func = p->current != NULL;
+ }
+
+ if (to_port)
+ hp = &local_heap[0];
+ else {
+ Uint size = 5;
+ if (curr_func)
+ size += 4;
+ if (sched_no)
+ size += 1;
+ size += TS_SIZE(p);
+ hp = ERTS_ALLOC_SYSMSG_HEAP(size, &bp, &off_heap, tracer_ref);
+ }
+
+ if (!curr_func) {
+ tmp = make_small(0);
+ } else {
+ tmp = TUPLE3(hp,p->current[0],p->current[1],make_small(p->current[2]));
+ hp += 4;
+ }
+
+ if (!sched_no) {
+ mess = TUPLE4(hp, am_trace, p->id, what, tmp);
+ hp += 5;
+ }
+ else {
+#ifdef ERTS_SMP
+ Eterm sched_id = make_small(p->scheduler_data->no);
+#else
+ Eterm sched_id = make_small(1);
+#endif
+ mess = TUPLE5(hp, am_trace, p->id, what, sched_id, tmp);
+ hp += 6;
+ }
+
+ send_to_tracer(p, tracer_ref, mess, &hp, bp, no_fake_sched);
+}
+
+/* Send {trace_ts, Pid, What, {Mod, Func, Arity}, Timestamp}
+ * or {trace, Pid, What, {Mod, Func, Arity}}
+ *
+ * where 'What' is supposed to be 'in', 'out', 'in_exiting',
+ * 'out_exiting', or 'out_exited'.
+ */
+void
+trace_sched(Process *p, Eterm what)
+{
+ trace_sched_aux(p, what, 0);
+}
+
+/* Send {trace_ts, Pid, Send, Msg, DestPid, Timestamp}
+ * or {trace, Pid, Send, Msg, DestPid}
+ *
+ * where 'Send' is 'send' or 'send_to_non_existing_process'.
+ */
+void
+trace_send(Process *p, Eterm to, Eterm msg)
+{
+ Eterm operation;
+ unsigned sz_msg;
+ unsigned sz_to;
+ Eterm* hp;
+ Eterm mess;
+
+ if (!ARE_TRACE_FLAGS_ON(p, F_TRACE_SEND)) {
+ return;
+ }
+
+ operation = am_send;
+ if (is_internal_pid(to)) {
+ if (!erts_pid2proc(p, ERTS_PROC_LOCK_MAIN, to, 0))
+ goto send_to_non_existing_process;
+ }
+ else if(is_external_pid(to)
+ && external_pid_dist_entry(to) == erts_this_dist_entry) {
+ char *s;
+ send_to_non_existing_process:
+ s = "send_to_non_existing_process";
+ operation = am_atom_put(s, sys_strlen(s));
+ }
+
+ if (is_internal_port(p->tracer_proc)) {
+ Eterm local_heap[11];
+ hp = local_heap;
+ mess = TUPLE5(hp, am_trace, p->id, operation, msg, to);
+ hp += 6;
+ erts_smp_mtx_lock(&smq_mtx);
+ if (p->trace_flags & F_TIMESTAMP) {
+ hp = patch_ts(mess, hp);
+ }
+ send_to_port(p, mess, &p->tracer_proc, &p->trace_flags);
+ erts_smp_mtx_unlock(&smq_mtx);
+ } else {
+ Uint need;
+ ErlHeapFragment *bp;
+ ErlOffHeap *off_heap;
+ ERTS_TRACER_REF_TYPE tracer_ref;
+
+ ASSERT(is_internal_pid(p->tracer_proc)
+ && internal_pid_index(p->tracer_proc) < erts_max_processes);
+
+ ERTS_GET_TRACER_REF(tracer_ref, p->tracer_proc, p->trace_flags);
+
+ sz_msg = size_object(msg);
+ sz_to = size_object(to);
+ need = sz_msg + sz_to + 6 + TS_SIZE(p);
+
+ hp = ERTS_ALLOC_SYSMSG_HEAP(need, &bp, &off_heap, tracer_ref);
+
+ to = copy_struct(to,
+ sz_to,
+ &hp,
+ off_heap);
+ msg = copy_struct(msg,
+ sz_msg,
+ &hp,
+ off_heap);
+ mess = TUPLE5(hp, am_trace, p->id/* Local pid */, operation, msg, to);
+ hp += 6;
+
+ erts_smp_mtx_lock(&smq_mtx);
+
+ if (p->trace_flags & F_TIMESTAMP) {
+ patch_ts(mess, hp);
+ }
+
+ ERTS_ENQ_TRACE_MSG(p->id, tracer_ref, mess, bp);
+ erts_smp_mtx_unlock(&smq_mtx);
+ }
+}
+
+/* Send {trace_ts, Pid, receive, Msg, Timestamp}
+ * or {trace, Pid, receive, Msg}
+ */
+void
+trace_receive(Process *rp, Eterm msg)
+{
+ Eterm mess;
+ size_t sz_msg;
+ Eterm* hp;
+
+ if (is_internal_port(rp->tracer_proc)) {
+ Eterm local_heap[10];
+ hp = local_heap;
+ mess = TUPLE4(hp, am_trace, rp->id, am_receive, msg);
+ hp += 5;
+ erts_smp_mtx_lock(&smq_mtx);
+ if (rp->trace_flags & F_TIMESTAMP) {
+ hp = patch_ts(mess, hp);
+ }
+ send_to_port(rp, mess, &rp->tracer_proc, &rp->trace_flags);
+ erts_smp_mtx_unlock(&smq_mtx);
+ } else {
+ Uint hsz;
+ ErlHeapFragment *bp;
+ ErlOffHeap *off_heap;
+ ERTS_TRACER_REF_TYPE tracer_ref;
+
+ ASSERT(is_internal_pid(rp->tracer_proc)
+ && internal_pid_index(rp->tracer_proc) < erts_max_processes);
+
+ ERTS_GET_TRACER_REF(tracer_ref, rp->tracer_proc, rp->trace_flags);
+
+ sz_msg = size_object(msg);
+
+ hsz = sz_msg + 5 + TS_SIZE(rp);
+
+ hp = ERTS_ALLOC_SYSMSG_HEAP(hsz, &bp, &off_heap, tracer_ref);
+
+ msg = copy_struct(msg, sz_msg, &hp, off_heap);
+ mess = TUPLE4(hp, am_trace, rp->id/* Local pid */, am_receive, msg);
+ hp += 5;
+
+ erts_smp_mtx_lock(&smq_mtx);
+
+ if (rp->trace_flags & F_TIMESTAMP) {
+ patch_ts(mess, hp);
+ }
+
+ ERTS_ENQ_TRACE_MSG(rp->id, tracer_ref, mess, bp);
+ erts_smp_mtx_unlock(&smq_mtx);
+ }
+}
+
+int
+seq_trace_update_send(Process *p)
+{
+ Eterm seq_tracer = erts_get_system_seq_tracer();
+ ASSERT((is_tuple(SEQ_TRACE_TOKEN(p)) || is_nil(SEQ_TRACE_TOKEN(p))));
+ if ( (p->id == seq_tracer) || (SEQ_TRACE_TOKEN(p) == NIL))
+ return 0;
+
+ SEQ_TRACE_TOKEN_SENDER(p) = p->id; /* Internal pid */
+ SEQ_TRACE_TOKEN_SERIAL(p) =
+ make_small(++(p -> seq_trace_clock));
+ SEQ_TRACE_TOKEN_LASTCNT(p) =
+ make_small(p -> seq_trace_lastcnt);
+ return 1;
+}
+
+
+/* Send a sequential trace message to the sequential tracer.
+ * p is the caller (which contains the trace token),
+ * msg is the original message, type is trace type (SEQ_TRACE_SEND etc),
+ * and receiver is the receiver of the message.
+ *
+ * The message to be received by the sequential tracer is:
+ *
+ * TraceMsg =
+ * {seq_trace, Label, {Type, {Lastcnt, Serial}, Sender, Receiver, Msg} [,Timestamp] }
+ *
+ */
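+/*
+ * For instance (all values assumed), a send event with label 0 and
+ * timestamps enabled could arrive at the tracer as:
+ *
+ *     {seq_trace, 0, {send, {3, 4}, <0.31.0>, <0.34.0>, hello},
+ *      {1259, 336131, 447855}}
+ */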
+void
+seq_trace_output_generic(Eterm token, Eterm msg, Uint type,
+ Eterm receiver, Process *process, Eterm exitfrom)
+{
+ Eterm mess;
+ ErlHeapFragment* bp;
+ Eterm* hp;
+ Eterm label;
+ Eterm lastcnt_serial;
+ Eterm type_atom;
+ int sz_exit;
+ Eterm seq_tracer;
+
+ seq_tracer = erts_get_system_seq_tracer();
+
+ ASSERT(is_tuple(token) || is_nil(token));
+ if (SEQ_TRACE_T_SENDER(token) == seq_tracer || token == NIL ||
+ (process && process->trace_flags & F_SENSITIVE)) {
+ return;
+ }
+
+ switch (type) {
+ case SEQ_TRACE_SEND: type_atom = am_send; break;
+ case SEQ_TRACE_PRINT: type_atom = am_print; break;
+ case SEQ_TRACE_RECEIVE: type_atom = am_receive; break;
+ default:
+ erl_exit(1, "invalid type in seq_trace_output_generic: %d:\n", type);
+ return; /* To avoid warning */
+ }
+
+ if ((unsigned_val(SEQ_TRACE_T_FLAGS(token)) & type) == 0) {
+ /* No flags set, nothing to do */
+ return;
+ }
+
+ if (seq_tracer == am_false) {
+ return; /* no need to send anything */
+ }
+
+ if (is_internal_port(seq_tracer)) {
+ Eterm local_heap[64];
+ hp = local_heap;
+ label = SEQ_TRACE_T_LABEL(token);
+ lastcnt_serial = TUPLE2(hp, SEQ_TRACE_T_LASTCNT(token),
+ SEQ_TRACE_T_SERIAL(token));
+ hp += 3;
+ if (exitfrom != NIL) {
+ msg = TUPLE3(hp, am_EXIT, exitfrom, msg);
+ hp += 4;
+ }
+ mess = TUPLE5(hp, type_atom, lastcnt_serial, SEQ_TRACE_T_SENDER(token),
+ receiver, msg);
+ hp += 6;
+ erts_smp_mtx_lock(&smq_mtx);
+ if ((unsigned_val(SEQ_TRACE_T_FLAGS(token)) & SEQ_TRACE_TIMESTAMP) == 0) {
+ mess = TUPLE3(hp, am_seq_trace, label, mess);
+ seq_trace_send_to_port(NULL, seq_tracer, mess, NIL);
+ } else {
+ Uint ms,s,us,ts;
+ GET_NOW(&ms, &s, &us);
+ ts = TUPLE3(hp, make_small(ms),make_small(s), make_small(us));
+ hp += 4;
+ mess = TUPLE4(hp, am_seq_trace, label, mess, ts);
+ seq_trace_send_to_port(process, seq_tracer, mess, ts);
+ }
+ erts_smp_mtx_unlock(&smq_mtx);
+ } else {
+#ifndef ERTS_SMP
+ Process* tracer;
+#endif
+ Eterm sender_copy;
+ Eterm receiver_copy;
+ Eterm m2;
+ Uint sz_label, sz_lastcnt_serial, sz_msg, sz_ts, sz_sender,
+ sz_exitfrom, sz_receiver;
+
+ ASSERT(is_internal_pid(seq_tracer)
+ && internal_pid_index(seq_tracer) < erts_max_processes);
+
+#ifndef ERTS_SMP
+
+ tracer = process_tab[internal_pid_index(seq_tracer)];
+ if (INVALID_PID(tracer, tracer->id)) {
+ system_seq_tracer = am_false;
+ return; /* no need to send anything */
+ }
+#endif
+ if (receiver == seq_tracer) {
+ return; /* no need to send anything */
+ }
+
+ sz_label = size_object(SEQ_TRACE_T_LABEL(token));
+ sz_sender = size_object(SEQ_TRACE_T_SENDER(token));
+ sz_receiver = size_object(receiver);
+ sz_lastcnt_serial = 3; /* TUPLE2 */
+ sz_msg = size_object(msg);
+
+ sz_ts = ((unsigned_val(SEQ_TRACE_T_FLAGS(token)) & SEQ_TRACE_TIMESTAMP) ?
+ 5 : 0);
+ if (exitfrom != NIL) {
+ sz_exit = 4; /* create {'EXIT',exitfrom,msg} */
+ sz_exitfrom = size_object(exitfrom);
+ }
+ else {
+ sz_exit = 0;
+ sz_exitfrom = 0;
+ }
+ bp = new_message_buffer(4 /* TUPLE3 */ + sz_ts + 6 /* TUPLE5 */
+ + sz_lastcnt_serial + sz_label + sz_msg
+ + sz_exit + sz_exitfrom
+ + sz_sender + sz_receiver);
+ hp = bp->mem;
+ label = copy_struct(SEQ_TRACE_T_LABEL(token), sz_label, &hp, &bp->off_heap);
+ lastcnt_serial = TUPLE2(hp,SEQ_TRACE_T_LASTCNT(token),SEQ_TRACE_T_SERIAL(token));
+ hp += 3;
+ m2 = copy_struct(msg, sz_msg, &hp, &bp->off_heap);
+ if (sz_exit) {
+ Eterm exitfrom_copy = copy_struct(exitfrom,
+ sz_exitfrom,
+ &hp,
+ &bp->off_heap);
+ m2 = TUPLE3(hp, am_EXIT, exitfrom_copy, m2);
+ hp += 4;
+ }
+ sender_copy = copy_struct(SEQ_TRACE_T_SENDER(token),
+ sz_sender,
+ &hp,
+ &bp->off_heap);
+ receiver_copy = copy_struct(receiver,
+ sz_receiver,
+ &hp,
+ &bp->off_heap);
+ mess = TUPLE5(hp,
+ type_atom,
+ lastcnt_serial,
+ sender_copy,
+ receiver_copy,
+ m2);
+ hp += 6;
+
+ erts_smp_mtx_lock(&smq_mtx);
+
+ if (sz_ts) {/* timestamp should be included */
+ Uint ms,s,us,ts;
+ GET_NOW(&ms, &s, &us);
+ ts = TUPLE3(hp, make_small(ms),make_small(s), make_small(us));
+ hp += 4;
+ mess = TUPLE4(hp, am_seq_trace, label, mess, ts);
+ } else {
+ mess = TUPLE3(hp, am_seq_trace, label, mess);
+ }
+
+#ifdef ERTS_SMP
+ enqueue_sys_msg_unlocked(SYS_MSG_TYPE_SEQTRACE, NIL, NIL, mess, bp);
+ erts_smp_mtx_unlock(&smq_mtx);
+#else
+ erts_queue_message(tracer, NULL, bp, mess, NIL); /* trace_token must be NIL here */
+#endif
+ }
+}
+
+/* Send {trace_ts, Pid, return_to, {Mod, Func, Arity}, Timestamp}
+ * or {trace, Pid, return_to, {Mod, Func, Arity}}
+ */
+void
+erts_trace_return_to(Process *p, Uint *pc)
+{
+ Eterm* hp;
+ Eterm mfa;
+ Eterm mess;
+ Eterm local_heap[4+5+5];
+
+ Eterm *code_ptr = find_function_from_pc(pc);
+
+ hp = local_heap;
+
+ if (!code_ptr) {
+ mfa = am_undefined;
+ } else {
+ mfa = TUPLE3(hp, code_ptr[0], code_ptr[1], make_small(code_ptr[2]));
+ hp += 4;
+ }
+
+ mess = TUPLE4(hp, am_trace, p->id, am_return_to, mfa);
+ hp += 5;
+
+ erts_smp_mtx_lock(&smq_mtx);
+
+ if (p->trace_flags & F_TIMESTAMP) {
+ hp = patch_ts(mess, hp);
+ }
+
+ if (is_internal_port(p->tracer_proc)) {
+ send_to_port(p, mess, &p->tracer_proc, &p->trace_flags);
+ } else {
+ ErlHeapFragment *bp;
+ ErlOffHeap *off_heap;
+ ERTS_TRACER_REF_TYPE tracer_ref;
+ unsigned size;
+
+ /*
+ * Find the tracer.
+ */
+ ASSERT(is_internal_pid(p->tracer_proc)
+ && internal_pid_index(p->tracer_proc) < erts_max_processes);
+
+ ERTS_GET_TRACER_REF(tracer_ref, p->tracer_proc, p->trace_flags);
+
+ size = size_object(mess);
+
+ hp = ERTS_ALLOC_SYSMSG_HEAP(size, &bp, &off_heap, tracer_ref);
+
+ /*
+ * Copy the trace message into the buffer and enqueue it.
+ */
+ mess = copy_struct(mess, size, &hp, off_heap);
+ ERTS_ENQ_TRACE_MSG(p->id, tracer_ref, mess, bp);
+ }
+ erts_smp_mtx_unlock(&smq_mtx);
+}
+
+
+/* Send {trace_ts, Pid, return_from, {Mod, Name, Arity}, Retval, Timestamp}
+ * or {trace, Pid, return_from, {Mod, Name, Arity}, Retval}
+ */
+void
+erts_trace_return(Process* p, Eterm* fi, Eterm retval, Eterm *tracer_pid)
+{
+ Eterm* hp;
+ Eterm mfa;
+ Eterm mess;
+ Eterm mod, name;
+ int arity;
+ Uint meta_flags, *tracee_flags;
+#ifdef ERTS_SMP
+ Eterm tracee;
+#endif
+
+ ASSERT(tracer_pid);
+ if (*tracer_pid == am_true) {
+ /* Breakpoint trace enabled without specifying tracer =>
+ * use process tracer and flags
+ */
+ tracer_pid = &p->tracer_proc;
+ }
+ if (is_nil(*tracer_pid)) {
+ /* Trace disabled */
+ return;
+ }
+ ASSERT(is_internal_pid(*tracer_pid) || is_internal_port(*tracer_pid));
+ if (*tracer_pid == p->id) {
+ /* Do not generate trace messages to oneself */
+ return;
+ }
+ if (tracer_pid == &p->tracer_proc) {
+ /* Tracer specified in process structure =>
+ * non-breakpoint trace =>
+ * use process flags
+ */
+ tracee_flags = &p->trace_flags;
+#ifdef ERTS_SMP
+ tracee = p->id;
+#endif
+ } else {
+ /* Tracer not specified in process structure =>
+ * tracer specified in breakpoint =>
+ * meta trace =>
+ * use fixed flag set instead of process flags
+ */
+ meta_flags = F_TRACE_CALLS | F_TIMESTAMP;
+ tracee_flags = &meta_flags;
+#ifdef ERTS_SMP
+ tracee = NIL;
+#endif
+ }
+ if (! (*tracee_flags & F_TRACE_CALLS)) {
+ return;
+ }
+
+ mod = fi[0];
+ name = fi[1];
+ arity = fi[2];
+
+ if (is_internal_port(*tracer_pid)) {
+ Eterm local_heap[4+6+5];
+ hp = local_heap;
+ mfa = TUPLE3(hp, mod, name, make_small(arity));
+ hp += 4;
+ mess = TUPLE5(hp, am_trace, p->id, am_return_from, mfa, retval);
+ hp += 6;
+ erts_smp_mtx_lock(&smq_mtx);
+ if (*tracee_flags & F_TIMESTAMP) {
+ hp = patch_ts(mess, hp);
+ }
+ send_to_port(p, mess, tracer_pid, tracee_flags);
+ erts_smp_mtx_unlock(&smq_mtx);
+ } else {
+ ErlHeapFragment *bp;
+ ErlOffHeap *off_heap;
+ ERTS_TRACER_REF_TYPE tracer_ref;
+ unsigned size;
+ unsigned retval_size;
+#ifdef DEBUG
+ Eterm* limit;
+#endif
+
+ ASSERT(is_internal_pid(*tracer_pid)
+ && internal_pid_index(*tracer_pid) < erts_max_processes);
+
+ ERTS_GET_TRACER_REF(tracer_ref, *tracer_pid, *tracee_flags);
+
+ retval_size = size_object(retval);
+ size = 6 + 4 + retval_size;
+ if (*tracee_flags & F_TIMESTAMP) {
+ size += 1+4;
+ }
+
+ hp = ERTS_ALLOC_SYSMSG_HEAP(size, &bp, &off_heap, tracer_ref);
+#ifdef DEBUG
+ limit = hp + size;
+#endif
+
+ /*
+ * Build the trace tuple and put it into receive queue of the tracer process.
+ */
+
+ mfa = TUPLE3(hp, mod, name, make_small(arity));
+ hp += 4;
+ retval = copy_struct(retval, retval_size, &hp, off_heap);
+ mess = TUPLE5(hp, am_trace, p->id/* Local pid */, am_return_from, mfa, retval);
+ hp += 6;
+
+ erts_smp_mtx_lock(&smq_mtx);
+
+ if (*tracee_flags & F_TIMESTAMP) {
+ hp = patch_ts(mess, hp);
+ }
+
+ ASSERT(hp == limit);
+
+ ERTS_ENQ_TRACE_MSG(tracee, tracer_ref, mess, bp);
+ erts_smp_mtx_unlock(&smq_mtx);
+ }
+}
+
+/* Send {trace_ts, Pid, exception_from, {Mod, Name, Arity}, {Class,Value},
+ * Timestamp}
+ * or {trace, Pid, exception_from, {Mod, Name, Arity}, {Class,Value}}
+ *
+ * where Class is atomic but Value may be any term.
+ */
+void
+erts_trace_exception(Process* p, Eterm mfa[3], Eterm class, Eterm value,
+ Eterm *tracer_pid)
+{
+ Eterm* hp;
+ Eterm mfa_tuple;
+ Eterm cv;
+ Eterm mess;
+ Uint meta_flags, *tracee_flags;
+#ifdef ERTS_SMP
+ Eterm tracee;
+#endif
+
+ ASSERT(tracer_pid);
+ if (*tracer_pid == am_true) {
+ /* Breakpoint trace enabled without specifying tracer =>
+ * use process tracer and flags
+ */
+ tracer_pid = &p->tracer_proc;
+ }
+ if (is_nil(*tracer_pid)) {
+ /* Trace disabled */
+ return;
+ }
+ ASSERT(is_internal_pid(*tracer_pid) || is_internal_port(*tracer_pid));
+ if (*tracer_pid == p->id) {
+ /* Do not generate trace messages to oneself */
+ return;
+ }
+ if (tracer_pid == &p->tracer_proc) {
+ /* Tracer specified in process structure =>
+ * non-breakpoint trace =>
+ * use process flags
+ */
+ tracee_flags = &p->trace_flags;
+#ifdef ERTS_SMP
+ tracee = p->id;
+#endif
+ if (! (*tracee_flags & F_TRACE_CALLS)) {
+ return;
+ }
+ } else {
+ /* Tracer not specified in process structure =>
+ * tracer specified in breakpoint =>
+ * meta trace =>
+ * use fixed flag set instead of process flags
+ */
+ meta_flags = F_TRACE_CALLS | F_TIMESTAMP;
+ tracee_flags = &meta_flags;
+#ifdef ERTS_SMP
+ tracee = NIL;
+#endif
+ }
+
+ if (is_internal_port(*tracer_pid)) {
+ Eterm local_heap[4+3+6+5];
+ hp = local_heap;
+ mfa_tuple = TUPLE3(hp, mfa[0], mfa[1], make_small(mfa[2]));
+ hp += 4;
+ cv = TUPLE2(hp, class, value);
+ hp += 3;
+ mess = TUPLE5(hp, am_trace, p->id, am_exception_from, mfa_tuple, cv);
+ hp += 6;
+ ASSERT((hp - local_heap)*sizeof(*hp) <= sizeof(local_heap));
+ erts_smp_mtx_lock(&smq_mtx);
+ if (*tracee_flags & F_TIMESTAMP) {
+ hp = patch_ts(mess, hp); /* hp += 5 */
+ ASSERT((hp - local_heap)*sizeof(*hp) == sizeof(local_heap));
+ }
+ send_to_port(p, mess, tracer_pid, tracee_flags);
+ erts_smp_mtx_unlock(&smq_mtx);
+ } else {
+ ErlHeapFragment *bp;
+ ErlOffHeap *off_heap;
+ ERTS_TRACER_REF_TYPE tracer_ref;
+ unsigned size;
+ unsigned value_size;
+#ifdef DEBUG
+ Eterm* limit;
+#endif
+
+ ASSERT(is_internal_pid(*tracer_pid)
+ && internal_pid_index(*tracer_pid) < erts_max_processes);
+
+ ERTS_GET_TRACER_REF(tracer_ref, *tracer_pid, *tracee_flags);
+
+ value_size = size_object(value);
+ size = 6 + 4 + 3 + value_size;
+ if (*tracee_flags & F_TIMESTAMP) {
+ size += 1+4;
+ }
+
+ hp = ERTS_ALLOC_SYSMSG_HEAP(size, &bp, &off_heap, tracer_ref);
+#ifdef DEBUG
+ limit = hp + size;
+#endif
+
+ /*
+ * Build the trace tuple and put it into receive queue of the tracer process.
+ */
+
+ mfa_tuple = TUPLE3(hp, mfa[0], mfa[1], make_small(mfa[2]));
+ hp += 4;
+ value = copy_struct(value, value_size, &hp, off_heap);
+ cv = TUPLE2(hp, class, value);
+ hp += 3;
+ mess = TUPLE5(hp, am_trace, p->id/* Local pid */,
+ am_exception_from, mfa_tuple, cv);
+ hp += 6;
+
+ erts_smp_mtx_lock(&smq_mtx);
+
+ if (*tracee_flags & F_TIMESTAMP) {
+ hp = patch_ts(mess, hp);
+ }
+
+ ASSERT(hp == limit);
+
+ ERTS_ENQ_TRACE_MSG(tracee, tracer_ref, mess, bp);
+ erts_smp_mtx_unlock(&smq_mtx);
+ }
+}
+
+/*
+ * This function implements the new call trace.
+ *
+ * Send {trace_ts, Pid, call, {Mod, Func, A}, PamResult, Timestamp}
+ * or {trace_ts, Pid, call, {Mod, Func, A}, Timestamp}
+ * or {trace, Pid, call, {Mod, Func, A}, PamResult}
+ * or   {trace, Pid, call, {Mod, Func, A}}
+ *
+ * where 'A' is arity or argument list depending on trace flag 'arity'.
+ *
+ * If *tracer_pid is am_true, it is a breakpoint trace that shall use
+ * the process tracer, if it is NIL no trace message is generated,
+ * if it is a pid or port we do a meta trace.
+ */
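+/*
+ * For example (values assumed), for a traced call to lists:seq(1, 10):
+ * with the 'arity' trace flag set the message is
+ * {trace, Pid, call, {lists, seq, 2}}; without it,
+ * {trace, Pid, call, {lists, seq, [1, 10]}}.
+ */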
+Uint32
+erts_call_trace(Process* p, Eterm mfa[3], Binary *match_spec,
+ Eterm* args, int local, Eterm *tracer_pid)
+{
+ Eterm* hp;
+ Eterm mfa_tuple;
+ int arity;
+ int i;
+ Uint32 return_flags;
+ Eterm pam_result = am_true;
+ Eterm mess;
+ Uint meta_flags, *tracee_flags;
+#ifdef ERTS_SMP
+ Eterm tracee;
+#endif
+ Eterm transformed_args[MAX_ARG];
+ ErlSubBin sub_bin_heap;
+
+ ASSERT(tracer_pid);
+ if (*tracer_pid == am_true) {
+ /* Breakpoint trace enabled without specifying tracer =>
+ * use process tracer and flags
+ */
+ tracer_pid = &p->tracer_proc;
+ }
+ if (is_nil(*tracer_pid)) {
+ /* Trace disabled */
+ return 0;
+ }
+ ASSERT(is_internal_pid(*tracer_pid) || is_internal_port(*tracer_pid));
+ if (*tracer_pid == p->id) {
+ /* Do not generate trace messages to oneself */
+ return 0;
+ }
+ if (tracer_pid == &p->tracer_proc) {
+ /* Tracer specified in process structure =>
+ * non-breakpoint trace =>
+ * use process flags
+ */
+ tracee_flags = &p->trace_flags;
+#ifdef ERTS_SMP
+ tracee = p->id;
+#endif
+ } else {
+ /* Tracer not specified in process structure =>
+ * tracer specified in breakpoint =>
+ * meta trace =>
+ * use fixed flag set instead of process flags
+ */
+ if (p->trace_flags & F_SENSITIVE) {
+ /* No trace messages for sensitive processes. */
+ return 0;
+ }
+ meta_flags = F_TRACE_CALLS | F_TIMESTAMP;
+ tracee_flags = &meta_flags;
+#ifdef ERTS_SMP
+ tracee = NIL;
+#endif
+ }
+
+ /*
+ * Because of the delayed sub-binary creation optimization introduced in
+     * R12B, (at most) one of the arguments can be a match context instead of
+ * a binary. Since we don't want to handle match contexts in utility functions
+ * such as size_object() and copy_struct(), we must make sure that we
+ * temporarily convert any match contexts to sub binaries.
+ */
+ arity = mfa[2];
+#ifdef DEBUG
+ sub_bin_heap.thing_word = 0;
+#endif
+ for (i = 0; i < arity; i++) {
+ Eterm arg = args[i];
+ if (is_boxed(arg) && header_is_bin_matchstate(*boxed_val(arg))) {
+ ErlBinMatchState* ms = (ErlBinMatchState *) boxed_val(arg);
+ ErlBinMatchBuffer* mb = &ms->mb;
+ ErlSubBin* sb = &sub_bin_heap;
+ Uint bit_size;
+
+            ASSERT(sub_bin_heap.thing_word == 0); /* At most one match context */
+
+ bit_size = mb->size - mb->offset;
+ sb->thing_word = HEADER_SUB_BIN;
+ sb->size = BYTE_OFFSET(bit_size);
+ sb->bitsize = BIT_OFFSET(bit_size);
+ sb->offs = BYTE_OFFSET(mb->offset);
+ sb->bitoffs = BIT_OFFSET(mb->offset);
+ sb->is_writable = 0;
+ sb->orig = mb->orig;
+
+ arg = make_binary(sb);
+ }
+ transformed_args[i] = arg;
+ }
+ args = transformed_args;
+
+ if (is_internal_port(*tracer_pid)) {
+ Eterm local_heap[64+MAX_ARG];
+ hp = local_heap;
+
+ if (!erts_is_valid_tracer_port(*tracer_pid)) {
+#ifdef ERTS_SMP
+ ASSERT(is_nil(tracee) || tracer_pid == &p->tracer_proc);
+ if (is_not_nil(tracee))
+ erts_smp_proc_lock(p, ERTS_PROC_LOCKS_ALL_MINOR);
+#endif
+ *tracee_flags &= ~TRACEE_FLAGS;
+ *tracer_pid = NIL;
+#ifdef ERTS_SMP
+ if (is_not_nil(tracee))
+ erts_smp_proc_unlock(p, ERTS_PROC_LOCKS_ALL_MINOR);
+#endif
+ return 0;
+ }
+
+ /*
+ * If there is a PAM program, run it. Return if it fails.
+ *
+ * Some precedence rules:
+ *
+         * - No proc flags, e.g. 'silent' or 'return_to',
+         *   have any effect on meta trace.
+         * - The 'silent' process trace flag silences all call-related
+         *   messages, e.g. 'call', 'return_to' and 'return_from'.
+ * - The {message,_} PAM function does not affect {return_trace}.
+ * - The {message,false} PAM function shall give the same
+ * 'call' trace message as no PAM match.
+ * - The {message,true} PAM function shall give the same
+ * 'call' trace message as a nonexistent PAM program.
+ */
+
+ /* BEGIN this code should be the same for port and pid trace */
+ return_flags = 0;
+ if (match_spec) {
+ pam_result = erts_match_set_run(p, match_spec, args, arity,
+ &return_flags);
+ if (is_non_value(pam_result)) {
+ erts_match_set_release_result(p);
+ return 0;
+ }
+ }
+ if (tracee_flags == &meta_flags) {
+ /* Meta trace */
+ if (pam_result == am_false) {
+ erts_match_set_release_result(p);
+ return return_flags;
+ }
+ } else {
+ /* Non-meta trace */
+ if (*tracee_flags & F_TRACE_SILENT) {
+ erts_match_set_release_result(p);
+ return 0;
+ }
+ if (pam_result == am_false) {
+ erts_match_set_release_result(p);
+ return return_flags;
+ }
+ if (local && (*tracee_flags & F_TRACE_RETURN_TO)) {
+ return_flags |= MATCH_SET_RETURN_TO_TRACE;
+ }
+ }
+ /* END this code should be the same for port and pid trace */
+
+ /*
+         * Build the {M,F,A} tuple in the local heap.
+ * (A is arguments or arity.)
+ */
+
+ if (*tracee_flags & F_TRACE_ARITY_ONLY) {
+ mfa_tuple = make_small(arity);
+ } else {
+ mfa_tuple = NIL;
+ for (i = arity-1; i >= 0; i--) {
+ mfa_tuple = CONS(hp, args[i], mfa_tuple);
+ hp += 2;
+ }
+ }
+ mfa_tuple = TUPLE3(hp, mfa[0], mfa[1], mfa_tuple);
+ hp += 4;
+
+ /*
+ * Build the trace tuple and send it to the port.
+ */
+
+ mess = TUPLE4(hp, am_trace, p->id, am_call, mfa_tuple);
+ hp += 5;
+ if (pam_result != am_true) {
+ hp[-5] = make_arityval(5);
+ *hp++ = pam_result;
+ }
+ erts_smp_mtx_lock(&smq_mtx);
+ if (*tracee_flags & F_TIMESTAMP) {
+ hp = patch_ts(mess, hp);
+ }
+ send_to_port(p, mess, tracer_pid, tracee_flags);
+ erts_smp_mtx_unlock(&smq_mtx);
+ erts_match_set_release_result(p);
+ return *tracer_pid == NIL ? 0 : return_flags;
+
+ } else {
+ ErlHeapFragment *bp;
+ ErlOffHeap *off_heap;
+ Process *tracer;
+ ERTS_TRACER_REF_TYPE tracer_ref;
+#ifdef ERTS_SMP
+ Eterm tpid;
+#endif
+ unsigned size;
+ unsigned sizes[MAX_ARG];
+ unsigned pam_result_size = 0;
+ int invalid_tracer;
+#ifdef DEBUG
+ Eterm* limit;
+#endif
+
+ ASSERT(is_internal_pid(*tracer_pid)
+ && internal_pid_index(*tracer_pid) < erts_max_processes);
+
+ tracer = erts_pid2proc(p, ERTS_PROC_LOCK_MAIN,
+ *tracer_pid, ERTS_PROC_LOCK_STATUS);
+ if (!tracer)
+ invalid_tracer = 1;
+ else {
+ invalid_tracer = (tracer->trace_flags & F_TRACER) == 0;
+ erts_smp_proc_unlock(tracer, ERTS_PROC_LOCK_STATUS);
+ }
+
+ if (invalid_tracer) {
+#ifdef ERTS_SMP
+ ASSERT(is_nil(tracee) || tracer_pid == &p->tracer_proc);
+ if (is_not_nil(tracee))
+ erts_smp_proc_lock(p, ERTS_PROC_LOCKS_ALL_MINOR);
+#endif
+ *tracee_flags &= ~TRACEE_FLAGS;
+ *tracer_pid = NIL;
+#ifdef ERTS_SMP
+ if (is_not_nil(tracee))
+ erts_smp_proc_unlock(p, ERTS_PROC_LOCKS_ALL_MINOR);
+#endif
+ return 0;
+ }
+
+#ifdef ERTS_SMP
+ tpid = *tracer_pid; /* Need to save tracer pid,
+ since *tracer_pid might
+ be reset by erts_match_set_run() */
+ tracer_ref = tpid;
+#else
+ tracer_ref = tracer;
+#endif
+
+ /*
+ * If there is a PAM program, run it. Return if it fails.
+ *
+ * See the rules above in the port trace code.
+ */
+
+ /* BEGIN this code should be the same for port and pid trace */
+ return_flags = 0;
+ if (match_spec) {
+ pam_result = erts_match_set_run(p, match_spec, args, arity,
+ &return_flags);
+ if (is_non_value(pam_result)) {
+ erts_match_set_release_result(p);
+ return 0;
+ }
+ }
+ if (tracee_flags == &meta_flags) {
+ /* Meta trace */
+ if (pam_result == am_false) {
+ erts_match_set_release_result(p);
+ return return_flags;
+ }
+ } else {
+ /* Non-meta trace */
+ if (*tracee_flags & F_TRACE_SILENT) {
+ erts_match_set_release_result(p);
+ return 0;
+ }
+ if (pam_result == am_false) {
+ erts_match_set_release_result(p);
+ return return_flags;
+ }
+ if (local && (*tracee_flags & F_TRACE_RETURN_TO)) {
+ return_flags |= MATCH_SET_RETURN_TO_TRACE;
+ }
+ }
+ /* END this code should be the same for port and pid trace */
+
+ /*
+ * Calculate number of words needed on heap.
+ */
+
+ size = 4 + 5; /* Trace tuple + MFA tuple. */
+ if (! (*tracee_flags & F_TRACE_ARITY_ONLY)) {
+ size += 2*arity;
+ for (i = arity-1; i >= 0; i--) {
+ sizes[i] = size_object(args[i]);
+ size += sizes[i];
+ }
+ }
+ if (*tracee_flags & F_TIMESTAMP) {
+ size += 1 + 4;
+ /* One element in trace tuple + timestamp tuple. */
+ }
+ if (pam_result != am_true) {
+ pam_result_size = size_object(pam_result);
+ size += 1 + pam_result_size;
+ /* One element in trace tuple + term size. */
+ }
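+	/*
+	 * Worked example (illustrative, not used by the code): a traced
+	 * call m:f(a, b) with timestamps on and pam_result == am_true
+	 * needs 4+5 (tuples) + 2*2 (conses) + 0 (immediate args)
+	 * + 1+4 (timestamp) = 18 words.
+	 */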
+
+ hp = ERTS_ALLOC_SYSMSG_HEAP(size, &bp, &off_heap, tracer_ref);
+#ifdef DEBUG
+ limit = hp + size;
+#endif
+
+ /*
+	 * Build the {M,F,A} tuple in the message buffer.
+ * (A is arguments or arity.)
+ */
+
+ if (*tracee_flags & F_TRACE_ARITY_ONLY) {
+ mfa_tuple = make_small(arity);
+ } else {
+ mfa_tuple = NIL;
+ for (i = arity-1; i >= 0; i--) {
+ Eterm term = copy_struct(args[i], sizes[i], &hp, off_heap);
+ mfa_tuple = CONS(hp, term, mfa_tuple);
+ hp += 2;
+ }
+ }
+ mfa_tuple = TUPLE3(hp, mfa[0], mfa[1], mfa_tuple);
+ hp += 4;
+
+ /*
+ * Copy the PAM result (if any) onto the heap.
+ */
+
+ if (pam_result != am_true) {
+ pam_result = copy_struct(pam_result, pam_result_size, &hp, off_heap);
+ }
+
+ erts_match_set_release_result(p);
+
+ /*
+ * Build the trace tuple and enqueue it.
+ */
+
+ mess = TUPLE4(hp, am_trace, p->id/* Local pid */, am_call, mfa_tuple);
+ hp += 5;
+ if (pam_result != am_true) {
+ hp[-5] = make_arityval(5);
+ *hp++ = pam_result;
+ }
+
+ erts_smp_mtx_lock(&smq_mtx);
+
+ if (*tracee_flags & F_TIMESTAMP) {
+ hp = patch_ts(mess, hp);
+ }
+
+ ASSERT(hp == limit);
+ ERTS_ENQ_TRACE_MSG(tracee, tracer_ref, mess, bp);
+ erts_smp_mtx_unlock(&smq_mtx);
+ return return_flags;
+ }
+}
+
+/* Sends trace message:
+ * {trace_ts, ProcessPid, What, Data, Timestamp}
+ * or {trace, ProcessPid, What, Data}
+ *
+ * 'what' must be atomic, 'data' may be a deep term.
+ * 'c_p' is the currently executing process, may be NULL.
+ * 't_p' is the traced process.
+ */
+void
+trace_proc(Process *c_p, Process *t_p, Eterm what, Eterm data)
+{
+ Eterm mess;
+ Eterm* hp;
+ int need;
+
+ if (is_internal_port(t_p->tracer_proc)) {
+ Eterm local_heap[5+5];
+ hp = local_heap;
+ mess = TUPLE4(hp, am_trace, t_p->id, what, data);
+ hp += 5;
+ erts_smp_mtx_lock(&smq_mtx);
+ if (t_p->trace_flags & F_TIMESTAMP) {
+ hp = patch_ts(mess, hp);
+ }
+ send_to_port(
+#ifndef ERTS_SMP
+ /* No fake schedule out and in again after an exit */
+ what == am_exit ? NULL : c_p,
+#else
+		      /* Fake schedule out/in messages are never sent when SMP is enabled */
+ c_p,
+#endif
+ mess, &t_p->tracer_proc, &t_p->trace_flags);
+ erts_smp_mtx_unlock(&smq_mtx);
+ } else {
+ Eterm tmp;
+ ErlHeapFragment *bp;
+ ErlOffHeap *off_heap;
+ ERTS_TRACER_REF_TYPE tracer_ref;
+ size_t sz_data;
+
+ ASSERT(is_internal_pid(t_p->tracer_proc)
+ && internal_pid_index(t_p->tracer_proc) < erts_max_processes);
+
+ ERTS_GET_TRACER_REF(tracer_ref, t_p->tracer_proc, t_p->trace_flags);
+
+ sz_data = size_object(data);
+
+ need = sz_data + 5 + TS_SIZE(t_p);
+
+ hp = ERTS_ALLOC_SYSMSG_HEAP(need, &bp, &off_heap, tracer_ref);
+
+ tmp = copy_struct(data, sz_data, &hp, off_heap);
+ mess = TUPLE4(hp, am_trace, t_p->id/* Local pid */, what, tmp);
+ hp += 5;
+
+ erts_smp_mtx_lock(&smq_mtx);
+
+ if (t_p->trace_flags & F_TIMESTAMP) {
+ hp = patch_ts(mess, hp);
+ }
+
+ ERTS_ENQ_TRACE_MSG(t_p->id, tracer_ref, mess, bp);
+ erts_smp_mtx_unlock(&smq_mtx);
+ }
+}
+
+
+/* Sends trace message:
+ * {trace_ts, ParentPid, spawn, ChildPid, {Mod, Func, Args}, Timestamp}
+ * or {trace, ParentPid, spawn, ChildPid, {Mod, Func, Args}}
+ *
+ * 'pid' is the ChildPid, 'mod' and 'func' must be atomic,
+ * and 'args' may be a deep term.
+ */
+void
+trace_proc_spawn(Process *p, Eterm pid,
+ Eterm mod, Eterm func, Eterm args)
+{
+ Eterm mfa;
+ Eterm mess;
+ Eterm* hp;
+
+ if (is_internal_port(p->tracer_proc)) {
+ Eterm local_heap[4+6+5];
+ hp = local_heap;
+ mfa = TUPLE3(hp, mod, func, args);
+ hp += 4;
+ mess = TUPLE5(hp, am_trace, p->id, am_spawn, pid, mfa);
+ hp += 6;
+ erts_smp_mtx_lock(&smq_mtx);
+ if (p->trace_flags & F_TIMESTAMP) {
+ hp = patch_ts(mess, hp);
+ }
+ send_to_port(p, mess, &p->tracer_proc, &p->trace_flags);
+ erts_smp_mtx_unlock(&smq_mtx);
+ } else {
+ Eterm tmp;
+ ErlHeapFragment *bp;
+ ErlOffHeap *off_heap;
+ ERTS_TRACER_REF_TYPE tracer_ref;
+ size_t sz_args, sz_pid;
+ Uint need;
+
+ ASSERT(is_internal_pid(p->tracer_proc)
+ && internal_pid_index(p->tracer_proc) < erts_max_processes);
+
+ ERTS_GET_TRACER_REF(tracer_ref, p->tracer_proc, p->trace_flags);
+
+ sz_args = size_object(args);
+ sz_pid = size_object(pid);
+ need = sz_args + 4 + 6 + TS_SIZE(p);
+
+ hp = ERTS_ALLOC_SYSMSG_HEAP(need, &bp, &off_heap, tracer_ref);
+
+ tmp = copy_struct(args, sz_args, &hp, off_heap);
+ mfa = TUPLE3(hp, mod, func, tmp);
+ hp += 4;
+ tmp = copy_struct(pid, sz_pid, &hp, off_heap);
+ mess = TUPLE5(hp, am_trace, p->id, am_spawn, tmp, mfa);
+ hp += 6;
+
+ erts_smp_mtx_lock(&smq_mtx);
+
+ if (p->trace_flags & F_TIMESTAMP) {
+ hp = patch_ts(mess, hp);
+ }
+
+ ERTS_ENQ_TRACE_MSG(p->id, tracer_ref, mess, bp);
+ erts_smp_mtx_unlock(&smq_mtx);
+ }
+}
+
+void save_calls(Process *p, Export *e)
+{
+ struct saved_calls *scb = ERTS_PROC_GET_SAVED_CALLS_BUF(p);
+ if (scb) {
+ Export **ct = &scb->ct[0];
+ int len = scb->len;
+
+ ct[scb->cur] = e;
+ if (++scb->cur >= len)
+ scb->cur = 0;
+ if (scb->n < len)
+ scb->n++;
+ }
+}
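+/*
+ * Illustrative sketch (not part of the emulator): the buffer above is a
+ * ring where 'cur' is the next write slot and 'n' the number of valid
+ * entries, so once the ring is full the oldest entry sits at 'cur'.
+ * A reader could walk it oldest-first like this:
+ *
+ *     int i, first = (scb->n < scb->len) ? 0 : scb->cur;
+ *     for (i = 0; i < scb->n; i++) {
+ *         Export *e = scb->ct[(first + i) % scb->len];
+ *         ...
+ *     }
+ */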
+
+/*
+ * Entry point called by the trace wrap functions in erl_bif_wrap.c
+ *
+ * The trace wrap functions are themselves called through the export
+ * entries instead of the original BIF functions.
+ */
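+/*
+ * A hypothetical wrapper (the real ones are generated; the name and the
+ * unused-argument convention here are assumptions) might look like:
+ *
+ *     Eterm wrap_foo_2(Process* p, Eterm a1, Eterm a2, Uint* I)
+ *     {
+ *         return erts_bif_trace(BIF_foo_2, p, a1, a2, THE_NON_VALUE, I);
+ *     }
+ */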
+Eterm
+erts_bif_trace(int bif_index, Process* p,
+ Eterm arg1, Eterm arg2, Eterm arg3, Uint *I)
+{
+ Eterm result;
+ int meta = !!(erts_bif_trace_flags[bif_index] & BIF_TRACE_AS_META);
+
+ ERTS_SMP_CHK_HAVE_ONLY_MAIN_PROC_LOCK(p);
+
+ if (!ARE_TRACE_FLAGS_ON(p, F_TRACE_CALLS) && (! meta)) {
+	/* Warning! This is an optimization.
+	 *
+	 * If neither meta tracing nor the process trace flags are
+	 * active, then no tracing will occur. Taking the whole else
+	 * branch would also do nothing, only slower.
+	 */
+ Eterm (*func)(Process*, Eterm, Eterm, Eterm, Uint*) = bif_table[bif_index].f;
+ result = func(p, arg1, arg2, arg3, I);
+ } else {
+ Eterm (*func)(Process*, Eterm, Eterm, Eterm, Uint*);
+ Export* ep = bif_export[bif_index];
+ Uint32 flags = 0, flags_meta = 0;
+ int global = !!(erts_bif_trace_flags[bif_index] & BIF_TRACE_AS_GLOBAL);
+ int local = !!(erts_bif_trace_flags[bif_index] & BIF_TRACE_AS_LOCAL);
+ Eterm meta_tracer_pid = NIL;
+ int applying = (I == &(ep->code[3])); /* Yup, the apply code for a bif
+ * is actually in the
+ * export entry */
+ Eterm *cp = p->cp;
+
+#ifndef _OSE_
+ Eterm args[3] = {arg1, arg2, arg3};
+#else
+ Eterm args[3];
+ args[0] = arg1;
+ args[1] = arg2;
+ args[2] = arg3;
+#endif
+
+ /*
+ * Make continuation pointer OK, it is not during direct BIF calls,
+ * but it is correct during apply of bif.
+ */
+ if (!applying) {
+ p->cp = I;
+ }
+ if (global || local) {
+ flags = erts_call_trace(p, ep->code, ep->match_prog_set, args,
+ local, &p->tracer_proc);
+ }
+ if (meta) {
+ flags_meta = erts_bif_mtrace(p, ep->code+3, args, local,
+ &meta_tracer_pid);
+ }
+ /* Restore original continuation pointer (if changed). */
+ p->cp = cp;
+
+ func = bif_table[bif_index].f;
+
+ result = func(p, arg1, arg2, arg3, I);
+
+ if (applying && (flags & MATCH_SET_RETURN_TO_TRACE)) {
+ Uint i_return_trace = beam_return_trace[0];
+ Uint i_return_to_trace = beam_return_to_trace[0];
+ Eterm *cpp;
+ /* Maybe advance cp to skip trace stack frames */
+ for (cpp = p->stop; ; cp = cp_val(*cpp++)) {
+ ASSERT(is_CP((Eterm) cp));
+ if (*cp_val((Eterm) cp) == i_return_trace) {
+ /* Skip stack frame variables */
+ while (is_not_CP(*cpp)) cpp++;
+ cpp += 2; /* Skip return_trace parameters */
+ } else if (*cp_val((Eterm) cp) == i_return_to_trace) {
+ /* A return_to trace message is going to be generated
+ * by normal means, so we do not have to.
+ */
+ cp = NULL;
+ break;
+ } else break;
+ }
+ }
+
+ /* Try to get these in the order
+ * they usually appear in normal code... */
+ if (is_non_value(result)) {
+ Uint reason = p->freason;
+ if (reason != TRAP) {
+ Eterm class;
+ Eterm value = p->fvalue;
+ Eterm nocatch[3];
+ /* Expand error value like in handle_error() */
+ if (reason & EXF_ARGLIST) {
+ Eterm *tp;
+ ASSERT(is_tuple(value));
+ tp = tuple_val(value);
+ value = tp[1];
+ }
+ if ((reason & EXF_THROWN) && (p->catches <= 0)) {
+ value = TUPLE2(nocatch, am_nocatch, value);
+ reason = EXC_ERROR;
+ }
+ /* Note: expand_error_value() could theoretically
+ * allocate on the heap, but not for any error
+ * returned by a BIF, and it would do no harm,
+ * just be annoying.
+ */
+ value = expand_error_value(p, reason, value);
+ class = exception_tag[GET_EXC_CLASS(reason)];
+
+ if (flags_meta & MATCH_SET_EXCEPTION_TRACE) {
+ erts_trace_exception(p, ep->code, class, value,
+ &meta_tracer_pid);
+ }
+ if (flags & MATCH_SET_EXCEPTION_TRACE) {
+ erts_trace_exception(p, ep->code, class, value,
+ &p->tracer_proc);
+ }
+ if ((flags & MATCH_SET_RETURN_TO_TRACE) && p->catches > 0) {
+ /* can only happen if(local)*/
+ Eterm *ptr = p->stop;
+ ASSERT(is_CP(*ptr));
+ ASSERT(ptr <= STACK_START(p));
+ /* Search the nearest stack frame for a catch */
+ while (++ptr < STACK_START(p)) {
+ if (is_CP(*ptr)) break;
+ if (is_catch(*ptr)) {
+ if (applying) {
+ /* Apply of BIF, cp is in calling function */
+ if (cp) erts_trace_return_to(p, cp);
+ } else {
+ /* Direct bif call, I points into
+ * calling function */
+ erts_trace_return_to(p, I);
+ }
+ }
+ }
+ }
+ if ((flags_meta|flags) & MATCH_SET_EXCEPTION_TRACE) {
+ erts_smp_proc_lock(p, ERTS_PROC_LOCKS_ALL_MINOR);
+ p->trace_flags |= F_EXCEPTION_TRACE;
+ erts_smp_proc_unlock(p, ERTS_PROC_LOCKS_ALL_MINOR);
+ }
+ }
+ } else {
+ if (flags_meta & MATCH_SET_RX_TRACE) {
+ erts_trace_return(p, ep->code, result, &meta_tracer_pid);
+ }
+ /* MATCH_SET_RETURN_TO_TRACE cannot occur if(meta) */
+ if (flags & MATCH_SET_RX_TRACE) {
+ erts_trace_return(p, ep->code, result, &p->tracer_proc);
+ }
+ if (flags & MATCH_SET_RETURN_TO_TRACE) {
+ /* can only happen if(local)*/
+ if (applying) {
+ /* Apply of BIF, cp is in calling function */
+ if (cp) erts_trace_return_to(p, cp);
+ } else {
+ /* Direct bif call, I points into calling function */
+ erts_trace_return_to(p, I);
+ }
+ }
+ }
+ }
+ ERTS_SMP_CHK_HAVE_ONLY_MAIN_PROC_LOCK(p);
+ return result;
+}
+
+/* Sends trace message:
+ * {trace_ts, Pid, What, Msg, Timestamp}
+ * or {trace, Pid, What, Msg}
+ *
+ * where 'What' must be atomic and 'Msg' is:
+ * [{heap_size, HeapSize}, {old_heap_size, OldHeapSize},
+ * {stack_size, StackSize}, {recent_size, RecentSize},
+ * {mbuf_size, MbufSize}]
+ *
+ * where 'HeapSize', 'OldHeapSize', 'StackSize', 'RecentSize' and 'MbufSize'
+ * are all small (atomic) integers.
+ */
+void
+trace_gc(Process *p, Eterm what)
+{
+ ErlHeapFragment *bp = NULL;
+ ErlOffHeap *off_heap;
+ ERTS_TRACER_REF_TYPE tracer_ref = ERTS_NULL_TRACER_REF; /* Initialized
+ to eliminate
+ compiler
+ warning */
+ Eterm* hp;
+ Eterm msg = NIL;
+ Uint size;
+ Eterm tags[] = {
+ am_old_heap_block_size,
+ am_heap_block_size,
+ am_mbuf_size,
+ am_recent_size,
+ am_stack_size,
+ am_old_heap_size,
+ am_heap_size
+ };
+ Uint values[] = {
+ OLD_HEAP(p) ? OLD_HEND(p) - OLD_HEAP(p) : 0,
+ HEAP_SIZE(p),
+ MBUF_SIZE(p),
+ HIGH_WATER(p) - HEAP_START(p),
+ STACK_START(p) - p->stop,
+ OLD_HEAP(p) ? OLD_HTOP(p) - OLD_HEAP(p) : 0,
+ HEAP_TOP(p) - HEAP_START(p)
+ };
+ Eterm local_heap[(sizeof(values)/sizeof(Uint))
+ *(2/*cons*/ + 3/*2-tuple*/ + BIG_UINT_HEAP_SIZE)
+ + 5/*4-tuple */ + TS_HEAP_WORDS];
+#ifdef DEBUG
+ Eterm* limit;
+#endif
+
+ ASSERT(sizeof(values)/sizeof(Uint) == sizeof(tags)/sizeof(Eterm));
+
+ if (is_internal_port(p->tracer_proc)) {
+ hp = local_heap;
+#ifdef DEBUG
+ size = 0;
+ (void) erts_bld_atom_uint_2tup_list(NULL,
+ &size,
+ sizeof(values)/sizeof(Uint),
+ tags,
+ values);
+ size += 5/*4-tuple*/ + TS_SIZE(p);
+#endif
+ } else {
+ ASSERT(is_internal_pid(p->tracer_proc)
+ && internal_pid_index(p->tracer_proc) < erts_max_processes);
+
+ ERTS_GET_TRACER_REF(tracer_ref, p->tracer_proc, p->trace_flags);
+
+ size = 0;
+ (void) erts_bld_atom_uint_2tup_list(NULL,
+ &size,
+ sizeof(values)/sizeof(Uint),
+ tags,
+ values);
+ size += 5/*4-tuple*/ + TS_SIZE(p);
+
+ hp = ERTS_ALLOC_SYSMSG_HEAP(size, &bp, &off_heap, tracer_ref);
+ }
+
+#ifdef DEBUG
+ limit = hp + size;
+ ASSERT(size <= sizeof(local_heap)/sizeof(Eterm));
+#endif
+
+ msg = erts_bld_atom_uint_2tup_list(&hp,
+ NULL,
+ sizeof(values)/sizeof(Uint),
+ tags,
+ values);
+
+ msg = TUPLE4(hp, am_trace, p->id/* Local pid */, what, msg);
+ hp += 5;
+
+ erts_smp_mtx_lock(&smq_mtx);
+
+ if (p->trace_flags & F_TIMESTAMP) {
+ hp = patch_ts(msg, hp);
+ }
+ ASSERT(hp == limit);
+ if (is_internal_port(p->tracer_proc))
+ send_to_port(p, msg, &p->tracer_proc, &p->trace_flags);
+ else
+ ERTS_ENQ_TRACE_MSG(p->id, tracer_ref, msg, bp);
+ erts_smp_mtx_unlock(&smq_mtx);
+}
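+/*
+ * Note on the pattern above (illustrative): the erts_bld_* builders are
+ * called twice -- first with a NULL heap pointer to accumulate the
+ * required size, then with a real heap pointer to build the term:
+ *
+ *     Uint sz = 0;
+ *     (void) erts_bld_atom_uint_2tup_list(NULL, &sz, n, tags, values);
+ *     hp = ERTS_ALLOC_SYSMSG_HEAP(sz + 5, &bp, &off_heap, tracer_ref);
+ *     msg = erts_bld_atom_uint_2tup_list(&hp, NULL, n, tags, values);
+ */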
+
+
+
+void
+monitor_long_gc(Process *p, Uint time) {
+ ErlHeapFragment *bp;
+ ErlOffHeap *off_heap;
+#ifndef ERTS_SMP
+ Process *monitor_p;
+#endif
+ Uint hsz;
+ Eterm *hp, list, msg;
+ Eterm tags[] = {
+ am_timeout,
+ am_old_heap_block_size,
+ am_heap_block_size,
+ am_mbuf_size,
+ am_stack_size,
+ am_old_heap_size,
+ am_heap_size
+ };
+ Eterm values[] = {
+ time,
+ OLD_HEAP(p) ? OLD_HEND(p) - OLD_HEAP(p) : 0,
+ HEAP_SIZE(p),
+ MBUF_SIZE(p),
+ STACK_START(p) - p->stop,
+ OLD_HEAP(p) ? OLD_HTOP(p) - OLD_HEAP(p) : 0,
+ HEAP_TOP(p) - HEAP_START(p)
+ };
+#ifdef DEBUG
+ Eterm *hp_end;
+#endif
+
+#ifndef ERTS_SMP
+ ASSERT(is_internal_pid(system_monitor)
+ && internal_pid_index(system_monitor) < erts_max_processes);
+ monitor_p = process_tab[internal_pid_index(system_monitor)];
+ if (INVALID_PID(monitor_p, system_monitor) || p == monitor_p) {
+ return;
+ }
+#endif
+
+ hsz = 0;
+ (void) erts_bld_atom_uint_2tup_list(NULL,
+ &hsz,
+ sizeof(values)/sizeof(Uint),
+ tags,
+ values);
+ hsz += 5 /* 4-tuple */;
+
+ hp = ERTS_ALLOC_SYSMSG_HEAP(hsz, &bp, &off_heap, monitor_p);
+
+#ifdef DEBUG
+ hp_end = hp + hsz;
+#endif
+
+ list = erts_bld_atom_uint_2tup_list(&hp,
+ NULL,
+ sizeof(values)/sizeof(Uint),
+ tags,
+ values);
+ msg = TUPLE4(hp, am_monitor, p->id/* Local pid */, am_long_gc, list);
+
+#ifdef DEBUG
+ hp += 5 /* 4-tuple */;
+ ASSERT(hp == hp_end);
+#endif
+
+#ifdef ERTS_SMP
+ enqueue_sys_msg(SYS_MSG_TYPE_SYSMON, p->id, NIL, msg, bp);
+#else
+ erts_queue_message(monitor_p, NULL, bp, msg, NIL);
+#endif
+}
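+/*
+ * The message enqueued above thus has the shape (cf. the tags list):
+ *     {monitor, Pid, long_gc,
+ *      [{timeout,T}, {old_heap_block_size,_}, {heap_block_size,_}, ...]}
+ */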
+
+void
+monitor_large_heap(Process *p) {
+ ErlHeapFragment *bp;
+ ErlOffHeap *off_heap;
+#ifndef ERTS_SMP
+ Process *monitor_p;
+#endif
+ Uint hsz;
+ Eterm *hp, list, msg;
+ Eterm tags[] = {
+ am_old_heap_block_size,
+ am_heap_block_size,
+ am_mbuf_size,
+ am_stack_size,
+ am_old_heap_size,
+ am_heap_size
+ };
+ Uint values[] = {
+ OLD_HEAP(p) ? OLD_HEND(p) - OLD_HEAP(p) : 0,
+ HEAP_SIZE(p),
+ MBUF_SIZE(p),
+ STACK_START(p) - p->stop,
+ OLD_HEAP(p) ? OLD_HTOP(p) - OLD_HEAP(p) : 0,
+ HEAP_TOP(p) - HEAP_START(p)
+ };
+#ifdef DEBUG
+ Eterm *hp_end;
+#endif
+
+
+#ifndef ERTS_SMP
+ ASSERT(is_internal_pid(system_monitor)
+ && internal_pid_index(system_monitor) < erts_max_processes);
+ monitor_p = process_tab[internal_pid_index(system_monitor)];
+ if (INVALID_PID(monitor_p, system_monitor) || p == monitor_p) {
+ return;
+ }
+#endif
+
+ hsz = 0;
+ (void) erts_bld_atom_uint_2tup_list(NULL,
+ &hsz,
+ sizeof(values)/sizeof(Uint),
+ tags,
+ values);
+ hsz += 5 /* 4-tuple */;
+
+ hp = ERTS_ALLOC_SYSMSG_HEAP(hsz, &bp, &off_heap, monitor_p);
+
+#ifdef DEBUG
+ hp_end = hp + hsz;
+#endif
+
+ list = erts_bld_atom_uint_2tup_list(&hp,
+ NULL,
+ sizeof(values)/sizeof(Uint),
+ tags,
+ values);
+ msg = TUPLE4(hp, am_monitor, p->id/* Local pid */, am_large_heap, list);
+
+#ifdef DEBUG
+ hp += 5 /* 4-tuple */;
+ ASSERT(hp == hp_end);
+#endif
+
+#ifdef ERTS_SMP
+ enqueue_sys_msg(SYS_MSG_TYPE_SYSMON, p->id, NIL, msg, bp);
+#else
+ erts_queue_message(monitor_p, NULL, bp, msg, NIL);
+#endif
+}
+
+void
+monitor_generic(Process *p, Eterm type, Eterm spec) {
+ ErlHeapFragment *bp;
+ ErlOffHeap *off_heap;
+#ifndef ERTS_SMP
+ Process *monitor_p;
+#endif
+ Eterm *hp, msg;
+
+#ifndef ERTS_SMP
+ ASSERT(is_internal_pid(system_monitor)
+ && internal_pid_index(system_monitor) < erts_max_processes);
+ monitor_p = process_tab[internal_pid_index(system_monitor)];
+ if (INVALID_PID(monitor_p, system_monitor) || p == monitor_p) {
+ return;
+ }
+#endif
+
+ hp = ERTS_ALLOC_SYSMSG_HEAP(5, &bp, &off_heap, monitor_p);
+
+ msg = TUPLE4(hp, am_monitor, p->id/* Local pid */, type, spec);
+ hp += 5;
+
+#ifdef ERTS_SMP
+ enqueue_sys_msg(SYS_MSG_TYPE_SYSMON, p->id, NIL, msg, bp);
+#else
+ erts_queue_message(monitor_p, NULL, bp, msg, NIL);
+#endif
+
+}
+
+
+/* Begin system_profile tracing */
+/* Scheduler profiling */
+
+void
+profile_scheduler(Eterm scheduler_id, Eterm state) {
+ Eterm *hp, msg, timestamp;
+ Uint Ms, s, us;
+
+#ifndef ERTS_SMP
+ Eterm local_heap[4 + 7];
+ hp = local_heap;
+#else
+ ErlHeapFragment *bp;
+ Uint hsz;
+
+ hsz = 4 + 7;
+
+ bp = new_message_buffer(hsz);
+ hp = bp->mem;
+#endif
+
+ erts_smp_mtx_lock(&smq_mtx);
+
+ switch (state) {
+ case am_active:
+ active_sched++;
+ break;
+ case am_inactive:
+ active_sched--;
+ break;
+ default:
+ ASSERT(!"Invalid state");
+ break;
+ }
+
+ GET_NOW(&Ms, &s, &us);
+ timestamp = TUPLE3(hp, make_small(Ms), make_small(s), make_small(us)); hp += 4;
+ msg = TUPLE6(hp, am_profile, am_scheduler, scheduler_id, state,
+ make_small(active_sched), timestamp); hp += 7;
+
+#ifndef ERTS_SMP
+ profile_send(msg);
+#else
+ enqueue_sys_msg_unlocked(SYS_MSG_TYPE_SYSPROF, NIL, NIL, msg, bp);
+#endif
+ erts_smp_mtx_unlock(&smq_mtx);
+
+}
+
+void
+profile_scheduler_q(Eterm scheduler_id, Eterm state, Eterm no_schedulers, Uint Ms, Uint s, Uint us) {
+ Eterm *hp, msg, timestamp;
+
+#ifndef ERTS_SMP
+ Eterm local_heap[4 + 7];
+ hp = local_heap;
+#else
+ ErlHeapFragment *bp;
+ Uint hsz;
+
+ hsz = 4 + 7;
+
+ bp = new_message_buffer(hsz);
+ hp = bp->mem;
+#endif
+
+ erts_smp_mtx_lock(&smq_mtx);
+
+ timestamp = TUPLE3(hp, make_small(Ms), make_small(s), make_small(us)); hp += 4;
+ msg = TUPLE6(hp, am_profile, am_scheduler, scheduler_id, state, no_schedulers, timestamp); hp += 7;
+#ifndef ERTS_SMP
+ profile_send(msg);
+#else
+ enqueue_sys_msg_unlocked(SYS_MSG_TYPE_SYSPROF, NIL, NIL, msg, bp);
+#endif
+ erts_smp_mtx_unlock(&smq_mtx);
+
+}
+
+
+/* Send {trace_ts, Pid, What, {Mod, Func, Arity}, Timestamp}
+ * or {trace, Pid, What, {Mod, Func, Arity}}
+ *
+ * where 'What' is supposed to be 'in' or 'out'.
+ *
+ * Virtual scheduling does not fake scheduling for ports.
+ */
+
+
+void trace_virtual_sched(Process *p, Eterm what)
+{
+ trace_sched_aux(p, what, 1);
+}
+
+/* Port profiling */
+
+void
+trace_port_open(Port *p, Eterm calling_pid, Eterm drv_name) {
+ Eterm mess;
+ Eterm* hp;
+
+ if (is_internal_port(p->tracer_proc)) {
+ Eterm local_heap[5+6];
+ hp = local_heap;
+
+ mess = TUPLE5(hp, am_trace, calling_pid, am_open, p->id, drv_name);
+ hp += 6;
+ erts_smp_mtx_lock(&smq_mtx);
+ if (p->trace_flags & F_TIMESTAMP) {
+ hp = patch_ts(mess, hp);
+ }
+ /* No fake schedule */
+ send_to_port(NULL, mess, &p->tracer_proc, &p->trace_flags);
+ erts_smp_mtx_unlock(&smq_mtx);
+ } else {
+ ErlHeapFragment *bp;
+ ErlOffHeap *off_heap;
+ size_t sz_data;
+ ERTS_TRACER_REF_TYPE tracer_ref;
+
+ ASSERT(is_internal_pid(p->tracer_proc)
+ && internal_pid_index(p->tracer_proc) < erts_max_processes);
+
+ sz_data = 6 + TS_SIZE(p);
+
+ ERTS_GET_TRACER_REF(tracer_ref, p->tracer_proc, p->trace_flags);
+
+ hp = ERTS_ALLOC_SYSMSG_HEAP(sz_data, &bp, &off_heap, tracer_ref);
+
+ mess = TUPLE5(hp, am_trace, calling_pid, am_open, p->id, drv_name);
+ hp += 6;
+
+ erts_smp_mtx_lock(&smq_mtx);
+
+ if (p->trace_flags & F_TIMESTAMP) {
+ hp = patch_ts(mess, hp);
+ }
+
+ ERTS_ENQ_TRACE_MSG(p->id, tracer_ref, mess, bp);
+ erts_smp_mtx_unlock(&smq_mtx);
+ }
+
+}
+
+/* Sends trace message:
+ * {trace_ts, PortPid, What, Data, Timestamp}
+ * or {trace, PortPid, What, Data}
+ *
+ * 'what' must be atomic, 'data' must be atomic.
+ * 't_p' is the traced port.
+ */
+void
+trace_port(Port *t_p, Eterm what, Eterm data) {
+ Eterm mess;
+ Eterm* hp;
+
+ if (is_internal_port(t_p->tracer_proc)) {
+ Eterm local_heap[5+5];
+ hp = local_heap;
+ mess = TUPLE4(hp, am_trace, t_p->id, what, data);
+ hp += 5;
+ erts_smp_mtx_lock(&smq_mtx);
+ if (t_p->trace_flags & F_TIMESTAMP) {
+ hp = patch_ts(mess, hp);
+ }
+ /* No fake schedule */
+ send_to_port(NULL, mess, &t_p->tracer_proc, &t_p->trace_flags);
+ erts_smp_mtx_unlock(&smq_mtx);
+ } else {
+ ErlHeapFragment *bp;
+ ErlOffHeap *off_heap;
+ size_t sz_data;
+ ERTS_TRACER_REF_TYPE tracer_ref;
+
+ ASSERT(is_internal_pid(t_p->tracer_proc)
+ && internal_pid_index(t_p->tracer_proc) < erts_max_processes);
+
+ sz_data = 5 + TS_SIZE(t_p);
+
+ ERTS_GET_TRACER_REF(tracer_ref, t_p->tracer_proc, t_p->trace_flags);
+
+ hp = ERTS_ALLOC_SYSMSG_HEAP(sz_data, &bp, &off_heap, tracer_ref);
+
+ mess = TUPLE4(hp, am_trace, t_p->id, what, data);
+ hp += 5;
+
+ erts_smp_mtx_lock(&smq_mtx);
+
+ if (t_p->trace_flags & F_TIMESTAMP) {
+ hp = patch_ts(mess, hp);
+ }
+
+ ERTS_ENQ_TRACE_MSG(t_p->id, tracer_ref, mess, bp);
+ erts_smp_mtx_unlock(&smq_mtx);
+ }
+}
+
+/* Send {trace_ts, Pid, What, {Mod, Func, Arity}, Timestamp}
+ * or {trace, Pid, What, {Mod, Func, Arity}}
+ *
+ * where 'What' is supposed to be 'in' or 'out', and
+ * 'where' is the location (callback) within the port.
+ */
+
+void
+trace_sched_ports(Port *p, Eterm what) {
+ trace_sched_ports_where(p,what, make_small(0));
+}
+
+void
+trace_sched_ports_where(Port *p, Eterm what, Eterm where) {
+ Eterm mess;
+ Eterm* hp;
+ int ws = 5;
+ Eterm sched_id = am_undefined;
+
+ if (is_internal_port(p->tracer_proc)) {
+ Eterm local_heap[5+6];
+ hp = local_heap;
+
+ if (IS_TRACED_FL(p, F_TRACE_SCHED_NO)) {
+#ifdef ERTS_SMP
+ ErtsSchedulerData *esd = erts_get_scheduler_data();
+ if (esd) sched_id = make_small(esd->no);
+ else sched_id = am_undefined;
+#else
+ sched_id = make_small(1);
+#endif
+ mess = TUPLE5(hp, am_trace, p->id, what, sched_id, where);
+ ws = 6;
+ } else {
+ mess = TUPLE4(hp, am_trace, p->id, what, where);
+ ws = 5;
+ }
+ hp += ws;
+
+ erts_smp_mtx_lock(&smq_mtx);
+ if (p->trace_flags & F_TIMESTAMP) {
+ hp = patch_ts(mess, hp);
+ }
+
+ /* No fake scheduling */
+ send_to_port(NULL, mess, &p->tracer_proc, &p->trace_flags);
+ erts_smp_mtx_unlock(&smq_mtx);
+ } else {
+ ErlHeapFragment *bp;
+ ErlOffHeap *off_heap;
+ ERTS_TRACER_REF_TYPE tracer_ref;
+
+ ASSERT(is_internal_pid(p->tracer_proc)
+ && internal_pid_index(p->tracer_proc) < erts_max_processes);
+
+	if (IS_TRACED_FL(p, F_TRACE_SCHED_NO)) ws = 6; /* Make room for scheduler id */
+
+ ERTS_GET_TRACER_REF(tracer_ref, p->tracer_proc, p->trace_flags);
+
+ hp = ERTS_ALLOC_SYSMSG_HEAP(ws+TS_SIZE(p), &bp, &off_heap, tracer_ref);
+
+ if (IS_TRACED_FL(p, F_TRACE_SCHED_NO)) {
+#ifdef ERTS_SMP
+ ErtsSchedulerData *esd = erts_get_scheduler_data();
+ if (esd) sched_id = make_small(esd->no);
+ else sched_id = am_undefined;
+#else
+ sched_id = make_small(1);
+#endif
+ mess = TUPLE5(hp, am_trace, p->id, what, sched_id, where);
+ } else {
+ mess = TUPLE4(hp, am_trace, p->id, what, where);
+ }
+ hp += ws;
+
+ erts_smp_mtx_lock(&smq_mtx);
+
+ if (p->trace_flags & F_TIMESTAMP) {
+ hp = patch_ts(mess, hp);
+ }
+
+ ERTS_ENQ_TRACE_MSG(p->id, tracer_ref, mess, bp);
+ erts_smp_mtx_unlock(&smq_mtx);
+ }
+}
+
+/* Port profiling */
+
+void
+profile_runnable_port(Port *p, Eterm status) {
+ Uint Ms, s, us;
+ Eterm *hp, msg, timestamp;
+
+ Eterm count = make_small(0);
+
+#ifndef ERTS_SMP
+ Eterm local_heap[4 + 6];
+ hp = local_heap;
+
+#else
+ ErlHeapFragment *bp;
+ Uint hsz;
+
+ hsz = 4 + 6;
+
+ bp = new_message_buffer(hsz);
+ hp = bp->mem;
+#endif
+
+ erts_smp_mtx_lock(&smq_mtx);
+
+ GET_NOW(&Ms, &s, &us);
+ timestamp = TUPLE3(hp, make_small(Ms), make_small(s), make_small(us)); hp += 4;
+ msg = TUPLE5(hp, am_profile, p->id, status, count, timestamp); hp += 6;
+
+#ifndef ERTS_SMP
+ profile_send(msg);
+#else
+ enqueue_sys_msg_unlocked(SYS_MSG_TYPE_SYSPROF, NIL, NIL, msg, bp);
+#endif
+ erts_smp_mtx_unlock(&smq_mtx);
+}
+
+/* Process profiling */
+void
+profile_runnable_proc(Process *p, Eterm status){
+ Uint Ms, s, us;
+ Eterm *hp, msg, timestamp;
+ Eterm where = am_undefined;
+
+#ifndef ERTS_SMP
+ Eterm local_heap[4 + 6 + 4];
+ hp = local_heap;
+#else
+ ErlHeapFragment *bp;
+ Uint hsz = 4 + 6 + 4;
+#endif
+
+ if (!p->current) {
+ p->current = find_function_from_pc(p->i);
+ }
+
+#ifdef ERTS_SMP
+ if (!p->current) {
+ hsz = 4 + 6;
+ }
+
+ bp = new_message_buffer(hsz);
+ hp = bp->mem;
+#endif
+
+ if (p->current) {
+ where = TUPLE3(hp, p->current[0], p->current[1], make_small(p->current[2])); hp += 4;
+ } else {
+ where = make_small(0);
+ }
+
+ erts_smp_mtx_lock(&smq_mtx);
+
+ GET_NOW(&Ms, &s, &us);
+ timestamp = TUPLE3(hp, make_small(Ms), make_small(s), make_small(us)); hp += 4;
+ msg = TUPLE5(hp, am_profile, p->id, status, where, timestamp); hp += 6;
+#ifndef ERTS_SMP
+ profile_send(msg);
+#else
+ enqueue_sys_msg_unlocked(SYS_MSG_TYPE_SYSPROF, NIL, NIL, msg, bp);
+#endif
+ erts_smp_mtx_unlock(&smq_mtx);
+}
+/* End system_profile tracing */
+
+
+
+#ifdef ERTS_SMP
+
+void
+erts_check_my_tracer_proc(Process *p)
+{
+ if (is_internal_pid(p->tracer_proc)) {
+ Process *tracer = erts_pid2proc(p, ERTS_PROC_LOCK_MAIN,
+ p->tracer_proc, ERTS_PROC_LOCK_STATUS);
+ int invalid_tracer = !tracer || !(tracer->trace_flags & F_TRACER);
+ if (tracer)
+ erts_smp_proc_unlock(tracer, ERTS_PROC_LOCK_STATUS);
+ if (invalid_tracer) {
+ erts_smp_proc_lock(p, ERTS_PROC_LOCKS_ALL_MINOR);
+ p->trace_flags &= ~TRACEE_FLAGS;
+ p->tracer_proc = NIL;
+ erts_smp_proc_unlock(p, ERTS_PROC_LOCKS_ALL_MINOR);
+ }
+ }
+}
+
+
+typedef struct ErtsSysMsgQ_ ErtsSysMsgQ;
+struct ErtsSysMsgQ_ {
+ ErtsSysMsgQ *next;
+ enum ErtsSysMsgType type;
+ Eterm from;
+ Eterm to;
+ Eterm msg;
+ ErlHeapFragment *bp;
+};
+
+static ErtsSysMsgQ *sys_message_queue;
+static ErtsSysMsgQ *sys_message_queue_end;
+
+static erts_tid_t sys_msg_dispatcher_tid;
+static erts_cnd_t smq_cnd;
+
+static int dispatcher_waiting;
+
+ERTS_QUALLOC_IMPL(smq_element, ErtsSysMsgQ, 20, ERTS_ALC_T_SYS_MSG_Q)
+
+static void
+enqueue_sys_msg_unlocked(enum ErtsSysMsgType type,
+ Eterm from,
+ Eterm to,
+ Eterm msg,
+ ErlHeapFragment *bp)
+{
+ ErtsSysMsgQ *smqp;
+
+ smqp = smq_element_alloc();
+ smqp->next = NULL;
+ smqp->type = type;
+ smqp->from = from;
+ smqp->to = to;
+ smqp->msg = msg;
+ smqp->bp = bp;
+
+ if (sys_message_queue_end) {
+ ASSERT(sys_message_queue);
+ sys_message_queue_end->next = smqp;
+ }
+ else {
+ ASSERT(!sys_message_queue);
+ sys_message_queue = smqp;
+ }
+ sys_message_queue_end = smqp;
+ erts_smp_cnd_signal(&smq_cnd);
+}
+
+static void
+enqueue_sys_msg(enum ErtsSysMsgType type,
+ Eterm from,
+ Eterm to,
+ Eterm msg,
+ ErlHeapFragment *bp)
+{
+ erts_smp_mtx_lock(&smq_mtx);
+ enqueue_sys_msg_unlocked(type, from, to, msg, bp);
+ erts_smp_mtx_unlock(&smq_mtx);
+}
+
+static void
+prepare_for_block(void *unused)
+{
+ erts_smp_mtx_unlock(&smq_mtx);
+}
+
+static void
+resume_after_block(void *unused)
+{
+ erts_smp_mtx_lock(&smq_mtx);
+}
+
+void
+erts_queue_error_logger_message(Eterm from, Eterm msg, ErlHeapFragment *bp)
+{
+ enqueue_sys_msg(SYS_MSG_TYPE_ERRLGR, from, am_error_logger, msg, bp);
+}
+
+void
+erts_send_sys_msg_proc(Eterm from, Eterm to, Eterm msg, ErlHeapFragment *bp)
+{
+ ASSERT(is_internal_pid(to));
+ enqueue_sys_msg(SYS_MSG_TYPE_PROC_MSG, from, to, msg, bp);
+}
+
+#ifdef DEBUG_PRINTOUTS
+static void
+print_msg_type(ErtsSysMsgQ *smqp)
+{
+ switch (smqp->type) {
+ case SYS_MSG_TYPE_TRACE:
+ erts_fprintf(stderr, "TRACE ");
+ break;
+ case SYS_MSG_TYPE_SEQTRACE:
+ erts_fprintf(stderr, "SEQTRACE ");
+ break;
+ case SYS_MSG_TYPE_SYSMON:
+ erts_fprintf(stderr, "SYSMON ");
+ break;
+ case SYS_MSG_TYPE_SYSPROF:
+ erts_fprintf(stderr, "SYSPROF ");
+ break;
+ case SYS_MSG_TYPE_ERRLGR:
+ erts_fprintf(stderr, "ERRLGR ");
+ break;
+ case SYS_MSG_TYPE_PROC_MSG:
+ erts_fprintf(stderr, "PROC_MSG ");
+ break;
+ default:
+ erts_fprintf(stderr, "??? ");
+ break;
+ }
+}
+#endif
+
+static void
+sys_msg_disp_failure(ErtsSysMsgQ *smqp, Eterm receiver)
+{
+ switch (smqp->type) {
+ case SYS_MSG_TYPE_TRACE:
+	/* Invalid tracer_proc values are removed when processes
+	   are scheduled in. */
+ break;
+ case SYS_MSG_TYPE_SEQTRACE:
+ /* Reset seq_tracer if it hasn't changed */
+ erts_smp_mtx_lock(&sys_trace_mtx);
+ if (system_seq_tracer == receiver)
+ system_seq_tracer = am_false;
+ erts_smp_mtx_unlock(&sys_trace_mtx);
+ break;
+ case SYS_MSG_TYPE_SYSMON:
+ if (receiver == NIL
+ && !erts_system_monitor_long_gc
+ && !erts_system_monitor_large_heap
+ && !erts_system_monitor_flags.busy_port
+ && !erts_system_monitor_flags.busy_dist_port)
+ break; /* Everything is disabled */
+ erts_smp_block_system(ERTS_BS_FLG_ALLOW_GC);
+ if (system_monitor == receiver || receiver == NIL)
+ erts_system_monitor_clear(NULL);
+ erts_smp_release_system();
+ break;
+ case SYS_MSG_TYPE_SYSPROF:
+ if (receiver == NIL
+ && !erts_system_profile_flags.runnable_procs
+ && !erts_system_profile_flags.runnable_ports
+ && !erts_system_profile_flags.exclusive
+ && !erts_system_profile_flags.scheduler)
+ break;
+ /* Block system to clear flags */
+ erts_smp_block_system(0);
+ if (system_profile == receiver || receiver == NIL) {
+ erts_system_profile_clear(NULL);
+ }
+ erts_smp_release_system();
+ break;
+ case SYS_MSG_TYPE_ERRLGR: {
+ char *no_elgger = "(no error logger present)";
+ Eterm *tp;
+ Eterm tag;
+	if (is_not_tuple(smqp->msg)) {
+	unexpected_elmsg:
+	    erts_fprintf(stderr,
+			 "%s unexpected error logger message: %T\n",
+			 no_elgger,
+			 smqp->msg);
+	    break; /* The message has no usable structure; stop here
+		      instead of falling through to the tuple checks. */
+	}
+
+ tp = tuple_val(smqp->msg);
+ if (arityval(tp[0]) != 2)
+ goto unexpected_elmsg;
+ if (is_not_tuple(tp[2]))
+ goto unexpected_elmsg;
+ tp = tuple_val(tp[2]);
+ if (arityval(tp[0]) != 3)
+ goto unexpected_elmsg;
+ tag = tp[1];
+ if (is_not_tuple(tp[3]))
+ goto unexpected_elmsg;
+ tp = tuple_val(tp[3]);
+ if (arityval(tp[0]) != 3)
+ goto unexpected_elmsg;
+ if (is_not_list(tp[3]))
+ goto unexpected_elmsg;
+ erts_fprintf(stderr, "%s %T: %T\n",
+ no_elgger, tag, CAR(list_val(tp[3])));
+ break;
+ }
+ case SYS_MSG_TYPE_PROC_MSG:
+ break;
+ default:
+ ASSERT(0);
+ }
+}
+
+static void *
+sys_msg_dispatcher_func(void *unused)
+{
+ ErtsSysMsgQ *local_sys_message_queue = NULL;
+
+#ifdef ERTS_ENABLE_LOCK_CHECK
+ erts_lc_set_thread_name("system message dispatcher");
+#endif
+
+ erts_register_blockable_thread();
+ erts_smp_activity_begin(ERTS_ACTIVITY_IO, NULL, NULL, NULL);
+
+ while (1) {
+ ErtsSysMsgQ *smqp;
+
+ ERTS_SMP_LC_ASSERT(!ERTS_LC_IS_BLOCKING);
+
+ erts_smp_mtx_lock(&smq_mtx);
+
+ /* Free previously used queue ... */
+ while (local_sys_message_queue) {
+ smqp = local_sys_message_queue;
+ local_sys_message_queue = smqp->next;
+ smq_element_free(smqp);
+ }
+
+ /* Fetch current trace message queue ... */
+ erts_smp_activity_change(ERTS_ACTIVITY_IO,
+ ERTS_ACTIVITY_WAIT,
+ prepare_for_block,
+ resume_after_block,
+ NULL);
+ dispatcher_waiting = 1;
+ while (!sys_message_queue)
+ erts_smp_cnd_wait(&smq_cnd, &smq_mtx);
+ dispatcher_waiting = 0;
+ erts_smp_activity_change(ERTS_ACTIVITY_WAIT,
+ ERTS_ACTIVITY_IO,
+ prepare_for_block,
+ resume_after_block,
+ NULL);
+
+ local_sys_message_queue = sys_message_queue;
+ sys_message_queue = NULL;
+ sys_message_queue_end = NULL;
+
+ erts_smp_mtx_unlock(&smq_mtx);
+
+ /* Send trace messages ... */
+
+ ASSERT(local_sys_message_queue);
+
+ for (smqp = local_sys_message_queue; smqp; smqp = smqp->next) {
+ Eterm receiver;
+ ErtsProcLocks proc_locks = ERTS_PROC_LOCKS_MSG_SEND;
+ Process *proc = NULL;
+ Port *port = NULL;
+
+#ifdef DEBUG_PRINTOUTS
+ print_msg_type(smqp);
+#endif
+ switch (smqp->type) {
+ case SYS_MSG_TYPE_TRACE:
+ case SYS_MSG_TYPE_PROC_MSG:
+ receiver = smqp->to;
+ break;
+ case SYS_MSG_TYPE_SEQTRACE:
+ receiver = erts_get_system_seq_tracer();
+ break;
+ case SYS_MSG_TYPE_SYSMON:
+ receiver = erts_get_system_monitor();
+ if (smqp->from == receiver) {
+#ifdef DEBUG_PRINTOUTS
+ erts_fprintf(stderr, "MSG=%T to %T... ",
+ smqp->msg, receiver);
+#endif
+ goto drop_sys_msg;
+ }
+ break;
+ case SYS_MSG_TYPE_SYSPROF:
+ receiver = erts_get_system_profile();
+ if (smqp->from == receiver) {
+#ifdef DEBUG_PRINTOUTS
+ erts_fprintf(stderr, "MSG=%T to %T... ",
+ smqp->msg, receiver);
+#endif
+ goto drop_sys_msg;
+ }
+ break;
+ case SYS_MSG_TYPE_ERRLGR:
+ receiver = am_error_logger;
+ break;
+ default:
+ receiver = NIL;
+ break;
+ }
+
+#ifdef DEBUG_PRINTOUTS
+ erts_fprintf(stderr, "MSG=%T to %T... ", smqp->msg, receiver);
+#endif
+
+ if (is_internal_pid(receiver)) {
+ proc = erts_pid2proc(NULL, 0, receiver, proc_locks);
+ if (!proc
+ || (smqp->type == SYS_MSG_TYPE_TRACE
+ && !(proc->trace_flags & F_TRACER))) {
+ /* Bad tracer */
+#ifdef DEBUG_PRINTOUTS
+ if (smqp->type == SYS_MSG_TYPE_TRACE && proc)
+ erts_fprintf(stderr,
+ "<tracer alive but missing "
+ "F_TRACER flag> ");
+#endif
+ goto failure;
+ }
+ else {
+ queue_proc_msg:
+ erts_queue_message(proc,&proc_locks,smqp->bp,smqp->msg,NIL);
+#ifdef DEBUG_PRINTOUTS
+ erts_fprintf(stderr, "delivered\n");
+#endif
+ erts_smp_proc_unlock(proc, proc_locks);
+ }
+ }
+ else if (receiver == am_error_logger) {
+ proc = erts_whereis_process(NULL,0,receiver,proc_locks,0);
+ if (!proc)
+ goto failure;
+ else if (smqp->from == proc->id)
+ goto drop_sys_msg;
+ else
+ goto queue_proc_msg;
+ }
+ else if (is_internal_port(receiver)) {
+ port = erts_id2port(receiver, NULL, 0);
+ if (INVALID_TRACER_PORT(port, receiver)) {
+ if (port)
+ erts_port_release(port);
+ goto failure;
+ }
+ else {
+ write_sys_msg_to_port(receiver,
+ port,
+ smqp->from,
+ smqp->type,
+ smqp->msg);
+ if (port->control_flags & PORT_CONTROL_FLAG_HEAVY)
+ port->control_flags &= ~PORT_CONTROL_FLAG_HEAVY;
+#ifdef DEBUG_PRINTOUTS
+ erts_fprintf(stderr, "delivered\n");
+#endif
+ erts_port_release(port);
+ if (smqp->bp)
+ free_message_buffer(smqp->bp);
+ }
+ }
+ else {
+ failure:
+ sys_msg_disp_failure(smqp, receiver);
+ drop_sys_msg:
+ if (proc)
+ erts_smp_proc_unlock(proc, proc_locks);
+ if (smqp->bp)
+ free_message_buffer(smqp->bp);
+#ifdef DEBUG_PRINTOUTS
+ erts_fprintf(stderr, "dropped\n");
+#endif
+ }
+ }
+ }
+
+ erts_smp_activity_end(ERTS_ACTIVITY_IO, NULL, NULL, NULL);
+ return NULL;
+}
+
+void
+erts_foreach_sys_msg_in_q(void (*func)(Eterm,
+ Eterm,
+ Eterm,
+ ErlHeapFragment *))
+{
+ ErtsSysMsgQ *sm;
+ erts_smp_mtx_lock(&smq_mtx);
+ for (sm = sys_message_queue; sm; sm = sm->next) {
+ Eterm to;
+ switch (sm->type) {
+ case SYS_MSG_TYPE_TRACE:
+ to = sm->to;
+ break;
+ case SYS_MSG_TYPE_SEQTRACE:
+ to = erts_get_system_seq_tracer();
+ break;
+ case SYS_MSG_TYPE_SYSMON:
+ to = erts_get_system_monitor();
+ break;
+ case SYS_MSG_TYPE_SYSPROF:
+ to = erts_get_system_profile();
+ break;
+ case SYS_MSG_TYPE_ERRLGR:
+ to = am_error_logger;
+ break;
+ default:
+ to = NIL;
+ break;
+ }
+ (*func)(sm->from, to, sm->msg, sm->bp);
+ }
+ erts_smp_mtx_unlock(&smq_mtx);
+}
+
+
+static void
+init_sys_msg_dispatcher(void)
+{
+ erts_smp_thr_opts_t thr_opts = ERTS_SMP_THR_OPTS_DEFAULT_INITER;
+ thr_opts.detached = 1;
+ init_smq_element_alloc();
+ sys_message_queue = NULL;
+ sys_message_queue_end = NULL;
+ erts_smp_cnd_init(&smq_cnd);
+ erts_smp_mtx_init(&smq_mtx, "sys_msg_q");
+ dispatcher_waiting = 0;
+ erts_smp_thr_create(&sys_msg_dispatcher_tid,
+ sys_msg_dispatcher_func,
+ NULL,
+ &thr_opts);
+}
+
+#endif
diff --git a/erts/emulator/beam/erl_unicode.c b/erts/emulator/beam/erl_unicode.c
new file mode 100644
index 0000000000..ab5811c70f
--- /dev/null
+++ b/erts/emulator/beam/erl_unicode.c
@@ -0,0 +1,1815 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2008-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+#include "sys.h"
+#include "erl_vm.h"
+#include "global.h"
+#include "erl_process.h"
+#include "error.h"
+#include "bif.h"
+#include "erl_binary.h"
+#include "big.h"
+
+#include "erl_unicode.h"
+
+typedef struct _restart_context {
+ byte *bytes;
+ Uint num_processed_bytes;
+ Uint num_bytes_to_process;
+ Uint num_resulting_chars;
+ int state;
+} RestartContext;
+
+
+#define LOOP_FACTOR 10
+#define LOOP_FACTOR_SIMPLE 50 /* When just counting */
+
+static Uint max_loop_limit;
+
+static BIF_RETTYPE utf8_to_list(BIF_ALIST_1);
+static BIF_RETTYPE finalize_list_to_list(Process *p,
+ byte *bytes,
+ Eterm rest,
+ Uint num_processed_bytes,
+ Uint num_bytes_to_process,
+ Uint num_resulting_chars,
+ int state, int left,
+ Eterm tail);
+static int analyze_utf8(byte *source, Uint size,
+ byte **err_pos, Uint *num_chars, int *left);
+#define UTF8_OK 0
+#define UTF8_INCOMPLETE 1
+#define UTF8_ERROR 2
+#define UTF8_ANALYZE_MORE 3
+
+static BIF_RETTYPE characters_to_utf8_trap(BIF_ALIST_3);
+static BIF_RETTYPE characters_to_list_trap_1(BIF_ALIST_3);
+static BIF_RETTYPE characters_to_list_trap_2(BIF_ALIST_3);
+
+static BIF_RETTYPE characters_to_list_trap_3(BIF_ALIST_3);
+static BIF_RETTYPE characters_to_list_trap_4(BIF_ALIST_1);
+
+static Export characters_to_utf8_trap_exp;
+static Export characters_to_list_trap_1_exp;
+static Export characters_to_list_trap_2_exp;
+
+static Export characters_to_list_trap_3_exp;
+static Export characters_to_list_trap_4_exp;
+
+static Export *c_to_b_int_trap_exportp = NULL;
+static Export *c_to_l_int_trap_exportp = NULL;
+
+void erts_init_unicode(void)
+{
+ max_loop_limit = CONTEXT_REDS * LOOP_FACTOR;
+    /* Non-visible BIFs to trap to. */
+ memset(&characters_to_utf8_trap_exp, 0, sizeof(Export));
+ characters_to_utf8_trap_exp.address =
+ &characters_to_utf8_trap_exp.code[3];
+ characters_to_utf8_trap_exp.code[0] = am_erlang;
+ characters_to_utf8_trap_exp.code[1] =
+ am_atom_put("characters_to_utf8_trap",23);
+ characters_to_utf8_trap_exp.code[2] = 3;
+ characters_to_utf8_trap_exp.code[3] =
+ (Eterm) em_apply_bif;
+ characters_to_utf8_trap_exp.code[4] =
+ (Eterm) &characters_to_utf8_trap;
+
+ memset(&characters_to_list_trap_1_exp, 0, sizeof(Export));
+ characters_to_list_trap_1_exp.address =
+ &characters_to_list_trap_1_exp.code[3];
+ characters_to_list_trap_1_exp.code[0] = am_erlang;
+ characters_to_list_trap_1_exp.code[1] =
+ am_atom_put("characters_to_list_trap_1",25);
+ characters_to_list_trap_1_exp.code[2] = 3;
+ characters_to_list_trap_1_exp.code[3] =
+ (Eterm) em_apply_bif;
+ characters_to_list_trap_1_exp.code[4] =
+ (Eterm) &characters_to_list_trap_1;
+
+ memset(&characters_to_list_trap_2_exp, 0, sizeof(Export));
+ characters_to_list_trap_2_exp.address =
+ &characters_to_list_trap_2_exp.code[3];
+ characters_to_list_trap_2_exp.code[0] = am_erlang;
+ characters_to_list_trap_2_exp.code[1] =
+ am_atom_put("characters_to_list_trap_2",25);
+ characters_to_list_trap_2_exp.code[2] = 3;
+ characters_to_list_trap_2_exp.code[3] =
+ (Eterm) em_apply_bif;
+ characters_to_list_trap_2_exp.code[4] =
+ (Eterm) &characters_to_list_trap_2;
+
+
+ memset(&characters_to_list_trap_3_exp, 0, sizeof(Export));
+ characters_to_list_trap_3_exp.address =
+ &characters_to_list_trap_3_exp.code[3];
+ characters_to_list_trap_3_exp.code[0] = am_erlang;
+ characters_to_list_trap_3_exp.code[1] =
+ am_atom_put("characters_to_list_trap_3",25);
+ characters_to_list_trap_3_exp.code[2] = 3;
+ characters_to_list_trap_3_exp.code[3] =
+ (Eterm) em_apply_bif;
+ characters_to_list_trap_3_exp.code[4] =
+ (Eterm) &characters_to_list_trap_3;
+
+ memset(&characters_to_list_trap_4_exp, 0, sizeof(Export));
+ characters_to_list_trap_4_exp.address =
+ &characters_to_list_trap_4_exp.code[3];
+ characters_to_list_trap_4_exp.code[0] = am_erlang;
+ characters_to_list_trap_4_exp.code[1] =
+ am_atom_put("characters_to_list_trap_4",25);
+ characters_to_list_trap_4_exp.code[2] = 1;
+ characters_to_list_trap_4_exp.code[3] =
+ (Eterm) em_apply_bif;
+ characters_to_list_trap_4_exp.code[4] =
+ (Eterm) &characters_to_list_trap_4;
+
+ c_to_b_int_trap_exportp = erts_export_put(am_unicode,am_characters_to_binary_int,2);
+ c_to_l_int_trap_exportp = erts_export_put(am_unicode,am_characters_to_list_int,2);
+
+
+}
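+/*
+ * Layout note (illustrative) for the hand-rolled trap exports above:
+ * code[0] holds the module atom, code[1] the function atom, code[2]
+ * the arity, code[3] the em_apply_bif instruction and code[4] the C
+ * function address; 'address' points at code[3], so a call through the
+ * export lands directly in the BIF.
+ */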
+
+
+static ERTS_INLINE void *alloc_restart(size_t size)
+{
+ return erts_alloc(ERTS_ALC_T_UNICODE_BUFFER,size);
+}
+
+static ERTS_INLINE void free_restart(void *ptr)
+{
+ erts_free(ERTS_ALC_T_UNICODE_BUFFER, ptr);
+}
+
+static void cleanup_restart_context(RestartContext *rc)
+{
+ if (rc->bytes != NULL) {
+ free_restart(rc->bytes);
+ rc->bytes = NULL;
+ }
+}
+
+static void cleanup_restart_context_bin(Binary *bp)
+{
+ RestartContext *rc = ERTS_MAGIC_BIN_DATA(bp);
+ cleanup_restart_context(rc);
+}
+
+static RestartContext *get_rc_from_bin(Eterm bin)
+{
+ Binary *mbp;
+ ASSERT(ERTS_TERM_IS_MAGIC_BINARY(bin));
+
+ mbp = ((ProcBin *) binary_val(bin))->val;
+
+ ASSERT(ERTS_MAGIC_BIN_DESTRUCTOR(mbp)
+ == cleanup_restart_context_bin);
+ return (RestartContext *) ERTS_MAGIC_BIN_DATA(mbp);
+}
+
+static Eterm make_magic_bin_for_restart(Process *p, RestartContext *rc)
+{
+ Binary *mbp = erts_create_magic_binary(sizeof(RestartContext),
+ cleanup_restart_context_bin);
+ RestartContext *restartp = ERTS_MAGIC_BIN_DATA(mbp);
+ Eterm *hp;
+ memcpy(restartp,rc,sizeof(RestartContext));
+ hp = HAlloc(p, PROC_BIN_SIZE);
+ return erts_mk_magic_binary_term(&hp, &MSO(p), mbp);
+}
+
+
+Sint erts_unicode_set_loop_limit(Sint limit)
+{
+ Sint save = (Sint) max_loop_limit;
+ if (limit <= 0) {
+ max_loop_limit = CONTEXT_REDS * LOOP_FACTOR;
+ } else {
+ max_loop_limit = (Uint) limit;
+ }
+ return save;
+}
+
+static ERTS_INLINE int allowed_iterations(Process *p)
+{
+ int tmp = ERTS_BIF_REDS_LEFT(p) * LOOP_FACTOR;
+ int tmp2 = max_loop_limit;
+ if (tmp2 < tmp)
+ return tmp2;
+ else
+ return tmp;
+}
+static ERTS_INLINE int cost_to_proc(Process *p, int cost)
+{
+ int x = (cost / LOOP_FACTOR);
+ BUMP_REDS(p,x);
+ return x;
+}
+static ERTS_INLINE int simple_loops_to_common(int cost)
+{
+ int factor = (LOOP_FACTOR_SIMPLE / LOOP_FACTOR);
+ return (cost / factor);
+}
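+/*
+ * Worked example (illustrative): with LOOP_FACTOR == 10, a process
+ * with 200 reductions left is allowed at most 200 * 10 == 2000
+ * iterations (capped by max_loop_limit), and cost_to_proc() later
+ * charges iterations back as iterations / 10 reductions.
+ */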
+
+static Sint aligned_binary_size(Eterm binary)
+{
+ unsigned char *bytes;
+ Uint bitoffs;
+ Uint bitsize;
+
+ ERTS_GET_BINARY_BYTES(binary, bytes, bitoffs, bitsize);
+ if (bitsize != 0) {
+ return (Sint) -1;
+ }
+ return binary_size(binary);
+}
+
+static Sint latin1_binary_need(Eterm binary)
+{
+ unsigned char *bytes;
+ byte *temp_alloc = NULL;
+ Uint bitoffs;
+ Uint bitsize;
+ Uint size;
+ Sint need = 0;
+ Sint i;
+
+ ERTS_GET_BINARY_BYTES(binary, bytes, bitoffs, bitsize);
+ if (bitsize != 0) {
+ return (Sint) -1;
+ }
+ if (bitoffs != 0) {
+ bytes = erts_get_aligned_binary_bytes(binary, &temp_alloc);
+	/* The call to erts_get_aligned_binary_bytes cannot fail as
+	   we've already checked bitsize and that this is a binary */
+ }
+ size = binary_size(binary);
+ for(i = 0; i < size; ++i) {
+ if (bytes[i] & ((byte) 0x80)) {
+ need += 2;
+ } else {
+ need += 1;
+ }
+ }
+ erts_free_aligned_binary_bytes(temp_alloc);
+ return need;
+}
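+/*
+ * Worked example (illustrative): for the latin1 binary <<"a",229>>
+ * the loop above yields 1 + 2 == 3, since bytes >= 0x80 expand to two
+ * UTF-8 bytes while 7-bit bytes stay single.
+ */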
+
+static int utf8_len(byte first)
+{
+ if ((first & ((byte) 0x80)) == 0) {
+ return 1;
+ } else if ((first & ((byte) 0xE0)) == 0xC0) {
+ return 2;
+ } else if ((first & ((byte) 0xF0)) == 0xE0) {
+ return 3;
+ } else if ((first & ((byte) 0xF8)) == 0xF0) {
+ return 4;
+ }
+ return -1;
+}
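+/*
+ * Lead-byte patterns recognized above, for reference:
+ *     0xxxxxxx -> 1 byte  (U+0000..U+007F)
+ *     110xxxxx -> 2 bytes (U+0080..U+07FF)
+ *     1110xxxx -> 3 bytes (U+0800..U+FFFF)
+ *     11110xxx -> 4 bytes (U+10000..U+10FFFF)
+ * Anything else (e.g. a continuation byte 10xxxxxx) yields -1.
+ */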
+
+static int copy_utf8_bin(byte *target, byte *source, Uint size,
+ byte *leftover, int *num_leftovers,
+ byte **err_pos, Uint *characters) {
+ int copied = 0;
+ if (leftover != NULL && *num_leftovers) {
+ int need = utf8_len(leftover[0]);
+ int from_source = need - (*num_leftovers);
+ int c;
+ byte *tmp_err_pos = NULL;
+ ASSERT(need > 0);
+ ASSERT(from_source > 0);
+ if (size < from_source) {
+ memcpy(leftover + (*num_leftovers), source, size);
+ *num_leftovers += size;
+ return 0;
+ }
+ /* leftover has room for four bytes (see bif) */
+ memcpy(leftover + (*num_leftovers),source,from_source);
+ c = copy_utf8_bin(target, leftover, need, NULL, NULL, &tmp_err_pos, characters);
+ if (tmp_err_pos != 0) {
+ *err_pos = source;
+ return 0;
+ }
+ copied += c;
+ *num_leftovers = 0;
+ size -= from_source;
+ target += c;
+ source += from_source;
+ }
+ while (size) {
+ if (((*source) & ((byte) 0x80)) == 0) {
+ *(target++) = *(source++);
+ --size; ++copied;
+ } else if (((*source) & ((byte) 0xE0)) == 0xC0) {
+ if (leftover && size < 2) {
+ *leftover = *source;
+ *num_leftovers = 1;
+ break;
+ }
+ if (size < 2 || ((source[1] & ((byte) 0xC0)) != 0x80) ||
+ ((*source) < 0xC2) /* overlong */) {
+ *err_pos = source;
+ return copied;
+ }
+ *(target++) = *(source++);
+ *(target++) = *(source++);
+ size -= 2; copied += 2;
+ } else if (((*source) & ((byte) 0xF0)) == 0xE0) {
+ if (leftover && size < 3) {
+ memcpy(leftover, source, (int) size);
+ *num_leftovers = (int) size;
+ break;
+ }
+ if (size < 3 || ((source[1] & ((byte) 0xC0)) != 0x80) ||
+ ((source[2] & ((byte) 0xC0)) != 0x80) ||
+ (((*source) == 0xE0) && (source[1] < 0xA0)) /* overlong */ ) {
+ *err_pos = source;
+ return copied;
+ }
+ if ((((*source) & ((byte) 0xF)) == 0xD) &&
+ ((source[1] & 0x20) != 0)) {
+ *err_pos = source;
+ return copied;
+ }
+
+ if (((*source) == 0xEF) && (source[1] == 0xBF) &&
+ ((source[2] == 0xBE) || (source[2] == 0xBF))) {
+ *err_pos = source;
+ return copied;
+ }
+
+ *(target++) = *(source++);
+ *(target++) = *(source++);
+ *(target++) = *(source++);
+ size -= 3; copied += 3;
+ } else if (((*source) & ((byte) 0xF8)) == 0xF0) {
+ if (leftover && size < 4) {
+ memcpy(leftover, source, (int) size);
+ *num_leftovers = (int) size;
+ break;
+ }
+ if (size < 4 || ((source[1] & ((byte) 0xC0)) != 0x80) ||
+ ((source[2] & ((byte) 0xC0)) != 0x80) ||
+ ((source[3] & ((byte) 0xC0)) != 0x80) ||
+ (((*source) == 0xF0) && (source[1] < 0x90)) /* overlong */) {
+ *err_pos = source;
+ return copied;
+ }
+ if ((((*source) & ((byte)0x7)) > 0x4U) ||
+ ((((*source) & ((byte)0x7)) == 0x4U) &&
+ ((source[1] & ((byte)0x3F)) > 0xFU))) {
+ *err_pos = source;
+ return copied;
+ }
+ *(target++) = *(source++);
+ *(target++) = *(source++);
+ *(target++) = *(source++);
+ *(target++) = *(source++);
+ size -= 4; copied +=4;
+ } else {
+ *err_pos = source;
+ return copied;
+ }
+ ++(*characters);
+ }
+ return copied;
+}
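+/*
+ * Summary of what the checks above reject: truncated sequences,
+ * overlong encodings (leads 0xC0/0xC1, 0xE0 with second byte < 0xA0,
+ * 0xF0 with second byte < 0x90), UTF-16 surrogates U+D800..U+DFFF,
+ * the non-characters U+FFFE/U+FFFF, and code points above U+10FFFF.
+ */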
+
+
+
+static Sint utf8_need(Eterm ioterm, int latin1, Uint *costp)
+{
+ Eterm *objp;
+ Eterm obj;
+ DECLARE_ESTACK(stack);
+ Sint need = 0;
+ Uint cost = 0;
+
+ if (is_nil(ioterm)) {
+ DESTROY_ESTACK(stack);
+ *costp = 0;
+ return need;
+ }
+ if(is_binary(ioterm)) {
+ DESTROY_ESTACK(stack);
+ if (latin1) {
+ Sint x = latin1_binary_need(ioterm);
+ *costp = x;
+ return x;
+ } else {
+ *costp = 1;
+ return aligned_binary_size(ioterm);
+ }
+ }
+
+ if (!is_list(ioterm)) {
+ DESTROY_ESTACK(stack);
+ *costp = 0;
+ return (Sint) -1;
+ }
+    /* OK, a list; it needs to be processed in order, handling each flat
+       list level as it occurs, just like io_list_to_binary would */
+ ESTACK_PUSH(stack,ioterm);
+ while (!ESTACK_ISEMPTY(stack)) {
+ ioterm = ESTACK_POP(stack);
+ if (is_nil(ioterm)) {
+ /* ignore empty lists */
+ continue;
+ }
+ if(is_list(ioterm)) {
+L_Again:   /* Restart with sublist, old list end was pushed on stack */
+ objp = list_val(ioterm);
+ obj = CAR(objp);
+ for(;;) { /* loop over one flat list of bytes and binaries
+ until sublist or list end is encountered */
+ if (is_small(obj)) { /* Always small */
+ for(;;) {
+ Uint x = unsigned_val(obj);
+ if (x < 0x80)
+ need +=1;
+ else if (x < 0x800)
+ need += 2;
+ else if (x < 0x10000)
+ need += 3;
+ else
+ need += 4;
+		    /* everything else will give badarg later
+		       in the process, so we don't check */
+ ++cost;
+ ioterm = CDR(objp);
+ if (!is_list(ioterm)) {
+ break;
+ }
+ objp = list_val(ioterm);
+ obj = CAR(objp);
+ if (!is_byte(obj))
+ break;
+ }
+ } else if (is_nil(obj)) {
+ ioterm = CDR(objp);
+ if (!is_list(ioterm)) {
+ break;
+ }
+ objp = list_val(ioterm);
+ obj = CAR(objp);
+ } else if (is_list(obj)) {
+ /* push rest of list for later processing, start
+ again with sublist */
+ ESTACK_PUSH(stack,CDR(objp));
+ ioterm = obj;
+ goto L_Again;
+ } else if (is_binary(obj)) {
+ Sint x;
+
+ if (latin1) {
+ x = latin1_binary_need(obj);
+ if (x < 0) {
+ DESTROY_ESTACK(stack);
+ *costp = cost;
+ return x;
+ }
+ cost += x;
+ } else {
+ x = aligned_binary_size(obj);
+ if (x < 0) {
+ DESTROY_ESTACK(stack);
+ *costp = cost;
+ return x;
+ }
+ ++cost;
+ }
+ need += x;
+ ioterm = CDR(objp);
+ if (is_list(ioterm)) {
+ /* objp and obj need to be updated if
+ loop is to continue */
+ objp = list_val(ioterm);
+ obj = CAR(objp);
+ }
+ } else {
+ DESTROY_ESTACK(stack);
+ *costp = cost;
+ return ((Sint) -1);
+ }
+ if (is_nil(ioterm) || !is_list(ioterm)) {
+ break;
+ }
+ } /* for(;;) */
+ } /* is_list(ioterm) */
+
+ if (!is_list(ioterm) && !is_nil(ioterm)) {
+	    /* improper list end */
+ if (is_binary(ioterm)) {
+ Sint x;
+ if (latin1) {
+ x = latin1_binary_need(ioterm);
+ if (x < 0) {
+ DESTROY_ESTACK(stack);
+ *costp = cost;
+ return x;
+ }
+ cost += x;
+ } else {
+ x = aligned_binary_size(ioterm);
+ if (x < 0) {
+ DESTROY_ESTACK(stack);
+ *costp = cost;
+ return x;
+ }
+ ++cost;
+ }
+ need += x;
+ } else {
+ DESTROY_ESTACK(stack);
+ *costp = cost;
+ return ((Sint) -1);
+ }
+ }
+ } /* while not estack empty */
+ DESTROY_ESTACK(stack);
+ *costp = cost;
+ return need;
+}
+
+
+static Eterm do_build_utf8(Process *p, Eterm ioterm, int *left, int latin1,
+ byte *target, int *pos, Uint *characters, int *err,
+ byte *leftover, int *num_leftovers)
+{
+ int c;
+ Eterm *objp;
+ Eterm obj;
+ DECLARE_ESTACK(stack);
+
+ *err = 0;
+ if ((*left) <= 0 || is_nil(ioterm)) {
+ DESTROY_ESTACK(stack);
+ return ioterm;
+ }
+ if(is_binary(ioterm)) {
+ Uint bitoffs;
+ Uint bitsize;
+ Uint size;
+ Uint i;
+ Eterm res_term = NIL;
+ unsigned char *bytes;
+ byte *temp_alloc = NULL;
+ Uint orig_size;
+
+ ERTS_GET_BINARY_BYTES(ioterm, bytes, bitoffs, bitsize);
+ if (bitsize != 0) {
+ *err = 1;
+ DESTROY_ESTACK(stack);
+ return ioterm;
+ }
+ if (bitoffs != 0) {
+ bytes = erts_get_aligned_binary_bytes(ioterm, &temp_alloc);
+	    /* The call to erts_get_aligned_binary_bytes cannot fail as
+	       we've already checked bitsize and that this is a binary */
+ }
+
+ orig_size = size = binary_size(ioterm);
+
+	/* This is done to avoid splitting binaries in two and then
+	   creating an unnecessary rest that eventually gives an error.
+	   For cases where errors are not returned, this is unnecessary */
+ if (!latin1) {
+ /* Find a valid character boundary */
+ while (size > (*left) &&
+ (((byte) bytes[(*left)]) & ((byte) 0xC0)) == ((byte) 0x80)) {
+ ++(*left);
+ }
+ }
+
+ if (size > (*left)) {
+ Eterm *hp;
+ ErlSubBin *sb;
+ Eterm orig;
+ Uint offset;
+ /* Split the binary in two parts, of which we
+ only process the first */
+ hp = HAlloc(p, ERL_SUB_BIN_SIZE);
+ sb = (ErlSubBin *) hp;
+ ERTS_GET_REAL_BIN(ioterm, orig, offset, bitoffs, bitsize);
+ sb->thing_word = HEADER_SUB_BIN;
+ sb->size = size - (*left);
+ sb->offs = offset + (*left);
+ sb->orig = orig;
+ sb->bitoffs = bitoffs;
+ sb->bitsize = bitsize;
+ sb->is_writable = 0;
+ res_term = make_binary(sb);
+ size = (*left);
+ }
+
+ if (!latin1) {
+ int num;
+ byte *err_pos = NULL;
+ num = copy_utf8_bin(target + (*pos), bytes,
+ size, leftover, num_leftovers,&err_pos,characters);
+ *pos += num;
+ if (err_pos != NULL) {
+ int rest_bin_offset;
+ int rest_bin_size;
+ Eterm *hp;
+ ErlSubBin *sb;
+ Eterm orig;
+ Uint offset;
+
+ *err = 1;
+ /* we have no real stack, just build a list of the binaries
+ we have not decoded... */
+ DESTROY_ESTACK(stack);
+
+ rest_bin_offset = (err_pos - bytes);
+ rest_bin_size = orig_size - rest_bin_offset;
+
+ hp = HAlloc(p, ERL_SUB_BIN_SIZE);
+ sb = (ErlSubBin *) hp;
+ ERTS_GET_REAL_BIN(ioterm, orig, offset, bitoffs, bitsize);
+ sb->thing_word = HEADER_SUB_BIN;
+ sb->size = rest_bin_size;
+ sb->offs = offset + rest_bin_offset;
+ sb->orig = orig;
+ sb->bitoffs = bitoffs;
+ sb->bitsize = bitsize;
+ sb->is_writable = 0;
+ res_term = make_binary(sb);
+ erts_free_aligned_binary_bytes(temp_alloc);
+ return res_term;
+ }
+ } else {
+ i = 0;
+ while(i < size) {
+ if (bytes[i] < 0x80) {
+ target[(*pos)++] = bytes[i++];
+ } else {
+ target[(*pos)++] = ((bytes[i] >> 6) | ((byte) 0xC0));
+ target[(*pos)++] = ((bytes[i] & 0x3F) | ((byte) 0x80));
+ ++i;
+ }
+ ++(*characters);
+ }
+ }
+ *left -= size;
+ DESTROY_ESTACK(stack);
+ erts_free_aligned_binary_bytes(temp_alloc);
+ return res_term;
+ }
+
+ if (!is_list(ioterm)) {
+ *err = 1;
+ goto done;
+ }
+
+    /* OK, a list; it needs to be processed in order, handling each flat
+       list level as it occurs, just like io_list_to_binary would */
+ ESTACK_PUSH(stack,ioterm);
+ while (!ESTACK_ISEMPTY(stack) && (*left)) {
+ ioterm = ESTACK_POP(stack);
+ if (is_nil(ioterm)) {
+ /* ignore empty lists */
+ continue;
+ }
+ if(is_list(ioterm)) {
+L_Again:   /* Restart with sublist, old list end was pushed on stack */
+ objp = list_val(ioterm);
+ obj = CAR(objp);
+ for(;;) { /* loop over one flat list of bytes and binaries
+ until sublist or list end is encountered */
+ if (is_small(obj)) { /* Always small in unicode*/
+ if (*num_leftovers) {
+ /* Have rest from previous bin and this is an integer, not allowed */
+ *err = 1;
+ goto done;
+ }
+ for(;;) {
+ Uint x = unsigned_val(obj);
+ if (latin1 && x > 255) {
+ *err = 1;
+ goto done;
+ }
+ if (x < 0x80) {
+ target[(*pos)++] = (byte) x;
+ }
+ else if (x < 0x800) {
+ target[(*pos)++] = (((byte) (x >> 6)) |
+ ((byte) 0xC0));
+ target[(*pos)++] = (((byte) (x & 0x3F)) |
+ ((byte) 0x80));
+ } else if (x < 0x10000) {
+ if ((x >= 0xD800 && x <= 0xDFFF) ||
+ (x == 0xFFFE) ||
+ (x == 0xFFFF)) { /* Invalid unicode range */
+ *err = 1;
+ goto done;
+ }
+ target[(*pos)++] = (((byte) (x >> 12)) |
+ ((byte) 0xE0));
+ target[(*pos)++] = ((((byte) (x >> 6)) & 0x3F) |
+ ((byte) 0x80));
+ target[(*pos)++] = (((byte) (x & 0x3F)) |
+ ((byte) 0x80));
+ } else if (x < 0x110000) { /* Standard imposed max */
+ target[(*pos)++] = (((byte) (x >> 18)) |
+ ((byte) 0xF0));
+ target[(*pos)++] = ((((byte) (x >> 12)) & 0x3F) |
+ ((byte) 0x80));
+ target[(*pos)++] = ((((byte) (x >> 6)) & 0x3F) |
+ ((byte) 0x80));
+ target[(*pos)++] = (((byte) (x & 0x3F)) |
+ ((byte) 0x80));
+ } else {
+ *err = 1;
+ goto done;
+ }
+ ++(*characters);
+ --(*left);
+ ioterm = CDR(objp);
+ if (!is_list(ioterm) || !(*left)) {
+ break;
+ }
+ objp = list_val(ioterm);
+ obj = CAR(objp);
+ if (!is_small(obj))
+ break;
+ }
+ } else if (is_nil(obj)) {
+ ioterm = CDR(objp);
+ if (!is_list(ioterm)) {
+ break;
+ }
+ objp = list_val(ioterm);
+ obj = CAR(objp);
+ } else if (is_list(obj)) {
+ /* push rest of list for later processing, start
+ again with sublist */
+ ESTACK_PUSH(stack,CDR(objp));
+ ioterm = obj;
+ goto L_Again;
+ } else if (is_binary(obj)) {
+ Eterm rest_term;
+ rest_term = do_build_utf8(p,obj,left,latin1,target,pos, characters, err,
+ leftover, num_leftovers);
+ if ((*err) != 0) {
+ Eterm *hp;
+ hp = HAlloc(p, 2);
+ obj = CDR(objp);
+ ioterm = CONS(hp, rest_term, obj);
+		    /* (*left) = 0; */
+ goto done;
+ }
+ if (rest_term != NIL) {
+ Eterm *hp;
+ hp = HAlloc(p, 2);
+ obj = CDR(objp);
+ ioterm = CONS(hp, rest_term, obj);
+ (*left) = 0;
+ break;
+ }
+ ioterm = CDR(objp);
+ if (is_list(ioterm)) {
+ /* objp and obj need to be updated if
+ loop is to continue */
+ objp = list_val(ioterm);
+ obj = CAR(objp);
+ }
+ } else {
+ *err = 1;
+ goto done;
+ }
+ if (!(*left) || is_nil(ioterm) || !is_list(ioterm)) {
+ break;
+ }
+ } /* for(;;) */
+ } /* is_list(ioterm) */
+
+ if ((*left) && !is_list(ioterm) && !is_nil(ioterm)) {
+	    /* improper list end */
+ if (is_binary(ioterm)) {
+ ioterm = do_build_utf8(p,ioterm,left,latin1,target,pos,characters,err,leftover,num_leftovers);
+ if ((*err) != 0) {
+ goto done;
+ }
+ } else {
+ *err = 1;
+ goto done;
+ }
+ }
+ } /* while left and not estack empty */
+ done:
+ c = ESTACK_COUNT(stack);
+ if (c > 0) {
+ Eterm *hp = HAlloc(p,2*c);
+ while(!ESTACK_ISEMPTY(stack)) {
+ Eterm st = ESTACK_POP(stack);
+ ioterm = CONS(hp, ioterm, st);
+ hp += 2;
+ }
+ }
+ DESTROY_ESTACK(stack);
+ return ioterm;
+
+}
+
+static int check_leftovers(byte *source, int size)
+{
+ if (((*source) & ((byte) 0xE0)) == 0xC0) {
+ return 0;
+ } else if (((*source) & ((byte) 0xF0)) == 0xE0) {
+ if (size < 2 ||
+ (size < 3 && ((source[1] & ((byte) 0xC0)) == 0x80))) {
+ return 0;
+ }
+ } else if (((*source) & ((byte) 0xF8)) == 0xF0) {
+ if (size < 2 ||
+ (size < 3 && ((source[1] & ((byte) 0xC0)) == 0x80)) ||
+ (size < 4 &&
+ ((source[1] & ((byte) 0xC0)) == 0x80) &&
+ ((source[2] & ((byte) 0xC0)) == 0x80))) {
+ return 0;
+ }
+ }
+ return -1;
+}
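+/*
+ * Illustrative note: check_leftovers() answers "could these trailing
+ * bytes still grow into a valid UTF-8 sequence?" -- 0 means merely
+ * incomplete, -1 means they can never become valid. For example a lone
+ * 0xE2, or 0xE2 0x82, yields 0, while a stray continuation byte 0x80
+ * yields -1.
+ */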
+
+
+
+static BIF_RETTYPE build_utf8_return(Process *p,Eterm bin,int pos,
+ Eterm rest_term,int err,
+ byte *leftover,int num_leftovers,Eterm latin1)
+{
+ Eterm *hp;
+ Eterm ret;
+
+ binary_size(bin) = pos;
+ if (err) {
+ if (num_leftovers > 0) {
+ Eterm leftover_bin = new_binary(p, leftover, num_leftovers);
+ hp = HAlloc(p,8);
+ rest_term = CONS(hp,rest_term,NIL);
+ hp += 2;
+ rest_term = CONS(hp,leftover_bin,rest_term);
+ hp += 2;
+ } else {
+ hp = HAlloc(p,4);
+ }
+ ret = TUPLE3(hp,am_error,bin,rest_term);
+ } else if (rest_term == NIL && num_leftovers != 0) {
+ Eterm leftover_bin = new_binary(p, leftover, num_leftovers);
+ if (check_leftovers(leftover,num_leftovers) != 0) {
+ hp = HAlloc(p,4);
+ ret = TUPLE3(hp,am_error,bin,leftover_bin);
+ } else {
+ hp = HAlloc(p,4);
+ ret = TUPLE3(hp,am_incomplete,bin,leftover_bin);
+ }
+ } else { /* All OK */
+ if (rest_term != NIL) { /* Trap */
+ if (num_leftovers > 0) {
+ Eterm rest_bin = new_binary(p, leftover, num_leftovers);
+ hp = HAlloc(p,2);
+ rest_term = CONS(hp,rest_bin,rest_term);
+ }
+ BUMP_ALL_REDS(p);
+ BIF_TRAP3(&characters_to_utf8_trap_exp, p, bin, rest_term, latin1);
+ } else { /* Success */
+ /*hp = HAlloc(p,5);
+ ret = TUPLE4(hp,bin,rest_term,make_small(pos),make_small(err));*/
+ ret = bin;
+ }
+ }
+ BIF_RET(ret);
+}
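+
+/*
+ * For reference (a summary, assuming the documented semantics of
+ * unicode:characters_to_binary/2): the function above yields the bare
+ * result binary on success, {error, ConvertedBin, Rest} when an invalid
+ * code point or byte was hit, {incomplete, ConvertedBin, LeftoverBin}
+ * when the input ended inside a UTF-8 sequence, and traps through
+ * characters_to_utf8_trap_exp when the iteration budget runs out.
+ */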
+
+
+static BIF_RETTYPE characters_to_utf8_trap(BIF_ALIST_3)
+{
+ Eterm *real_bin;
+ Sint need;
+ byte* bytes;
+ Eterm rest_term;
+ int left, sleft;
+ int pos;
+ int err;
+ byte leftover[4]; /* used for temp buffer too,
+ otherwise 3 bytes would have been enough */
+ int num_leftovers = 0;
+ int latin1 = 0;
+ Uint characters = 0;
+
+ /*erts_printf("Trap %T!\r\n",BIF_ARG_2);*/
+ ASSERT(is_binary(BIF_ARG_1));
+ real_bin = binary_val(BIF_ARG_1);
+ ASSERT(*real_bin == HEADER_PROC_BIN);
+ need = ((ProcBin *) real_bin)->val->orig_size;
+ pos = (int) binary_size(BIF_ARG_1);
+ bytes = binary_bytes(BIF_ARG_1);
+ sleft = left = allowed_iterations(BIF_P);
+ err = 0;
+ if (BIF_ARG_3 == am_latin1) {
+ latin1 = 1;
+ }
+ rest_term = do_build_utf8(BIF_P, BIF_ARG_2, &left, latin1,
+ bytes, &pos, &characters, &err, leftover, &num_leftovers);
+ cost_to_proc(BIF_P, sleft - left);
+ return build_utf8_return(BIF_P,BIF_ARG_1,pos,rest_term,err,
+ leftover,num_leftovers,BIF_ARG_3);
+}
+
+BIF_RETTYPE unicode_bin_is_7bit_1(BIF_ALIST_1)
+{
+ Sint need;
+ if(!is_binary(BIF_ARG_1)) {
+ BIF_RET(am_false);
+ }
+ need = latin1_binary_need(BIF_ARG_1);
+ if(need >= 0 && aligned_binary_size(BIF_ARG_1) == need) {
+ BIF_RET(am_true);
+ }
+ BIF_RET(am_false);
+}
+
+static int is_valid_utf8(Eterm orig_bin)
+{
+ Uint bitoffs;
+ Uint bitsize;
+ Uint size;
+ byte *temp_alloc = NULL;
+ byte *endpos;
+ Uint numchar;
+ byte *bytes;
+ int ret;
+
+ ERTS_GET_BINARY_BYTES(orig_bin, bytes, bitoffs, bitsize);
+ if (bitsize != 0) {
+ return 0;
+ }
+ if (bitoffs != 0) {
+ bytes = erts_get_aligned_binary_bytes(orig_bin, &temp_alloc);
+ }
+ size = binary_size(orig_bin);
+ ret = analyze_utf8(bytes,
+ size,
+ &endpos,&numchar,NULL);
+ erts_free_aligned_binary_bytes(temp_alloc);
+ return (ret == UTF8_OK);
+}
+
+BIF_RETTYPE unicode_characters_to_binary_2(BIF_ALIST_2)
+{
+ Sint need;
+ Uint characters;
+ int latin1;
+ Eterm bin;
+ byte *bytes;
+ int pos;
+ int err;
+ int left, sleft;
+ Eterm rest_term, subject;
+    byte leftover[4]; /* used for temp buffer too,
+			 otherwise 3 bytes would have been enough */
+ int num_leftovers = 0;
+ Uint cost_of_utf8_need;
+
+
+ if (BIF_ARG_2 == am_latin1) {
+ latin1 = 1;
+ } else if (BIF_ARG_2 == am_unicode || BIF_ARG_2 == am_utf8) {
+ latin1 = 0;
+ } else {
+ BIF_TRAP2(c_to_b_int_trap_exportp, BIF_P, BIF_ARG_1, BIF_ARG_2);
+ }
+ if (is_list(BIF_ARG_1) && is_binary(CAR(list_val(BIF_ARG_1))) &&
+ is_nil(CDR(list_val(BIF_ARG_1)))) {
+ subject = CAR(list_val(BIF_ARG_1));
+ } else {
+ subject = BIF_ARG_1;
+ }
+
+ need = utf8_need(subject,latin1,&cost_of_utf8_need);
+ if (need < 0) {
+ BIF_ERROR(BIF_P,BADARG);
+ }
+ if (is_binary(subject) && need >= 0 && aligned_binary_size(subject) == need
+ && (latin1 || is_valid_utf8(subject))) {
+ cost_to_proc(BIF_P, simple_loops_to_common(cost_of_utf8_need));
+ BIF_RET(subject);
+ }
+
+
+ bin = erts_new_mso_binary(BIF_P, (byte *)NULL, need);
+ bytes = binary_bytes(bin);
+ cost_to_proc(BIF_P, simple_loops_to_common(cost_of_utf8_need));
+ left = allowed_iterations(BIF_P) -
+ simple_loops_to_common(cost_of_utf8_need);
+ if (left <= 0) {
+	/* simplified - let everything be set up by setting left to 1 */
+ left = 1;
+ }
+ sleft = left;
+ pos = 0;
+ err = 0;
+
+
+ rest_term = do_build_utf8(BIF_P, subject, &left, latin1,
+ bytes, &pos, &characters, &err, leftover, &num_leftovers);
+#ifdef HARDDEBUG
+ if (left == 0) {
+ Eterm bin;
+ if (is_binary(subject)) {
+ bin = subject;
+ } else if(is_list(subject) && is_binary(CAR(list_val(subject)))) {
+ bin = CAR(list_val(subject));
+ } else {
+ bin = NIL;
+ }
+ if (is_binary(bin)) {
+ byte *t = NULL;
+ Uint sz = binary_size(bin);
+ byte *by = erts_get_aligned_binary_bytes(bin,&t);
+ int i;
+ erts_printf("<<");
+ for (i = 0;i < sz; ++i) {
+ erts_printf((i == sz -1) ? "0x%X" : "0x%X, ", (unsigned) by[i]);
+ }
+ erts_printf(">>: ");
+ erts_free_aligned_binary_bytes(t);
+ }
+ erts_printf("%d - %d = %d\n",sleft,left,sleft - left);
+ }
+#endif
+ cost_to_proc(BIF_P, sleft - left);
+ return build_utf8_return(BIF_P,bin,pos,rest_term,err,
+ leftover,num_leftovers,BIF_ARG_2);
+}
+
+static BIF_RETTYPE build_list_return(Process *p, byte *bytes, int pos, Uint characters,
+ Eterm rest_term, int err,
+ byte *leftover, int num_leftovers,
+ Eterm latin1, int left)
+{
+ Eterm *hp;
+
+ if (left <= 0) {
+ left = 1;
+ }
+
+ if (err) {
+ if (num_leftovers > 0) {
+ Eterm leftover_bin = new_binary(p, leftover, num_leftovers);
+ hp = HAlloc(p,4);
+ rest_term = CONS(hp,rest_term,NIL);
+ hp += 2;
+ rest_term = CONS(hp,leftover_bin,rest_term);
+ }
+ BIF_RET(finalize_list_to_list(p, bytes, rest_term, 0U, pos, characters, UTF8_ERROR, left, NIL));
+ } else if (rest_term == NIL && num_leftovers != 0) {
+ Eterm leftover_bin = new_binary(p, leftover, num_leftovers);
+ if (check_leftovers(leftover,num_leftovers) != 0) {
+ BIF_RET(finalize_list_to_list(p, bytes, leftover_bin, 0U, pos, characters, UTF8_ERROR,
+ left, NIL));
+ } else {
+ BIF_RET(finalize_list_to_list(p, bytes, leftover_bin, 0U, pos, characters, UTF8_INCOMPLETE,
+ left, NIL));
+ }
+ } else { /* All OK */
+ if (rest_term != NIL) { /* Trap */
+ RestartContext rc;
+ if (num_leftovers > 0) {
+ Eterm rest_bin = new_binary(p, leftover, num_leftovers);
+ hp = HAlloc(p,2);
+ rest_term = CONS(hp,rest_bin,rest_term);
+ }
+ BUMP_ALL_REDS(p);
+ rc.bytes = bytes;
+ rc.num_processed_bytes = 0; /* not used */
+ rc.num_bytes_to_process = pos;
+ rc.num_resulting_chars = characters;
+ rc.state = UTF8_OK; /* not used */
+ BIF_TRAP3(&characters_to_list_trap_1_exp, p, make_magic_bin_for_restart(p,&rc),
+ rest_term, latin1);
+ } else { /* Success */
+ BIF_RET(finalize_list_to_list(p, bytes, NIL, 0U, pos, characters, UTF8_OK, left, NIL));
+ }
+ }
+}
+
+static BIF_RETTYPE characters_to_list_trap_1(BIF_ALIST_3)
+{
+ RestartContext *rc;
+ byte* bytes;
+ int pos;
+ Uint characters;
+ int err;
+ Eterm rest_term;
+ int left, sleft;
+
+ int latin1 = 0;
+ byte leftover[4]; /* used for temp buffer too,
+ otherwise 3 bytes would have been enough */
+ int num_leftovers = 0;
+
+
+ rc = get_rc_from_bin(BIF_ARG_1);
+
+ bytes = rc->bytes;
+ rc->bytes = NULL; /* to avoid free due to later GC */
+ pos = rc->num_bytes_to_process;
+ characters = rc->num_resulting_chars;
+
+ sleft = left = allowed_iterations(BIF_P);
+ err = 0;
+ if (BIF_ARG_3 == am_latin1) {
+ latin1 = 1;
+ }
+ rest_term = do_build_utf8(BIF_P, BIF_ARG_2, &left, latin1,
+ bytes, &pos, &characters, &err, leftover, &num_leftovers);
+ cost_to_proc(BIF_P, sleft - left);
+ return build_list_return(BIF_P,bytes,pos,characters,rest_term,err,
+ leftover,num_leftovers,BIF_ARG_3,left);
+}
+
+BIF_RETTYPE unicode_characters_to_list_2(BIF_ALIST_2)
+{
+ Sint need;
+ int latin1;
+ Uint characters = 0;
+ byte *bytes;
+ int pos;
+ int err;
+ int left, sleft;
+ Eterm rest_term;
+    byte leftover[4]; /* used for temp buffer too,
+			 otherwise 3 bytes would have been enough */
+ int num_leftovers = 0;
+ Uint cost_of_utf8_need;
+
+ if (BIF_ARG_2 == am_latin1) {
+ latin1 = 1;
+ } else if (BIF_ARG_2 == am_unicode || BIF_ARG_2 == am_utf8) {
+ latin1 = 0;
+ } else {
+ BIF_TRAP2(c_to_l_int_trap_exportp, BIF_P, BIF_ARG_1, BIF_ARG_2);
+ }
+ if (is_binary(BIF_ARG_1) && !latin1) { /* Optimized behaviour for this case */
+ return utf8_to_list(BIF_P,BIF_ARG_1);
+ }
+ need = utf8_need(BIF_ARG_1,latin1,&cost_of_utf8_need);
+ if (need < 0) {
+ BIF_ERROR(BIF_P,BADARG);
+ }
+ bytes = alloc_restart(need);
+ cost_to_proc(BIF_P, simple_loops_to_common(cost_of_utf8_need));
+ left = allowed_iterations(BIF_P) -
+ simple_loops_to_common(cost_of_utf8_need);
+ if (left <= 0) {
+	/* simplified - let everything be set up by setting left to 1 */
+ left = 1;
+ }
+ sleft = left;
+ pos = 0;
+ err = 0;
+
+
+ rest_term = do_build_utf8(BIF_P, BIF_ARG_1, &left, latin1,
+ bytes, &pos, &characters, &err, leftover, &num_leftovers);
+ cost_to_proc(BIF_P, sleft - left);
+ return build_list_return(BIF_P,bytes,pos,characters,rest_term,err,
+ leftover,num_leftovers,BIF_ARG_2,left);
+}
+
+
+/*
+ * When the input to characters_to_list is a plain binary and the format is
+ * 'unicode', we do a faster analysis and size count with this function.
+ */
+static int analyze_utf8(byte *source, Uint size,
+ byte **err_pos, Uint *num_chars, int *left)
+{
+ *err_pos = source;
+ *num_chars = 0;
+ while (size) {
+ if (((*source) & ((byte) 0x80)) == 0) {
+ source++;
+ --size;
+ } else if (((*source) & ((byte) 0xE0)) == 0xC0) {
+ if (size < 2) {
+ return UTF8_INCOMPLETE;
+ }
+ if (((source[1] & ((byte) 0xC0)) != 0x80) ||
+ ((*source) < 0xC2) /* overlong */) {
+ return UTF8_ERROR;
+ }
+ source += 2;
+ size -= 2;
+ } else if (((*source) & ((byte) 0xF0)) == 0xE0) {
+ if (size < 3) {
+ return UTF8_INCOMPLETE;
+ }
+ if (((source[1] & ((byte) 0xC0)) != 0x80) ||
+ ((source[2] & ((byte) 0xC0)) != 0x80) ||
+ (((*source) == 0xE0) && (source[1] < 0xA0)) /* overlong */ ) {
+ return UTF8_ERROR;
+ }
+ if ((((*source) & ((byte) 0xF)) == 0xD) &&
+ ((source[1] & 0x20) != 0)) {
+ return UTF8_ERROR;
+ }
+ if (((*source) == 0xEF) && (source[1] == 0xBF) &&
+ ((source[2] == 0xBE) || (source[2] == 0xBF))) {
+ return UTF8_ERROR;
+ }
+ source += 3;
+ size -= 3;
+ } else if (((*source) & ((byte) 0xF8)) == 0xF0) {
+ if (size < 4) {
+ return UTF8_INCOMPLETE;
+ }
+ if (((source[1] & ((byte) 0xC0)) != 0x80) ||
+ ((source[2] & ((byte) 0xC0)) != 0x80) ||
+ ((source[3] & ((byte) 0xC0)) != 0x80) ||
+ (((*source) == 0xF0) && (source[1] < 0x90)) /* overlong */) {
+ return UTF8_ERROR;
+ }
+ if ((((*source) & ((byte)0x7)) > 0x4U) ||
+ ((((*source) & ((byte)0x7)) == 0x4U) &&
+ ((source[1] & ((byte)0x3F)) > 0xFU))) {
+ return UTF8_ERROR;
+ }
+ source += 4;
+ size -= 4;
+ } else {
+ return UTF8_ERROR;
+ }
+ ++(*num_chars);
+ *err_pos = source;
+ if (left && --(*left) <= 0) {
+ return UTF8_ANALYZE_MORE;
+ }
+ }
+ return UTF8_OK;
+}
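+
+/*
+ * A minimal driver sketch for analyze_utf8(), kept under #if 0 so it is
+ * never compiled in; the inputs and expected results are assumptions
+ * for illustration only.
+ */
+#if 0
+#include <stdio.h>
+static void analyze_utf8_demo(void)
+{
+    byte *err_pos;
+    Uint num_chars;
+    /* "abc" followed by U+00E9 as 0xC3 0xA9: 4 characters, UTF8_OK */
+    byte ok[5] = { 'a', 'b', 'c', 0xC3, 0xA9 };
+    /* The same input cut after the lead byte 0xC3: UTF8_INCOMPLETE */
+    byte cut[4] = { 'a', 'b', 'c', 0xC3 };
+    printf("%d\n", analyze_utf8(ok, 5, &err_pos, &num_chars, NULL));
+    printf("%d\n", analyze_utf8(cut, 4, &err_pos, &num_chars, NULL));
+}
+#endif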
+
+/*
+ * No errors should be able to occur here - no overlongs, no malformed
+ * sequences, nothing: the buffer has already been validated by
+ * analyze_utf8() or built by do_build_utf8().
+ */
+static Eterm do_utf8_to_list(Process *p, Uint num, byte *bytes, Uint sz,
+ Uint left,
+ Uint *num_built, Uint *num_eaten, Eterm tail)
+{
+ Eterm *hp;
+ Eterm ret;
+ byte *source, *ssource;
+ Uint unipoint;
+
+ ASSERT(num > 0);
+ if (left < num) {
+ if (left > 0)
+ num = left;
+ else
+ num = 1;
+ }
+
+ *num_built = num; /* Always */
+
+ hp = HAlloc(p,num * 2);
+ ret = tail;
+ source = bytes + sz;
+ ssource = source;
+ while(--source >= bytes) {
+ if (((*source) & ((byte) 0x80)) == 0) {
+ unipoint = (Uint) *source;
+ } else if (((*source) & ((byte) 0xE0)) == 0xC0) {
+ unipoint =
+ (((Uint) ((*source) & ((byte) 0x1F))) << 6) |
+ ((Uint) (source[1] & ((byte) 0x3F)));
+ } else if (((*source) & ((byte) 0xF0)) == 0xE0) {
+ unipoint =
+ (((Uint) ((*source) & ((byte) 0xF))) << 12) |
+ (((Uint) (source[1] & ((byte) 0x3F))) << 6) |
+ ((Uint) (source[2] & ((byte) 0x3F)));
+ } else if (((*source) & ((byte) 0xF8)) == 0xF0) {
+ unipoint =
+ (((Uint) ((*source) & ((byte) 0x7))) << 18) |
+ (((Uint) (source[1] & ((byte) 0x3F))) << 12) |
+ (((Uint) (source[2] & ((byte) 0x3F))) << 6) |
+ ((Uint) (source[3] & ((byte) 0x3F)));
+ } else {
+ /* ignore 2#10XXXXXX */
+ continue;
+ }
+ ret = CONS(hp,make_small(unipoint),ret);
+ hp += 2;
+ if (--num <= 0) {
+ break;
+ }
+ }
+ *num_eaten = (ssource - source);
+ return ret;
+}
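+
+/*
+ * Decoding example for the loop above: the two-byte sequence 0xC3 0xA9
+ * (U+00E9) is picked up at its lead byte while scanning backwards --
+ * continuation bytes (2#10xxxxxx) are skipped -- and decoded as
+ *   ((0xC3 & 0x1F) << 6) | (0xA9 & 0x3F) = 0xC0 | 0x29 = 0xE9,
+ * so each character costs exactly one cons cell regardless of its
+ * byte length.
+ */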
+
+/*
+ * The last step of characters_to_list: build a list from the buffer 'bytes'
+ * (created in the same way as for characters_to_utf8). All sizes are known
+ * in advance and most data will be held in a "magic binary" during trapping.
+ */
+static BIF_RETTYPE finalize_list_to_list(Process *p,
+ byte *bytes,
+ Eterm rest,
+ Uint num_processed_bytes,
+ Uint num_bytes_to_process,
+ Uint num_resulting_chars,
+ int state, int left,
+ Eterm tail)
+{
+ Uint num_built; /* characters */
+ Uint num_eaten; /* bytes */
+ Eterm *hp;
+ Eterm converted,ret;
+
+ if (!num_bytes_to_process) {
+ converted = tail;
+ } else {
+ num_built = 0;
+ num_eaten = 0;
+ converted = do_utf8_to_list(p, num_resulting_chars,
+ bytes, num_bytes_to_process,
+ left, &num_built, &num_eaten, tail);
+ cost_to_proc(p,num_built);
+
+ if (num_built != num_resulting_chars) { /* work left to do */
+ RestartContext rc;
+
+ rc.num_resulting_chars = num_resulting_chars - num_built;
+ rc.num_bytes_to_process = num_bytes_to_process - num_eaten;
+ rc.num_processed_bytes = num_processed_bytes + num_eaten;
+ rc.state = state;
+ rc.bytes = bytes;
+ BUMP_ALL_REDS(p);
+ BIF_TRAP3(&characters_to_list_trap_2_exp, p,
+ make_magic_bin_for_restart(p, &rc), rest, converted);
+ }
+ }
+
+ /*
+ * OK, no more trapping, let's get rid of the temporary array...
+ */
+
+ free_restart(bytes);
+ if (state == UTF8_INCOMPLETE) {
+ hp = HAlloc(p,4);
+ ret = TUPLE3(hp,am_incomplete,converted,rest);
+ } else if (state == UTF8_ERROR) {
+ hp = HAlloc(p,4);
+ ret = TUPLE3(hp,am_error,converted,rest);
+ } else {
+ ret = converted;
+ }
+
+ BIF_RET(ret);
+}
+
+static BIF_RETTYPE characters_to_list_trap_2(BIF_ALIST_3)
+{
+ RestartContext *rc;
+ byte *bytes;
+
+ rc = get_rc_from_bin(BIF_ARG_1);
+
+ bytes = rc->bytes;
+ rc->bytes = NULL; /* Don't want this freed just yet... */
+ return finalize_list_to_list(BIF_P, bytes, BIF_ARG_2, rc->num_processed_bytes,
+ rc->num_bytes_to_process, rc->num_resulting_chars,
+ rc->state, allowed_iterations(BIF_P), BIF_ARG_3);
+}
+
+
+/*
+ * Hooks into the process of decoding a binary depending on state.
+ * If last_state is UTF8_ANALYZE_MORE, num_bytes_to_process
+ * and num_resulting_chars will grow
+ * until we're done analyzing the binary. Then we'll eat
+ * the bytes to process, lowering num_bytes_to_process and num_resulting_chars,
+ * while increasing num_processed_bytes until we're done. The state
+ * indicates how to return (error, incomplete or ok) in this stage.
+ * Note that num_processed_bytes and num_bytes_to_process will make up the
+ * length of the binary part to process, not necessarily the length of the
+ * whole binary (if there are errors or an incomplete tail).
+ *
+ * Analyzing happens from the beginning of the binary towards the end,
+ * while result is built from the end of the analyzed/accepted part
+ * towards the beginning.
+ *
+ * Note that this routine is *only* called when original input was a plain utf8 binary,
+ * otherwise the rest and the sizes are known in advance, so finalize_list_to_list is
+ * used to build the resulting list (no analyzing needed).
+ */
+static BIF_RETTYPE do_bif_utf8_to_list(Process *p,
+ Eterm orig_bin,
+ Uint num_processed_bytes,
+ Uint num_bytes_to_process,
+ Uint num_resulting_chars,
+ int state,
+ Eterm tail)
+{
+ int left;
+ Uint bitoffs;
+ Uint bitsize;
+ Uint size;
+ byte *bytes;
+ Eterm converted = NIL;
+ Eterm rest = NIL;
+ Eterm *hp;
+ Eterm ret;
+ byte *temp_alloc = NULL;
+ byte *endpos;
+ Uint numchar;
+
+ Uint b_sz; /* size of the non analyzed tail */
+ Uint num_built; /* characters */
+ Uint num_eaten; /* bytes */
+
+ ERTS_GET_BINARY_BYTES(orig_bin, bytes, bitoffs, bitsize);
+ if (bitsize != 0) {
+ converted = NIL;
+ rest = orig_bin;
+ goto error_return;
+ }
+ if (bitoffs != 0) {
+ bytes = erts_get_aligned_binary_bytes(orig_bin, &temp_alloc);
+ }
+
+ size = binary_size(orig_bin);
+
+ left = allowed_iterations(p);
+
+ if (state == UTF8_ANALYZE_MORE) {
+ state = analyze_utf8(bytes + num_bytes_to_process,
+ size - num_bytes_to_process,
+ &endpos,&numchar,&left);
+ cost_to_proc(p,numchar);
+ num_resulting_chars += numchar;
+ num_bytes_to_process = endpos - bytes;
+ if (state == UTF8_ANALYZE_MORE) {
+ Eterm epos = erts_make_integer(num_bytes_to_process,p);
+ Eterm enumchar = erts_make_integer(num_resulting_chars,p);
+ erts_free_aligned_binary_bytes(temp_alloc);
+ BUMP_ALL_REDS(p);
+ BIF_TRAP3(&characters_to_list_trap_3_exp, p, orig_bin, epos,
+ enumchar);
+ }
+ }
+
+ /*
+     * If we're here, we have everything analyzed and are building the result
+ */
+
+
+ if (!num_bytes_to_process) {
+ converted = tail;
+ } else {
+ num_built = 0;
+ num_eaten = 0;
+ converted = do_utf8_to_list(p, num_resulting_chars,
+ bytes, num_bytes_to_process,
+ left, &num_built, &num_eaten, tail);
+ cost_to_proc(p,num_built);
+
+ if (num_built != num_resulting_chars) { /* work left to do */
+ Eterm newnum_resulting_chars =
+ erts_make_integer(num_resulting_chars - num_built,p);
+ Eterm newnum_bytes_to_process =
+ erts_make_integer(num_bytes_to_process - num_eaten,p);
+ Eterm newnum_processed_bytes =
+ erts_make_integer(num_processed_bytes + num_eaten,p);
+ Eterm traptuple;
+ hp = HAlloc(p,7);
+ traptuple = TUPLE6(hp,orig_bin,newnum_processed_bytes,
+ newnum_bytes_to_process,
+ newnum_resulting_chars,
+ make_small(state),
+ converted);
+ BUMP_ALL_REDS(p);
+ erts_free_aligned_binary_bytes(temp_alloc);
+ BIF_TRAP1(&characters_to_list_trap_4_exp,p,traptuple);
+ }
+ }
+
+ /*
+     * OK, no more trapping, let's build the rest binary if there should
+ * be one.
+ */
+
+ b_sz = size - (num_bytes_to_process + num_processed_bytes);
+
+ if (b_sz) {
+ ErlSubBin *sb;
+ Eterm orig;
+ Uint offset;
+ ASSERT(state != UTF8_OK);
+ hp = HAlloc(p, ERL_SUB_BIN_SIZE);
+ sb = (ErlSubBin *) hp;
+ ERTS_GET_REAL_BIN(orig_bin, orig, offset, bitoffs, bitsize);
+ sb->thing_word = HEADER_SUB_BIN;
+ sb->size = b_sz;
+ sb->offs = num_bytes_to_process + num_processed_bytes;
+ sb->orig = orig;
+ sb->bitoffs = bitoffs;
+ sb->bitsize = bitsize;
+ sb->is_writable = 0;
+ rest = make_binary(sb);
+ }
+
+ /* Done */
+
+ if (state == UTF8_INCOMPLETE) {
+ if (check_leftovers(bytes + num_bytes_to_process + num_processed_bytes,
+ b_sz) != 0) {
+ goto error_return;
+ }
+ hp = HAlloc(p,4);
+ ret = TUPLE3(hp,am_incomplete,converted,rest);
+ } else if (state == UTF8_ERROR) {
+ error_return:
+ hp = HAlloc(p,4);
+ ret = TUPLE3(hp,am_error,converted,rest);
+ } else {
+ ret = converted;
+ }
+
+ erts_free_aligned_binary_bytes(temp_alloc);
+ BIF_RET(ret);
+}
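+
+/*
+ * A sketch of the trap flow above, with hypothetical numbers: for a
+ * 100000-byte ASCII binary and an iteration budget of 4000, the first
+ * pass analyzes 4000 bytes/characters, stays in UTF8_ANALYZE_MORE and
+ * traps through characters_to_list_trap_3 with the counters so far.
+ * Later passes finish the analysis, then build the list backwards via
+ * do_utf8_to_list(); if building exhausts the budget as well, the six
+ * counters are packed into a tuple and the BIF traps through
+ * characters_to_list_trap_4 until everything is consumed.
+ */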
+
+
+/*
+ * This is called when there's still analyzing left to do,
+ * we only reach this if original input was a binary.
+ */
+
+static BIF_RETTYPE characters_to_list_trap_3(BIF_ALIST_3)
+{
+ Uint num_bytes_to_process;
+ Uint num_resulting_chars;
+
+ term_to_Uint(BIF_ARG_2, &num_bytes_to_process); /* The number of already
+ analyzed and accepted
+ bytes */
+ term_to_Uint(BIF_ARG_3, &num_resulting_chars); /* The number of chars
+						      produced by the
+ already analyzed
+ part of the binary */
+
+ /*erts_printf("Trap: %T, %T, %T\n",BIF_ARG_1, BIF_ARG_2, BIF_ARG_3);*/
+
+ return do_bif_utf8_to_list(BIF_P,
+ BIF_ARG_1, /* the binary */
+ 0U, /* nothing processed yet */
+ num_bytes_to_process,
+ num_resulting_chars,
+ UTF8_ANALYZE_MORE, /* always this state here */
+ NIL); /* Nothing built -> no tail yet */
+
+}
+
+/*
+ * This is called when analyzing is done and we are trapped during building,
+ * we only reach this if original input was a binary.
+ */
+static BIF_RETTYPE characters_to_list_trap_4(BIF_ALIST_1)
+{
+ Uint num_processed_bytes;
+ Uint num_bytes_to_process;
+ Uint num_resulting_chars;
+ Eterm orig_bin, tail;
+ int last_state;
+ Eterm *tplp = tuple_val(BIF_ARG_1);
+
+ orig_bin = tplp[1];
+ term_to_Uint(tplp[2], &num_processed_bytes);
+ term_to_Uint(tplp[3], &num_bytes_to_process);
+ term_to_Uint(tplp[4], &num_resulting_chars);
+ last_state = (int) signed_val(tplp[5]);
+ tail = tplp[6];
+
+ /*erts_printf("Trap: {%T, %lu, %lu, %lu, %d, %T}\n",
+ orig_bin, num_processed_bytes, num_bytes_to_process,
+ num_resulting_chars, last_state, tail);*/
+
+ return do_bif_utf8_to_list(BIF_P,
+ orig_bin, /* The whole binary */
+ num_processed_bytes, /* Number of bytes
+ already processed */
+ num_bytes_to_process, /* Bytes left to proc. */
+ num_resulting_chars, /* Num chars left to
+ build */
+ last_state, /* The current state
+ (never ANALYZE_MORE)*/
+ tail); /* The already built
+ tail */
+
+}
+/*
+ * This is only used when the characters are a plain unicode (utf8) binary.
+ * Instead of building a utf8 buffer, we analyze the given binary and use that.
+ */
+
+static BIF_RETTYPE utf8_to_list(BIF_ALIST_1)
+{
+ if (!is_binary(BIF_ARG_1) || aligned_binary_size(BIF_ARG_1) < 0) {
+ BIF_ERROR(BIF_P,BADARG);
+ }
+ return do_bif_utf8_to_list(BIF_P, BIF_ARG_1, 0U, 0U, 0U,
+ UTF8_ANALYZE_MORE,NIL);
+}
+
+
+BIF_RETTYPE atom_to_binary_2(BIF_ALIST_2)
+{
+ Atom* ap;
+
+ if (is_not_atom(BIF_ARG_1)) {
+ goto error;
+ }
+
+ ap = atom_tab(atom_val(BIF_ARG_1));
+
+ if (BIF_ARG_2 == am_latin1) {
+ BIF_RET(new_binary(BIF_P, ap->name, ap->len));
+ } else if (BIF_ARG_2 == am_utf8 || BIF_ARG_2 == am_unicode) {
+ int bin_size = 0;
+ int i;
+ Eterm bin_term;
+ byte* bin_p;
+
+ for (i = 0; i < ap->len; i++) {
+ bin_size += (ap->name[i] >= 0x80) ? 2 : 1;
+ }
+ if (bin_size == ap->len) {
+ BIF_RET(new_binary(BIF_P, ap->name, ap->len));
+ }
+ bin_term = new_binary(BIF_P, 0, bin_size);
+ bin_p = binary_bytes(bin_term);
+ for (i = 0; i < ap->len; i++) {
+ byte b = ap->name[i];
+ if (b < 0x80) {
+ *bin_p++ = b;
+ } else {
+ *bin_p++ = 0xC0 | (b >> 6);
+ *bin_p++ = 0x80 | (b & 0x3F);
+ }
+ }
+ BIF_RET(bin_term);
+ } else {
+ error:
+ BIF_ERROR(BIF_P, BADARG);
+ }
+}
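+
+/*
+ * Expansion example for the utf8 branch above (illustrative): an atom
+ * whose latin1 name is {'h', 0xE9} gets bin_size = 1 + 2 = 3, which
+ * differs from ap->len = 2, so the copy loop runs and emits
+ * {0x68, 0xC0 | (0xE9 >> 6), 0x80 | (0xE9 & 0x3F)} = {0x68, 0xC3, 0xA9},
+ * the UTF-8 encoding of "hé".
+ */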
+
+static BIF_RETTYPE
+binary_to_atom(Process* p, Eterm bin, Eterm enc, int must_exist)
+{
+ byte* bytes;
+ byte *temp_alloc = NULL;
+ Uint bin_size;
+
+ if ((bytes = erts_get_aligned_binary_bytes(bin, &temp_alloc)) == 0) {
+ BIF_ERROR(p, BADARG);
+ }
+ bin_size = binary_size(bin);
+ if (enc == am_latin1) {
+ Eterm a;
+ if (bin_size > MAX_ATOM_LENGTH) {
+ system_limit:
+ erts_free_aligned_binary_bytes(temp_alloc);
+ BIF_ERROR(p, SYSTEM_LIMIT);
+ }
+ if (!must_exist) {
+ a = am_atom_put((char *)bytes, bin_size);
+ erts_free_aligned_binary_bytes(temp_alloc);
+ BIF_RET(a);
+ } else if (erts_atom_get((char *)bytes, bin_size, &a)) {
+ erts_free_aligned_binary_bytes(temp_alloc);
+ BIF_RET(a);
+ } else {
+ goto badarg;
+ }
+ } else if (enc == am_utf8 || enc == am_unicode) {
+ char *buf;
+ char *dst;
+ int i;
+ int num_chars;
+ Eterm res;
+
+ if (bin_size > 2*MAX_ATOM_LENGTH) {
+ byte* err_pos;
+ Uint n;
+ int reds_left = bin_size+1; /* Number of reductions left. */
+
+ if (analyze_utf8(bytes, bin_size, &err_pos,
+ &n, &reds_left) == UTF8_OK) {
+ /*
+ * Correct UTF-8 encoding, but too many characters to
+ * fit in an atom.
+ */
+ goto system_limit;
+ } else {
+ /*
+ * Something wrong in the UTF-8 encoding or Unicode code
+ * points > 255.
+ */
+ goto badarg;
+ }
+ }
+
+ /*
+ * Allocate a temporary buffer the same size as the binary,
+ * so that we don't need an extra overflow test.
+ */
+ buf = (char *) erts_alloc(ERTS_ALC_T_TMP, bin_size);
+ dst = buf;
+ for (i = 0; i < bin_size; i++) {
+ int c = bytes[i];
+ if (c < 0x80) {
+ *dst++ = c;
+ } else if (i < bin_size-1) {
+ int c2;
+ if ((c & 0xE0) != 0xC0) {
+ goto free_badarg;
+ }
+ i++;
+ c = (c & 0x3F) << 6;
+ c2 = bytes[i];
+ if ((c2 & 0xC0) != 0x80) {
+ goto free_badarg;
+ }
+ c = c | (c2 & 0x3F);
+ if (0x80 <= c && c < 256) {
+ *dst++ = c;
+ } else {
+ goto free_badarg;
+ }
+ } else {
+ free_badarg:
+ erts_free(ERTS_ALC_T_TMP, (void *) buf);
+ goto badarg;
+ }
+ }
+ num_chars = dst - buf;
+ if (num_chars > MAX_ATOM_LENGTH) {
+ erts_free(ERTS_ALC_T_TMP, (void *) buf);
+ goto system_limit;
+ }
+ if (!must_exist) {
+ res = am_atom_put(buf, num_chars);
+ erts_free(ERTS_ALC_T_TMP, (void *) buf);
+ erts_free_aligned_binary_bytes(temp_alloc);
+ BIF_RET(res);
+ } else {
+ int exists = erts_atom_get(buf, num_chars, &res);
+ erts_free(ERTS_ALC_T_TMP, (void *) buf);
+ if (exists) {
+ erts_free_aligned_binary_bytes(temp_alloc);
+ BIF_RET(res);
+ } else {
+ goto badarg;
+ }
+ }
+ } else {
+ badarg:
+ erts_free_aligned_binary_bytes(temp_alloc);
+ BIF_ERROR(p, BADARG);
+ }
+}
+
+BIF_RETTYPE binary_to_atom_2(BIF_ALIST_2)
+{
+ return binary_to_atom(BIF_P, BIF_ARG_1, BIF_ARG_2, 0);
+}
+
+BIF_RETTYPE binary_to_existing_atom_2(BIF_ALIST_2)
+{
+ return binary_to_atom(BIF_P, BIF_ARG_1, BIF_ARG_2, 1);
+}
diff --git a/erts/emulator/beam/erl_unicode.h b/erts/emulator/beam/erl_unicode.h
new file mode 100644
index 0000000000..1b63b797c2
--- /dev/null
+++ b/erts/emulator/beam/erl_unicode.h
@@ -0,0 +1,23 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2008-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+#ifndef _ERL_UNICODE_H
+#define _ERL_UNICODE_H
+
+#endif /* _ERL_UNICODE_H */
diff --git a/erts/emulator/beam/erl_vm.h b/erts/emulator/beam/erl_vm.h
new file mode 100644
index 0000000000..4d8315ab95
--- /dev/null
+++ b/erts/emulator/beam/erl_vm.h
@@ -0,0 +1,204 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 1996-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+#ifndef __ERL_VM_H__
+#define __ERL_VM_H__
+
+/* #define ERTS_OPCODE_COUNTER_SUPPORT */
+
+#if defined(HYBRID)
+/* # define CHECK_FOR_HOLES */
+#endif
+
+#if defined(DEBUG) && !defined(CHECK_FOR_HOLES) && !defined(__WIN32__)
+# define CHECK_FOR_HOLES
+#endif
+
+#if defined(HYBRID)
+/* # define INCREMENTAL 1 */ /* Incremental garbage collection */
+/* # define INC_TIME_BASED 1 */ /* Time-based incremental GC (vs Work-based) */
+#endif
+
+#define BEAM 1
+#define EMULATOR "BEAM"
+#define SEQ_TRACE 1
+
+#define CONTEXT_REDS 2000 /* Swap process out after this number of reductions */
+#define MAX_ARG 256 /* Max number of arguments allowed */
+#define MAX_REG 1024 /* Max number of x(N) registers used */
+
+/*
+ * The new arithmetic operations need some extra X registers in the register array.
+ */
+#define ERTS_X_REGS_ALLOCATED (MAX_REG+2)
+
+#define INPUT_REDUCTIONS (2 * CONTEXT_REDS)
+
+#define H_DEFAULT_SIZE 233 /* default (heap + stack) min size */
+
+#ifdef HYBRID
+# define SH_DEFAULT_SIZE 2629425 /* default message area min size */
+#endif
+
+#ifdef INCREMENTAL
+# define INC_NoPAGES 256 /* Number of pages in the old generation */
+# define INC_PAGESIZE 32768 /* The size of each page */
+# define INC_STORAGE_SIZE 1024 /* The size of gray stack and similar */
+#endif
+
+#define CP_SIZE 1
+
+#define ErtsHAllocLockCheck(P) \
+ ERTS_SMP_LC_ASSERT((ERTS_PROC_LOCK_MAIN & erts_proc_lc_my_proc_locks((P))) \
+ || ((P)->scheduler_data \
+ && (P) == (P)->scheduler_data->match_pseudo_process) \
+ || erts_is_system_blocked(0))
+
+#ifdef DEBUG
+/*
+ * Debug HAlloc that initializes all memory to bad things.
+ *
+ * To get information about where memory is allocated, insert the two
+ * lines below directly after the memset line and use the flag +va.
+ *
+ VERBOSE(DEBUG_ALLOCATION,("HAlloc @ 0x%08lx (%d) %s:%d\n", \
+ (unsigned long)HEAP_TOP(p),(sz),__FILE__,__LINE__)), \
+ */
+#ifdef CHECK_FOR_HOLES
+#define HAlloc(p, sz) \
+ (ASSERT_EXPR((sz) >= 0), \
+ ErtsHAllocLockCheck(p), \
+ ((((HEAP_LIMIT(p) - HEAP_TOP(p)) < (sz))) \
+ ? erts_heap_alloc((p),(sz)) \
+ : (erts_set_hole_marker(HEAP_TOP(p), (sz)), \
+ HEAP_TOP(p) = HEAP_TOP(p) + (sz), HEAP_TOP(p) - (sz))))
+#else
+#define HAlloc(p, sz) \
+ (ASSERT_EXPR((sz) >= 0), \
+ ErtsHAllocLockCheck(p), \
+ ((((HEAP_LIMIT(p) - HEAP_TOP(p)) < (sz))) \
+ ? erts_heap_alloc((p),(sz)) \
+ : (memset(HEAP_TOP(p),DEBUG_BAD_BYTE,(sz)*sizeof(Eterm*)), \
+ HEAP_TOP(p) = HEAP_TOP(p) + (sz), HEAP_TOP(p) - (sz))))
+#endif
+#else
+
+/*
+ * Allocate heap memory, first on the ordinary heap;
+ * failing that, in a heap fragment.
+ */
+#define HAlloc(p, sz) \
+ (ASSERT_EXPR((sz) >= 0), \
+ ErtsHAllocLockCheck(p), \
+ ((((HEAP_LIMIT(p) - HEAP_TOP(p)) < (sz))) \
+ ? erts_heap_alloc((p),(sz)) \
+ : (HEAP_TOP(p) = HEAP_TOP(p) + (sz), HEAP_TOP(p) - (sz))))
+
+#endif /* DEBUG */
+
+#if defined(CHECK_FOR_HOLES)
+# define HRelease(p, endp, ptr) \
+ if ((ptr) == (endp)) { \
+ ; \
+ } else if (HEAP_START(p) <= (ptr) && (ptr) < HEAP_TOP(p)) { \
+ HEAP_TOP(p) = (ptr); \
+ } else { \
+ erts_arith_shrink(p, ptr); \
+ }
+#else
+# define HRelease(p, endp, ptr) \
+ if ((ptr) == (endp)) { \
+ ; \
+ } else if (HEAP_START(p) <= (ptr) && (ptr) < HEAP_TOP(p)) { \
+ HEAP_TOP(p) = (ptr); \
+ }
+#endif
+
+#define HeapWordsLeft(p) (HEAP_LIMIT(p) - HEAP_TOP(p))
+
+#if defined(DEBUG) || defined(CHECK_FOR_HOLES)
+# define ERTS_HOLE_MARKER (((0xaf5e78ccUL << 24) << 8) | 0xaf5e78ccUL)
+#endif
+
+/*
+ * Allocate heap memory on the ordinary heap, NEVER in a heap
+ * segment. The caller must ensure that there are enough words
+ * left on the heap before calling HeapOnlyAlloc() (for instance,
+ * by testing HeapWordsLeft() and calling the garbage collector
+ * if not enough).
+ */
+#ifdef CHECK_FOR_HOLES
+# define HeapOnlyAlloc(p, sz) \
+ (ASSERT_EXPR((sz) >= 0), \
+ (ASSERT_EXPR(((HEAP_LIMIT(p) - HEAP_TOP(p)) >= (sz))), \
+ (erts_set_hole_marker(HEAP_TOP(p), (sz)), \
+ (HEAP_TOP(p) = HEAP_TOP(p) + (sz), HEAP_TOP(p) - (sz)))))
+#else
+# define HeapOnlyAlloc(p, sz) \
+ (ASSERT_EXPR((sz) >= 0), \
+ (ASSERT_EXPR(((HEAP_LIMIT(p) - HEAP_TOP(p)) >= (sz))), \
+ (HEAP_TOP(p) = HEAP_TOP(p) + (sz), HEAP_TOP(p) - (sz))))
+#endif
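+
+/*
+ * A minimal usage sketch of HAlloc(), kept under #if 0; TUPLE2, CONS and
+ * NIL come from erl_term.h (included at the end of this header). A cons
+ * cell costs 2 words and a 2-tuple 3 words, so the words are requested
+ * up front and then consumed in order.
+ */
+#if 0
+static Eterm make_pair_example(Process *p, Eterm a, Eterm b)
+{
+    Eterm *hp = HAlloc(p, 3 + 2);   /* one 2-tuple + one cons cell */
+    Eterm tup = TUPLE2(hp, a, b);   /* consumes 3 words */
+    hp += 3;
+    return CONS(hp, tup, NIL);      /* consumes the remaining 2 words */
+}
+#endif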
+
+
+/*
+ * Description for each instruction (defined here because the name and
+ * count fields are interesting outside the emulator proper).
+ */
+
+typedef struct op_entry {
+ char* name; /* Name of instruction. */
+ Uint32 mask[3]; /* Signature mask. */
+ int sz; /* Number of loaded words. */
+ char* pack; /* Instructions for packing engine. */
+ char* sign; /* Signature string. */
+ unsigned count; /* Number of times executed. */
+} OpEntry;
+
+extern OpEntry opc[]; /* Description of all instructions. */
+extern int num_instructions; /* Number of instructions in opc[]. */
+
+/* some constants for various table sizes etc */
+
+#define ATOM_TEXT_SIZE 32768 /* Increment for allocating atom text space */
+
+#define ITIME 100 /* Number of milliseconds per clock tick */
+#define MAX_PORT_LINK 8 /* Maximum number of links to a port */
+
+extern int H_MIN_SIZE; /* minimum (heap + stack) */
+
+#define ORIG_CREATION 0
+
+/* macros for extracting bytes from uint16's */
+
+#define hi_byte(a) ((a) >> 8)
+#define lo_byte(a) ((a) & 255)
+
+/* macros for combining bytes */
+
+#define make_16(x, y) (((x) << 8) | (y))
+#define make_24(x,y,z) (((x) << 16) | ((y) << 8) | (z))
+#define make_32(x3,x2,x1,x0) (((x3)<<24) | ((x2)<<16) | ((x1)<<8) | (x0))
+
+#define make_signed_24(x,y,z) ((sint32) (((x) << 24) | ((y) << 16) | ((z) << 8)) >> 8)
+#define make_signed_32(x3,x2,x1,x0) ((sint32) (((x3) << 24) | ((x2) << 16) | ((x1) << 8) | (x0)))
+
+#include "erl_term.h"
+
+#endif /* __ERL_VM_H__ */
diff --git a/erts/emulator/beam/erl_zlib.c b/erts/emulator/beam/erl_zlib.c
new file mode 100644
index 0000000000..f73d48b6c2
--- /dev/null
+++ b/erts/emulator/beam/erl_zlib.c
@@ -0,0 +1,113 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/* A thin wrapper around zlib with erts memory allocation.
+ *
+ * erl_zlib_compress2 and erl_zlib_uncompress are erts-adapted versions
+ * of the original compress2 and uncompress from zlib-1.2.3.
+ */
+
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+
+#include "erl_zlib.h"
+
+#include "sys.h"
+#include "erl_alloc.h"
+
+voidpf erl_zlib_zalloc_callback (voidpf opaque, unsigned items, unsigned size)
+{
+ (void) opaque; /* make compiler happy */
+ return erts_alloc_fnf(ERTS_ALC_T_ZLIB, items * size);
+}
+
+void erl_zlib_zfree_callback (voidpf opaque, voidpf ptr)
+{
+ (void) opaque; /* make compiler happy */
+ erts_free(ERTS_ALC_T_ZLIB, ptr);
+}
+
+
+int ZEXPORT erl_zlib_compress2 (Bytef* dest, uLongf* destLen,
+ const Bytef* source, uLong sourceLen,
+ int level)
+{
+ z_stream stream;
+ int err;
+
+ stream.next_in = (Bytef*)source;
+ stream.avail_in = (uInt)sourceLen;
+#ifdef MAXSEG_64K
+ /* Check for source > 64K on 16-bit machine: */
+ if ((uLong)stream.avail_in != sourceLen) return Z_BUF_ERROR;
+#endif
+ stream.next_out = dest;
+ stream.avail_out = (uInt)*destLen;
+ if ((uLong)stream.avail_out != *destLen) return Z_BUF_ERROR;
+
+ erl_zlib_alloc_init(&stream);
+
+ err = deflateInit(&stream, level);
+ if (err != Z_OK) return err;
+
+ err = deflate(&stream, Z_FINISH);
+ if (err != Z_STREAM_END) {
+ deflateEnd(&stream);
+ return err == Z_OK ? Z_BUF_ERROR : err;
+ }
+ *destLen = stream.total_out;
+
+ err = deflateEnd(&stream);
+ return err;
+}
+
+int ZEXPORT erl_zlib_uncompress (Bytef* dest, uLongf* destLen,
+ const Bytef* source, uLong sourceLen)
+{
+ z_stream stream;
+ int err;
+
+ stream.next_in = (Bytef*)source;
+ stream.avail_in = (uInt)sourceLen;
+ /* Check for source > 64K on 16-bit machine: */
+ if ((uLong)stream.avail_in != sourceLen) return Z_BUF_ERROR;
+
+ stream.next_out = dest;
+ stream.avail_out = (uInt)*destLen;
+ if ((uLong)stream.avail_out != *destLen) return Z_BUF_ERROR;
+
+ erl_zlib_alloc_init(&stream);
+
+ err = inflateInit(&stream);
+ if (err != Z_OK) return err;
+
+ err = inflate(&stream, Z_FINISH);
+ if (err != Z_STREAM_END) {
+ inflateEnd(&stream);
+ if (err == Z_NEED_DICT || (err == Z_BUF_ERROR && stream.avail_in == 0))
+ return Z_DATA_ERROR;
+ return err;
+ }
+ *destLen = stream.total_out;
+
+ err = inflateEnd(&stream);
+ return err;
+}
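+
+/*
+ * A round-trip sketch, kept under #if 0; names and sizes are assumptions
+ * for illustration. As in plain zlib, the destination length arguments
+ * are in/out: buffer capacity on entry, produced size on return.
+ */
+#if 0
+static int erl_zlib_round_trip_example(void)
+{
+    Bytef in[] = "hello, hello, hello";  /* repetitive => compressible */
+    Bytef packed[64], unpacked[64];
+    uLongf packed_len = sizeof(packed);
+    uLongf unpacked_len = sizeof(unpacked);
+
+    if (erl_zlib_compress2(packed, &packed_len, in, sizeof(in),
+                           Z_DEFAULT_COMPRESSION) != Z_OK)
+        return -1;
+    if (erl_zlib_uncompress(unpacked, &unpacked_len,
+                            packed, packed_len) != Z_OK)
+        return -1;
+    return (unpacked_len == sizeof(in)) ? 0 : -1;
+}
+#endif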
+
diff --git a/erts/emulator/beam/erl_zlib.h b/erts/emulator/beam/erl_zlib.h
new file mode 100644
index 0000000000..9054a5e428
--- /dev/null
+++ b/erts/emulator/beam/erl_zlib.h
@@ -0,0 +1,52 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/* A thin wrapper interface around zlib with erts memory allocation.
+*/
+
+#include <zlib.h>
+
+
+/* Initialize zalloc, zfree and opaque of a z_stream
+*/
+#define erl_zlib_alloc_init(s) \
+ do { /* 'opaque' not used */ \
+ (s)->zalloc = erl_zlib_zalloc_callback; \
+ (s)->zfree = erl_zlib_zfree_callback; \
+ } while (0)
+
+/* Use instead of compress
+*/
+#define erl_zlib_compress(dest,destLen,source,sourceLen) \
+ erl_zlib_compress2(dest,destLen,source,sourceLen,Z_DEFAULT_COMPRESSION)
+
+/* Use instead of compress2
+*/
+int ZEXPORT erl_zlib_compress2 (Bytef* dest, uLongf* destLen,
+ const Bytef* source, uLong sourceLen,
+ int level);
+/* Use instead of uncompress
+*/
+int ZEXPORT erl_zlib_uncompress (Bytef* dest, uLongf* destLen,
+ const Bytef* source, uLong sourceLen);
+
+
+voidpf erl_zlib_zalloc_callback (voidpf,unsigned,unsigned);
+void erl_zlib_zfree_callback (voidpf,voidpf);
+
diff --git a/erts/emulator/beam/error.h b/erts/emulator/beam/error.h
new file mode 100644
index 0000000000..4930def4ed
--- /dev/null
+++ b/erts/emulator/beam/error.h
@@ -0,0 +1,196 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 1996-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+#ifndef __ERROR_H__
+#define __ERROR_H__
+
+/*
+ * There are three primary exception classes:
+ *
+ * - exit Process termination - not an error.
+ * - error Error (adds stacktrace; will be logged).
+ * - thrown Nonlocal return (turns into a 'nocatch'
+ * error if not caught by the process).
+ *
+ * In addition, we define a number of exit codes as a convenient
+ * short-hand: instead of building the error descriptor term at the time
+ * the exception is raised, it is built as necessary when the exception
+ * is handled. Examples are EXC_NORMAL, EXC_BADARG, EXC_BADARITH, etc.
+ * Some of these have convenient aliases, like BADARG and BADARITH.
+ */
+
+/*
+ * Bits 0-1 index the 'exception class tag' table.
+ */
+#define EXC_CLASSBITS 3
+#define GET_EXC_CLASS(x) ((x) & EXC_CLASSBITS)
+
+/*
+ * Exception class tags (indices into the 'exception_tag' array)
+ */
+#define EXTAG_ERROR 0
+#define EXTAG_EXIT 1
+#define EXTAG_THROWN 2
+
+#define NUMBER_EXC_TAGS 3 /* The number of exception class tags */
+
+/*
+ * Exit code flags (bits 2-7)
+ *
+ * These flags make it easier and quicker to decide what to do with the
+ * exception in the early stages, before a handler is found, and also
+ * maintain some separation between the class tag and the actions.
+ */
+#define EXF_PANIC (1<<2) /* ignore catches */
+#define EXF_THROWN (1<<3) /* nonlocal return */
+#define EXF_LOG (1<<4) /* write to logger on termination */
+#define EXF_NATIVE (1<<5) /* occurred in native code */
+#define EXF_SAVETRACE (1<<6) /* save stack trace in internal form */
+#define EXF_ARGLIST (1<<7) /* has arglist for top of trace */
+
+#define EXC_FLAGBITS 0x00fc
+
+/*
+ * The primary fields of an exception code
+ */
+#define EXF_PRIMARY (EXF_PANIC | EXF_THROWN | EXF_LOG | EXF_NATIVE)
+#define PRIMARY_EXCEPTION(x) ((x) & (EXF_PRIMARY | EXC_CLASSBITS))
+#define NATIVE_EXCEPTION(x) ((x) | EXF_NATIVE)
+
+/*
+ * Bits 8-12 of the error code are used for indexing into
+ * the short-hand error descriptor table.
+ */
+#define EXC_INDEXBITS 0x1f00
+#define GET_EXC_INDEX(x) (((x) & EXC_INDEXBITS) >> 8)
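+
+/*
+ * Worked example (using definitions given below): EXC_BADARG is
+ * ((3 << 8) | EXC_ERROR) = 0x350, so GET_EXC_INDEX(EXC_BADARG) yields
+ * (0x350 & 0x1f00) >> 8 = 3 -- its slot in the descriptor table --
+ * while GET_EXC_CLASS(EXC_BADARG) yields 0x350 & 3 = EXTAG_ERROR.
+ */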
+
+/*
+ * Exit codes used for raising a fresh exception. The primary exceptions
+ * share index 0 in the descriptor table. EXC_NULL signals that no
+ * exception has occurred. The primary exit codes EXC_EXIT, EXC_ERROR
+ * and EXC_THROWN are the basis for all other exit codes, and must
+ * always have the EXF_SAVETRACE flag set so that a trace is saved
+ * whenever a new exception occurs; the flag is then cleared.
+ */
+#define EXC_NULL 0 /* Initial value for p->freason */
+#define EXC_PRIMARY (0 | EXF_SAVETRACE)
+#define EXC_ERROR (EXC_PRIMARY | EXTAG_ERROR | EXF_LOG)
+ /* Generic error (exit term
+ * in p->fvalue) */
+#define EXC_EXIT (EXC_PRIMARY | EXTAG_EXIT)
+ /* Generic exit (exit term
+ * in p->fvalue) */
+#define EXC_THROWN (EXC_PRIMARY | EXTAG_THROWN | EXF_THROWN)
+ /* Generic nonlocal return
+ * (thrown term in p->fvalue) */
+
+#define EXC_ERROR_2 (EXC_ERROR | EXF_ARGLIST)
+ /* Error with given arglist term
+ * (exit reason in p->fvalue) */
+
+#define EXC_NORMAL ((1 << 8) | EXC_EXIT)
+ /* Normal exit (reason 'normal') */
+#define EXC_INTERNAL_ERROR ((2 << 8) | EXC_ERROR | EXF_PANIC)
+ /* Things that shouldn't happen */
+#define EXC_BADARG ((3 << 8) | EXC_ERROR)
+ /* Bad argument to a BIF */
+#define EXC_BADARITH ((4 << 8) | EXC_ERROR)
+ /* Bad arithmetic */
+#define EXC_BADMATCH ((5 << 8) | EXC_ERROR)
+ /* Bad match in function body */
+#define EXC_FUNCTION_CLAUSE ((6 << 8) | EXC_ERROR)
+ /* No matching function head */
+#define EXC_CASE_CLAUSE ((7 << 8) | EXC_ERROR)
+ /* No matching case clause */
+#define EXC_IF_CLAUSE ((8 << 8) | EXC_ERROR)
+ /* No matching if clause */
+#define EXC_UNDEF ((9 << 8) | EXC_ERROR)
+					 /* No function of that arity matches */
+#define EXC_BADFUN ((10 << 8) | EXC_ERROR)
+ /* Not an existing fun */
+#define EXC_BADARITY ((11 << 8) | EXC_ERROR)
+ /* Attempt to call fun with
+ * wrong number of arguments. */
+#define EXC_TIMEOUT_VALUE ((12 << 8) | EXC_ERROR)
+ /* Bad time out value */
+#define EXC_NOPROC ((13 << 8) | EXC_ERROR)
+ /* No process or port */
+#define EXC_NOTALIVE ((14 << 8) | EXC_ERROR)
+ /* Not distributed */
+#define EXC_SYSTEM_LIMIT ((15 << 8) | EXC_ERROR)
+ /* Ran out of something */
+#define EXC_TRY_CLAUSE ((16 << 8) | EXC_ERROR)
+ /* No matching try clause */
+#define EXC_NOTSUP ((17 << 8) | EXC_ERROR)
+ /* Not supported */
+
+#define NUMBER_EXIT_CODES 18 /* The number of exit code indices */
+
+/*
+ * Internal pseudo-error codes.
+ */
+#define TRAP (1 << 8) /* BIF Trap to erlang code */
+
+/*
+ * Aliases for some common exit codes.
+ */
+#define BADARG EXC_BADARG
+#define BADARITH EXC_BADARITH
+#define BADMATCH EXC_BADMATCH
+#define SYSTEM_LIMIT EXC_SYSTEM_LIMIT
+
+
+/*
+ * Pseudo error codes (these are never seen by the user).
+ */
+#define TLOAD_OK 0 /* The threaded code linking was successful */
+#define TLOAD_MAGIC_NUMBER 1 /* Wrong kind of object file */
+#define TLOAD_FORMAT 2 /* Format error while reading object code */
+#define TLOAD_MODULE 3 /* Module name in object code does not match */
+#define TLOAD_SIZE 4 /* Given size in object code differs from actual size */
+
+/*
+ * The exception stack trace parameters.
+ */
+#define MAX_BACKTRACE_SIZE 64 /* whatever - just not too huge */
+#define DEFAULT_BACKTRACE_SIZE 8
+
+/*
+ * The table translating an exception code to an atom.
+ */
+extern Eterm error_atom[NUMBER_EXIT_CODES];
+
+/*
+ * The exception tag table.
+ */
+extern Eterm exception_tag[NUMBER_EXC_TAGS];
+
+/*
+ * The quick-saved stack trace structure
+ */
+struct StackTrace {
+ Eterm header; /* bignum header - must be first in struct */
+ Eterm freason; /* original exception reason is saved in the struct */
+ Eterm* pc;
+ Eterm* current;
+ int depth; /* number of saved pointers in trace[] */
+ Eterm *trace[1]; /* varying size - must be last in struct */
+};
+
+#endif /* __ERROR_H__ */
diff --git a/erts/emulator/beam/export.c b/erts/emulator/beam/export.c
new file mode 100644
index 0000000000..271b40cf0f
--- /dev/null
+++ b/erts/emulator/beam/export.c
@@ -0,0 +1,296 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 1996-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+
+#include "sys.h"
+#include "erl_vm.h"
+#include "global.h"
+#include "export.h"
+#include "hash.h"
+
+#define EXPORT_INITIAL_SIZE 4000
+#define EXPORT_LIMIT (512*1024)
+
+#define EXPORT_HASH(m,f,a) ((m)*(f)+(a))
+
+static IndexTable export_table; /* Not locked. */
+static Hash secondary_export_table; /* Locked. */
+
+#include "erl_smp.h"
+
+static erts_smp_rwmtx_t export_table_lock; /* Locks the secondary export table. */
+
+#define export_read_lock() erts_smp_rwmtx_rlock(&export_table_lock)
+#define export_read_unlock() erts_smp_rwmtx_runlock(&export_table_lock)
+#define export_write_lock() erts_smp_rwmtx_rwlock(&export_table_lock)
+#define export_write_unlock() erts_smp_rwmtx_rwunlock(&export_table_lock)
+#define export_init_lock() erts_smp_rwmtx_init(&export_table_lock, \
+ "export_tab")
+
+extern Eterm* em_call_error_handler;
+extern Uint* em_call_traced_function;
+
+void
+export_info(int to, void *to_arg)
+{
+#ifdef ERTS_SMP
+ int lock = !ERTS_IS_CRASH_DUMPING;
+ if (lock)
+ export_read_lock();
+#endif
+ index_info(to, to_arg, &export_table);
+ hash_info(to, to_arg, &secondary_export_table);
+#ifdef ERTS_SMP
+ if (lock)
+ export_read_unlock();
+#endif
+}
+
+
+static HashValue
+export_hash(Export* x)
+{
+ return EXPORT_HASH(x->code[0], x->code[1], x->code[2]);
+}
+
+static int
+export_cmp(Export* tmpl, Export* obj)
+{
+ return !(tmpl->code[0] == obj->code[0] &&
+ tmpl->code[1] == obj->code[1] &&
+ tmpl->code[2] == obj->code[2]);
+}
+
+
+static Export*
+export_alloc(Export* tmpl)
+{
+ Export* obj = (Export*) erts_alloc(ERTS_ALC_T_EXPORT, sizeof(Export));
+
+ obj->fake_op_func_info_for_hipe[0] = 0;
+ obj->fake_op_func_info_for_hipe[1] = 0;
+ obj->code[0] = tmpl->code[0];
+ obj->code[1] = tmpl->code[1];
+ obj->code[2] = tmpl->code[2];
+ obj->slot.index = -1;
+ obj->address = obj->code+3;
+ obj->code[3] = (Eterm) em_call_error_handler;
+ obj->code[4] = 0;
+ obj->match_prog_set = NULL;
+ return obj;
+}
+
+
+static void
+export_free(Export* obj)
+{
+ erts_free(ERTS_ALC_T_EXPORT, (void*) obj);
+}
+
+
+void
+init_export_table(void)
+{
+ HashFunctions f;
+
+ export_init_lock();
+ f.hash = (H_FUN) export_hash;
+ f.cmp = (HCMP_FUN) export_cmp;
+ f.alloc = (HALLOC_FUN) export_alloc;
+ f.free = (HFREE_FUN) export_free;
+
+ erts_index_init(ERTS_ALC_T_EXPORT_TABLE, &export_table, "export_list",
+ EXPORT_INITIAL_SIZE, EXPORT_LIMIT, f);
+ hash_init(ERTS_ALC_T_EXPORT_TABLE, &secondary_export_table,
+ "secondary_export_table", 50, f);
+}
+
+/*
+ * Return a pointer to the export entry for the given function,
+ * or NULL otherwise. Notes:
+ *
+ * 1) BIFs have export entries and can be called through
+ * a wrapper in the export entry.
+ * 2) Functions referenced by a loaded module but not yet loaded
+ * also have export entries. The export entry contains
+ * a wrapper which invokes the error handler if a function is
+ * called through such an export entry.
+ * 3) This function is suitable for the implementation of erlang:apply/3.
+ */
+
+Export*
+erts_find_export_entry(Eterm m, Eterm f, unsigned int a)
+{
+ HashValue hval = EXPORT_HASH(m, f, a);
+ int ix;
+ HashBucket* b;
+
+ ix = hval % export_table.htable.size;
+ b = export_table.htable.bucket[ix];
+
+ /*
+ * Note: We have inlined the code from hash.c for speed.
+ */
+
+ while (b != (HashBucket*) 0) {
+ Export* ep = (Export *) b;
+ if (ep->code[0] == m && ep->code[1] == f && ep->code[2] == a) {
+ break;
+ }
+ b = b->next;
+ }
+ return (Export*)b;
+}
+
+
+/*
+ * Find the export entry for a loaded function.
+ * Returns a NULL pointer if the given function is not loaded, or
+ * a pointer to the export entry.
+ *
+ * Note: This function never returns export entries for BIFs
+ * or functions which are not yet loaded. This makes it suitable
+ * for use by the erlang:function_exported/3 BIF or whenever you
+ * cannot depend on the error_handler.
+ */
+
+Export*
+erts_find_function(Eterm m, Eterm f, unsigned int a)
+{
+ Export e;
+ Export* ep;
+
+ e.code[0] = m;
+ e.code[1] = f;
+ e.code[2] = a;
+
+ ep = hash_get(&export_table.htable, (void*) &e);
+ if (ep != NULL && ep->address == ep->code+3 &&
+ ep->code[3] != (Uint) em_call_traced_function) {
+ ep = NULL;
+ }
+ return ep;
+}
+
+/*
+ * Returns a pointer to an existing export entry for a MFA,
+ * or creates a new one and returns the pointer.
+ *
+ * This function provides unlocked write access to the main export
+ * table. It should only be used during start up or when
+ * all other threads are blocked.
+ */
+
+Export*
+erts_export_put(Eterm mod, Eterm func, unsigned int arity)
+{
+ Export e;
+ int ix;
+
+ ERTS_SMP_LC_ASSERT(erts_initialized == 0 || erts_smp_is_system_blocked(0));
+ ASSERT(is_atom(mod));
+ ASSERT(is_atom(func));
+ e.code[0] = mod;
+ e.code[1] = func;
+ e.code[2] = arity;
+ ix = index_put(&export_table, (void*) &e);
+ return (Export*) erts_index_lookup(&export_table, ix);
+}
+
+/*
+ * Find the existing export entry for M:F/A. Failing that, create a stub
+ * export entry (making a call through it will cause the error_handler to
+ * be called).
+ *
+ * Stub export entries will be placed in the secondary export table.
+ * erts_export_consolidate() will move all stub export entries into the
+ * main export table (this is done the next time code is loaded).
+ */
+
+Export*
+erts_export_get_or_make_stub(Eterm mod, Eterm func, unsigned int arity)
+{
+ Export e;
+ Export* ep;
+
+ ASSERT(is_atom(mod));
+ ASSERT(is_atom(func));
+
+ e.code[0] = mod;
+ e.code[1] = func;
+ e.code[2] = arity;
+ ep = erts_find_export_entry(mod, func, arity);
+ if (ep == 0) {
+ /*
+ * The code is not loaded (yet). Put the export in the secondary
+ * export table, to avoid having to lock the main export table.
+ */
+ export_write_lock();
+ ep = (Export *) hash_put(&secondary_export_table, (void*) &e);
+ export_write_unlock();
+ }
+ return ep;
+}
+
+/*
+ * To be called before loading code (with other threads blocked).
+ * This function will move all export entries from the secondary
+ * export table into the primary.
+ */
+void
+erts_export_consolidate(void)
+{
+#ifdef DEBUG
+ HashInfo hi;
+#endif
+
+ ERTS_SMP_LC_ASSERT(erts_initialized == 0 || erts_smp_is_system_blocked(0));
+
+ export_write_lock();
+ erts_index_merge(&secondary_export_table, &export_table);
+ erts_hash_merge(&secondary_export_table, &export_table.htable);
+ export_write_unlock();
+#ifdef DEBUG
+ hash_get_info(&hi, &export_table.htable);
+ ASSERT(export_table.entries == hi.objs);
+#endif
+}
+
+Export *export_list(int i)
+{
+ return (Export*) erts_index_lookup(&export_table, i);
+}
+
+int export_list_size(void)
+{
+ return export_table.entries;
+}
+
+int export_table_sz(void)
+{
+ return index_table_sz(&export_table);
+}
+
+Export *export_get(Export *e)
+{
+ return hash_get(&export_table.htable, e);
+}
diff --git a/erts/emulator/beam/export.h b/erts/emulator/beam/export.h
new file mode 100644
index 0000000000..cd6af6dd85
--- /dev/null
+++ b/erts/emulator/beam/export.h
@@ -0,0 +1,79 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 1996-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+#ifndef __EXPORT_H__
+#define __EXPORT_H__
+
+#ifndef __SYS_H__
+#include "sys.h"
+#endif
+
+#ifndef __INDEX_H__
+#include "index.h"
+#endif
+
+/*
+** Export entry
+*/
+typedef struct export
+{
+ IndexSlot slot; /* MUST BE LOCATED AT TOP OF STRUCT!!! */
+ void* address; /* Pointer to code for function. */
+ struct binary* match_prog_set; /* Match program for tracing. */
+
+ Eterm fake_op_func_info_for_hipe[2]; /* MUST be just before code[] */
+ /*
+ * code[0]: Tagged atom for module.
+ * code[1]: Tagged atom for function.
+ * code[2]: Arity (untagged integer).
+ * code[3]: This entry is 0 unless the 'address' field points to it.
+ * Threaded code instruction to load function
+ * (em_call_error_handler), execute BIF (em_apply_bif,
+ * em_apply_apply), or call a traced function
+ * (em_call_traced_function).
+ * code[4]: Function pointer to BIF function (for BIFs only)
+ * or pointer to threaded code if the module has an
+ * on_load function that has not been run yet.
+ * Otherwise: 0.
+ */
+ Eterm code[5];
+} Export;
+
+
+void init_export_table(void);
+void export_info(int, void *);
+
+Export* erts_find_export_entry(Eterm m, Eterm f, unsigned int a);
+Export* erts_export_put(Eterm mod, Eterm func, unsigned int arity);
+
+
+Export* erts_export_get_or_make_stub(Eterm, Eterm, unsigned);
+void erts_export_consolidate(void);
+
+Export *export_list(int);
+int export_list_size(void);
+int export_table_sz(void);
+Export *export_get(Export*);
+
+#include "beam_load.h" /* For em_* extern declarations */
+#define ExportIsBuiltIn(EntryPtr) \
+(((EntryPtr)->address == (EntryPtr)->code + 3) && \
+ ((EntryPtr)->code[3] == (Uint) em_apply_bif))
+
+#endif
diff --git a/erts/emulator/beam/external.c b/erts/emulator/beam/external.c
new file mode 100644
index 0000000000..f856cce18f
--- /dev/null
+++ b/erts/emulator/beam/external.c
@@ -0,0 +1,2839 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 1996-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/* Implementation of the erlang external format
+ *
+ * And a nice cache mechanism which is used just to send an
+ * index indicating a specific atom to a remote node instead of the
+ * entire atom.
+ */
+
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+
+#define ERTS_WANT_EXTERNAL_TAGS
+
+#include "sys.h"
+#include "erl_vm.h"
+#include "global.h"
+#include "erl_process.h"
+#include "error.h"
+#include "external.h"
+#include "bif.h"
+#include "big.h"
+#include "dist.h"
+#include "erl_binary.h"
+#include "erl_bits.h"
+#include "erl_zlib.h"
+
+#ifdef HIPE
+#include "hipe_mode_switch.h"
+#endif
+#define in_area(ptr,start,nbytes) ((Uint)((char*)(ptr) - (char*)(start)) < (nbytes))
+
+#define MAX_STRING_LEN 0xffff
+#define dec_set_creation(nodename,creat) \
+ (((nodename) == erts_this_node->sysname && (creat) == ORIG_CREATION) \
+ ? erts_this_node->creation \
+ : (creat))
+
+#undef ERTS_DEBUG_USE_DIST_SEP
+#ifdef DEBUG
+# if 0
+/*
+ * Enabling ERTS_DEBUG_USE_DIST_SEP can be useful when debugging, but the
+ * result refuses to talk to nodes without it!
+ */
+# define ERTS_DEBUG_USE_DIST_SEP
+# endif
+#endif
+
+/*
+ * For backward compatibility reasons, only encode integers that
+ * fit in 28 bits (signed) using INTEGER_EXT.
+ */
+#define IS_SSMALL28(x) (((Uint) (((x) >> (28-1)) + 1)) < 2)
+
+/*
+ * Valid creations for nodes are 1, 2, or 3. 0 can also be sent
+ * as creation, though. When 0 is used as creation, the real creation
+ * is unknown. Creation 0 on data will be changed to current
+ * creation of the node which it belongs to when it enters
+ * that node.
+ * This typically happens when a remote pid is created with
+ * list_to_pid/1 and then sent to the remote node. This behavior
+ * has the undesirable effect that a pid passed between nodes this
+ * way is no longer equal to itself (the pid that comes back
+ * isn't equal to the original pid).
+ *
+ */
+
+static byte* enc_term(ErtsAtomCacheMap *, Eterm, byte*, Uint32);
+static Uint is_external_string(Eterm obj, int* p_is_string);
+static byte* enc_atom(ErtsAtomCacheMap *, Eterm, byte*, Uint32);
+static byte* enc_pid(ErtsAtomCacheMap *, Eterm, byte*, Uint32);
+static byte* dec_term(ErtsDistExternal *, Eterm**, byte*, ErlOffHeap*, Eterm*);
+static byte* dec_atom(ErtsDistExternal *, byte*, Eterm*);
+static byte* dec_pid(ErtsDistExternal *, Eterm**, byte*, ErlOffHeap*, Eterm*);
+static Sint decoded_size(byte *ep, byte* endp, int only_heap_bins);
+
+
+static Uint encode_size_struct2(ErtsAtomCacheMap *, Eterm, unsigned);
+
+#define ERTS_MAX_INTERNAL_ATOM_CACHE_ENTRIES 255
+
+#define ERTS_DIST_HDR_ATOM_CACHE_FLAG_BYTE_IX(IIX) \
+ (((((Uint32) (IIX)) >> 1) & 0x7fffffff))
+#define ERTS_DIST_HDR_ATOM_CACHE_FLAG_BIT_IX(IIX) \
+ (((IIX) << 2) & 7)
+#define ERTS_DIST_HDR_ATOM_CACHE_FLAG_BYTES(NO_ATOMS) \
+ (((((Uint32) (NO_ATOMS)) >> 1) & 0x7fffffff)+1)
+
+#define ERTS_DIST_HDR_LONG_ATOMS_FLG (1 << 0)
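+
+/* Each atom cache reference in the distribution header gets a 4-bit
+ * flag field: a "new cache entry" bit plus the three high bits of its
+ * cache index (the low eight bits are sent as a separate byte). Two
+ * references thus share one flag byte. For example, internal index 5
+ * maps to flag byte 2, bit offset 4, and a header with 7 references
+ * needs (7 >> 1) + 1 = 4 flag bytes; the nibble following the last
+ * reference holds the long-atoms flag.
+ */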
+
+/* #define ERTS_ATOM_CACHE_HASH */
+#define ERTS_USE_ATOM_CACHE_SIZE 2039
+#if ERTS_ATOM_CACHE_SIZE < ERTS_USE_ATOM_CACHE_SIZE
+#error "ERTS_USE_ATOM_CACHE_SIZE too large"
+#endif
+
+static ERTS_INLINE int
+atom2cix(Eterm atom)
+{
+ Uint val;
+ ASSERT(is_atom(atom));
+ val = atom_val(atom);
+#ifdef ERTS_ATOM_CACHE_HASH
+ val = atom_tab(val)->slot.bucket.hvalue;
+#endif
+#if ERTS_USE_ATOM_CACHE_SIZE == 256
+ return (int) (val & ((Uint) 0xff));
+#else
+ return (int) (val % ERTS_USE_ATOM_CACHE_SIZE);
+#endif
+}
+
+int erts_debug_max_atom_out_cache_index(void)
+{
+ return ERTS_USE_ATOM_CACHE_SIZE-1;
+}
+
+int
+erts_debug_atom_to_out_cache_index(Eterm atom)
+{
+ return atom2cix(atom);
+}
+
+void
+erts_init_atom_cache_map(ErtsAtomCacheMap *acmp)
+{
+ if (acmp) {
+ int ix;
+ for (ix = 0; ix < ERTS_ATOM_CACHE_SIZE; ix++)
+ acmp->cache[ix].iix = -1;
+ acmp->sz = 0;
+ acmp->hdr_sz = -1;
+ }
+}
+
+void
+erts_reset_atom_cache_map(ErtsAtomCacheMap *acmp)
+{
+ if (acmp) {
+ int i;
+ for (i = 0; i < acmp->sz; i++) {
+ ASSERT(0 <= acmp->cix[i] && acmp->cix[i] < ERTS_ATOM_CACHE_SIZE);
+ acmp->cache[acmp->cix[i]].iix = -1;
+ }
+ acmp->sz = 0;
+ acmp->hdr_sz = -1;
+#ifdef DEBUG
+ for (i = 0; i < ERTS_ATOM_CACHE_SIZE; i++) {
+ ASSERT(acmp->cache[i].iix < 0);
+ }
+#endif
+ }
+}
+
+void
+erts_destroy_atom_cache_map(ErtsAtomCacheMap *acmp)
+{
+
+}
+
+static ERTS_INLINE void
+insert_acache_map(ErtsAtomCacheMap *acmp, Eterm atom)
+{
+ if (acmp && acmp->sz < ERTS_MAX_INTERNAL_ATOM_CACHE_ENTRIES) {
+ int ix;
+ ASSERT(acmp->hdr_sz < 0);
+ ix = atom2cix(atom);
+ if (acmp->cache[ix].iix < 0) {
+ acmp->cache[ix].iix = acmp->sz;
+ acmp->cix[acmp->sz++] = ix;
+ acmp->cache[ix].atom = atom;
+ }
+ }
+}
+
+static ERTS_INLINE int
+get_iix_acache_map(ErtsAtomCacheMap *acmp, Eterm atom)
+{
+ if (!acmp)
+ return -1;
+ else {
+ int ix;
+ ASSERT(is_atom(atom));
+ ix = atom2cix(atom);
+ if (acmp->cache[ix].iix < 0) {
+ ASSERT(acmp->sz == ERTS_MAX_INTERNAL_ATOM_CACHE_ENTRIES);
+ return -1;
+ }
+ else {
+ ASSERT(acmp->cache[ix].iix < ERTS_ATOM_CACHE_SIZE);
+ return acmp->cache[ix].atom == atom ? acmp->cache[ix].iix : -1;
+ }
+ }
+}
+
+void
+erts_finalize_atom_cache_map(ErtsAtomCacheMap *acmp)
+{
+ if (acmp) {
+#if MAX_ATOM_LENGTH > 255
+#error "This code is not complete; long_atoms info need to be passed to the following stages."
+	int long_atoms = 0; /* !0 if one or more atoms are longer than 255. */
+#endif
+ int i;
+ int sz;
+ int fix_sz
+ = 1 /* VERSION_MAGIC */
+ + 1 /* DIST_HEADER */
+ + 1 /* number of internal cache entries */
+ ;
+ int min_sz;
+ ASSERT(acmp->hdr_sz < 0);
+ /* Make sure cache update instructions fit */
+ min_sz = fix_sz+(2+4)*acmp->sz;
+ sz = fix_sz;
+ for (i = 0; i < acmp->sz; i++) {
+ Eterm atom;
+ int len;
+ atom = acmp->cache[acmp->cix[i]].atom;
+ ASSERT(is_atom(atom));
+ len = atom_tab(atom_val(atom))->len;
+#if MAX_ATOM_LENGTH > 255
+ if (!long_atoms && len > 255)
+ long_atoms = 1;
+#endif
+ /* Enough for a new atom cache value */
+ sz += 1 /* cix */ + 1 /* length */ + len /* text */;
+ }
+#if MAX_ATOM_LENGTH > 255
+ if (long_atoms)
+ sz += acmp->sz; /* we need 2 bytes per atom for length */
+#endif
+ /* Dynamically sized flag field */
+ sz += ERTS_DIST_HDR_ATOM_CACHE_FLAG_BYTES(acmp->sz);
+ if (sz < min_sz)
+ sz = min_sz;
+ acmp->hdr_sz = sz;
+ }
+}
+
+Uint
+erts_encode_ext_dist_header_size(ErtsAtomCacheMap *acmp)
+{
+ if (!acmp)
+ return 0;
+ else {
+ ASSERT(acmp->hdr_sz >= 0);
+ return acmp->hdr_sz;
+ }
+}
+
+byte *erts_encode_ext_dist_header_setup(byte *ctl_ext, ErtsAtomCacheMap *acmp)
+{
+#ifndef ARCH_32
+#if ATOM_LIMIT >= (1UL << 32)
+#error "ATOM_LIMIT too large for interal atom cache update instructions. New instructions needed."
+#endif
+#endif
+ if (!acmp)
+ return ctl_ext;
+ else {
+ int i;
+ byte *ep = ctl_ext;
+ ASSERT(acmp->hdr_sz >= 0);
+ /*
+ * Write cache update instructions. Note that this is a purely
+ * internal format, never seen on the wire. This section is later
+ * rewritten by erts_encode_ext_dist_header_finalize() while updating
+ * the cache. We write the header backwards just before the
+ * actual term(s).
+ */
+ for (i = acmp->sz-1; i >= 0; i--) {
+ Uint32 aval;
+ ASSERT(0 <= acmp->cix[i] && acmp->cix[i] < ERTS_ATOM_CACHE_SIZE);
+ ASSERT(i == acmp->cache[acmp->cix[i]].iix);
+ ASSERT(is_atom(acmp->cache[acmp->cix[i]].atom));
+
+ aval = (Uint32) atom_val(acmp->cache[acmp->cix[i]].atom);
+ ep -= 4;
+ put_int32(aval, ep);
+ ep -= 2;
+ put_int16(acmp->cix[i], ep);
+ }
+ --ep;
+ put_int8(acmp->sz, ep);
+ *--ep = DIST_HEADER;
+ *--ep = VERSION_MAGIC;
+ return ep;
+ }
+}
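+
+/* Sketch of the internal (pre-finalize) layout produced above, read
+ * forward from the returned pointer:
+ *
+ *	VERSION_MAGIC | DIST_HEADER | NumberOfAtomCacheRefs |
+ *	CacheIx0:16 AtomVal0:32 | ... | CacheIxN-1:16 AtomValN-1:32
+ *
+ * i.e. one 6 byte cache update instruction per referenced atom, which
+ * erts_encode_ext_dist_header_finalize() below rewrites into the
+ * external format while updating the cache.
+ */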
+
+byte *erts_encode_ext_dist_header_finalize(byte *ext, ErtsAtomCache *cache)
+{
+ byte *ip;
+ byte instr_buf[(2+4)*ERTS_ATOM_CACHE_SIZE];
+ int ci, sz;
+ register byte *ep = ext;
+ ASSERT(ep[0] == VERSION_MAGIC);
+ if (ep[1] != DIST_HEADER)
+ return ext;
+
+ /*
+ * Update output atom cache and write the external version of
+ * the dist header. We write the header backwards just
+ * before the actual term(s).
+ */
+ ep += 2;
+ ci = (int) get_int8(ep);
+ ASSERT(0 <= ci && ci < ERTS_ATOM_CACHE_SIZE);
+ ep += 1;
+ sz = (2+4)*ci;
+ ip = &instr_buf[0];
+ sys_memcpy((void *) ip, (void *) ep, sz);
+ ep += sz;
+ /* ep now points to the beginning of the control message term */
+#ifdef ERTS_DEBUG_USE_DIST_SEP
+ ASSERT(*ep == VERSION_MAGIC);
+#endif
+ if (ci > 0) {
+ Uint32 flgs_buf[((ERTS_DIST_HDR_ATOM_CACHE_FLAG_BYTES(
+ ERTS_MAX_INTERNAL_ATOM_CACHE_ENTRIES)-1)
+ / sizeof(Uint32))+1];
+ register Uint32 flgs;
+ int iix, flgs_bytes, flgs_buf_ix, used_half_bytes;
+#ifdef DEBUG
+ int tot_used_half_bytes;
+#endif
+
+ flgs_bytes = ERTS_DIST_HDR_ATOM_CACHE_FLAG_BYTES(ci);
+
+ ASSERT(flgs_bytes <= sizeof(flgs_buf));
+	flgs = 0;
+#if MAX_ATOM_LENGTH > 255
+	/* long_atoms info needs to be passed from previous stages;
+	 * note that flgs must be zeroed before this flag is or:ed in. */
+	if (long_atoms)
+	    flgs |= ERTS_DIST_HDR_LONG_ATOMS_FLG;
+#endif
+ flgs_buf_ix = 0;
+ if ((ci & 1) == 0)
+ used_half_bytes = 2;
+ else
+ used_half_bytes = 1;
+#ifdef DEBUG
+ tot_used_half_bytes = used_half_bytes;
+#endif
+ iix = ci-1;
+ while (iix >= 0) {
+ int cix;
+ Eterm atom;
+
+ if (used_half_bytes != 8)
+ flgs <<= 4;
+ else {
+ flgs_buf[flgs_buf_ix++] = flgs;
+ flgs = 0;
+ used_half_bytes = 0;
+ }
+
+ ip = &instr_buf[0] + (2+4)*iix;
+ cix = (int) get_int16(&ip[0]);
+ ASSERT(0 <= cix && cix < ERTS_ATOM_CACHE_SIZE);
+ atom = make_atom((Uint) get_int32(&ip[2]));
+ if (cache->out_arr[cix] == atom) {
+ --ep;
+ put_int8(cix, ep);
+ flgs |= ((cix >> 8) & 7);
+ }
+ else {
+ Atom *a;
+ cache->out_arr[cix] = atom;
+ a = atom_tab(atom_val(atom));
+ sz = a->len;
+ ep -= sz;
+ sys_memcpy((void *) ep, (void *) a->name, sz);
+#if MAX_ATOM_LENGTH > 255
+ if (long_atoms) {
+ ep -= 2;
+ put_int16(sz, ep);
+ }
+ else
+#endif
+ {
+ ASSERT(0 <= sz && sz <= 255);
+ --ep;
+ put_int8(sz, ep);
+ }
+ --ep;
+ put_int8(cix, ep);
+ flgs |= (8 | ((cix >> 8) & 7));
+ }
+ iix--;
+ used_half_bytes++;
+#ifdef DEBUG
+ tot_used_half_bytes++;
+#endif
+ }
+ ASSERT(tot_used_half_bytes == 2*flgs_bytes);
+ flgs_buf[flgs_buf_ix] = flgs;
+ flgs_buf_ix = 0;
+ while (1) {
+ flgs = flgs_buf[flgs_buf_ix];
+ if (flgs_bytes > 4) {
+ *--ep = (byte) ((flgs >> 24) & 0xff);
+ *--ep = (byte) ((flgs >> 16) & 0xff);
+ *--ep = (byte) ((flgs >> 8) & 0xff);
+ *--ep = (byte) (flgs & 0xff);
+ flgs_buf_ix++;
+ flgs_bytes -= 4;
+ }
+ else {
+ switch (flgs_bytes) {
+ case 4:
+ *--ep = (byte) ((flgs >> 24) & 0xff);
+ case 3:
+ *--ep = (byte) ((flgs >> 16) & 0xff);
+ case 2:
+ *--ep = (byte) ((flgs >> 8) & 0xff);
+ case 1:
+ *--ep = (byte) (flgs & 0xff);
+ }
+ break;
+ }
+ }
+ }
+ --ep;
+ put_int8(ci, ep);
+ *--ep = DIST_HEADER;
+ *--ep = VERSION_MAGIC;
+ return ep;
+}
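+
+/* Sketch of the external dist header layout written above:
+ *
+ *	VERSION_MAGIC | DIST_HEADER | NumberOfAtomCacheRefs | Flags |
+ *	AtomCacheRefs | ControlMessage
+ *
+ * where Flags holds one 4-bit field per reference (plus the trailing
+ * long-atoms nibble), and each AtomCacheRef is the low byte of the
+ * cache index, followed by a length byte and the atom text when the
+ * "new cache entry" flag bit is set.
+ */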
+
+Uint erts_encode_dist_ext_size(Eterm term, Uint32 flags, ErtsAtomCacheMap *acmp)
+{
+ Uint sz = 0;
+#ifndef ERTS_DEBUG_USE_DIST_SEP
+ if (!(flags & DFLAG_DIST_HDR_ATOM_CACHE))
+#endif
+ sz++ /* VERSION_MAGIC */;
+ sz += encode_size_struct2(acmp, term, flags);
+ return sz;
+}
+
+Uint erts_encode_ext_size(Eterm term)
+{
+ return encode_size_struct2(NULL, term, TERM_TO_BINARY_DFLAGS)
+ + 1 /* VERSION_MAGIC */;
+}
+
+void erts_encode_dist_ext(Eterm term, byte **ext, Uint32 flags, ErtsAtomCacheMap *acmp)
+{
+ byte *ep = *ext;
+#ifndef ERTS_DEBUG_USE_DIST_SEP
+ if (!(flags & DFLAG_DIST_HDR_ATOM_CACHE))
+#endif
+ *ep++ = VERSION_MAGIC;
+ ep = enc_term(acmp, term, ep, flags);
+ if (!ep)
+ erl_exit(ERTS_ABORT_EXIT,
+ "%s:%d:erts_encode_dist_ext(): Internal data structure error\n",
+ __FILE__, __LINE__);
+ *ext = ep;
+}
+
+void erts_encode_ext(Eterm term, byte **ext)
+{
+ byte *ep = *ext;
+ *ep++ = VERSION_MAGIC;
+ ep = enc_term(NULL, term, ep, TERM_TO_BINARY_DFLAGS);
+ if (!ep)
+ erl_exit(ERTS_ABORT_EXIT,
+ "%s:%d:erts_encode_ext(): Internal data structure error\n",
+ __FILE__, __LINE__);
+ *ext = ep;
+}
+
+ErtsDistExternal *
+erts_make_dist_ext_copy(ErtsDistExternal *edep, Uint xsize)
+{
+ size_t align_sz;
+ size_t dist_ext_sz;
+ size_t ext_sz;
+ byte *ep;
+ ErtsDistExternal *new_edep;
+
+ dist_ext_sz = ERTS_DIST_EXT_SIZE(edep);
+ ASSERT(edep->ext_endp && edep->extp);
+ ASSERT(edep->ext_endp >= edep->extp);
+ ext_sz = edep->ext_endp - edep->extp;
+
+ align_sz = ERTS_WORD_ALIGN_PAD_SZ(dist_ext_sz + ext_sz);
+
+ new_edep = erts_alloc(ERTS_ALC_T_EXT_TERM_DATA,
+ dist_ext_sz + ext_sz + align_sz + xsize);
+
+ ep = (byte *) new_edep;
+ sys_memcpy((void *) ep, (void *) edep, dist_ext_sz);
+ ep += dist_ext_sz;
+ if (new_edep->dep)
+ erts_refc_inc(&new_edep->dep->refc, 1);
+ new_edep->extp = ep;
+ new_edep->ext_endp = ep + ext_sz;
+ new_edep->heap_size = -1;
+ sys_memcpy((void *) ep, (void *) edep->extp, ext_sz);
+ return new_edep;
+}
+
+int
+erts_prepare_dist_ext(ErtsDistExternal *edep,
+ byte *ext,
+ Uint size,
+ DistEntry *dep,
+ ErtsAtomCache *cache)
+{
+#undef ERTS_EXT_FAIL
+#undef ERTS_EXT_HDR_FAIL
+#if 1
+#define ERTS_EXT_FAIL goto fail
+#define ERTS_EXT_HDR_FAIL goto bad_hdr
+#else
+#define ERTS_EXT_FAIL abort()
+#define ERTS_EXT_HDR_FAIL abort()
+#endif
+
+ register byte *ep = ext;
+
+ edep->heap_size = -1;
+ edep->ext_endp = ext+size;
+
+ if (size < 2)
+ ERTS_EXT_FAIL;
+
+ if (ep[0] != VERSION_MAGIC) {
+ erts_dsprintf_buf_t *dsbufp = erts_create_logger_dsbuf();
+ if (dep)
+ erts_dsprintf(dsbufp,
+ "** Got message from incompatible erlang on "
+ "channel %d\n",
+ dist_entry_channel_no(dep));
+ else
+ erts_dsprintf(dsbufp,
+ "** Attempt to convert old incompatible "
+ "binary %d\n",
+ *ep);
+ erts_send_error_to_logger_nogl(dsbufp);
+ ERTS_EXT_FAIL;
+ }
+
+ edep->flags = 0;
+ edep->dep = dep;
+ if (dep) {
+ erts_smp_de_rlock(dep);
+ if (dep->flags & DFLAG_DIST_HDR_ATOM_CACHE)
+ edep->flags |= ERTS_DIST_EXT_DFLAG_HDR;
+
+ edep->flags |= (dep->connection_id & ERTS_DIST_EXT_CON_ID_MASK);
+ erts_smp_de_runlock(dep);
+ }
+
+ if (ep[1] != DIST_HEADER) {
+ if (edep->flags & ERTS_DIST_EXT_DFLAG_HDR)
+ ERTS_EXT_HDR_FAIL;
+ edep->attab.size = 0;
+ edep->extp = ext;
+ }
+ else {
+ int tix;
+ int no_atoms;
+
+ if (!(edep->flags & ERTS_DIST_EXT_DFLAG_HDR))
+ ERTS_EXT_HDR_FAIL;
+
+#undef CHKSIZE
+#define CHKSIZE(SZ) \
+ do { if ((SZ) > edep->ext_endp - ep) ERTS_EXT_HDR_FAIL; } while(0)
+
+ CHKSIZE(1+1+1);
+ ep += 2;
+ no_atoms = (int) get_int8(ep);
+ if (no_atoms < 0 || ERTS_ATOM_CACHE_SIZE < no_atoms)
+ ERTS_EXT_HDR_FAIL;
+ ep++;
+ if (no_atoms) {
+#if MAX_ATOM_LENGTH > 255
+ int long_atoms = 0;
+#endif
+#ifdef DEBUG
+ byte *flgs_buf = ep;
+#endif
+ byte *flgsp = ep;
+ int flgs_size = ERTS_DIST_HDR_ATOM_CACHE_FLAG_BYTES(no_atoms);
+ int byte_ix;
+ int bit_ix;
+ int got_flgs;
+ register Uint32 flgs = 0;
+
+ CHKSIZE(flgs_size);
+ ep += flgs_size;
+
+ /*
+ * Check long atoms flag
+ */
+ byte_ix = ERTS_DIST_HDR_ATOM_CACHE_FLAG_BYTE_IX(no_atoms);
+ bit_ix = ERTS_DIST_HDR_ATOM_CACHE_FLAG_BIT_IX(no_atoms);
+ if (flgsp[byte_ix] & (((byte) ERTS_DIST_HDR_LONG_ATOMS_FLG)
+ << bit_ix)) {
+#if MAX_ATOM_LENGTH > 255
+ long_atoms = 1;
+#else
+ ERTS_EXT_HDR_FAIL; /* Long atoms not supported yet */
+#endif
+ }
+
+#ifdef DEBUG
+ byte_ix = 0;
+ bit_ix = 0;
+#endif
+ got_flgs = 0;
+ /*
+ * Setup the atom translation table.
+ */
+ edep->flags |= ERTS_DIST_EXT_ATOM_TRANS_TAB;
+ edep->attab.size = no_atoms;
+ for (tix = 0; tix < no_atoms; tix++) {
+ Eterm atom;
+ int cix;
+ int len;
+
+ if (!got_flgs) {
+ int left = no_atoms - tix;
+ if (left > 6) {
+ flgs = ((((Uint32) flgsp[3]) << 24)
+ | (((Uint32) flgsp[2]) << 16)
+ | (((Uint32) flgsp[1]) << 8)
+ | ((Uint32) flgsp[0]));
+ flgsp += 4;
+ }
+ else {
+ flgs = 0;
+ switch (left) {
+ case 6:
+ case 5:
+ flgs |= (((Uint32) flgsp[2]) << 16);
+ case 4:
+ case 3:
+ flgs |= (((Uint32) flgsp[1]) << 8);
+ case 2:
+ case 1:
+ flgs |= ((Uint32) flgsp[0]);
+ }
+ }
+ got_flgs = 8;
+ }
+
+ ASSERT(byte_ix == ERTS_DIST_HDR_ATOM_CACHE_FLAG_BYTE_IX(tix));
+ ASSERT(bit_ix == ERTS_DIST_HDR_ATOM_CACHE_FLAG_BIT_IX(tix));
+ ASSERT((flgs & 3)
+ == (((flgs_buf[byte_ix]
+ & (((byte) 3) << bit_ix)) >> bit_ix) & 3));
+
+ CHKSIZE(1);
+ cix = (int) ((flgs & 7) << 8);
+ if ((flgs & 8) == 0) {
+ /* atom already cached */
+ cix += (int) get_int8(ep);
+ if (cix >= ERTS_ATOM_CACHE_SIZE)
+ ERTS_EXT_HDR_FAIL;
+ ep++;
+ atom = cache->in_arr[cix];
+ if (!is_atom(atom))
+ ERTS_EXT_HDR_FAIL;
+ edep->attab.atom[tix] = atom;
+ }
+ else {
+ /* new cached atom */
+ cix += (int) get_int8(ep);
+ if (cix >= ERTS_ATOM_CACHE_SIZE)
+ ERTS_EXT_HDR_FAIL;
+ ep++;
+#if MAX_ATOM_LENGTH > 255
+ if (long_atoms) {
+ CHKSIZE(2);
+ len = get_int16(ep);
+ ep += 2;
+ }
+ else
+#endif
+ {
+ CHKSIZE(1);
+ len = get_int8(ep);
+ ep++;
+ }
+ if (len > MAX_ATOM_LENGTH)
+		    ERTS_EXT_HDR_FAIL; /* Atom too long */
+ CHKSIZE(len);
+ atom = am_atom_put((char *) ep, len);
+ ep += len;
+ cache->in_arr[cix] = atom;
+ edep->attab.atom[tix] = atom;
+ }
+ flgs >>= 4;
+ got_flgs--;
+#ifdef DEBUG
+ bit_ix += 4;
+ if (bit_ix >= 8) {
+ bit_ix = 0;
+ flgs = (int) flgs_buf[++byte_ix];
+ ASSERT(byte_ix < flgs_size);
+ }
+#endif
+ }
+ }
+ edep->extp = ep;
+#ifdef ERTS_DEBUG_USE_DIST_SEP
+ if (*ep != VERSION_MAGIC)
+ ERTS_EXT_HDR_FAIL;
+#endif
+ }
+#ifdef ERTS_DEBUG_USE_DIST_SEP
+ if (*ep != VERSION_MAGIC)
+ ERTS_EXT_FAIL;
+#endif
+
+ return 0;
+
+#undef CHKSIZE
+#undef ERTS_EXT_FAIL
+#undef ERTS_EXT_HDR_FAIL
+
+ bad_hdr:
+ if (dep) {
+ erts_dsprintf_buf_t *dsbufp = erts_create_logger_dsbuf();
+ erts_dsprintf(dsbufp,
+ "%T got a corrupted distribution header from %T "
+ "on distribution channel %d\n",
+ erts_this_node->sysname,
+ edep->dep->sysname,
+ dist_entry_channel_no(edep->dep));
+ for (ep = ext; ep < edep->ext_endp; ep++)
+ erts_dsprintf(dsbufp, ep != ext ? ",%b8u" : "<<%b8u", *ep);
+ erts_dsprintf(dsbufp, ">>");
+ erts_send_warning_to_logger_nogl(dsbufp);
+ }
+ fail:
+ if (dep)
+ erts_kill_dist_connection(dep, dep->connection_id);
+ return -1;
+}
+
+static void
+bad_dist_ext(ErtsDistExternal *edep)
+{
+ if (edep->dep) {
+ DistEntry *dep = edep->dep;
+ erts_dsprintf_buf_t *dsbufp = erts_create_logger_dsbuf();
+ byte *ep;
+ erts_dsprintf(dsbufp,
+ "%T got a corrupted external term from %T "
+ "on distribution channel %d\n",
+ erts_this_node->sysname,
+ dep->sysname,
+ dist_entry_channel_no(dep));
+ for (ep = edep->extp; ep < edep->ext_endp; ep++)
+ erts_dsprintf(dsbufp,
+ ep != edep->extp ? ",%b8u" : "<<...,%b8u",
+ *ep);
+ erts_dsprintf(dsbufp, ">>\n");
+ erts_dsprintf(dsbufp, "ATOM_CACHE_REF translations: ");
+ if (!(edep->flags & ERTS_DIST_EXT_ATOM_TRANS_TAB) || !edep->attab.size)
+ erts_dsprintf(dsbufp, "none");
+ else {
+ int i;
+ erts_dsprintf(dsbufp, "0=%T", edep->attab.atom[0]);
+ for (i = 1; i < edep->attab.size; i++)
+ erts_dsprintf(dsbufp, ", %d=%T", i, edep->attab.atom[i]);
+ }
+ erts_send_warning_to_logger_nogl(dsbufp);
+ erts_kill_dist_connection(dep, ERTS_DIST_EXT_CON_ID(edep));
+ }
+}
+
+Sint
+erts_decode_dist_ext_size(ErtsDistExternal *edep, int no_refc_bins)
+{
+ Sint res;
+ byte *ep;
+ if (edep->extp >= edep->ext_endp)
+ goto fail;
+#ifndef ERTS_DEBUG_USE_DIST_SEP
+ if (edep->flags & ERTS_DIST_EXT_DFLAG_HDR) {
+ if (*edep->extp == VERSION_MAGIC)
+ goto fail;
+ ep = edep->extp;
+ }
+ else
+#endif
+ {
+ if (*edep->extp != VERSION_MAGIC)
+ goto fail;
+ ep = edep->extp+1;
+ }
+ res = decoded_size(ep, edep->ext_endp, no_refc_bins);
+ if (res >= 0)
+ return res;
+ fail:
+ bad_dist_ext(edep);
+ return -1;
+}
+
+Sint erts_decode_ext_size(byte *ext, Uint size, int no_refc_bins)
+{
+ if (size == 0 || *ext != VERSION_MAGIC)
+ return -1;
+ return decoded_size(ext+1, ext+size, no_refc_bins);
+}
+
+/*
+** hpp is set to either &p->htop or
+** a pointer to a memory pointer (for message buffers);
+** on return hpp is updated to point past the allocated data
+*/
+Eterm
+erts_decode_dist_ext(Eterm** hpp,
+ ErlOffHeap* off_heap,
+ ErtsDistExternal *edep)
+{
+ Eterm obj;
+ byte* ep = edep->extp;
+
+ if (ep >= edep->ext_endp)
+ goto error;
+#ifndef ERTS_DEBUG_USE_DIST_SEP
+ if (edep->flags & ERTS_DIST_EXT_DFLAG_HDR) {
+ if (*ep == VERSION_MAGIC)
+ goto error;
+ }
+ else
+#endif
+ {
+ if (*ep != VERSION_MAGIC)
+ goto error;
+ ep++;
+ }
+ ep = dec_term(edep, hpp, ep, off_heap, &obj);
+ if (!ep)
+ goto error;
+
+ edep->extp = ep;
+
+ return obj;
+
+ error:
+
+ bad_dist_ext(edep);
+
+ return THE_NON_VALUE;
+}
+
+Eterm erts_decode_ext(Eterm **hpp, ErlOffHeap *off_heap, byte **ext)
+{
+ Eterm obj;
+ byte *ep = *ext;
+ if (*ep++ != VERSION_MAGIC)
+ return THE_NON_VALUE;
+ ep = dec_term(NULL, hpp, ep, off_heap, &obj);
+ if (!ep) {
+#ifdef DEBUG
+ bin_write(ERTS_PRINT_STDERR,NULL,*ext,500);
+#endif
+ return THE_NON_VALUE;
+ }
+ *ext = ep;
+ return obj;
+}
+
+
+
+/**********************************************************************/
+
+BIF_RETTYPE erts_debug_dist_ext_to_term_2(BIF_ALIST_2)
+{
+ Eterm res;
+ Eterm *hp;
+ Eterm *hendp;
+    Sint hsz; /* Sint: erts_decode_dist_ext_size() returns -1 on error */
+ ErtsDistExternal ede;
+ Eterm *tp;
+ Eterm real_bin;
+ Uint offset;
+ Uint size;
+ Uint bitsize;
+ Uint bitoffs;
+ Uint arity;
+ int i;
+
+ ede.flags = ERTS_DIST_EXT_ATOM_TRANS_TAB;
+ ede.dep = NULL;
+ ede.heap_size = -1;
+
+ if (is_not_tuple(BIF_ARG_1))
+ goto badarg;
+ tp = tuple_val(BIF_ARG_1);
+ arity = arityval(tp[0]);
+ if (arity > ERTS_MAX_INTERNAL_ATOM_CACHE_ENTRIES)
+ goto badarg;
+
+ ede.attab.size = arity;
+ for (i = 1; i <= arity; i++) {
+ if (is_not_atom(tp[i]))
+ goto badarg;
+ ede.attab.atom[i-1] = tp[i];
+ }
+
+ if (is_not_binary(BIF_ARG_2))
+ goto badarg;
+
+ size = binary_size(BIF_ARG_2);
+ if (size == 0)
+ goto badarg;
+ ERTS_GET_REAL_BIN(BIF_ARG_2, real_bin, offset, bitoffs, bitsize);
+ if (bitsize != 0)
+ goto badarg;
+
+ ede.extp = binary_bytes(real_bin)+offset;
+ ede.ext_endp = ede.extp + size;
+
+ hsz = erts_decode_dist_ext_size(&ede, 0);
+ if (hsz < 0)
+ goto badarg;
+
+ hp = HAlloc(BIF_P, hsz);
+ hendp = hp + hsz;
+
+ res = erts_decode_dist_ext(&hp, &MSO(BIF_P), &ede);
+
+ HRelease(BIF_P, hendp, hp);
+
+ if (is_value(res))
+ BIF_RET(res);
+
+ badarg:
+
+ BIF_ERROR(BIF_P, BADARG);
+}
+
+
+Eterm
+term_to_binary_1(Process* p, Eterm Term)
+{
+ return erts_term_to_binary(p, Term, 0, TERM_TO_BINARY_DFLAGS);
+}
+
+Eterm
+term_to_binary_2(Process* p, Eterm Term, Eterm Flags)
+{
+ int level = 0;
+ Uint flags = TERM_TO_BINARY_DFLAGS;
+
+ while (is_list(Flags)) {
+ Eterm arg = CAR(list_val(Flags));
+ Eterm* tp;
+ if (arg == am_compressed) {
+ level = Z_DEFAULT_COMPRESSION;
+ } else if (is_tuple(arg) && *(tp = tuple_val(arg)) == make_arityval(2)) {
+ if (tp[1] == am_minor_version && is_small(tp[2])) {
+ switch (signed_val(tp[2])) {
+ case 0:
+ flags = TERM_TO_BINARY_DFLAGS;
+ break;
+ case 1:
+ flags = TERM_TO_BINARY_DFLAGS|DFLAG_NEW_FLOATS;
+ break;
+ default:
+ goto error;
+ }
+ } else if (tp[1] == am_compressed && is_small(tp[2])) {
+ level = signed_val(tp[2]);
+ if (!(0 <= level && level < 10)) {
+ goto error;
+ }
+ } else {
+ goto error;
+ }
+ } else {
+ error:
+ BIF_ERROR(p, BADARG);
+ }
+ Flags = CDR(list_val(Flags));
+ }
+ if (is_not_nil(Flags)) {
+ goto error;
+ }
+
+ return erts_term_to_binary(p, Term, level, flags);
+}
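+
+/* For example, given the option handling above: term_to_binary(T,
+ * [compressed]) selects zlib's default compression level,
+ * term_to_binary(T, [{compressed, 9}]) selects maximum compression,
+ * and term_to_binary(T, [{minor_version, 1}]) adds DFLAG_NEW_FLOATS so
+ * that floats are encoded as 8 byte IEEE NEW_FLOAT_EXT instead of the
+ * 31 byte textual FLOAT_EXT.
+ */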
+
+static ERTS_INLINE Sint
+binary2term_prepare(ErtsBinary2TermState *state, byte *data, Sint data_size)
+{
+ Sint res;
+ byte *bytes = data;
+ Sint size = data_size;
+
+ state->exttmp = 0;
+
+ if (size < 1 || *bytes != VERSION_MAGIC) {
+ error:
+ if (state->exttmp)
+ erts_free(ERTS_ALC_T_TMP, state->extp);
+ state->extp = NULL;
+ state->exttmp = 0;
+ return -1;
+ }
+ bytes++;
+ size--;
+ if (size < 5 || *bytes != COMPRESSED) {
+ state->extp = bytes;
+ }
+ else {
+ uLongf dest_len = get_int32(bytes+1);
+ state->extp = erts_alloc(ERTS_ALC_T_TMP, dest_len);
+ state->exttmp = 1;
+ if (erl_zlib_uncompress(state->extp, &dest_len, bytes+5, size-5) != Z_OK)
+ goto error;
+ size = (Sint) dest_len;
+ }
+ res = decoded_size(state->extp, state->extp + size, 0);
+ if (res < 0)
+ goto error;
+ return res;
+}
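+
+/* Layout accepted above for a compressed external term (as written by
+ * erts_term_to_binary() below):
+ *
+ *	VERSION_MAGIC | COMPRESSED | UncompressedSize:32 | ZlibDeflateData
+ */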
+
+static ERTS_INLINE void
+binary2term_abort(ErtsBinary2TermState *state)
+{
+ if (state->exttmp) {
+ state->exttmp = 0;
+ erts_free(ERTS_ALC_T_TMP, state->extp);
+ }
+}
+
+static ERTS_INLINE Eterm
+binary2term_create(ErtsBinary2TermState *state, Eterm **hpp, ErlOffHeap *ohp)
+{
+ Eterm res;
+ if (!dec_term(NULL, hpp, state->extp, ohp, &res))
+ res = THE_NON_VALUE;
+ if (state->exttmp) {
+ state->exttmp = 0;
+ erts_free(ERTS_ALC_T_TMP, state->extp);
+ }
+ return res;
+}
+
+Sint
+erts_binary2term_prepare(ErtsBinary2TermState *state, byte *data, Sint data_size)
+{
+ return binary2term_prepare(state, data, data_size);
+}
+
+void
+erts_binary2term_abort(ErtsBinary2TermState *state)
+{
+ binary2term_abort(state);
+}
+
+Eterm
+erts_binary2term_create(ErtsBinary2TermState *state, Eterm **hpp, ErlOffHeap *ohp)
+{
+ return binary2term_create(state, hpp, ohp);
+}
+
+BIF_RETTYPE binary_to_term_1(BIF_ALIST_1)
+{
+ Sint heap_size;
+ Eterm res;
+ Eterm* hp;
+ Eterm* endp;
+ Sint size;
+ byte* bytes;
+ byte* temp_alloc = NULL;
+ ErtsBinary2TermState b2ts;
+
+ if ((bytes = erts_get_aligned_binary_bytes(BIF_ARG_1, &temp_alloc)) == NULL) {
+ error:
+ erts_free_aligned_binary_bytes(temp_alloc);
+ BIF_ERROR(BIF_P, BADARG);
+ }
+ size = binary_size(BIF_ARG_1);
+
+ heap_size = binary2term_prepare(&b2ts, bytes, size);
+ if (heap_size < 0)
+ goto error;
+
+ hp = HAlloc(BIF_P, heap_size);
+ endp = hp + heap_size;
+
+ res = binary2term_create(&b2ts, &hp, &MSO(BIF_P));
+
+ erts_free_aligned_binary_bytes(temp_alloc);
+
+ if (hp > endp) {
+	erl_exit(1, ":%s, line %d: heap overrun by %d word(s)\n",
+ __FILE__, __LINE__, hp-endp);
+ }
+
+ HRelease(BIF_P, endp, hp);
+
+ if (res == THE_NON_VALUE)
+ goto error;
+
+ return res;
+}
+
+Eterm
+external_size_1(Process* p, Eterm Term)
+{
+ Uint size = erts_encode_ext_size(Term);
+ if (IS_USMALL(0, size)) {
+ BIF_RET(make_small(size));
+ } else {
+ Eterm* hp = HAlloc(p, BIG_UINT_HEAP_SIZE);
+ BIF_RET(uint_to_big(size, hp));
+ }
+}
+
+Eterm
+erts_term_to_binary(Process* p, Eterm Term, int level, Uint flags)
+{
+ int size;
+ Eterm bin;
+ size_t real_size;
+ byte* endp;
+
+ size = encode_size_struct2(NULL, Term, flags) + 1 /* VERSION_MAGIC */;
+
+ if (level != 0) {
+ byte buf[256];
+ byte* bytes = buf;
+ byte* out_bytes;
+ uLongf dest_len;
+
+ if (sizeof(buf) < size) {
+ bytes = erts_alloc(ERTS_ALC_T_TMP, size);
+ }
+
+ if ((endp = enc_term(NULL, Term, bytes, flags))
+ == NULL) {
+ erl_exit(1, "%s, line %d: bad term: %x\n",
+ __FILE__, __LINE__, Term);
+ }
+ real_size = endp - bytes;
+ if (real_size > size) {
+ erl_exit(1, "%s, line %d: buffer overflow: %d word(s)\n",
+ __FILE__, __LINE__, real_size - size);
+ }
+
+ /*
+ * We don't want to compress if compression actually increases the size.
+ * Therefore, don't give zlib more out buffer than the size of the
+ * uncompressed external format (minus the 5 bytes needed for the
+ * COMPRESSED tag). If zlib returns any error, we'll revert to using
+ * the original uncompressed external term format.
+ */
+
+ if (real_size < 5) {
+ dest_len = 0;
+ } else {
+ dest_len = real_size - 5;
+ }
+ bin = new_binary(p, NULL, real_size+1);
+ out_bytes = binary_bytes(bin);
+ out_bytes[0] = VERSION_MAGIC;
+ if (erl_zlib_compress2(out_bytes+6, &dest_len, bytes, real_size, level) != Z_OK) {
+ sys_memcpy(out_bytes+1, bytes, real_size);
+ bin = erts_realloc_binary(bin, real_size+1);
+ } else {
+ out_bytes[1] = COMPRESSED;
+ put_int32(real_size, out_bytes+2);
+ bin = erts_realloc_binary(bin, dest_len+6);
+ }
+ if (bytes != buf) {
+ erts_free(ERTS_ALC_T_TMP, bytes);
+ }
+ return bin;
+ } else {
+ byte* bytes;
+
+ bin = new_binary(p, (byte *)NULL, size);
+ bytes = binary_bytes(bin);
+ bytes[0] = VERSION_MAGIC;
+ if ((endp = enc_term(NULL, Term, bytes+1, flags))
+ == NULL) {
+ erl_exit(1, "%s, line %d: bad term: %x\n",
+ __FILE__, __LINE__, Term);
+ }
+ real_size = endp - bytes;
+ if (real_size > size) {
+ erl_exit(1, "%s, line %d: buffer overflow: %d word(s)\n",
+ __FILE__, __LINE__, endp - (bytes + size));
+ }
+ return erts_realloc_binary(bin, real_size);
+ }
+}
+
+/*
+ * This function fills ext with the external format of an atom.
+ * If the atom is already cached we just supply the index, otherwise
+ * we insert the index _and_ the entire atom. This way the receiving side
+ * does not have to perform a hash lookup on the atom to locate it, and
+ * we save a lot of space on the wire.
+ */
+
+static byte*
+enc_atom(ErtsAtomCacheMap *acmp, Eterm atom, byte *ep, Uint32 dflags)
+{
+ int iix;
+ int i, j;
+
+ ASSERT(is_atom(atom));
+
+ /*
+ * term_to_binary/1,2 and the initial distribution message
+ * don't use the cache.
+ */
+ iix = get_iix_acache_map(acmp, atom);
+ if (iix < 0) {
+ i = atom_val(atom);
+ j = atom_tab(i)->len;
+ if ((MAX_ATOM_LENGTH <= 255 || j <= 255)
+ && (dflags & DFLAG_SMALL_ATOM_TAGS)) {
+ *ep++ = SMALL_ATOM_EXT;
+ put_int8(j, ep);
+ ep++;
+ }
+ else {
+ *ep++ = ATOM_EXT;
+ put_int16(j, ep);
+ ep += 2;
+ }
+ sys_memcpy((char *) ep, (char*)atom_tab(i)->name, (int) j);
+ ep += j;
+ return ep;
+ }
+
+ /* The atom is referenced in the cache. */
+ *ep++ = ATOM_CACHE_REF;
+ put_int8(iix, ep);
+ ep++;
+ return ep;
+}
+
+static byte*
+enc_pid(ErtsAtomCacheMap *acmp, Eterm pid, byte* ep, Uint32 dflags)
+{
+ Uint on, os;
+
+ *ep++ = PID_EXT;
+ /* insert atom here containing host and sysname */
+ ep = enc_atom(acmp, pid_node_name(pid), ep, dflags);
+
+    /* four bytes each for number and serial */
+
+ on = pid_number(pid);
+ os = pid_serial(pid);
+
+ put_int32(on, ep);
+ ep += 4;
+ put_int32(os, ep);
+ ep += 4;
+ *ep++ = pid_creation(pid);
+ return ep;
+}
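+
+/* The resulting layout is:
+ *
+ *	PID_EXT | NodeName | Number:32 | Serial:32 | Creation:8
+ *
+ * where NodeName is an atom encoded by enc_atom() above.
+ */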
+
+/* Expect an atom in plain text or cached */
+static byte*
+dec_atom(ErtsDistExternal *edep, byte* ep, Eterm* objp)
+{
+ Uint len;
+ int n;
+
+ switch (*ep++) {
+ case ATOM_CACHE_REF:
+ if (!(edep->flags & ERTS_DIST_EXT_ATOM_TRANS_TAB))
+ goto error;
+ n = get_int8(ep);
+ ep++;
+ if (n >= edep->attab.size)
+ goto error;
+ ASSERT(is_atom(edep->attab.atom[n]));
+ *objp = edep->attab.atom[n];
+ break;
+ case ATOM_EXT:
+	len = get_int16(ep);
+	ep += 2;
+ *objp = am_atom_put((char*)ep, len);
+ ep += len;
+ break;
+ case SMALL_ATOM_EXT:
+ len = get_int8(ep);
+ ep++;
+ *objp = am_atom_put((char*)ep, len);
+ ep += len;
+ break;
+ default:
+ error:
+ *objp = NIL; /* Don't leave a hole in the heap */
+ return NULL;
+ }
+ return ep;
+}
+
+static byte*
+dec_pid(ErtsDistExternal *edep, Eterm** hpp, byte* ep, ErlOffHeap* off_heap, Eterm* objp)
+{
+ Eterm sysname;
+ Uint data;
+ Uint num;
+ Uint ser;
+ Uint cre;
+ ErlNode *node;
+
+ *objp = NIL; /* In case we fail, don't leave a hole in the heap */
+
+ /* eat first atom */
+ if ((ep = dec_atom(edep, ep, &sysname)) == NULL)
+ return NULL;
+ num = get_int32(ep);
+ ep += 4;
+ if (num > ERTS_MAX_PID_NUMBER)
+ return NULL;
+ ser = get_int32(ep);
+ ep += 4;
+ if (ser > ERTS_MAX_PID_SERIAL)
+ return NULL;
+ if ((cre = get_int8(ep)) >= MAX_CREATION)
+ return NULL;
+ ep += 1;
+
+ /*
+ * We are careful to create the node entry only after all
+ * validity tests are done.
+ */
+ cre = dec_set_creation(sysname,cre);
+ node = erts_find_or_insert_node(sysname,cre);
+
+ data = make_pid_data(ser, num);
+ if(node == erts_this_node) {
+ *objp = make_internal_pid(data);
+ } else {
+ ExternalThing *etp = (ExternalThing *) *hpp;
+ *hpp += EXTERNAL_THING_HEAD_SIZE + 1;
+
+ etp->header = make_external_pid_header(1);
+ etp->next = off_heap->externals;
+ etp->node = node;
+ etp->data.ui[0] = data;
+
+ off_heap->externals = etp;
+ *objp = make_external_pid(etp);
+ }
+ return ep;
+}
+
+
+#define ENC_TERM ((Eterm) 0)
+#define ENC_ONE_CONS ((Eterm) 1)
+#define ENC_PATCH_FUN_SIZE ((Eterm) 2)
+#define ENC_LAST_ARRAY_ELEMENT ((Eterm) 3)
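+
+/* enc_term() below traverses the term without recursion, using an
+ * ESTACK of (opcode, term) pairs with the term pushed last (popped
+ * first). For example, a 3-tuple pushes ENC_LAST_ARRAY_ELEMENT+2
+ * together with a pointer to its first element; each pop then encodes
+ * one element and re-pushes the decremented opcode until it reaches
+ * ENC_LAST_ARRAY_ELEMENT for the final element.
+ */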
+
+static byte*
+enc_term(ErtsAtomCacheMap *acmp, Eterm obj, byte* ep, Uint32 dflags)
+{
+ DECLARE_ESTACK(s);
+ Uint n;
+ Uint i;
+ Uint j;
+ Uint* ptr;
+ Eterm val;
+ FloatDef f;
+
+ goto L_jump_start;
+
+ outer_loop:
+ while (!ESTACK_ISEMPTY(s)) {
+ obj = ESTACK_POP(s);
+ switch (val = ESTACK_POP(s)) {
+ case ENC_TERM:
+ break;
+ case ENC_ONE_CONS:
+ encode_one_cons:
+ {
+ Eterm* cons = list_val(obj);
+ Eterm tl;
+
+ obj = CAR(cons);
+ tl = CDR(cons);
+ ESTACK_PUSH(s, is_list(tl) ? ENC_ONE_CONS : ENC_TERM);
+ ESTACK_PUSH(s, tl);
+ }
+ break;
+ case ENC_PATCH_FUN_SIZE:
+ {
+ byte* size_p = (byte *) obj;
+
+ put_int32(ep - size_p, size_p);
+ }
+ goto outer_loop;
+ case ENC_LAST_ARRAY_ELEMENT:
+ {
+ Eterm* ptr = (Eterm *) obj;
+ obj = *ptr;
+ }
+ break;
+ default: /* ENC_LAST_ARRAY_ELEMENT+1 and upwards */
+ {
+ Eterm* ptr = (Eterm *) obj;
+ obj = *ptr++;
+ ESTACK_PUSH(s, val-1);
+ ESTACK_PUSH(s, (Eterm) ptr);
+ }
+ break;
+ }
+
+ L_jump_start:
+ switch(tag_val_def(obj)) {
+ case NIL_DEF:
+ *ep++ = NIL_EXT;
+ break;
+
+ case ATOM_DEF:
+ ep = enc_atom(acmp,obj,ep,dflags);
+ break;
+
+ case SMALL_DEF:
+ {
+ Sint val = signed_val(obj);
+
+ if ((Uint)val < 256) {
+ *ep++ = SMALL_INTEGER_EXT;
+ put_int8(val, ep);
+ ep++;
+ } else if (sizeof(Sint) == 4 || IS_SSMALL28(val)) {
+ *ep++ = INTEGER_EXT;
+ put_int32(val, ep);
+ ep += 4;
+ } else {
+ Eterm tmp_big[2];
+ Eterm big = small_to_big(val, tmp_big);
+ *ep++ = SMALL_BIG_EXT;
+ n = big_bytes(big);
+ ASSERT(n < 256);
+ put_int8(n, ep);
+ ep += 1;
+ *ep++ = big_sign(big);
+ ep = big_to_bytes(big, ep);
+ }
+ }
+ break;
+
+ case BIG_DEF:
+ if ((n = big_bytes(obj)) < 256) {
+ *ep++ = SMALL_BIG_EXT;
+ put_int8(n, ep);
+ ep += 1;
+ }
+ else {
+ *ep++ = LARGE_BIG_EXT;
+ put_int32(n, ep);
+ ep += 4;
+ }
+ *ep++ = big_sign(obj);
+ ep = big_to_bytes(obj, ep);
+ break;
+
+ case PID_DEF:
+ case EXTERNAL_PID_DEF:
+ ep = enc_pid(acmp, obj, ep, dflags);
+ break;
+
+ case REF_DEF:
+ case EXTERNAL_REF_DEF: {
+ Uint32 *ref_num;
+
+ ASSERT(dflags & DFLAG_EXTENDED_REFERENCES);
+ *ep++ = NEW_REFERENCE_EXT;
+ i = ref_no_of_numbers(obj);
+ put_int16(i, ep);
+ ep += 2;
+ ep = enc_atom(acmp,ref_node_name(obj),ep,dflags);
+ *ep++ = ref_creation(obj);
+ ref_num = ref_numbers(obj);
+ for (j = 0; j < i; j++) {
+ put_int32(ref_num[j], ep);
+ ep += 4;
+ }
+ break;
+ }
+ case PORT_DEF:
+ case EXTERNAL_PORT_DEF:
+
+ *ep++ = PORT_EXT;
+ ep = enc_atom(acmp,port_node_name(obj),ep,dflags);
+ j = port_number(obj);
+ put_int32(j, ep);
+ ep += 4;
+ *ep++ = port_creation(obj);
+ break;
+
+ case LIST_DEF:
+ {
+ int is_str;
+
+ i = is_external_string(obj, &is_str);
+ if (is_str) {
+ *ep++ = STRING_EXT;
+ put_int16(i, ep);
+ ep += 2;
+ while (is_list(obj)) {
+ Eterm* cons = list_val(obj);
+ *ep++ = unsigned_val(CAR(cons));
+ obj = CDR(cons);
+ }
+ } else {
+ *ep++ = LIST_EXT;
+ put_int32(i, ep);
+ ep += 4;
+ goto encode_one_cons;
+ }
+ }
+ break;
+
+ case TUPLE_DEF:
+ ptr = tuple_val(obj);
+ i = arityval(*ptr);
+ ptr++;
+ if (i <= 0xff) {
+ *ep++ = SMALL_TUPLE_EXT;
+ put_int8(i, ep);
+ ep += 1;
+ } else {
+ *ep++ = LARGE_TUPLE_EXT;
+ put_int32(i, ep);
+ ep += 4;
+ }
+ if (i > 0) {
+ ESTACK_PUSH(s, ENC_LAST_ARRAY_ELEMENT+i-1);
+ ESTACK_PUSH(s, (Eterm) ptr);
+ }
+ break;
+
+ case FLOAT_DEF:
+ GET_DOUBLE(obj, f);
+ if (dflags & DFLAG_NEW_FLOATS) {
+ *ep++ = NEW_FLOAT_EXT;
+#ifdef WORDS_BIGENDIAN
+ put_int32(f.fw[0], ep);
+ ep += 4;
+ put_int32(f.fw[1], ep);
+#else
+ put_int32(f.fw[1], ep);
+ ep += 4;
+ put_int32(f.fw[0], ep);
+#endif
+ ep += 4;
+ } else {
+ *ep++ = FLOAT_EXT;
+
+ /* now the sprintf which does the work */
+ i = sys_double_to_chars(f.fd, (char*) ep);
+
+ /* Don't leave garbage after the float! (Bad practice in general,
+ * and Purify complains.)
+ */
+ sys_memset(ep+i, 0, 31-i);
+ ep += 31;
+ }
+ break;
+
+ case BINARY_DEF:
+ {
+ Uint bitoffs;
+ Uint bitsize;
+ byte* bytes;
+
+ ERTS_GET_BINARY_BYTES(obj, bytes, bitoffs, bitsize);
+ if (bitsize == 0) {
+ /* Plain old byte-sized binary. */
+ *ep++ = BINARY_EXT;
+ j = binary_size(obj);
+ put_int32(j, ep);
+ ep += 4;
+ copy_binary_to_buffer(ep, 0, bytes, bitoffs, 8*j);
+ ep += j;
+ } else if (dflags & DFLAG_BIT_BINARIES) {
+ /* Bit-level binary. */
+ *ep++ = BIT_BINARY_EXT;
+ j = binary_size(obj);
+ put_int32((j+1), ep);
+ ep += 4;
+ *ep++ = bitsize;
+ ep[j] = 0; /* Zero unused bits at end of binary */
+ copy_binary_to_buffer(ep, 0, bytes, bitoffs, 8*j+bitsize);
+ ep += j + 1;
+ } else {
+ /*
+ * Bit-level binary, but the receiver doesn't support it.
+ * Build a tuple instead.
+ */
+ *ep++ = SMALL_TUPLE_EXT;
+ *ep++ = 2;
+ *ep++ = BINARY_EXT;
+ j = binary_size(obj);
+ put_int32((j+1), ep);
+ ep += 4;
+ ep[j] = 0; /* Zero unused bits at end of binary */
+ copy_binary_to_buffer(ep, 0, bytes, bitoffs, 8*j+bitsize);
+ ep += j+1;
+ *ep++ = SMALL_INTEGER_EXT;
+ *ep++ = bitsize;
+ }
+ break;
+ }
+ case EXPORT_DEF:
+ {
+ Export* exp = (Export *) (export_val(obj))[1];
+ if ((dflags & DFLAG_EXPORT_PTR_TAG) != 0) {
+ *ep++ = EXPORT_EXT;
+ ep = enc_atom(acmp, exp->code[0], ep, dflags);
+ ep = enc_atom(acmp, exp->code[1], ep, dflags);
+ ep = enc_term(acmp, make_small(exp->code[2]), ep, dflags);
+ } else {
+ /* Tag, arity */
+ *ep++ = SMALL_TUPLE_EXT;
+ put_int8(2, ep);
+ ep += 1;
+
+ /* Module name */
+ ep = enc_atom(acmp, exp->code[0], ep, dflags);
+
+ /* Function name */
+ ep = enc_atom(acmp, exp->code[1], ep, dflags);
+ }
+ break;
+ }
+ break;
+ case FUN_DEF:
+ {
+ ErlFunThing* funp = (ErlFunThing *) fun_val(obj);
+
+ if ((dflags & DFLAG_NEW_FUN_TAGS) != 0) {
+ int ei;
+
+ *ep++ = NEW_FUN_EXT;
+ ESTACK_PUSH(s, ENC_PATCH_FUN_SIZE);
+ ESTACK_PUSH(s, (Eterm) ep); /* Position for patching in size */
+ ep += 4;
+ *ep = funp->arity;
+ ep += 1;
+ sys_memcpy(ep, funp->fe->uniq, 16);
+ ep += 16;
+ put_int32(funp->fe->index, ep);
+ ep += 4;
+ put_int32(funp->num_free, ep);
+ ep += 4;
+ ep = enc_atom(acmp, funp->fe->module, ep, dflags);
+ ep = enc_term(acmp, make_small(funp->fe->old_index), ep, dflags);
+ ep = enc_term(acmp, make_small(funp->fe->old_uniq), ep, dflags);
+ ep = enc_pid(acmp, funp->creator, ep, dflags);
+
+ fun_env:
+ for (ei = funp->num_free-1; ei > 0; ei--) {
+ ESTACK_PUSH(s, ENC_TERM);
+ ESTACK_PUSH(s, funp->env[ei]);
+ }
+ if (funp->num_free != 0) {
+ obj = funp->env[0];
+ goto L_jump_start;
+ }
+ } else {
+ /*
+ * Communicating with an obsolete erl_interface or
+ * jinterface node. Convert the fun to a tuple to
+		 * avoid crashing.
+ */
+
+ /* Tag, arity */
+ *ep++ = SMALL_TUPLE_EXT;
+ put_int8(5, ep);
+ ep += 1;
+
+ /* 'fun' */
+ ep = enc_atom(acmp, am_fun, ep, dflags);
+
+ /* Module name */
+ ep = enc_atom(acmp, funp->fe->module, ep, dflags);
+
+ /* Index, Uniq */
+ *ep++ = INTEGER_EXT;
+ put_int32(funp->fe->old_index, ep);
+ ep += 4;
+ *ep++ = INTEGER_EXT;
+ put_int32(funp->fe->old_uniq, ep);
+ ep += 4;
+
+ /* Environment sub-tuple arity */
+ ASSERT(funp->num_free < MAX_ARG);
+ *ep++ = SMALL_TUPLE_EXT;
+ put_int8(funp->num_free, ep);
+ ep += 1;
+ goto fun_env;
+ }
+ }
+ break;
+ }
+ }
+ DESTROY_ESTACK(s);
+ return ep;
+}
+
+static Uint
+is_external_string(Eterm list, int* p_is_string)
+{
+ Uint len = 0;
+
+ /*
+ * Calculate the length of the list as long as all characters
+ * are integers from 0 through 255.
+ */
+ while (is_list(list)) {
+ Eterm* consp = list_val(list);
+ Eterm hd = CAR(consp);
+
+ if (!is_byte(hd)) {
+ break;
+ }
+ len++;
+ list = CDR(consp);
+ }
+
+ /*
+ * If we have reached the end of the list, and we have
+ * not exceeded the maximum length of a string, this
+ * is a string.
+ */
+ *p_is_string = is_nil(list) && len < MAX_STRING_LEN;
+
+ /*
+ * Continue to calculate the length.
+ */
+ while (is_list(list)) {
+ Eterm* consp = list_val(list);
+ len++;
+ list = CDR(consp);
+ }
+ return len;
+}
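+
+/* For example, [$a,$b,$c] yields 3 with *p_is_string set, so the list
+ * is sent as STRING_EXT, while a list with a non-byte element, say
+ * [500,$a], yields 2 with *p_is_string clear and is sent as LIST_EXT.
+ */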
+
+static byte*
+dec_term(ErtsDistExternal *edep, Eterm** hpp, byte* ep, ErlOffHeap* off_heap, Eterm* objp)
+{
+ int n;
+ register Eterm* hp = *hpp; /* Please don't take the address of hp */
+ Eterm* next = objp;
+
+ *next = (Eterm) NULL;
+
+ while (next != NULL) {
+ objp = next;
+ next = (Eterm *) (*objp);
+
+ switch (*ep++) {
+ case INTEGER_EXT:
+ {
+ Sint sn = get_int32(ep);
+
+ ep += 4;
+#if defined(ARCH_64)
+ *objp = make_small(sn);
+#else
+ if (MY_IS_SSMALL(sn)) {
+ *objp = make_small(sn);
+ } else {
+ *objp = small_to_big(sn, hp);
+ hp += BIG_UINT_HEAP_SIZE;
+ }
+#endif
+ break;
+ }
+ case SMALL_INTEGER_EXT:
+ n = get_int8(ep);
+ ep++;
+ *objp = make_small(n);
+ break;
+ case SMALL_BIG_EXT:
+ n = get_int8(ep);
+ ep++;
+ goto big_loop;
+ case LARGE_BIG_EXT:
+ n = get_int32(ep);
+ ep += 4;
+ big_loop:
+ {
+ Eterm big;
+ byte* first;
+ byte* last;
+ Uint neg;
+
+ neg = get_int8(ep); /* Sign bit */
+ ep++;
+
+ /*
+ * Strip away leading zeroes to avoid creating illegal bignums.
+ */
+ first = ep;
+ last = ep + n;
+ ep += n;
+ do {
+ --last;
+ } while (first <= last && *last == 0);
+
+ if ((n = last - first + 1) == 0) {
+ /* Zero width bignum defaults to zero */
+ big = make_small(0);
+ } else {
+ big = bytes_to_big(first, n, neg, hp);
+ if (is_big(big)) {
+ hp += big_arity(big) + 1;
+ }
+ }
+ *objp = big;
+ break;
+ }
+ case ATOM_CACHE_REF:
+ if (edep == 0 || (edep->flags & ERTS_DIST_EXT_ATOM_TRANS_TAB) == 0) {
+ goto error;
+ }
+ n = get_int8(ep);
+ ep++;
+ if (n >= edep->attab.size)
+ goto error;
+ ASSERT(is_atom(edep->attab.atom[n]));
+ *objp = edep->attab.atom[n];
+ break;
+ case ATOM_EXT:
+ n = get_int16(ep);
+ ep += 2;
+ *objp = am_atom_put((char*)ep, n);
+ ep += n;
+ break;
+ case SMALL_ATOM_EXT:
+ n = get_int8(ep);
+ ep++;
+ *objp = am_atom_put((char*)ep, n);
+ ep += n;
+ break;
+ case LARGE_TUPLE_EXT:
+ n = get_int32(ep);
+ ep += 4;
+ goto tuple_loop;
+ case SMALL_TUPLE_EXT:
+ n = get_int8(ep);
+ ep++;
+ tuple_loop:
+ *objp = make_tuple(hp);
+ *hp++ = make_arityval(n);
+ hp += n;
+ objp = hp - 1;
+ while (n-- > 0) {
+ objp[0] = (Eterm) next;
+ next = objp;
+ objp--;
+ }
+ break;
+ case NIL_EXT:
+ *objp = NIL;
+ break;
+ case LIST_EXT:
+ n = get_int32(ep);
+ ep += 4;
+ if (n == 0) {
+ next = objp;
+ break;
+ }
+ *objp = make_list(hp);
+ hp += 2*n;
+ objp = hp - 2;
+ objp[0] = (Eterm) (objp+1);
+ objp[1] = (Eterm) next;
+ next = objp;
+ objp -= 2;
+ while (--n > 0) {
+ objp[0] = (Eterm) next;
+ objp[1] = make_list(objp + 2);
+ next = objp;
+ objp -= 2;
+ }
+ break;
+ case STRING_EXT:
+ n = get_int16(ep);
+ ep += 2;
+ if (n == 0) {
+ *objp = NIL;
+ break;
+ }
+ *objp = make_list(hp);
+ while (n-- > 0) {
+ hp[0] = make_small(*ep++);
+ hp[1] = make_list(hp+2);
+ hp += 2;
+ }
+ hp[-1] = NIL;
+ break;
+ case FLOAT_EXT:
+ {
+ FloatDef ff;
+
+ if (sys_chars_to_double((char*)ep, &ff.fd) != 0) {
+ goto error;
+ }
+ ep += 31;
+ *objp = make_float(hp);
+ PUT_DOUBLE(ff, hp);
+ hp += FLOAT_SIZE_OBJECT;
+ break;
+ }
+ case NEW_FLOAT_EXT:
+ {
+ FloatDef ff;
+#ifndef NO_FPE_SIGNALS
+ volatile unsigned long *fpexnp = erts_get_current_fp_exception();
+#endif
+
+#ifdef WORDS_BIGENDIAN
+ ff.fw[0] = get_int32(ep);
+ ep += 4;
+ ff.fw[1] = get_int32(ep);
+ ep += 4;
+#else
+ ff.fw[1] = get_int32(ep);
+ ep += 4;
+ ff.fw[0] = get_int32(ep);
+ ep += 4;
+#endif
+ __ERTS_FP_CHECK_INIT(fpexnp);
+ __ERTS_FP_ERROR_THOROUGH(fpexnp, ff.fd, goto error);
+ *objp = make_float(hp);
+ PUT_DOUBLE(ff, hp);
+ hp += FLOAT_SIZE_OBJECT;
+ break;
+ }
+ case PID_EXT:
+ *hpp = hp;
+ ep = dec_pid(edep, hpp, ep, off_heap, objp);
+ hp = *hpp;
+ if (ep == NULL) {
+ return NULL;
+ }
+ break;
+ case PORT_EXT:
+ {
+ Eterm sysname;
+ ErlNode *node;
+ Uint num;
+ Uint cre;
+
+ if ((ep = dec_atom(edep, ep, &sysname)) == NULL) {
+ goto error;
+ }
+ if ((num = get_int32(ep)) > ERTS_MAX_PORT_NUMBER) {
+ goto error;
+ }
+ ep += 4;
+ if ((cre = get_int8(ep)) >= MAX_CREATION) {
+ goto error;
+ }
+ ep++;
+ cre = dec_set_creation(sysname,cre);
+ node = erts_find_or_insert_node(sysname, cre);
+
+ if(node == erts_this_node) {
+ *objp = make_internal_port(num);
+ }
+ else {
+ ExternalThing *etp = (ExternalThing *) hp;
+ hp += EXTERNAL_THING_HEAD_SIZE + 1;
+
+ etp->header = make_external_port_header(1);
+ etp->next = off_heap->externals;
+ etp->node = node;
+ etp->data.ui[0] = num;
+
+ off_heap->externals = etp;
+ *objp = make_external_port(etp);
+ }
+
+ break;
+ }
+ case REFERENCE_EXT:
+ {
+ Eterm sysname;
+ ErlNode *node;
+ int i;
+ Uint cre;
+ Uint32 *ref_num;
+ Uint32 r0;
+ Uint ref_words;
+
+ ref_words = 1;
+
+ if ((ep = dec_atom(edep, ep, &sysname)) == NULL)
+ goto error;
+ if ((r0 = get_int32(ep)) >= MAX_REFERENCE )
+ goto error;
+ ep += 4;
+
+ if ((cre = get_int8(ep)) >= MAX_CREATION)
+ goto error;
+ ep += 1;
+ goto ref_ext_common;
+
+ case NEW_REFERENCE_EXT:
+
+ ref_words = get_int16(ep);
+ ep += 2;
+
+ if (ref_words > ERTS_MAX_REF_NUMBERS)
+ goto error;
+
+ if ((ep = dec_atom(edep, ep, &sysname)) == NULL)
+ goto error;
+
+ if ((cre = get_int8(ep)) >= MAX_CREATION)
+ goto error;
+ ep += 1;
+
+ r0 = get_int32(ep);
+ ep += 4;
+ if (r0 >= MAX_REFERENCE)
+ goto error;
+
+ ref_ext_common:
+
+ cre = dec_set_creation(sysname, cre);
+ node = erts_find_or_insert_node(sysname, cre);
+ if(node == erts_this_node) {
+ RefThing *rtp = (RefThing *) hp;
+ hp += REF_THING_HEAD_SIZE;
+#ifdef ARCH_64
+ rtp->header = make_ref_thing_header(ref_words/2 + 1);
+#else
+ rtp->header = make_ref_thing_header(ref_words);
+#endif
+ *objp = make_internal_ref(rtp);
+ }
+ else {
+ ExternalThing *etp = (ExternalThing *) hp;
+ hp += EXTERNAL_THING_HEAD_SIZE;
+
+#ifdef ARCH_64
+ etp->header = make_external_ref_header(ref_words/2 + 1);
+#else
+ etp->header = make_external_ref_header(ref_words);
+#endif
+ etp->next = off_heap->externals;
+ etp->node = node;
+
+ off_heap->externals = etp;
+ *objp = make_external_ref(etp);
+ }
+
+ ref_num = (Uint32 *) hp;
+#ifdef ARCH_64
+ *(ref_num++) = ref_words /* 32-bit arity */;
+#endif
+ ref_num[0] = r0;
+ for(i = 1; i < ref_words; i++) {
+ ref_num[i] = get_int32(ep);
+ ep += 4;
+ }
+#ifdef ARCH_64
+ if ((1 + ref_words) % 2)
+ ref_num[ref_words] = 0;
+ hp += ref_words/2 + 1;
+#else
+ hp += ref_words;
+#endif
+ break;
+ }
+ case BINARY_EXT:
+ {
+ n = get_int32(ep);
+ ep += 4;
+
+ if (n <= ERL_ONHEAP_BIN_LIMIT || off_heap == NULL) {
+ ErlHeapBin* hb = (ErlHeapBin *) hp;
+
+ hb->thing_word = header_heap_bin(n);
+ hb->size = n;
+ hp += heap_bin_size(n);
+ sys_memcpy(hb->data, ep, n);
+ *objp = make_binary(hb);
+ } else {
+ Binary* dbin = erts_bin_nrml_alloc(n);
+ ProcBin* pb;
+ dbin->flags = 0;
+ dbin->orig_size = n;
+ erts_refc_init(&dbin->refc, 1);
+ sys_memcpy(dbin->orig_bytes, ep, n);
+ pb = (ProcBin *) hp;
+ hp += PROC_BIN_SIZE;
+ pb->thing_word = HEADER_PROC_BIN;
+ pb->size = n;
+ pb->next = off_heap->mso;
+ off_heap->mso = pb;
+ pb->val = dbin;
+ pb->bytes = (byte*) dbin->orig_bytes;
+ pb->flags = 0;
+ *objp = make_binary(pb);
+ }
+ ep += n;
+ break;
+ }
+ case BIT_BINARY_EXT:
+ {
+ Eterm bin;
+ ErlSubBin* sb;
+ Uint bitsize;
+
+ n = get_int32(ep);
+ bitsize = ep[4];
+ ep += 5;
+ if (n <= ERL_ONHEAP_BIN_LIMIT || off_heap == NULL) {
+ ErlHeapBin* hb = (ErlHeapBin *) hp;
+
+ hb->thing_word = header_heap_bin(n);
+ hb->size = n;
+ sys_memcpy(hb->data, ep, n);
+ bin = make_binary(hb);
+ hp += heap_bin_size(n);
+ } else {
+ Binary* dbin = erts_bin_nrml_alloc(n);
+ ProcBin* pb;
+ dbin->flags = 0;
+ dbin->orig_size = n;
+ erts_refc_init(&dbin->refc, 1);
+ sys_memcpy(dbin->orig_bytes, ep, n);
+ pb = (ProcBin *) hp;
+ pb->thing_word = HEADER_PROC_BIN;
+ pb->size = n;
+ pb->next = off_heap->mso;
+ off_heap->mso = pb;
+ pb->val = dbin;
+ pb->bytes = (byte*) dbin->orig_bytes;
+ pb->flags = 0;
+ bin = make_binary(pb);
+ hp += PROC_BIN_SIZE;
+ }
+ ep += n;
+ if (bitsize == 0) {
+ *objp = bin;
+ } else {
+ sb = (ErlSubBin *) hp;
+ sb->thing_word = HEADER_SUB_BIN;
+ sb->orig = bin;
+ sb->size = n - 1;
+ sb->bitsize = bitsize;
+ sb->bitoffs = 0;
+ sb->offs = 0;
+ sb->is_writable = 0;
+ *objp = make_binary(sb);
+ hp += ERL_SUB_BIN_SIZE;
+ }
+ break;
+ }
+ case EXPORT_EXT:
+ {
+ Eterm mod;
+ Eterm name;
+ Eterm temp;
+ Sint arity;
+
+ if ((ep = dec_atom(edep, ep, &mod)) == NULL) {
+ goto error;
+ }
+ if ((ep = dec_atom(edep, ep, &name)) == NULL) {
+ goto error;
+ }
+ *hpp = hp;
+ ep = dec_term(edep, hpp, ep, off_heap, &temp);
+ hp = *hpp;
+ if (ep == NULL) {
+ return NULL;
+ }
+ if (!is_small(temp)) {
+ goto error;
+ }
+ arity = signed_val(temp);
+ if (arity < 0) {
+ goto error;
+ }
+ *objp = make_export(hp);
+ *hp++ = HEADER_EXPORT;
+ *hp++ = (Eterm) erts_export_get_or_make_stub(mod, name, arity);
+ break;
+ }
+ break;
+ case NEW_FUN_EXT:
+ {
+ ErlFunThing* funp = (ErlFunThing *) hp;
+ Uint arity;
+ Eterm module;
+ byte* uniq;
+ int index;
+ Sint old_uniq;
+ Sint old_index;
+ unsigned num_free;
+ int i;
+ Eterm* temp_hp;
+ Eterm** hpp = &temp_hp;
+ Eterm temp;
+
+ ep += 4; /* Skip total size in bytes */
+ arity = *ep++;
+ uniq = ep;
+ ep += 16;
+ index = get_int32(ep);
+ ep += 4;
+ num_free = get_int32(ep);
+ ep += 4;
+ hp += ERL_FUN_SIZE;
+ if (num_free > 0) {
+ /* Don't leave a hole in case we fail */
+ *hp = make_pos_bignum_header(num_free-1);
+ }
+ hp += num_free;
+ *hpp = hp;
+ funp->thing_word = HEADER_FUN;
+ funp->num_free = num_free;
+ funp->creator = NIL; /* Don't leave a hole in case we fail */
+ *objp = make_fun(funp);
+
+ /* Module */
+ if ((ep = dec_atom(edep, ep, &temp)) == NULL) {
+ goto error;
+ }
+ module = temp;
+
+ /* Index */
+ if ((ep = dec_term(edep, hpp, ep, off_heap, &temp)) == NULL) {
+ goto error;
+ }
+ if (!is_small(temp)) {
+ goto error;
+ }
+ old_index = unsigned_val(temp);
+
+ /* Uniq */
+ if ((ep = dec_term(edep, hpp, ep, off_heap, &temp)) == NULL) {
+ goto error;
+ }
+ if (!is_small(temp)) {
+ goto error;
+ }
+ old_uniq = unsigned_val(temp);
+
+#ifndef HYBRID /* FIND ME! */
+ /*
+ * It is safe to link the fun into the fun list only when
+ * no more validity tests can fail.
+ */
+ funp->next = off_heap->funs;
+ off_heap->funs = funp;
+#endif
+
+ funp->fe = erts_put_fun_entry2(module, old_uniq, old_index,
+ uniq, index, arity);
+ funp->arity = arity;
+#ifdef HIPE
+ if (funp->fe->native_address == NULL) {
+ hipe_set_closure_stub(funp->fe, num_free);
+ }
+ funp->native_address = funp->fe->native_address;
+#endif
+ hp = *hpp;
+
+ /* Environment */
+ for (i = num_free-1; i >= 0; i--) {
+ funp->env[i] = (Eterm) next;
+ next = funp->env + i;
+ }
+ /* Creator */
+ funp->creator = (Eterm) next;
+ next = &(funp->creator);
+ break;
+ }
+ case FUN_EXT:
+ {
+ ErlFunThing* funp = (ErlFunThing *) hp;
+ Eterm module;
+ Sint old_uniq;
+ Sint old_index;
+ unsigned num_free;
+ int i;
+ Eterm* temp_hp;
+ Eterm** hpp = &temp_hp;
+ Eterm temp;
+
+ num_free = get_int32(ep);
+ ep += 4;
+ hp += ERL_FUN_SIZE;
+ if (num_free > 0) {
+ /* Don't leave a hole in the heap in case we fail. */
+ *hp = make_pos_bignum_header(num_free-1);
+ }
+ hp += num_free;
+ *hpp = hp;
+ funp->thing_word = HEADER_FUN;
+ funp->num_free = num_free;
+ *objp = make_fun(funp);
+
+ /* Creator pid */
+ switch(*ep) {
+ case PID_EXT:
+ ep = dec_pid(edep, hpp, ++ep, off_heap, &funp->creator);
+ if (ep == NULL) {
+ funp->creator = NIL; /* Don't leave a hole in the heap */
+ goto error;
+ }
+ break;
+ default:
+ goto error;
+ }
+
+ /* Module */
+ if ((ep = dec_atom(edep, ep, &temp)) == NULL) {
+ goto error;
+ }
+ module = temp;
+
+ /* Index */
+ if ((ep = dec_term(edep, hpp, ep, off_heap, &temp)) == NULL) {
+ goto error;
+ }
+ if (!is_small(temp)) {
+ goto error;
+ }
+ old_index = unsigned_val(temp);
+
+ /* Uniq */
+ if ((ep = dec_term(edep, hpp, ep, off_heap, &temp)) == NULL) {
+ goto error;
+ }
+ if (!is_small(temp)) {
+ goto error;
+ }
+
+#ifndef HYBRID /* FIND ME! */
+ /*
+ * It is safe to link the fun into the fun list only when
+ * no more validity tests can fail.
+ */
+ funp->next = off_heap->funs;
+ off_heap->funs = funp;
+#endif
+
+ old_uniq = unsigned_val(temp);
+
+ funp->fe = erts_put_fun_entry(module, old_uniq, old_index);
+ funp->arity = funp->fe->address[-1] - num_free;
+#ifdef HIPE
+ funp->native_address = funp->fe->native_address;
+#endif
+ hp = *hpp;
+
+ /* Environment */
+ for (i = num_free-1; i >= 0; i--) {
+ funp->env[i] = (Eterm) next;
+ next = funp->env + i;
+ }
+ break;
+ }
+ default:
+ error:
+ /*
+ * Be careful to return the updated heap pointer, to avoid
+ * that the caller wipes out binaries or other off-heap objects
+ * that may have been linked into the process.
+ */
+ *hpp = hp;
+ return NULL;
+ }
+ }
+ *hpp = hp;
+ return ep;
+}
+
+/* Returns the number of bytes needed to encode an object
+   to a sequence of bytes.
+   N.B. that this must agree with enc_term() above
+   (except for cached atoms). */
+
+static Uint
+encode_size_struct2(ErtsAtomCacheMap *acmp, Eterm obj, unsigned dflags)
+{
+ DECLARE_ESTACK(s);
+ Uint m, i, arity;
+ Uint result = 0;
+
+ goto L_jump_start;
+
+ outer_loop:
+ while (!ESTACK_ISEMPTY(s)) {
+ obj = ESTACK_POP(s);
+
+ handle_popped_obj:
+ if (is_CP(obj)) {
+ Eterm* ptr = (Eterm *) obj;
+
+ /*
+ * Pointer into a tuple.
+ */
+ obj = *ptr--;
+ if (!is_header(obj)) {
+ ESTACK_PUSH(s, (Eterm)ptr);
+ } else {
+ /* Reached tuple header */
+ ASSERT(header_is_arityval(obj));
+ goto outer_loop;
+ }
+ } else if (is_list(obj)) {
+ Eterm* cons = list_val(obj);
+ Eterm tl;
+
+ tl = CDR(cons);
+ obj = CAR(cons);
+ ESTACK_PUSH(s, tl);
+ } else if (is_nil(obj)) {
+ result++;
+ goto outer_loop;
+ } else {
+ /*
+ * Other term (in the tail of a non-proper list or
+ * in a fun's environment).
+ */
+ }
+
+ L_jump_start:
+ switch (tag_val_def(obj)) {
+ case NIL_DEF:
+ result++;
+ break;
+ case ATOM_DEF: {
+ int alen = atom_tab(atom_val(obj))->len;
+ if ((MAX_ATOM_LENGTH <= 255 || alen <= 255)
+ && (dflags & DFLAG_SMALL_ATOM_TAGS)) {
+ /* Make sure a SMALL_ATOM_EXT fits: SMALL_ATOM_EXT l t1 t2... */
+ result += 1 + 1 + alen;
+ }
+ else {
+ /* Make sure an ATOM_EXT fits: ATOM_EXT l1 l0 t1 t2... */
+ result += 1 + 2 + alen;
+ }
+ insert_acache_map(acmp, obj);
+ break;
+ }
+ case SMALL_DEF:
+ {
+ Sint val = signed_val(obj);
+
+ if ((Uint)val < 256)
+ result += 1 + 1; /* SMALL_INTEGER_EXT */
+ else if (sizeof(Sint) == 4 || IS_SSMALL28(val))
+ result += 1 + 4; /* INTEGER_EXT */
+ else {
+ Eterm tmp_big[2];
+ i = big_bytes(small_to_big(val, tmp_big));
+ result += 1 + 1 + 1 + i; /* SMALL_BIG_EXT */
+ }
+ }
+ break;
+ case BIG_DEF:
+ if ((i = big_bytes(obj)) < 256)
+ result += 1 + 1 + 1 + i; /* tag,size,sign,digits */
+ else
+ result += 1 + 4 + 1 + i; /* tag,size,sign,digits */
+ break;
+ case PID_DEF:
+ case EXTERNAL_PID_DEF:
+ result += (1 + encode_size_struct2(acmp, pid_node_name(obj), dflags) +
+ 4 + 4 + 1);
+ break;
+ case REF_DEF:
+ case EXTERNAL_REF_DEF:
+ ASSERT(dflags & DFLAG_EXTENDED_REFERENCES);
+ i = ref_no_of_numbers(obj);
+ result += (1 + 2 + encode_size_struct2(acmp, ref_node_name(obj), dflags) +
+ 1 + 4*i);
+ break;
+ case PORT_DEF:
+ case EXTERNAL_PORT_DEF:
+ result += (1 + encode_size_struct2(acmp, port_node_name(obj), dflags) +
+ 4 + 1);
+ break;
+ case LIST_DEF:
+ if ((m = is_string(obj)) && (m < MAX_STRING_LEN)) {
+ result += m + 2 + 1;
+ } else {
+ result += 5;
+ goto handle_popped_obj;
+ }
+ break;
+ case TUPLE_DEF:
+ {
+ Eterm* ptr = tuple_val(obj);
+
+ arity = arityval(*ptr);
+ if (arity <= 0xff) {
+ result += 1 + 1;
+ } else {
+ result += 1 + 4;
+ }
+ ptr += arity;
+ obj = (Eterm) ptr;
+ goto handle_popped_obj;
+ }
+ break;
+ case FLOAT_DEF:
+ if (dflags & DFLAG_NEW_FLOATS) {
+ result += 9;
+ } else {
+ result += 32; /* Yes, including the tag */
+ }
+ break;
+ case BINARY_DEF:
+ result += 1 + 4 + binary_size(obj) +
+ 5; /* For unaligned binary */
+ break;
+ case FUN_DEF:
+ {
+ ErlFunThing* funp = (ErlFunThing *) fun_val(obj);
+
+ if ((dflags & DFLAG_NEW_FUN_TAGS) != 0) {
+ result += 20+1+1+4; /* New ID + Tag */
+		result += 4; /* Length field (number of free variables) */
+ result += encode_size_struct2(acmp, funp->creator, dflags);
+ result += encode_size_struct2(acmp, funp->fe->module, dflags);
+ result += 2 * (1+4); /* Index, Uniq */
+ } else {
+ /*
+ * Size when fun is mapped to a tuple.
+ */
+ result += 1 + 1; /* Tuple tag, arity */
+ result += 1 + 1 + 2 +
+ atom_tab(atom_val(am_fun))->len; /* 'fun' */
+ result += 1 + 1 + 2 +
+ atom_tab(atom_val(funp->fe->module))->len; /* Module name */
+ result += 2 * (1 + 4); /* Index + Uniq */
+ result += 1 + (funp->num_free < 0x100 ? 1 : 4);
+ }
+ for (i = 1; i < funp->num_free; i++) {
+ obj = funp->env[i];
+
+ if (is_not_list(obj)) {
+ /* Push any non-list terms on the stack */
+ ESTACK_PUSH(s, obj);
+ } else {
+ /* Lists must be handled specially. */
+ if ((m = is_string(obj)) && (m < MAX_STRING_LEN)) {
+ result += m + 2 + 1;
+ } else {
+ result += 5;
+ ESTACK_PUSH(s, obj);
+ }
+ }
+ }
+ if (funp->num_free != 0) {
+ obj = funp->env[0];
+ goto L_jump_start;
+ }
+ break;
+ }
+
+ case EXPORT_DEF:
+ {
+ Export* ep = (Export *) (export_val(obj))[1];
+ result += 1;
+ result += encode_size_struct2(acmp, ep->code[0], dflags);
+ result += encode_size_struct2(acmp, ep->code[1], dflags);
+ result += encode_size_struct2(acmp, make_small(ep->code[2]), dflags);
+ }
+ break;
+
+ default:
+ erl_exit(1,"Internal data structure error (in encode_size_struct2)%x\n",
+ obj);
+ }
+ }
+
+ DESTROY_ESTACK(s);
+ return result;
+}
+
+static Sint
+decoded_size(byte *ep, byte* endp, int no_refc_bins)
+{
+ int heap_size = 0;
+ int terms;
+ int atom_extra_skip = 0;
+ Uint n;
+
+#define SKIP(sz) \
+ do { \
+ if ((sz) <= endp-ep) { \
+ ep += (sz); \
+ } else { return -1; }; \
+ } while (0)
+
+#define SKIP2(sz1, sz2) \
+ do { \
+ Uint sz = (sz1) + (sz2); \
+ if (sz1 < sz && (sz) <= endp-ep) { \
+ ep += (sz); \
+ } else { return -1; } \
+ } while (0)
+
+#define CHKSIZE(sz) \
+ do { \
+ if ((sz) > endp-ep) { return -1; } \
+ } while (0)
+
+#define ADDTERMS(n) \
+ do { \
+ int before = terms; \
+ terms += (n); \
+ if (terms < before) return -1; \
+ } while (0)
+
+
+ for (terms=1; terms > 0; terms--) {
+ int tag;
+
+ CHKSIZE(1);
+ tag = ep++[0];
+ switch (tag) {
+ case INTEGER_EXT:
+ SKIP(4);
+ heap_size += BIG_UINT_HEAP_SIZE;
+ break;
+ case SMALL_INTEGER_EXT:
+ SKIP(1);
+ break;
+ case SMALL_BIG_EXT:
+ CHKSIZE(1);
+ n = ep[0]; /* number of bytes */
+ SKIP2(n, 1+1); /* skip size,sign,digits */
+ heap_size += 1+(n+sizeof(Eterm)-1)/sizeof(Eterm); /* XXX: 1 too much? */
+ break;
+ case LARGE_BIG_EXT:
+ CHKSIZE(4);
+ n = get_int32(ep);
+	    SKIP2(n, 4+1); /* skip size,sign,digits */
+ heap_size += 1+1+(n+sizeof(Eterm)-1)/sizeof(Eterm); /* XXX: 1 too much? */
+ break;
+ case ATOM_EXT:
+ CHKSIZE(2);
+ n = get_int16(ep);
+ if (n > MAX_ATOM_LENGTH) {
+ return -1;
+ }
+ SKIP(n+2+atom_extra_skip);
+ atom_extra_skip = 0;
+ break;
+ case SMALL_ATOM_EXT:
+ CHKSIZE(1);
+ n = get_int8(ep);
+ if (n > MAX_ATOM_LENGTH) {
+ return -1;
+ }
+ SKIP(n+1+atom_extra_skip);
+ atom_extra_skip = 0;
+ break;
+ case ATOM_CACHE_REF:
+ SKIP(1+atom_extra_skip);
+ atom_extra_skip = 0;
+ break;
+ case PID_EXT:
+ atom_extra_skip = 9;
+ /* In case it is an external pid */
+ heap_size += EXTERNAL_THING_HEAD_SIZE + 1;
+ terms++;
+ break;
+ case PORT_EXT:
+ atom_extra_skip = 5;
+ /* In case it is an external port */
+ heap_size += EXTERNAL_THING_HEAD_SIZE + 1;
+ terms++;
+ break;
+ case NEW_REFERENCE_EXT:
+ {
+ int id_words;
+
+ CHKSIZE(2);
+ id_words = get_int16(ep);
+
+ if (id_words > ERTS_MAX_REF_NUMBERS)
+ return -1;
+
+ ep += 2;
+ atom_extra_skip = 1 + 4*id_words;
+ /* In case it is an external ref */
+#ifdef ARCH_64
+ heap_size += EXTERNAL_THING_HEAD_SIZE + id_words/2 + 1;
+#else
+ heap_size += EXTERNAL_THING_HEAD_SIZE + id_words;
+#endif
+ terms++;
+ break;
+ }
+ case REFERENCE_EXT:
+ /* In case it is an external ref */
+ heap_size += EXTERNAL_THING_HEAD_SIZE + 1;
+ atom_extra_skip = 5;
+ terms++;
+ break;
+ case NIL_EXT:
+ break;
+ case LIST_EXT:
+ CHKSIZE(4);
+ n = get_int32(ep);
+ ep += 4;
+ ADDTERMS(n);
+ terms++;
+ heap_size += 2 * n;
+ break;
+ case SMALL_TUPLE_EXT:
+ CHKSIZE(1);
+ n = *ep++;
+ terms += n;
+ heap_size += n + 1;
+ break;
+ case LARGE_TUPLE_EXT:
+ CHKSIZE(4);
+ n = get_int32(ep);
+ ep += 4;
+ ADDTERMS(n);
+ heap_size += n + 1;
+ break;
+ case STRING_EXT:
+ CHKSIZE(2);
+ n = get_int16(ep);
+ SKIP(n+2);
+ heap_size += 2 * n;
+ break;
+ case FLOAT_EXT:
+ SKIP(31);
+ heap_size += FLOAT_SIZE_OBJECT;
+ break;
+ case NEW_FLOAT_EXT:
+ SKIP(8);
+ heap_size += FLOAT_SIZE_OBJECT;
+ break;
+ case BINARY_EXT:
+ CHKSIZE(4);
+ n = get_int32(ep);
+ SKIP2(n, 4);
+ if (n <= ERL_ONHEAP_BIN_LIMIT || no_refc_bins) {
+ heap_size += heap_bin_size(n);
+ } else {
+ heap_size += PROC_BIN_SIZE;
+ }
+ break;
+ case BIT_BINARY_EXT:
+ {
+ CHKSIZE(5);
+ n = get_int32(ep);
+ SKIP2(n, 5);
+ if (n <= ERL_ONHEAP_BIN_LIMIT || no_refc_bins) {
+ heap_size += heap_bin_size(n) + ERL_SUB_BIN_SIZE;
+ } else {
+ heap_size += PROC_BIN_SIZE + ERL_SUB_BIN_SIZE;
+ }
+ }
+ break;
+ case EXPORT_EXT:
+ terms += 3;
+ heap_size += 2;
+ break;
+ case NEW_FUN_EXT:
+ {
+ unsigned num_free;
+ Uint total_size;
+
+ CHKSIZE(1+16+4+4);
+ total_size = get_int32(ep);
+ CHKSIZE(total_size);
+ ep += 1+16+4+4;
+ /*FALLTHROUGH*/
+
+ case FUN_EXT:
+ CHKSIZE(4);
+ num_free = get_int32(ep);
+ ep += 4;
+ if (num_free > MAX_ARG) {
+ return -1;
+ }
+ terms += 4 + num_free;
+ heap_size += ERL_FUN_SIZE + num_free;
+ break;
+ }
+ default:
+ return -1;
+ }
+ }
+ /* 'terms' may be non-zero if it has wrapped around */
+ return terms==0 ? heap_size : -1;
+#undef SKIP
+#undef SKIP2
+#undef CHKSIZE
+}
diff --git a/erts/emulator/beam/external.h b/erts/emulator/beam/external.h
new file mode 100644
index 0000000000..f308680f89
--- /dev/null
+++ b/erts/emulator/beam/external.h
@@ -0,0 +1,211 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 1996-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/* Same order as the ordering of terms in erlang */
+
+/* Since there are 255 different external tag values to choose from,
+   there is no reason not to be extravagant.
+   Hence the different tags for large/small tuples etc.
+*/
+
+
+#ifdef ERTS_WANT_EXTERNAL_TAGS
+#ifndef ERTS_EXTERNAL_TAGS
+#define ERTS_EXTERNAL_TAGS
+
+#define SMALL_INTEGER_EXT 'a'
+#define INTEGER_EXT 'b'
+#define FLOAT_EXT 'c'
+#define ATOM_EXT 'd'
+#define SMALL_ATOM_EXT 's'
+#define REFERENCE_EXT 'e'
+#define NEW_REFERENCE_EXT 'r'
+#define PORT_EXT 'f'
+#define NEW_FLOAT_EXT 'F'
+#define PID_EXT 'g'
+#define SMALL_TUPLE_EXT 'h'
+#define LARGE_TUPLE_EXT 'i'
+#define NIL_EXT 'j'
+#define STRING_EXT 'k'
+#define LIST_EXT 'l'
+#define BINARY_EXT 'm'
+#define BIT_BINARY_EXT 'M'
+#define SMALL_BIG_EXT 'n'
+#define LARGE_BIG_EXT 'o'
+#define NEW_FUN_EXT 'p'
+#define EXPORT_EXT 'q'
+#define FUN_EXT 'u'
+
+#define DIST_HEADER 'D'
+#define ATOM_CACHE_REF 'R'
+#define COMPRESSED 'P'
+
+#if 0
+/* Not used anymore */
+#define CACHED_ATOM 'C'
+#define NEW_CACHE 'N'
+#endif
+
+
+#define VERSION_MAGIC 131 /* 130 in erlang 4.2 */
+ /* Increment this when changing the external format. */
+                              /* On the other hand, don't change the external format */
+ /* since that breaks other people's code! */
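+
+/* For example, term_to_binary({ok, 1}) produces
+ *   <<131,104,2,100,0,2,111,107,97,1>>
+ * i.e. VERSION_MAGIC, SMALL_TUPLE_EXT + arity 2, ATOM_EXT + length 2
+ * + "ok", and SMALL_INTEGER_EXT + 1. */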
+
+#endif /* ERTS_EXTERNAL_TAGS */
+#endif /* ERTS_WANT_EXTERNAL_TAGS */
+
+#ifndef ERL_EXTERNAL_H__
+#define ERL_EXTERNAL_H__
+
+#include "erl_node_tables.h"
+
+#define ERTS_ATOM_CACHE_SIZE 2048
+
+typedef struct cache {
+ Eterm in_arr[ERTS_ATOM_CACHE_SIZE];
+ Eterm out_arr[ERTS_ATOM_CACHE_SIZE];
+} ErtsAtomCache;
+
+typedef struct {
+ int hdr_sz;
+ int sz;
+ int cix[ERTS_ATOM_CACHE_SIZE];
+ struct {
+ Eterm atom;
+ int iix;
+ } cache[ERTS_ATOM_CACHE_SIZE];
+} ErtsAtomCacheMap;
+
+typedef struct {
+ Uint32 size;
+ Eterm atom[ERTS_ATOM_CACHE_SIZE];
+} ErtsAtomTranslationTable;
+
+#define ERTS_DIST_EXT_DFLAG_HDR (((Uint32) 1) << 31)
+#define ERTS_DIST_EXT_ATOM_TRANS_TAB (((Uint32) 1) << 30)
+#define ERTS_DIST_EXT_CON_ID_MASK ((Uint32) 0x3fffffff)
+
+#define ERTS_DIST_EXT_CON_ID(DIST_EXTP) \
+ ((DIST_EXTP)->flags & ERTS_DIST_EXT_CON_ID_MASK)
+typedef struct {
+ DistEntry *dep;
+ byte *extp;
+ byte *ext_endp;
+ Sint heap_size;
+ Uint32 flags;
+ ErtsAtomTranslationTable attab;
+} ErtsDistExternal;
+
+typedef struct {
+ int have_header;
+ int cache_entries;
+} ErtsDistHeaderPeek;
+
+#define ERTS_DIST_EXT_SIZE(EDEP) \
+ (sizeof(ErtsDistExternal) \
+ - (((EDEP)->flags & ERTS_DIST_EXT_ATOM_TRANS_TAB) \
+ ? (ASSERT_EXPR(0 <= (EDEP)->attab.size \
+ && (EDEP)->attab.size <= ERTS_ATOM_CACHE_SIZE), \
+ sizeof(Eterm)*(ERTS_ATOM_CACHE_SIZE - (EDEP)->attab.size)) \
+ : sizeof(ErtsAtomTranslationTable)))
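+/* I.e. the size of an ErtsDistExternal copy where the trailing atom
+   translation table has been shrunk to the entries actually in use,
+   or dropped entirely when no table is present. */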
+
+typedef struct {
+ byte *extp;
+ int exttmp;
+} ErtsBinary2TermState;
+
+/* -------------------------------------------------------------------------- */
+
+void erts_init_atom_cache_map(ErtsAtomCacheMap *);
+void erts_reset_atom_cache_map(ErtsAtomCacheMap *);
+void erts_destroy_atom_cache_map(ErtsAtomCacheMap *);
+void erts_finalize_atom_cache_map(ErtsAtomCacheMap *);
+
+Uint erts_encode_ext_dist_header_size(ErtsAtomCacheMap *);
+byte *erts_encode_ext_dist_header_setup(byte *, ErtsAtomCacheMap *);
+byte *erts_encode_ext_dist_header_finalize(byte *, ErtsAtomCache *);
+Uint erts_encode_dist_ext_size(Eterm, Uint32, ErtsAtomCacheMap *);
+void erts_encode_dist_ext(Eterm, byte **, Uint32, ErtsAtomCacheMap *);
+
+Uint erts_encode_ext_size(Eterm);
+void erts_encode_ext(Eterm, byte **);
+
+#ifdef ERTS_WANT_EXTERNAL_TAGS
+ERTS_GLB_INLINE void erts_peek_dist_header(ErtsDistHeaderPeek *, byte *, Uint);
+#endif
+ERTS_GLB_INLINE void erts_free_dist_ext_copy(ErtsDistExternal *);
+ERTS_GLB_INLINE void *erts_dist_ext_trailer(ErtsDistExternal *);
+ErtsDistExternal *erts_make_dist_ext_copy(ErtsDistExternal *, Uint);
+void erts_destroy_dist_ext_copy(ErtsDistExternal *);
+int erts_prepare_dist_ext(ErtsDistExternal *, byte *, Uint,
+ DistEntry *, ErtsAtomCache *);
+Sint erts_decode_dist_ext_size(ErtsDistExternal *, int);
+Eterm erts_decode_dist_ext(Eterm **, ErlOffHeap *, ErtsDistExternal *);
+
+Sint erts_decode_ext_size(byte*, Uint, int);
+Eterm erts_decode_ext(Eterm **, ErlOffHeap *, byte**);
+
+Eterm erts_term_to_binary(Process* p, Eterm Term, int level, Uint flags);
+
+Sint erts_binary2term_prepare(ErtsBinary2TermState *, byte *, Sint);
+void erts_binary2term_abort(ErtsBinary2TermState *);
+Eterm erts_binary2term_create(ErtsBinary2TermState *, Eterm **hpp, ErlOffHeap *);
+int erts_debug_max_atom_out_cache_index(void);
+int erts_debug_atom_to_out_cache_index(Eterm);
+
+
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+#ifdef ERTS_WANT_EXTERNAL_TAGS
+ERTS_GLB_INLINE void
+erts_peek_dist_header(ErtsDistHeaderPeek *dhpp, byte *ext, Uint sz)
+{
+    if (sz < (1+1+1) /* check size before peeking at ext[1] */
+	|| ext[0] == VERSION_MAGIC
+	|| ext[1] != DIST_HEADER)
+ dhpp->have_header = 0;
+ else {
+ dhpp->have_header = 1;
+ dhpp->cache_entries = (int) get_int8(&ext[2]);
+ }
+}
+#endif
+
+ERTS_GLB_INLINE void
+erts_free_dist_ext_copy(ErtsDistExternal *edep)
+{
+ if (edep->dep)
+ erts_deref_dist_entry(edep->dep);
+ erts_free(ERTS_ALC_T_EXT_TERM_DATA, edep);
+}
+
+ERTS_GLB_INLINE void *
+erts_dist_ext_trailer(ErtsDistExternal *edep)
+{
+ void *res = (void *) (edep->ext_endp
+ + ERTS_WORD_ALIGN_PAD_SZ(edep->ext_endp));
+ ASSERT((((Uint) res) % sizeof(Uint)) == 0);
+ return res;
+}
+
+#endif
+
+#endif /* ERL_EXTERNAL_H__ */
diff --git a/erts/emulator/beam/fix_alloc.c b/erts/emulator/beam/fix_alloc.c
new file mode 100644
index 0000000000..5637281597
--- /dev/null
+++ b/erts/emulator/beam/fix_alloc.c
@@ -0,0 +1,287 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 1996-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+/* General purpose memory allocator for fixed block size objects. */
+/* This allocator is at least an order of magnitude faster than malloc(). */
+
+
+#define NOPERBLOCK 20
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+
+#include "sys.h"
+#include "erl_vm.h"
+#include "global.h"
+#include "erl_db.h"
+
+#ifdef ERTS_ALC_N_MIN_A_FIXED_SIZE
+
+#if ERTS_ALC_MTA_FIXED_SIZE
+#include "erl_threads.h"
+#include "erl_smp.h"
+# ifdef ERTS_SMP
+# define FA_LOCK(FA) erts_smp_spin_lock(&(FA)->slck)
+# define FA_UNLOCK(FA) erts_smp_spin_unlock(&(FA)->slck)
+# else
+# define FA_LOCK(FA) erts_mtx_lock(&(FA)->mtx)
+# define FA_UNLOCK(FA) erts_mtx_unlock(&(FA)->mtx)
+# endif
+#else
+# define FA_LOCK(FA)
+# define FA_UNLOCK(FA)
+#endif
+
+typedef union {double d; long l;} align_t;
+
+typedef struct fix_alloc_block {
+ struct fix_alloc_block *next;
+ align_t mem[1];
+} FixAllocBlock;
+
+typedef struct fix_alloc {
+ Uint item_size;
+ void *freelist;
+ Uint no_free;
+ Uint no_blocks;
+ FixAllocBlock *blocks;
+#if ERTS_ALC_MTA_FIXED_SIZE
+# ifdef ERTS_SMP
+ erts_smp_spinlock_t slck;
+# else
+ erts_mtx_t mtx;
+# endif
+#endif
+} FixAlloc;
+
+static void *(*core_alloc)(Uint);
+static Uint xblk_sz;
+
+static FixAlloc **fa;
+#define FA_SZ (1 + ERTS_ALC_N_MAX_A_FIXED_SIZE - ERTS_ALC_N_MIN_A_FIXED_SIZE)
+
+#define FIX_IX(N) ((N) - ERTS_ALC_N_MIN_A_FIXED_SIZE)
+
+#define FIX_POOL_SZ(I_SZ) \
+ ((I_SZ)*NOPERBLOCK + sizeof(FixAllocBlock) - sizeof(align_t))
+
+#if defined(DEBUG) && !ERTS_ALC_MTA_FIXED_SIZE
+static int first_time;
+#endif
+
+void erts_init_fix_alloc(Uint extra_block_size,
+ void *(*alloc)(Uint))
+{
+ int i;
+
+ xblk_sz = extra_block_size;
+ core_alloc = alloc;
+
+ fa = (FixAlloc **) (*core_alloc)(FA_SZ * sizeof(FixAlloc *));
+ if (!fa)
+ erts_alloc_enomem(ERTS_ALC_T_UNDEF, FA_SZ * sizeof(FixAlloc *));
+
+ for (i = 0; i < FA_SZ; i++)
+ fa[i] = NULL;
+#if defined(DEBUG) && !ERTS_ALC_MTA_FIXED_SIZE
+ first_time = 1;
+#endif
+}
+
+Uint
+erts_get_fix_size(ErtsAlcType_t type)
+{
+ Uint i = FIX_IX(ERTS_ALC_T2N(type));
+ return i < FA_SZ && fa[i] ? fa[i]->item_size : 0;
+}
+
+void
+erts_set_fix_size(ErtsAlcType_t type, Uint size)
+{
+ Uint sz;
+ Uint i;
+ FixAlloc *fs;
+ ErtsAlcType_t t_no = ERTS_ALC_T2N(type);
+ sz = xblk_sz + size;
+
+#ifdef DEBUG
+ ASSERT(ERTS_ALC_N_MIN_A_FIXED_SIZE <= t_no);
+ ASSERT(t_no <= ERTS_ALC_N_MAX_A_FIXED_SIZE);
+#endif
+
+ while (sz % sizeof(align_t) != 0) /* Alignment */
+ sz++;
+
+ i = FIX_IX(t_no);
+ fs = (FixAlloc *) (*core_alloc)(sizeof(FixAlloc));
+ if (!fs)
+ erts_alloc_n_enomem(t_no, sizeof(FixAlloc));
+
+ fs->item_size = sz;
+ fs->no_blocks = 0;
+ fs->no_free = 0;
+ fs->blocks = NULL;
+ fs->freelist = NULL;
+ if (fa[i])
+ erl_exit(-1, "Attempt to overwrite existing fix size (%d)", i);
+ fa[i] = fs;
+
+#if ERTS_ALC_MTA_FIXED_SIZE
+#ifdef ERTS_SMP
+ erts_smp_spinlock_init_x(&fs->slck, "fix_alloc", make_small(i));
+#else
+ erts_mtx_init_x(&fs->mtx, "fix_alloc", make_small(i));
+#endif
+#endif
+
+}
+
+void
+erts_fix_info(ErtsAlcType_t type, ErtsFixInfo *efip)
+{
+ Uint i;
+ FixAlloc *f;
+#ifdef DEBUG
+ FixAllocBlock *b;
+ void *fp;
+#endif
+ Uint real_item_size;
+ ErtsAlcType_t t_no = ERTS_ALC_T2N(type);
+
+ ASSERT(ERTS_ALC_N_MIN_A_FIXED_SIZE <= t_no);
+ ASSERT(t_no <= ERTS_ALC_N_MAX_A_FIXED_SIZE);
+
+ i = FIX_IX(t_no);
+ f = fa[i];
+
+ efip->total = sizeof(FixAlloc *);
+ efip->used = 0;
+ if (!f)
+ return;
+
+ real_item_size = f->item_size - xblk_sz;
+
+ FA_LOCK(f);
+
+ efip->total += sizeof(FixAlloc);
+ efip->total += f->no_blocks*FIX_POOL_SZ(real_item_size);
+ efip->used = efip->total - f->no_free*real_item_size;
+
+#ifdef DEBUG
+ ASSERT(efip->total >= efip->used);
+ for(i = 0, b = f->blocks; b; i++, b = b->next);
+ ASSERT(f->no_blocks == i);
+ for (i = 0, fp = f->freelist; fp; i++, fp = *((void **) fp));
+ ASSERT(f->no_free == i);
+#endif
+
+ FA_UNLOCK(f);
+
+}
+
+void
+erts_fix_free(ErtsAlcType_t t_no, void *extra, void* ptr)
+{
+ Uint i;
+ FixAlloc *f;
+
+ ASSERT(ERTS_ALC_N_MIN_A_FIXED_SIZE <= t_no);
+ ASSERT(t_no <= ERTS_ALC_N_MAX_A_FIXED_SIZE);
+
+ i = FIX_IX(t_no);
+ f = fa[i];
+
+ FA_LOCK(f);
+ *((void **) ptr) = f->freelist;
+ f->freelist = ptr;
+ f->no_free++;
+ FA_UNLOCK(f);
+}
+
+
+void *erts_fix_realloc(ErtsAlcType_t t_no, void *extra, void* ptr, Uint size)
+{
+ erts_alc_fatal_error(ERTS_ALC_E_NOTSUP, ERTS_ALC_O_REALLOC, t_no);
+ return NULL;
+}
+
+void *erts_fix_alloc(ErtsAlcType_t t_no, void *extra, Uint size)
+{
+ void *ret;
+ int i;
+ FixAlloc *f;
+
+#if defined(DEBUG) && !ERTS_ALC_MTA_FIXED_SIZE
+ ASSERT(ERTS_ALC_N_MIN_A_FIXED_SIZE <= t_no);
+ ASSERT(t_no <= ERTS_ALC_N_MAX_A_FIXED_SIZE);
+ if (first_time) { /* Check that all sizes have been initialized */
+ int i;
+ for (i = 0; i < FA_SZ; i++)
+ ASSERT(fa[i]);
+ first_time = 0;
+ }
+#endif
+
+
+ i = FIX_IX(t_no);
+ f = fa[i];
+
+ ASSERT(f);
+ ASSERT(f->item_size >= size);
+
+ FA_LOCK(f);
+ if (f->freelist == NULL) { /* Gotta alloc some more mem */
+ char *ptr;
+ FixAllocBlock *bl;
+ Uint n;
+
+
+ FA_UNLOCK(f);
+ bl = (*core_alloc)(FIX_POOL_SZ(f->item_size));
+ if (!bl)
+ return NULL;
+
+ FA_LOCK(f);
+ bl->next = f->blocks; /* link in first */
+ f->blocks = bl;
+
+ n = NOPERBLOCK;
+ ptr = (char *) &f->blocks->mem[0];
+ while(n--) {
+ *((void **) ptr) = f->freelist;
+ f->freelist = (void *) ptr;
+ ptr += f->item_size;
+ }
+#if !ERTS_ALC_MTA_FIXED_SIZE
+ ASSERT(f->no_free == 0);
+#endif
+ f->no_free += NOPERBLOCK;
+ f->no_blocks++;
+ }
+
+ ret = f->freelist;
+ f->freelist = *((void **) f->freelist);
+ ASSERT(f->no_free > 0);
+ f->no_free--;
+
+ FA_UNLOCK(f);
+
+ return ret;
+}
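+
+/*
+ * A minimal usage sketch (illustrative only; in the emulator the
+ * erl_alloc.c framework drives these calls, and ERTS_ALC_T_MY_FIXED
+ * is a hypothetical fixed-size allocator type):
+ *
+ *   erts_init_fix_alloc(0, core_alloc_fn);     (once, at startup)
+ *   erts_set_fix_size(ERTS_ALC_T_MY_FIXED, sizeof(MyStruct));
+ *
+ *   p = erts_fix_alloc(ERTS_ALC_T2N(ERTS_ALC_T_MY_FIXED), NULL,
+ *                      sizeof(MyStruct));
+ *   ...
+ *   erts_fix_free(ERTS_ALC_T2N(ERTS_ALC_T_MY_FIXED), NULL, p);
+ *
+ * Freed objects are pushed on the per-type freelist; a new block of
+ * NOPERBLOCK objects is carved out when the freelist runs empty.
+ */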
+
+#endif /* #ifdef ERTS_ALC_N_MIN_A_FIXED_SIZE */
diff --git a/erts/emulator/beam/global.h b/erts/emulator/beam/global.h
new file mode 100644
index 0000000000..1b64e23174
--- /dev/null
+++ b/erts/emulator/beam/global.h
@@ -0,0 +1,1800 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 1996-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+#ifndef __GLOBAL_H__
+#define __GLOBAL_H__
+
+#include "sys.h"
+#include "erl_alloc.h"
+#include "erl_vm.h"
+#include "erl_node_container_utils.h"
+#include "hash.h"
+#include "index.h"
+#include "atom.h"
+#include "export.h"
+#include "module.h"
+#include "register.h"
+#include "erl_fun.h"
+#include "erl_node_tables.h"
+#include "benchmark.h"
+#include "erl_process.h"
+#include "erl_sys_driver.h"
+#include "erl_debug.h"
+
+typedef struct port Port;
+#include "erl_port_task.h"
+
+#define ERTS_MAX_NO_OF_ASYNC_THREADS 1024
+extern int erts_async_max_threads;
+#define ERTS_ASYNC_THREAD_MIN_STACK_SIZE 16 /* Kilo words */
+#define ERTS_ASYNC_THREAD_MAX_STACK_SIZE 8192 /* Kilo words */
+extern int erts_async_thread_suggested_stack_size;
+
+typedef struct erts_driver_t_ erts_driver_t;
+
+#define SMALL_IO_QUEUE 5 /* Number of fixed elements */
+
+typedef struct {
+ int size; /* total size in bytes */
+
+ SysIOVec* v_start;
+ SysIOVec* v_end;
+ SysIOVec* v_head;
+ SysIOVec* v_tail;
+ SysIOVec v_small[SMALL_IO_QUEUE];
+
+ ErlDrvBinary** b_start;
+ ErlDrvBinary** b_end;
+ ErlDrvBinary** b_head;
+ ErlDrvBinary** b_tail;
+ ErlDrvBinary* b_small[SMALL_IO_QUEUE];
+} ErlIOQueue;
+
+typedef struct line_buf { /* Buffer used in line oriented I/O */
+ int bufsiz; /* Size of character buffer */
+ int ovlen; /* Length of overflow data */
+ int ovsiz; /* Actual size of overflow buffer */
+ char data[1]; /* Starting point of buffer data,
+                              data[0] is a flag indicating an unprocessed CR,
+                              the rest is the overflow buffer. */
+} LineBuf;
+
+struct enif_environment_t /* ErlNifEnv */
+{
+ void* nif_data;
+ Process* proc;
+ Eterm* hp;
+ Eterm* hp_end;
+ unsigned heap_frag_sz;
+ int fpe_was_unmasked;
+};
+extern void erts_pre_nif(struct enif_environment_t*, Process*, void* nif_data);
+extern void erts_post_nif(struct enif_environment_t* env);
+extern Eterm erts_nif_taints(Process* p);
+
+/*
+ * Port Specific Data.
+ *
+ * Only use PrtSD for very rarely used data.
+ */
+
+#define ERTS_PRTSD_SCHED_ID 0
+
+#define ERTS_PRTSD_SIZE 1
+
+typedef struct {
+ void *data[ERTS_PRTSD_SIZE];
+} ErtsPrtSD;
+
+#ifdef ERTS_SMP
+typedef struct ErtsXPortsList_ ErtsXPortsList;
+#endif
+
+/*
+ * Port locking:
+ *
+ * Locking is done either driver specific or port specific. When
+ * driver specific locking is used, all instances of the driver,
+ * i.e. ports running the driver, share the same lock. When port
+ * specific locking is used each instance has its own lock.
+ *
+ * Most fields in the Port structure are protected by the lock
+ * referred to by the lock field. I've called it the port lock.
+ * This lock is shared between all ports running the same driver
+ * when driver specific locking is used.
+ *
+ * The 'sched' field is protected by the port tasks lock
+ * (see erl_port_tasks.c)
+ *
+ * The 'status' field is protected by a combination of the port lock,
+ * the port tasks lock, and the state_lck. It may be read if
+ * the state_lck or the port lock is held. It may only be
+ * modified if both the port lock and the state_lck are held
+ * (with one exception; see below). When changing status from alive
+ * to dead or vice versa, the port task lock also has to be held,
+ * in order to guarantee that tasks are scheduled only for
+ * ports that are alive.
+ *
+ * The status field may be modified with only the state_lck
+ * held when status is changed from dead to alive. This is safe
+ * since no threads can have any references to the port other
+ * than via the port table.
+ *
+ * /rickard
+ */
+
+struct port {
+ ErtsPortTaskSched sched;
+ ErtsPortTaskHandle timeout_task;
+#ifdef ERTS_SMP
+ erts_smp_atomic_t refc;
+ erts_smp_mtx_t *lock;
+ ErtsXPortsList *xports;
+ erts_smp_atomic_t run_queue;
+ erts_smp_spinlock_t state_lck; /* protects: id, status, snapshot */
+#endif
+ Eterm id; /* The Port id of this port */
+ Eterm connected; /* A connected process */
+ Eterm caller; /* Current caller. */
+ Eterm data; /* Data associated with port. */
+ ErlHeapFragment* bp; /* Heap fragment holding data (NULL if imm data). */
+ ErtsLink *nlinks;
+ ErtsMonitor *monitors; /* Only MON_ORIGIN monitors of pid's */
+ Uint bytes_in; /* Number of bytes read */
+ Uint bytes_out; /* Number of bytes written */
+#ifdef ERTS_SMP
+ ErtsSmpPTimer *ptimer;
+#else
+ ErlTimer tm; /* Timer entry */
+#endif
+
+ Eterm tracer_proc; /* If the port is traced, this is the tracer */
+ Uint trace_flags; /* Trace flags */
+
+ ErlIOQueue ioq; /* driver accessible i/o queue */
+ DistEntry *dist_entry; /* Dist entry used in DISTRIBUTION */
+ char *name; /* String used in the open */
+ erts_driver_t* drv_ptr;
+ long drv_data;
+ ErtsProcList *suspended; /* List of suspended processes. */
+ LineBuf *linebuf; /* Buffer to hold data not ready for
+ process to get (line oriented I/O)*/
+ Uint32 status; /* Status and type flags */
+ int control_flags; /* Flags for port_control() */
+ Uint32 snapshot; /* Next snapshot that port should be part of */
+ struct reg_proc *reg;
+ ErlDrvPDL port_data_lock;
+
+ ErtsPrtSD *psd; /* Port specific data */
+};
+
+
+ERTS_GLB_INLINE ErtsRunQueue *erts_port_runq(Port *prt);
+
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+
+ERTS_GLB_INLINE ErtsRunQueue *
+erts_port_runq(Port *prt)
+{
+#ifdef ERTS_SMP
+ ErtsRunQueue *rq1, *rq2;
+ rq1 = (ErtsRunQueue *) erts_smp_atomic_read(&prt->run_queue);
+ while (1) {
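+	/* The port may migrate to another run queue while we wait for
+	 * the lock: lock the queue we read, then re-read and retry
+	 * until the value is stable under the lock we hold. */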
+ erts_smp_runq_lock(rq1);
+ rq2 = (ErtsRunQueue *) erts_smp_atomic_read(&prt->run_queue);
+ if (rq1 == rq2)
+ return rq1;
+ erts_smp_runq_unlock(rq1);
+ rq1 = rq2;
+ }
+#else
+ return erts_common_run_queue;
+#endif
+}
+
+#endif
+
+
+ERTS_GLB_INLINE void *erts_prtsd_get(Port *p, int ix);
+ERTS_GLB_INLINE void *erts_prtsd_set(Port *p, int ix, void *new);
+
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+
+ERTS_GLB_INLINE void *
+erts_prtsd_get(Port *prt, int ix)
+{
+ return prt->psd ? prt->psd->data[ix] : NULL;
+}
+
+ERTS_GLB_INLINE void *
+erts_prtsd_set(Port *prt, int ix, void *data)
+{
+ if (prt->psd) {
+ void *old = prt->psd->data[ix];
+ prt->psd->data[ix] = data;
+ return old;
+ }
+ else {
+ prt->psd = erts_alloc(ERTS_ALC_T_PRTSD, sizeof(ErtsPrtSD));
+ prt->psd->data[ix] = data;
+ return NULL;
+ }
+}
+
+#endif
+
+/* Driver handle (wrapper for old plain handle) */
+#define ERL_DE_OK 0
+#define ERL_DE_UNLOAD 1
+#define ERL_DE_FORCE_UNLOAD 2
+#define ERL_DE_RELOAD 3
+#define ERL_DE_FORCE_RELOAD 4
+#define ERL_DE_PERMANENT 5
+
+#define ERL_DE_PROC_LOADED 0
+#define ERL_DE_PROC_AWAIT_UNLOAD 1
+#define ERL_DE_PROC_AWAIT_UNLOAD_ONLY 2
+#define ERL_DE_PROC_AWAIT_LOAD 3
+
+/* Flags for process entries */
+#define ERL_DE_FL_DEREFERENCED 1
+
+/* Flags for drivers, put locking policy here /PaN */
+#define ERL_DE_FL_KILL_PORTS 1
+
+#define ERL_FL_CONSISTENT_MASK ( ERL_DE_FL_KILL_PORTS )
+
+/* System specific load errors are returned as positive values */
+#define ERL_DE_NO_ERROR 0
+#define ERL_DE_LOAD_ERROR_NO_INIT -1
+#define ERL_DE_LOAD_ERROR_FAILED_INIT -2
+#define ERL_DE_LOAD_ERROR_BAD_NAME -3
+#define ERL_DE_LOAD_ERROR_NAME_TO_LONG -4
+#define ERL_DE_LOAD_ERROR_INCORRECT_VERSION -5
+#define ERL_DE_ERROR_NO_DDLL_FUNCTIONALITY -6
+#define ERL_DE_ERROR_UNSPECIFIED -7
+#define ERL_DE_LOOKUP_ERROR_NOT_FOUND -8
+#define ERL_DE_DYNAMIC_ERROR_OFFSET -10
+
+typedef struct de_proc_entry {
+ Process *proc; /* The process... */
+ Uint awaiting_status; /* PROC_LOADED == Have loaded the driver
+ PROC_AWAIT_UNLOAD == Wants to be notified
+ when we have unloaded the driver (was locked)
+ PROC_AWAIT_LOAD == Wants to be notified when we
+ reloaded the driver (old was locked) */
+ Uint flags; /* ERL_FL_DE_DEREFERENCED when reload in progress */
+ Eterm heap[REF_THING_SIZE]; /* "ref heap" */
+ struct de_proc_entry *next;
+} DE_ProcEntry;
+
+typedef struct {
+ void *handle; /* Handle for DLL or SO (for dyn. drivers). */
+ DE_ProcEntry *procs; /* List of pids that have loaded this driver,
+ or that wait for it to change state */
+ erts_refc_t refc; /* Number of ports/processes having
+ references to the driver */
+ Uint port_count; /* Number of ports using the driver */
+ Uint flags; /* ERL_DE_FL_KILL_PORTS */
+ int status; /* ERL_DE_xxx */
+ char *full_path; /* Full path of the driver */
+ char *reload_full_path; /* If status == ERL_DE_RELOAD, this contains
+ full name of driver (path) */
+ char *reload_driver_name; /* ... and this contains the driver name */
+ Uint reload_flags; /* flags for reloaded driver */
+} DE_Handle;
+
+/*
+ * This structure represents a link to the next driver.
+ */
+
+struct erts_driver_t_ {
+ erts_driver_t *next;
+ erts_driver_t *prev;
+ char *name;
+ struct {
+ int major;
+ int minor;
+ } version;
+ int flags;
+ DE_Handle *handle;
+#ifdef ERTS_SMP
+ erts_smp_mtx_t *lock;
+#endif
+ ErlDrvEntry *entry;
+ ErlDrvData (*start)(ErlDrvPort port, char *command, SysDriverOpts* opts);
+ void (*stop)(ErlDrvData drv_data);
+ void (*finish)(void);
+ void (*flush)(ErlDrvData drv_data);
+ void (*output)(ErlDrvData drv_data, char *buf, int len);
+ void (*outputv)(ErlDrvData drv_data, ErlIOVec *ev); /* Might be NULL */
+ int (*control)(ErlDrvData drv_data, unsigned int command, char *buf,
+ int len, char **rbuf, int rlen); /* Might be NULL */
+ int (*call)(ErlDrvData drv_data, unsigned int command, char *buf,
+ int len, char **rbuf, int rlen, unsigned int *flags); /* Might be NULL */
+ void (*event)(ErlDrvData drv_data, ErlDrvEvent event,
+ ErlDrvEventData event_data);
+ void (*ready_input)(ErlDrvData drv_data, ErlDrvEvent event);
+ void (*ready_output)(ErlDrvData drv_data, ErlDrvEvent event);
+ void (*timeout)(ErlDrvData drv_data);
+ void (*ready_async)(ErlDrvData drv_data, ErlDrvThreadData thread_data); /* Might be NULL */
+ void (*process_exit)(ErlDrvData drv_data, ErlDrvMonitor *monitor);
+ void (*stop_select)(ErlDrvEvent event, void*); /* Might be NULL */
+};
+
+extern erts_driver_t *driver_list;
+extern erts_smp_mtx_t erts_driver_list_lock;
+
+extern void erts_ddll_init(void);
+extern void erts_ddll_lock_driver(DE_Handle *dh, char *name);
+
+/* These are for bookkeeping */
+extern void erts_ddll_increment_port_count(DE_Handle *dh);
+extern void erts_ddll_decrement_port_count(DE_Handle *dh);
+
+/* These make things happen; drivers may be scheduled for unload etc. */
+extern void erts_ddll_reference_driver(DE_Handle *dh);
+extern void erts_ddll_reference_referenced_driver(DE_Handle *dh);
+extern void erts_ddll_dereference_driver(DE_Handle *dh);
+
+extern char *erts_ddll_error(int code);
+extern void erts_ddll_proc_dead(Process *p, ErtsProcLocks plocks);
+extern int erts_ddll_driver_ok(DE_Handle *dh);
+extern void erts_ddll_remove_monitor(Process *p,
+ Eterm ref,
+ ErtsProcLocks plocks);
+extern Eterm erts_ddll_monitor_driver(Process *p,
+ Eterm description,
+ ErtsProcLocks plocks);
+/*
+ * Max no. of drivers (linked in and dynamically loaded). Each table
+ * entry uses 4 bytes.
+ */
+#define DRIVER_TAB_SIZE 32
+
+/*
+** Just like the driver binary but with initial flags
+** Note that the two structures Binary and ErlDrvBinary HAVE to
+** be equal except for extra fields in the beginning of the struct.
+** ErlDrvBinary is defined in erl_driver.h.
+** When driver_alloc_binary is called, a Binary is allocated, but
+** the pointer returned is to the address of the first element that
+** also occurs in the ErlDrvBinary struct (driver.*binary takes care of this).
+** The driver need never know about additions to the internal Binary of the
+** emulator. One should however NEVER be sloppy when mixing ErlDrvBinary
+** and Binary; the macros below convert one type to the other, as the two
+** layouts are in reality equal.
+*/
+typedef struct binary {
+ Uint flags;
+ erts_refc_t refc;
+#ifdef ARCH_32
+ Uint32 align__; /* *DO NOT USE* only for alignment. */
+#endif
+ /* Add fields BEFORE this, otherwise the drivers crash */
+ long orig_size;
+ char orig_bytes[1]; /* to be continued */
+} Binary;
+
+/*
+ * 'Binary' alignment:
+ * Address of orig_bytes[0] of a Binary should always be 8-byte aligned.
+ * It is assumed that the flags, refc, and orig_size fields are 4 bytes on
+ * 32-bits architectures and 8 bytes on 64-bits architectures.
+ */
+
+/*
+ * "magic" binary.
+ */
+typedef struct {
+ void (*destructor)(Binary *);
+ char magic_bin_data[1];
+} ErtsBinaryMagicPart;
+
+#define ERTS_MAGIC_BIN_DESTRUCTOR(BP) \
+ (((ErtsBinaryMagicPart *) (BP)->orig_bytes)->destructor)
+#define ERTS_MAGIC_BIN_DATA(BP) \
+ ((void *) (((ErtsBinaryMagicPart *) (BP)->orig_bytes)->magic_bin_data))
+#define ERTS_MAGIC_BIN_DATA_SIZE(BP) \
+ ((BP)->orig_size - (sizeof(ErtsBinaryMagicPart) - 1))
+
+#define Binary2ErlDrvBinary(B) ((ErlDrvBinary *) (&((B)->orig_size)))
+#define ErlDrvBinary2Binary(D) ((Binary *) \
+ (((char *) (D)) - \
+ ((char *) &(((Binary *) 0)->orig_size))))
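+/* The conversions above are container_of-style pointer adjustments:
+   ErlDrvBinary coincides with the tail of Binary starting at orig_size,
+   so converting is just adding or subtracting that field's offset. */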
+
+/* A "magic" binary flag */
+#define BIN_FLAG_MAGIC 1
+#define BIN_FLAG_USR1       2 /* Reserved for use by different modules to mark */
+#define BIN_FLAG_USR2 4 /* certain binaries as special (used by ets) */
+#define BIN_FLAG_DRV 8
+
+/*
+ * This structure represents one type of a binary in a process.
+ */
+
+typedef struct proc_bin {
+ Eterm thing_word; /* Subtag REFC_BINARY_SUBTAG. */
+ Uint size; /* Binary size in bytes. */
+ struct proc_bin *next; /* Pointer to next ProcBin. */
+ Binary *val; /* Pointer to Binary structure. */
+ byte *bytes; /* Pointer to the actual data bytes. */
+ Uint flags; /* Flag word. */
+} ProcBin;
+
+#define PB_IS_WRITABLE 1 /* Writable (only one reference to ProcBin) */
+#define PB_ACTIVE_WRITER 2 /* There is an active writer */
+
+/*
+ * ProcBin size in Eterm words.
+ */
+#define PROC_BIN_SIZE (sizeof(ProcBin)/sizeof(Eterm))
+
+ERTS_GLB_INLINE Eterm erts_mk_magic_binary_term(Eterm **hpp,
+ ErlOffHeap *ohp,
+ Binary *mbp);
+
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+
+ERTS_GLB_INLINE Eterm
+erts_mk_magic_binary_term(Eterm **hpp, ErlOffHeap *ohp, Binary *mbp)
+{
+ ProcBin *pb = (ProcBin *) *hpp;
+ *hpp += PROC_BIN_SIZE;
+
+ ASSERT(mbp->flags & BIN_FLAG_MAGIC);
+
+ pb->thing_word = HEADER_PROC_BIN;
+ pb->size = 0;
+ pb->next = ohp->mso;
+ ohp->mso = pb;
+ pb->val = mbp;
+ pb->bytes = (byte *) mbp->orig_bytes;
+ pb->flags = 0;
+
+ erts_refc_inc(&mbp->refc, 1);
+
+ return make_binary(pb);
+}
+
+#endif
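+
+/*
+ * A minimal usage sketch (assuming a magic Binary 'mbp' obtained
+ * elsewhere, e.g. via erts_create_magic_binary() in erl_binary.h;
+ * 'p' is the current process):
+ *
+ *   Eterm *hp = HAlloc(p, PROC_BIN_SIZE);
+ *   Eterm term = erts_mk_magic_binary_term(&hp, &MSO(p), mbp);
+ *
+ * The resulting term holds its own reference to mbp; the destructor
+ * is called when the last reference is released.
+ */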
+
+#define ERTS_TERM_IS_MAGIC_BINARY(T) \
+ (is_binary((T)) \
+ && (thing_subtag(*binary_val((T))) == REFC_BINARY_SUBTAG) \
+ && (((ProcBin *) binary_val((T)))->val->flags & BIN_FLAG_MAGIC))
+
+/* arrays that get malloced at startup */
+extern Port* erts_port;
+extern erts_smp_atomic_t erts_ports_alive;
+
+extern Uint erts_max_ports;
+extern Uint erts_port_tab_index_mask;
+extern erts_smp_atomic_t erts_ports_snapshot;
+extern erts_smp_atomic_t erts_dead_ports_ptr;
+
+ERTS_GLB_INLINE void erts_may_save_closed_port(Port *prt);
+
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+
+ERTS_GLB_INLINE void erts_may_save_closed_port(Port *prt)
+{
+ ERTS_SMP_LC_ASSERT(erts_smp_lc_spinlock_is_locked(&prt->state_lck));
+ if (prt->snapshot != erts_smp_atomic_read(&erts_ports_snapshot)) {
+ /* Dead ports are added from the end of the snapshot buffer */
+ Eterm* tombstone = (Eterm*) erts_smp_atomic_addtest(&erts_dead_ports_ptr,
+ -(long)sizeof(Eterm));
+ ASSERT(tombstone+1 != NULL);
+ ASSERT(prt->snapshot == (Uint32) erts_smp_atomic_read(&erts_ports_snapshot) - 1);
+ *tombstone = prt->id;
+ }
+    /* else no ongoing snapshot, or port was already included or created after snapshot */
+}
+
+#endif
+
+/* controls warning mapping in error_logger */
+
+extern Eterm node_cookie;
+extern erts_smp_atomic_t erts_bytes_out; /* no bytes written out */
+extern erts_smp_atomic_t erts_bytes_in; /* no bytes sent into the system */
+extern Uint display_items; /* no of items to display in traces etc */
+extern Uint display_loads; /* print info about loaded modules */
+
+extern int erts_backtrace_depth;
+extern erts_smp_atomic_t erts_max_gen_gcs;
+
+extern int erts_disable_tolerant_timeofday;
+
+#ifdef HYBRID
+
+/* Message Area heap pointers */
+extern Eterm *global_heap; /* Heap start */
+extern Eterm *global_hend; /* Heap end */
+extern Eterm *global_htop; /* Heap top (heap pointer) */
+extern Eterm *global_saved_htop; /* Saved heap top (heap pointer) */
+extern Uint global_heap_sz; /* Heap size, in words */
+extern Eterm *global_old_heap; /* Old generation */
+extern Eterm *global_old_hend;
+extern ErlOffHeap erts_global_offheap; /* Global MSO (OffHeap) list */
+
+extern Uint16 global_gen_gcs;
+extern Uint16 global_max_gen_gcs;
+extern Uint global_gc_flags;
+
+#ifdef INCREMENTAL
+#define ACTIVATE(p)
+#define DEACTIVATE(p)
+#define IS_ACTIVE(p) 1
+
+#define INC_ACTIVATE(p) do { \
+ if ((p)->active) { \
+ if ((p)->active_next != NULL) { \
+ (p)->active_next->active_prev = (p)->active_prev; \
+ if ((p)->active_prev) { \
+ (p)->active_prev->active_next = (p)->active_next; \
+ } else { \
+ inc_active_proc = (p)->active_next; \
+ } \
+ inc_active_last->active_next = (p); \
+ (p)->active_next = NULL; \
+ (p)->active_prev = inc_active_last; \
+ inc_active_last = (p); \
+ } \
+ } else { \
+ (p)->active_next = NULL; \
+ (p)->active_prev = inc_active_last; \
+ if (inc_active_last) { \
+ inc_active_last->active_next = (p); \
+ } else { \
+ inc_active_proc = (p); \
+ } \
+ inc_active_last = (p); \
+ (p)->active = 1; \
+ } \
+} while(0);
+
+#define INC_DEACTIVATE(p) do { \
+ ASSERT((p)->active == 1); \
+ if ((p)->active_next == NULL) { \
+ inc_active_last = (p)->active_prev; \
+ } else { \
+ (p)->active_next->active_prev = (p)->active_prev; \
+ } \
+ if ((p)->active_prev == NULL) { \
+ inc_active_proc = (p)->active_next; \
+ } else { \
+ (p)->active_prev->active_next = (p)->active_next; \
+ } \
+ (p)->active = 0; \
+} while(0);
+
+#define INC_IS_ACTIVE(p) ((p)->active != 0)
+
+#else
+extern Eterm *global_old_htop;
+extern Eterm *global_high_water;
+#define ACTIVATE(p) (p)->active = 1;
+#define DEACTIVATE(p) (p)->active = 0;
+#define IS_ACTIVE(p) ((p)->active != 0)
+#define INC_ACTIVATE(p)
+#define INC_IS_ACTIVE(p) 1
+#endif /* INCREMENTAL */
+
+#else
+# define ACTIVATE(p)
+# define DEACTIVATE(p)
+# define IS_ACTIVE(p) 1
+# define INC_ACTIVATE(p)
+#endif /* HYBRID */
+
+#ifdef HYBRID
+extern Uint global_heap_min_sz;
+#endif
+
+extern int bif_reductions; /* reductions + fcalls (when doing call_bif) */
+extern int stackdump_on_exit;
+
+/*
+ * Here is an implementation of a lightweight stack.
+ *
+ * Use it like this:
+ *
+ * DECLARE_ESTACK(Stack) (At the start of a block)
+ * ...
+ * ESTACK_PUSH(Stack, Term)
+ * ...
+ * if (ESTACK_ISEMPTY(Stack)) {
+ * Stack is empty
+ * } else {
+ * Term = ESTACK_POP(Stack);
+ * Process popped Term here
+ * }
+ * ...
+ * DESTROY_ESTACK(Stack)
+ */
+
+
+void erl_grow_stack(Eterm** start, Eterm** sp, Eterm** end);
+#define ESTK_CONCAT(a,b) a##b
+#define ESTK_SUBSCRIPT(s,i) *((Eterm *)((byte *)ESTK_CONCAT(s,_start) + (i)))
+#define DEF_ESTACK_SIZE (16)
+
+#define DECLARE_ESTACK(s) \
+ Eterm ESTK_CONCAT(s,_default_stack)[DEF_ESTACK_SIZE]; \
+ Eterm* ESTK_CONCAT(s,_start) = ESTK_CONCAT(s,_default_stack); \
+ Eterm* ESTK_CONCAT(s,_sp) = ESTK_CONCAT(s,_start); \
+ Eterm* ESTK_CONCAT(s,_end) = ESTK_CONCAT(s,_start) + DEF_ESTACK_SIZE
+
+#define DESTROY_ESTACK(s) \
+do { \
+ if (ESTK_CONCAT(s,_start) != ESTK_CONCAT(s,_default_stack)) { \
+ erts_free(ERTS_ALC_T_ESTACK, ESTK_CONCAT(s,_start)); \
+ } \
+} while(0)
+
+#define ESTACK_PUSH(s, x) \
+do { \
+ if (ESTK_CONCAT(s,_sp) == ESTK_CONCAT(s,_end)) { \
+ erl_grow_stack(&ESTK_CONCAT(s,_start), &ESTK_CONCAT(s,_sp), \
+ &ESTK_CONCAT(s,_end)); \
+ } \
+ *ESTK_CONCAT(s,_sp)++ = (x); \
+} while(0)
+
+#define ESTACK_PUSH2(s, x, y) \
+do { \
+ if (ESTK_CONCAT(s,_sp) > ESTK_CONCAT(s,_end) - 2) { \
+ erl_grow_stack(&ESTK_CONCAT(s,_start), &ESTK_CONCAT(s,_sp), \
+ &ESTK_CONCAT(s,_end)); \
+ } \
+ *ESTK_CONCAT(s,_sp)++ = (x); \
+ *ESTK_CONCAT(s,_sp)++ = (y); \
+} while(0)
+
+#define ESTACK_PUSH3(s, x, y, z) \
+do { \
+ if (ESTK_CONCAT(s,_sp) > ESTK_CONCAT(s,_end) - 3) { \
+ erl_grow_stack(&ESTK_CONCAT(s,_start), &ESTK_CONCAT(s,_sp), \
+ &ESTK_CONCAT(s,_end)); \
+ } \
+ *ESTK_CONCAT(s,_sp)++ = (x); \
+ *ESTK_CONCAT(s,_sp)++ = (y); \
+ *ESTK_CONCAT(s,_sp)++ = (z); \
+} while(0)
+
+#define ESTACK_COUNT(s) (ESTK_CONCAT(s,_sp) - ESTK_CONCAT(s,_start))
+
+#define ESTACK_ISEMPTY(s) (ESTK_CONCAT(s,_sp) == ESTK_CONCAT(s,_start))
+#define ESTACK_POP(s) (*(--ESTK_CONCAT(s,_sp)))
+
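+/* A minimal usage sketch (hypothetical helper, compiled out; is_tuple(),
+ * tuple_val() and arityval() are the ordinary term macros): count the
+ * non-tuple leaves of a tree of nested tuples with the macros above.
+ */
+#if 0
+static Uint count_leaves(Eterm root)
+{
+    Uint count = 0;
+    DECLARE_ESTACK(s);
+
+    ESTACK_PUSH(s, root);
+    while (!ESTACK_ISEMPTY(s)) {
+	Eterm obj = ESTACK_POP(s);
+	if (is_tuple(obj)) {
+	    Eterm* tp = tuple_val(obj);
+	    Uint arity = arityval(*tp);
+	    Uint i;
+	    for (i = 1; i <= arity; i++)
+		ESTACK_PUSH(s, tp[i]);	/* may grow the stack on demand */
+	} else {
+	    count++;			/* a leaf term */
+	}
+    }
+    DESTROY_ESTACK(s);
+    return count;
+}
+#endif
+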
+
+/* port status flags */
+
+#define ERTS_PORT_SFLG_CONNECTED ((Uint32) (1 << 0))
+/* Port have begun exiting */
+#define ERTS_PORT_SFLG_EXITING ((Uint32) (1 << 1))
+/* Distribution port */
+#define ERTS_PORT_SFLG_DISTRIBUTION ((Uint32) (1 << 2))
+#define ERTS_PORT_SFLG_BINARY_IO ((Uint32) (1 << 3))
+#define ERTS_PORT_SFLG_SOFT_EOF ((Uint32) (1 << 4))
+/* Flow control */
+#define ERTS_PORT_SFLG_PORT_BUSY ((Uint32) (1 << 5))
+/* Port is closing (no i/o accepted) */
+#define ERTS_PORT_SFLG_CLOSING ((Uint32) (1 << 6))
+/* Send a closed message when terminating */
+#define ERTS_PORT_SFLG_SEND_CLOSED ((Uint32) (1 << 7))
+/* Line oriented I/O on port */
+#define ERTS_PORT_SFLG_LINEBUF_IO ((Uint32) (1 << 8))
+/* Immortal port (only certain system ports) */
+#define ERTS_PORT_SFLG_IMMORTAL ((Uint32) (1 << 9))
+#define ERTS_PORT_SFLG_FREE ((Uint32) (1 << 10))
+#define ERTS_PORT_SFLG_FREE_SCHEDULED ((Uint32) (1 << 11))
+#define ERTS_PORT_SFLG_INITIALIZING ((Uint32) (1 << 12))
+/* Port uses port specific locking (opposed to driver specific locking) */
+#define ERTS_PORT_SFLG_PORT_SPECIFIC_LOCK ((Uint32) (1 << 13))
+#define ERTS_PORT_SFLG_INVALID ((Uint32) (1 << 14))
+#ifdef DEBUG
+/* Only debug: make sure all flags aren't cleared unintentionally */
+#define ERTS_PORT_SFLG_PORT_DEBUG ((Uint32) (1 << 31))
+#endif
+
+/* Combinations of port status flags */
+#define ERTS_PORT_SFLGS_DEAD \
+ (ERTS_PORT_SFLG_FREE \
+ | ERTS_PORT_SFLG_FREE_SCHEDULED \
+ | ERTS_PORT_SFLG_INITIALIZING)
+#define ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP \
+ (ERTS_PORT_SFLGS_DEAD | ERTS_PORT_SFLG_INVALID)
+#define ERTS_PORT_SFLGS_INVALID_LOOKUP \
+ (ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP \
+ | ERTS_PORT_SFLG_CLOSING)
+#define ERTS_PORT_SFLGS_INVALID_TRACER_LOOKUP \
+ (ERTS_PORT_SFLGS_INVALID_LOOKUP \
+ | ERTS_PORT_SFLG_PORT_BUSY \
+ | ERTS_PORT_SFLG_DISTRIBUTION)
+
+/* binary.c */
+
+void erts_emasculate_writable_binary(ProcBin* pb);
+Eterm erts_new_heap_binary(Process *p, byte *buf, int len, byte** datap);
+Eterm erts_new_mso_binary(Process*, byte*, int);
+Eterm new_binary(Process*, byte*, int);
+Eterm erts_realloc_binary(Eterm bin, size_t size);
+void erts_cleanup_mso(ProcBin* pb);
+
+/* erl_bif_info.c */
+
+void erts_bif_info_init(void);
+
+/* bif.c */
+Eterm erts_make_ref(Process *);
+Eterm erts_make_ref_in_buffer(Eterm buffer[REF_THING_SIZE]);
+void erts_queue_monitor_message(Process *,
+ ErtsProcLocks*,
+ Eterm,
+ Eterm,
+ Eterm,
+ Eterm);
+void erts_init_bif(void);
+
+/* erl_bif_port.c */
+
+/* erl_bif_trace.c */
+void erts_system_monitor_clear(Process *c_p);
+void erts_system_profile_clear(Process *c_p);
+
+/* beam_load.c */
+int erts_load_module(Process *c_p, ErtsProcLocks c_p_locks,
+ Eterm group_leader, Eterm* mod, byte* code, int size);
+void init_load(void);
+Eterm* find_function_from_pc(Eterm* pc);
+Eterm erts_module_info_0(Process* p, Eterm module);
+Eterm erts_module_info_1(Process* p, Eterm module, Eterm what);
+Eterm erts_make_stub_module(Process* p, Eterm Mod, Eterm Beam, Eterm Info);
+
+/* break.c */
+void init_break_handler(void);
+void erts_set_ignore_break(void);
+void erts_replace_intr(void);
+void process_info(int, void *);
+void print_process_info(int, void *, Process*);
+void info(int, void *);
+void loaded(int, void *);
+
+/* config.c */
+
+__decl_noreturn void __noreturn erl_exit(int n, char*, ...);
+__decl_noreturn void __noreturn erl_exit0(char *, int, int n, char*, ...);
+void erl_error(char*, va_list);
+
+#define ERL_EXIT0(n,f) erl_exit0(__FILE__, __LINE__, n, f)
+#define ERL_EXIT1(n,f,a) erl_exit0(__FILE__, __LINE__, n, f, a)
+#define ERL_EXIT2(n,f,a,b) erl_exit0(__FILE__, __LINE__, n, f, a, b)
+#define ERL_EXIT3(n,f,a,b,c) erl_exit0(__FILE__, __LINE__, n, f, a, b, c)
+
+/* copy.c */
+void init_copy(void);
+Eterm copy_object(Eterm, Process*);
+Uint size_object(Eterm);
+Eterm copy_struct(Eterm, Uint, Eterm**, ErlOffHeap*);
+Eterm copy_shallow(Eterm*, Uint, Eterm**, ErlOffHeap*);
+
+#ifdef HYBRID
+#define RRMA_DEFAULT_SIZE 256
+#define RRMA_STORE(p,ptr,src) do { \
+ ASSERT((p)->rrma != NULL); \
+ ASSERT((p)->rrsrc != NULL); \
+ (p)->rrma[(p)->nrr] = (ptr); \
+ (p)->rrsrc[(p)->nrr++] = (src); \
+ if ((p)->nrr == (p)->rrsz) \
+ { \
+ (p)->rrsz *= 2; \
+ (p)->rrma = (Eterm *) erts_realloc(ERTS_ALC_T_ROOTSET, \
+ (void*)(p)->rrma, \
+ sizeof(Eterm) * (p)->rrsz); \
+ (p)->rrsrc = (Eterm **) erts_realloc(ERTS_ALC_T_ROOTSET, \
+ (void*)(p)->rrsrc, \
+ sizeof(Eterm) * (p)->rrsz); \
+ } \
+} while(0)
+
+/* Note that RRMA_REMOVE decreases the given index after deletion.
+ * This is done so that a loop with an increasing index can call
+ * remove without having to decrease the index to see the element
+ * placed in the hole after the deleted element.
+ */
+#define RRMA_REMOVE(p,index) do { \
+ p->rrsrc[index] = p->rrsrc[--p->nrr]; \
+ p->rrma[index--] = p->rrma[p->nrr]; \
+ } while(0);
+
+
+/* The MessageArea STACKs are used while copying messages to the
+ * message area.
+ */
+#define MA_STACK_EXTERNAL_DECLARE(type,_s_) \
+ typedef type ma_##_s_##_type; \
+ extern ma_##_s_##_type *ma_##_s_##_stack; \
+ extern Uint ma_##_s_##_top; \
+ extern Uint ma_##_s_##_size;
+
+#define MA_STACK_DECLARE(_s_) \
+ ma_##_s_##_type *ma_##_s_##_stack; Uint ma_##_s_##_top; Uint ma_##_s_##_size;
+
+#define MA_STACK_ALLOC(_s_) do { \
+ ma_##_s_##_top = 0; \
+ ma_##_s_##_size = 512; \
+ ma_##_s_##_stack = (ma_##_s_##_type*)erts_alloc(ERTS_ALC_T_OBJECT_STACK, \
+ sizeof(ma_##_s_##_type) * ma_##_s_##_size); \
+} while(0)
+
+
+#define MA_STACK_PUSH(_s_,val) do { \
+ ma_##_s_##_stack[ma_##_s_##_top++] = (val); \
+ if (ma_##_s_##_top == ma_##_s_##_size) \
+ { \
+ ma_##_s_##_size *= 2; \
+ ma_##_s_##_stack = \
+ (ma_##_s_##_type*) erts_realloc(ERTS_ALC_T_OBJECT_STACK, \
+ (void*)ma_##_s_##_stack, \
+ sizeof(ma_##_s_##_type) * ma_##_s_##_size); \
+ } \
+} while(0)
+
+#define MA_STACK_POP(_s_) (ma_##_s_##_top != 0 ? ma_##_s_##_stack[--ma_##_s_##_top] : 0)
+#define MA_STACK_TOP(_s_) (ma_##_s_##_stack[ma_##_s_##_top - 1])
+#define MA_STACK_UPDATE(_s_,offset,value) \
+ *(ma_##_s_##_stack[ma_##_s_##_top - 1] + (offset)) = (value)
+#define MA_STACK_SIZE(_s_) (ma_##_s_##_top)
+#define MA_STACK_ELM(_s_,i) ma_##_s_##_stack[i]
+
+MA_STACK_EXTERNAL_DECLARE(Eterm,src);
+MA_STACK_EXTERNAL_DECLARE(Eterm*,dst);
+MA_STACK_EXTERNAL_DECLARE(Uint,offset);
+
+
+#ifdef INCREMENTAL
+extern Eterm *ma_pending_stack;
+extern Uint ma_pending_top;
+extern Uint ma_pending_size;
+
+#define NO_COPY(obj) (IS_CONST(obj) || \
+ (((ptr_val(obj) >= global_heap) && \
+ (ptr_val(obj) < global_htop)) || \
+ ((ptr_val(obj) >= inc_fromspc) && \
+ (ptr_val(obj) < inc_fromend)) || \
+ ((ptr_val(obj) >= global_old_heap) && \
+ (ptr_val(obj) < global_old_hend))))
+
+#else
+
+#define NO_COPY(obj) (IS_CONST(obj) || \
+ (((ptr_val(obj) >= global_heap) && \
+ (ptr_val(obj) < global_htop)) || \
+ ((ptr_val(obj) >= global_old_heap) && \
+ (ptr_val(obj) < global_old_hend))))
+
+#endif /* INCREMENTAL */
+
+#define LAZY_COPY(from,obj) do { \
+ if (!NO_COPY(obj)) { \
+ BM_LAZY_COPY_START; \
+ BM_COUNT(messages_copied); \
+ obj = copy_struct_lazy(from,obj,0); \
+ BM_LAZY_COPY_STOP; \
+ } \
+} while(0)
+
+Eterm copy_struct_lazy(Process*, Eterm, Uint);
+
+#endif /* HYBRID */
+
+/* Utilities */
+extern void erts_delete_nodes_monitors(Process *, ErtsProcLocks);
+extern Eterm erts_monitor_nodes(Process *, Eterm, Eterm);
+extern Eterm erts_processes_monitoring_nodes(Process *);
+extern int erts_do_net_exits(DistEntry*, Eterm);
+extern int distribution_info(int, void *);
+extern int is_node_name_atom(Eterm a);
+
+extern int erts_net_message(Port *, DistEntry *, byte *, int, byte *, int);
+
+extern void init_dist(void);
+extern int stop_dist(void);
+
+void erl_progressf(char* format, ...);
+
+#ifdef MESS_DEBUG
+void print_pass_through(int, byte*, int);
+#endif
+
+/* beam_emu.c */
+int catchlevel(Process*);
+void init_emulator(_VOID_);
+void process_main(void);
+Eterm build_stacktrace(Process* c_p, Eterm exc);
+Eterm expand_error_value(Process* c_p, Uint freason, Eterm Value);
+
+/* erl_init.c */
+
+typedef struct {
+ Eterm delay_time;
+ int context_reds;
+ int input_reds;
+} ErtsModifiedTimings;
+
+extern Export *erts_delay_trap;
+extern int erts_modified_timing_level;
+extern ErtsModifiedTimings erts_modified_timings[];
+#define ERTS_USE_MODIFIED_TIMING() \
+ (erts_modified_timing_level >= 0)
+#define ERTS_MODIFIED_TIMING_DELAY \
+ (erts_modified_timings[erts_modified_timing_level].delay_time)
+#define ERTS_MODIFIED_TIMING_CONTEXT_REDS \
+ (erts_modified_timings[erts_modified_timing_level].context_reds)
+#define ERTS_MODIFIED_TIMING_INPUT_REDS \
+ (erts_modified_timings[erts_modified_timing_level].input_reds)
+
+extern Eterm erts_error_logger_warnings;
+extern int erts_initialized;
+extern int erts_compat_rel;
+extern int erts_use_sender_punish;
+void erts_short_init(void);
+void erl_start(int, char**);
+void erts_usage(void);
+Eterm erts_preloaded(Process* p);
+/* erl_md5.c */
+
+typedef struct {
+ Uint32 state[4]; /* state (ABCD) */
+ Uint32 count[2]; /* number of bits, modulo 2^64 (lsb first) */
+ unsigned char buffer[64]; /* input buffer */
+} MD5_CTX;
+
+void MD5Init(MD5_CTX *);
+void MD5Update(MD5_CTX *, unsigned char *, unsigned int);
+void MD5Final(unsigned char [16], MD5_CTX *);
+
+/* ggc.c */
+
+
+typedef struct {
+ Uint garbage_collections;
+ Uint reclaimed;
+} ErtsGCInfo;
+
+void erts_gc_info(ErtsGCInfo *gcip);
+void erts_init_gc(void);
+int erts_garbage_collect(Process*, int, Eterm*, int);
+void erts_garbage_collect_hibernate(Process* p);
+Eterm erts_gc_after_bif_call(Process* p, Eterm result, Eterm* regs, Uint arity);
+void erts_garbage_collect_literals(Process* p, Eterm* literals, Uint lit_size);
+Uint erts_next_heap_size(Uint, Uint);
+Eterm erts_heap_sizes(Process* p);
+
+void erts_offset_off_heap(ErlOffHeap *, Sint, Eterm*, Eterm*);
+void erts_offset_heap_ptr(Eterm*, Uint, Sint, Eterm*, Eterm*);
+void erts_offset_heap(Eterm*, Uint, Sint, Eterm*, Eterm*);
+
+#ifdef HYBRID
+int erts_global_garbage_collect(Process*, int, Eterm*, int);
+#endif
+
+/* io.c */
+
+struct erl_drv_port_data_lock {
+ erts_mtx_t mtx;
+ erts_atomic_t refc;
+};
+
+typedef struct {
+ char *name;
+ char *driver_name;
+} ErtsPortNames;
+
+#define ERTS_SPAWN_DRIVER 1
+#define ERTS_SPAWN_EXECUTABLE 2
+#define ERTS_SPAWN_ANY (ERTS_SPAWN_DRIVER | ERTS_SPAWN_EXECUTABLE)
+
+int erts_add_driver_entry(ErlDrvEntry *drv, DE_Handle *handle, int driver_list_locked);
+void erts_destroy_driver(erts_driver_t *drv);
+void erts_wake_process_later(Port*, Process*);
+int erts_open_driver(erts_driver_t*, Eterm, char*, SysDriverOpts*, int *);
+int erts_is_port_ioq_empty(Port *);
+void erts_terminate_port(Port *);
+void close_port(Eterm);
+void init_io(void);
+void cleanup_io(void);
+void erts_do_exit_port(Port *, Eterm, Eterm);
+void erts_port_command(Process *, Eterm, Port *, Eterm);
+Eterm erts_port_control(Process*, Port*, Uint, Eterm);
+int erts_write_to_port(Eterm caller_id, Port *p, Eterm list);
+void print_port_info(int, void *, int);
+void erts_raw_port_command(Port*, byte*, Uint);
+void driver_report_exit(int, int);
+LineBuf* allocate_linebuf(int);
+int async_ready(Port *, void*);
+Sint erts_test_next_port(int, Uint);
+ErtsPortNames *erts_get_port_names(Eterm);
+void erts_free_port_names(ErtsPortNames *);
+Uint erts_port_ioq_size(Port *pp);
+void erts_stale_drv_select(Eterm, ErlDrvEvent, int, int);
+void erts_port_cleanup(Port *);
+void erts_fire_port_monitor(Port *prt, Eterm ref);
+#ifdef ERTS_SMP
+void erts_smp_xports_unlock(Port *);
+#endif
+
+#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK)
+int erts_lc_is_port_locked(Port *);
+#endif
+
+ERTS_GLB_INLINE void erts_smp_port_state_lock(Port*);
+ERTS_GLB_INLINE void erts_smp_port_state_unlock(Port*);
+
+ERTS_GLB_INLINE int erts_smp_port_trylock(Port *prt);
+ERTS_GLB_INLINE void erts_smp_port_lock(Port *prt);
+ERTS_GLB_INLINE void erts_smp_port_unlock(Port *prt);
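+
+/* The port lock is reference counted: the lock operations above take a
+   port reference, and erts_smp_port_unlock() drops it again, cleaning
+   the port up when the count reaches zero. */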
+
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+
+ERTS_GLB_INLINE void
+erts_smp_port_state_lock(Port* prt)
+{
+#ifdef ERTS_SMP
+ erts_smp_spin_lock(&prt->state_lck);
+#endif
+}
+
+ERTS_GLB_INLINE void
+erts_smp_port_state_unlock(Port *prt)
+{
+#ifdef ERTS_SMP
+ erts_smp_spin_unlock(&prt->state_lck);
+#endif
+}
+
+
+ERTS_GLB_INLINE int
+erts_smp_port_trylock(Port *prt)
+{
+#ifdef ERTS_SMP
+ int res;
+
+ ASSERT(erts_smp_atomic_read(&prt->refc) > 0);
+ erts_smp_atomic_inc(&prt->refc);
+ res = erts_smp_mtx_trylock(prt->lock);
+ if (res == EBUSY) {
+ erts_smp_atomic_dec(&prt->refc);
+ }
+
+ return res;
+#else /* !ERTS_SMP */
+ return 0;
+#endif
+}
+
+ERTS_GLB_INLINE void
+erts_smp_port_lock(Port *prt)
+{
+#ifdef ERTS_SMP
+ ASSERT(erts_smp_atomic_read(&prt->refc) > 0);
+ erts_smp_atomic_inc(&prt->refc);
+ erts_smp_mtx_lock(prt->lock);
+#endif
+}
+
+ERTS_GLB_INLINE void
+erts_smp_port_unlock(Port *prt)
+{
+#ifdef ERTS_SMP
+ long refc;
+ refc = erts_smp_atomic_dectest(&prt->refc);
+ ASSERT(refc >= 0);
+ if (refc == 0)
+ erts_port_cleanup(prt);
+ else
+ erts_smp_mtx_unlock(prt->lock);
+#endif
+}
+
+#endif /* #if ERTS_GLB_INLINE_INCL_FUNC_DEF */
+
+
+#define ERTS_INVALID_PORT_OPT(PP, ID, FLGS) \
+ (!(PP) || ((PP)->status & (FLGS)) || (PP)->id != (ID))
+
+/* port lookup */
+
+#define INVALID_PORT(PP, ID) \
+ ERTS_INVALID_PORT_OPT((PP), (ID), ERTS_PORT_SFLGS_INVALID_LOOKUP)
+
+/* Invalidate the trace port if anything is suspicious, for instance
+ * if the port is a distribution port or is busy.
+ */
+#define INVALID_TRACER_PORT(PP, ID) \
+ ERTS_INVALID_PORT_OPT((PP), (ID), ERTS_PORT_SFLGS_INVALID_TRACER_LOOKUP)
+
+#define ERTS_PORT_SCHED_ID(P, ID) \
+  ((Uint) erts_prtsd_set((P), ERTS_PRTSD_SCHED_ID, (void *) (ID)))
+
+#ifdef ERTS_SMP
+Port *erts_de2port(DistEntry *, Process *, ErtsProcLocks);
+#endif
+
+#define erts_id2port(ID, P, PL) \
+ erts_id2port_sflgs((ID), (P), (PL), ERTS_PORT_SFLGS_INVALID_LOOKUP)
+
+ERTS_GLB_INLINE Port*erts_id2port_sflgs(Eterm, Process *, ErtsProcLocks, Uint32);
+ERTS_GLB_INLINE void erts_port_release(Port *);
+ERTS_GLB_INLINE Port*erts_drvport2port(ErlDrvPort);
+ERTS_GLB_INLINE Port*erts_drvportid2port(Eterm);
+ERTS_GLB_INLINE Uint32 erts_portid2status(Eterm id);
+ERTS_GLB_INLINE int erts_is_port_alive(Eterm id);
+ERTS_GLB_INLINE int erts_is_valid_tracer_port(Eterm id);
+ERTS_GLB_INLINE void erts_port_status_bandor_set(Port *, Uint32, Uint32);
+ERTS_GLB_INLINE void erts_port_status_band_set(Port *, Uint32);
+ERTS_GLB_INLINE void erts_port_status_bor_set(Port *, Uint32);
+ERTS_GLB_INLINE void erts_port_status_set(Port *, Uint32);
+ERTS_GLB_INLINE Uint32 erts_port_status_get(Port *);
+
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+
+ERTS_GLB_INLINE Port*
+erts_id2port_sflgs(Eterm id, Process *c_p, ErtsProcLocks c_p_locks, Uint32 sflgs)
+{
+#ifdef ERTS_SMP
+ int no_proc_locks = !c_p || !c_p_locks;
+#endif
+ Port *prt;
+
+ if (is_not_internal_port(id))
+ return NULL;
+
+ prt = &erts_port[internal_port_index(id)];
+
+ erts_smp_port_state_lock(prt);
+ if (ERTS_INVALID_PORT_OPT(prt, id, sflgs)) {
+ erts_smp_port_state_unlock(prt);
+ prt = NULL;
+ }
+#ifdef ERTS_SMP
+ else {
+ erts_smp_atomic_inc(&prt->refc);
+ erts_smp_port_state_unlock(prt);
+
+ if (no_proc_locks)
+ erts_smp_mtx_lock(prt->lock);
+ else if (erts_smp_mtx_trylock(prt->lock) == EBUSY) {
+ /* Unlock process locks, and acquire locks in lock order... */
+ erts_smp_proc_unlock(c_p, c_p_locks);
+ erts_smp_mtx_lock(prt->lock);
+ erts_smp_proc_lock(c_p, c_p_locks);
+ }
+
+	/* The id cannot have changed... */
+	ERTS_SMP_LC_ASSERT(prt->id == id);
+	/* ... but the status may have... */
+ if (prt->status & sflgs) {
+ erts_smp_port_unlock(prt); /* Also decrements refc... */
+ prt = NULL;
+ }
+ }
+#endif
+
+ return prt;
+}
+
+ERTS_GLB_INLINE void
+erts_port_release(Port *prt)
+{
+#ifdef ERTS_SMP
+ erts_smp_port_unlock(prt);
+#else
+ if (prt->status & ERTS_PORT_SFLGS_DEAD)
+ erts_port_cleanup(prt);
+#endif
+}
+
+ERTS_GLB_INLINE Port*
+erts_drvport2port(ErlDrvPort drvport)
+{
+ int ix = (int) drvport;
+ if (ix < 0 || erts_max_ports <= ix)
+ return NULL;
+ if (erts_port[ix].status & ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP)
+ return NULL;
+ ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(&erts_port[ix]));
+ return &erts_port[ix];
+}
+
+ERTS_GLB_INLINE Port*
+erts_drvportid2port(Eterm id)
+{
+ int ix;
+ if (is_not_internal_port(id))
+ return NULL;
+ ix = (int) internal_port_index(id);
+ if (erts_max_ports <= ix)
+ return NULL;
+ if (erts_port[ix].status & ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP)
+ return NULL;
+ if (erts_port[ix].id != id)
+ return NULL;
+ ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(&erts_port[ix]));
+ return &erts_port[ix];
+}
+
+ERTS_GLB_INLINE Uint32
+erts_portid2status(Eterm id)
+{
+ if (is_not_internal_port(id))
+ return ERTS_PORT_SFLG_INVALID;
+ else {
+ Uint32 status;
+ int ix = internal_port_index(id);
+ if (erts_max_ports <= ix)
+ return ERTS_PORT_SFLG_INVALID;
+ erts_smp_port_state_lock(&erts_port[ix]);
+ if (erts_port[ix].id == id)
+ status = erts_port[ix].status;
+ else
+ status = ERTS_PORT_SFLG_INVALID;
+ erts_smp_port_state_unlock(&erts_port[ix]);
+ return status;
+ }
+}
+
+ERTS_GLB_INLINE int
+erts_is_port_alive(Eterm id)
+{
+ return !(erts_portid2status(id) & (ERTS_PORT_SFLG_INVALID
+ | ERTS_PORT_SFLGS_DEAD));
+}
+
+ERTS_GLB_INLINE int
+erts_is_valid_tracer_port(Eterm id)
+{
+ return !(erts_portid2status(id) & ERTS_PORT_SFLGS_INVALID_TRACER_LOOKUP);
+}
+
+ERTS_GLB_INLINE void erts_port_status_bandor_set(Port *prt,
+ Uint32 band_status,
+ Uint32 bor_status)
+{
+ ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
+ erts_smp_port_state_lock(prt);
+ prt->status &= band_status;
+ prt->status |= bor_status;
+ erts_smp_port_state_unlock(prt);
+}
+
+ERTS_GLB_INLINE void erts_port_status_band_set(Port *prt, Uint32 status)
+{
+ ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
+ erts_smp_port_state_lock(prt);
+ prt->status &= status;
+ erts_smp_port_state_unlock(prt);
+}
+
+ERTS_GLB_INLINE void erts_port_status_bor_set(Port *prt, Uint32 status)
+{
+ ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
+ erts_smp_port_state_lock(prt);
+ prt->status |= status;
+ erts_smp_port_state_unlock(prt);
+}
+
+ERTS_GLB_INLINE void erts_port_status_set(Port *prt, Uint32 status)
+{
+ ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
+ erts_smp_port_state_lock(prt);
+ prt->status = status;
+ erts_smp_port_state_unlock(prt);
+}
+
+ERTS_GLB_INLINE Uint32 erts_port_status_get(Port *prt)
+{
+ Uint32 res;
+ erts_smp_port_state_lock(prt);
+ res = prt->status;
+ erts_smp_port_state_unlock(prt);
+ return res;
+}
+#endif /* #if ERTS_GLB_INLINE_INCL_FUNC_DEF */
+
+/* erl_drv_thread.c */
+void erl_drv_thr_init(void);
+
+/* time.c */
+
+ERTS_GLB_INLINE long do_time_read_and_reset(void);
+#ifdef ERTS_TIMER_THREAD
+ERTS_GLB_INLINE int next_time(void);
+ERTS_GLB_INLINE void bump_timer(long);
+#else
+int next_time(void);
+void bump_timer(long);
+extern erts_smp_atomic_t do_time; /* set at clock interrupt */
+ERTS_GLB_INLINE void do_time_add(long);
+#endif
+
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+
+#ifdef ERTS_TIMER_THREAD
+ERTS_GLB_INLINE long do_time_read_and_reset(void) { return 0; }
+ERTS_GLB_INLINE int next_time(void) { return -1; }
+ERTS_GLB_INLINE void bump_timer(long ignore) { }
+#else
+ERTS_GLB_INLINE long do_time_read_and_reset(void)
+{
+ return erts_smp_atomic_xchg(&do_time, 0L);
+}
+ERTS_GLB_INLINE void do_time_add(long elapsed)
+{
+ erts_smp_atomic_add(&do_time, elapsed);
+}
+#endif
+
+#endif /* #if ERTS_GLB_INLINE_INCL_FUNC_DEF */
+
+void init_time(void);
+void erl_set_timer(ErlTimer*, ErlTimeoutProc, ErlCancelProc, void*, Uint);
+void erl_cancel_timer(ErlTimer*);
+Uint time_left(ErlTimer *);
+
+Uint erts_timer_wheel_memory_size(void);
+
+#if (defined(HAVE_GETHRVTIME) || defined(HAVE_CLOCK_GETTIME))
+# ifndef HAVE_ERTS_NOW_CPU
+# define HAVE_ERTS_NOW_CPU
+# ifdef HAVE_GETHRVTIME
+# define erts_start_now_cpu() sys_start_hrvtime()
+# define erts_stop_now_cpu() sys_stop_hrvtime()
+# endif
+# endif
+void erts_get_now_cpu(Uint* megasec, Uint* sec, Uint* microsec);
+#endif
+
+void erts_get_timeval(SysTimeval *tv);
+long erts_get_time(void);
+
+extern SysTimeval erts_first_emu_time;
+
+void erts_get_emu_time(SysTimeval *);
+
+ERTS_GLB_INLINE int erts_cmp_timeval(SysTimeval *t1p, SysTimeval *t2p);
+
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+
+ERTS_GLB_INLINE int
+erts_cmp_timeval(SysTimeval *t1p, SysTimeval *t2p)
+{
+ if (t1p->tv_sec == t2p->tv_sec) {
+ if (t1p->tv_usec < t2p->tv_usec)
+ return -1;
+ else if (t1p->tv_usec > t2p->tv_usec)
+ return 1;
+ return 0;
+ }
+ return t1p->tv_sec < t2p->tv_sec ? -1 : 1;
+}
+
+#endif
+
+#ifdef DEBUG
+void p_slpq(_VOID_);
+#endif
+
+/* utils.c */
+
+void erts_cleanup_offheap(ErlOffHeap *offheap);
+void erts_cleanup_externals(ExternalThing *);
+
+Uint erts_fit_in_bits(Uint);
+int list_length(Eterm);
+Export* erts_find_function(Eterm, Eterm, unsigned int);
+int erts_is_builtin(Eterm, Eterm, int);
+Uint32 make_broken_hash(Eterm);
+Uint32 block_hash(byte *, unsigned, Uint32);
+Uint32 make_hash2(Eterm);
+Uint32 make_hash(Eterm);
+
+
+Eterm erts_bld_atom(Uint **hpp, Uint *szp, char *str);
+Eterm erts_bld_uint(Uint **hpp, Uint *szp, Uint ui);
+Eterm erts_bld_uint64(Uint **hpp, Uint *szp, Uint64 ui64);
+Eterm erts_bld_sint64(Uint **hpp, Uint *szp, Sint64 si64);
+Eterm erts_bld_cons(Uint **hpp, Uint *szp, Eterm car, Eterm cdr);
+Eterm erts_bld_tuple(Uint **hpp, Uint *szp, Uint arity, ...);
+Eterm erts_bld_tuplev(Uint **hpp, Uint *szp, Uint arity, Eterm terms[]);
+Eterm erts_bld_string_n(Uint **hpp, Uint *szp, const char *str, Sint len);
+#define erts_bld_string(hpp,szp,str) erts_bld_string_n(hpp,szp,str,strlen(str))
+Eterm erts_bld_list(Uint **hpp, Uint *szp, Sint length, Eterm terms[]);
+Eterm erts_bld_2tup_list(Uint **hpp, Uint *szp,
+ Sint length, Eterm terms1[], Uint terms2[]);
+Eterm
+erts_bld_atom_uint_2tup_list(Uint **hpp, Uint *szp,
+ Sint length, Eterm atoms[], Uint uints[]);
+Eterm
+erts_bld_atom_2uint_3tup_list(Uint **hpp, Uint *szp, Sint length,
+ Eterm atoms[], Uint uints1[], Uint uints2[]);
+
+Eterm store_external_or_ref_in_proc_(Process *, Eterm);
+Eterm store_external_or_ref_(Uint **, ExternalThing **, Eterm);
+
+#define NC_HEAP_SIZE(NC) \
+ (ASSERT_EXPR(is_node_container((NC))), \
+ IS_CONST((NC)) ? 0 : (thing_arityval(*boxed_val((NC))) + 1))
+#define STORE_NC(Hpp, ETpp, NC) \
+ (ASSERT_EXPR(is_node_container((NC))), \
+ IS_CONST((NC)) ? (NC) : store_external_or_ref_((Hpp), (ETpp), (NC)))
+#define STORE_NC_IN_PROC(Pp, NC) \
+ (ASSERT_EXPR(is_node_container((NC))), \
+ IS_CONST((NC)) ? (NC) : store_external_or_ref_in_proc_((Pp), (NC)))
+
+void erts_init_utils(void);
+void erts_init_utils_mem(void);
+
+erts_dsprintf_buf_t *erts_create_tmp_dsbuf(Uint);
+void erts_destroy_tmp_dsbuf(erts_dsprintf_buf_t *);
+
+int eq(Eterm, Eterm);
+#define EQ(x,y) (((x) == (y)) || (is_not_both_immed((x),(y)) && eq((x),(y))))
+
+Sint cmp(Eterm, Eterm);
+#define cmp_lt(a,b) (cmp((a),(b)) < 0)
+#define cmp_le(a,b) (cmp((a),(b)) <= 0)
+#define cmp_eq(a,b) (cmp((a),(b)) == 0)
+#define cmp_ne(a,b) (cmp((a),(b)) != 0)
+#define cmp_ge(a,b) (cmp((a),(b)) >= 0)
+#define cmp_gt(a,b) (cmp((a),(b)) > 0)
+
+#define CMP_LT(a,b) ((a) != (b) && cmp_lt((a),(b)))
+#define CMP_GE(a,b) ((a) == (b) || cmp_ge((a),(b)))
+#define CMP_EQ(a,b) ((a) == (b) || cmp_eq((a),(b)))
+#define CMP_NE(a,b) ((a) != (b) && cmp_ne((a),(b)))
+
+int term_to_Uint(Eterm term, Uint *up);
+
+#ifdef HAVE_ERTS_NOW_CPU
+extern int erts_cpu_timestamp;
+#endif
+/* erl_bif_chksum.c */
+void erts_init_bif_chksum(void);
+/* erl_bif_re.c */
+void erts_init_bif_re(void);
+Sint erts_re_set_loop_limit(Sint limit);
+/* erl_unicode.c */
+void erts_init_unicode(void);
+Sint erts_unicode_set_loop_limit(Sint limit);
+/* erl_trace.c */
+void erts_init_trace(void);
+void erts_trace_check_exiting(Eterm exiting);
+Eterm erts_set_system_seq_tracer(Process *c_p,
+ ErtsProcLocks c_p_locks,
+ Eterm new);
+Eterm erts_get_system_seq_tracer(void);
+void erts_change_default_tracing(int setflags, Uint *flagsp, Eterm *tracerp);
+void erts_get_default_tracing(Uint *flagsp, Eterm *tracerp);
+void erts_set_system_monitor(Eterm monitor);
+Eterm erts_get_system_monitor(void);
+
+#ifdef ERTS_SMP
+void erts_check_my_tracer_proc(Process *);
+void erts_block_sys_msg_dispatcher(void);
+void erts_release_sys_msg_dispatcher(void);
+void erts_foreach_sys_msg_in_q(void (*func)(Eterm,
+ Eterm,
+ Eterm,
+ ErlHeapFragment *));
+void erts_queue_error_logger_message(Eterm, Eterm, ErlHeapFragment *);
+#endif
+
+void erts_send_sys_msg_proc(Eterm, Eterm, Eterm, ErlHeapFragment *);
+void trace_send(Process*, Eterm, Eterm);
+void trace_receive(Process*, Eterm);
+Uint32 erts_call_trace(Process *p, Eterm mfa[], Binary *match_spec, Eterm* args,
+ int local, Eterm *tracer_pid);
+void erts_trace_return(Process* p, Eterm* fi, Eterm retval, Eterm *tracer_pid);
+void erts_trace_exception(Process* p, Eterm mfa[], Eterm class, Eterm value,
+ Eterm *tracer);
+void erts_trace_return_to(Process *p, Uint *pc);
+void trace_sched(Process*, Eterm);
+void trace_proc(Process*, Process*, Eterm, Eterm);
+void trace_proc_spawn(Process*, Eterm pid, Eterm mod, Eterm func, Eterm args);
+void save_calls(Process *p, Export *);
+void trace_gc(Process *p, Eterm what);
+/* port tracing */
+void trace_virtual_sched(Process*, Eterm);
+void trace_sched_ports(Port *pp, Eterm);
+void trace_sched_ports_where(Port *pp, Eterm, Eterm);
+void trace_port(Port *, Eterm what, Eterm data);
+void trace_port_open(Port *, Eterm calling_pid, Eterm drv_name);
+
+/* system_profile */
+void erts_set_system_profile(Eterm profile);
+Eterm erts_get_system_profile(void);
+void profile_scheduler(Eterm scheduler_id, Eterm);
+void profile_scheduler_q(Eterm scheduler_id, Eterm state, Eterm no_schedulers, Uint Ms, Uint s, Uint us);
+void profile_runnable_proc(Process* p, Eterm status);
+void profile_runnable_port(Port* p, Eterm status);
+void erts_system_profile_setup_active_schedulers(void);
+
+/* system_monitor */
+void monitor_long_gc(Process *p, Uint time);
+void monitor_large_heap(Process *p);
+void monitor_generic(Process *p, Eterm type, Eterm spec);
+Uint erts_trace_flag2bit(Eterm flag);
+int erts_trace_flags(Eterm List,
+ Uint *pMask, Eterm *pTracer, int *pCpuTimestamp);
+Eterm erts_bif_trace(int bif_index, Process* p,
+ Eterm arg1, Eterm arg2, Eterm arg3, Uint *I);
+
+#ifdef ERTS_SMP
+void erts_send_pending_trace_msgs(ErtsSchedulerData *esdp);
+#define ERTS_SMP_CHK_PEND_TRACE_MSGS(ESDP) \
+do { \
+ if ((ESDP)->pending_trace_msgs) \
+ erts_send_pending_trace_msgs((ESDP)); \
+} while (0)
+#else
+#define ERTS_SMP_CHK_PEND_TRACE_MSGS(ESDP)
+#endif
+
+void bin_write(int, void*, byte*, int);
+int intlist_to_buf(Eterm, char*, int); /* most callers pass plain char*'s */
+
+struct Sint_buf {
+#ifdef ARCH_64
+ char s[22];
+#else
+ char s[12];
+#endif
+};
+char* Sint_to_buf(Sint, struct Sint_buf*);
+
+Eterm buf_to_intlist(Eterm**, char*, int, Eterm); /* most callers pass plain char*'s */
+int io_list_to_buf(Eterm, char*, int);
+int io_list_to_buf2(Eterm, char*, int);
+int io_list_len(Eterm);
+int is_string(Eterm);
+void erl_at_exit(FUNCTION(void,(*),(void*)), void*);
+Eterm collect_memory(Process *);
+void dump_memory_to_fd(int);
+int dump_memory_data(const char *);
+
+Eterm erts_mixed_plus(Process* p, Eterm arg1, Eterm arg2);
+Eterm erts_mixed_minus(Process* p, Eterm arg1, Eterm arg2);
+Eterm erts_mixed_times(Process* p, Eterm arg1, Eterm arg2);
+Eterm erts_mixed_div(Process* p, Eterm arg1, Eterm arg2);
+Eterm erts_int_div(Process* p, Eterm arg1, Eterm arg2);
+Eterm erts_int_rem(Process* p, Eterm arg1, Eterm arg2);
+Eterm erts_band(Process* p, Eterm arg1, Eterm arg2);
+Eterm erts_bor(Process* p, Eterm arg1, Eterm arg2);
+Eterm erts_bxor(Process* p, Eterm arg1, Eterm arg2);
+Eterm erts_bnot(Process* p, Eterm arg);
+
+Eterm erts_gc_mixed_plus(Process* p, Eterm* reg, Uint live);
+Eterm erts_gc_mixed_minus(Process* p, Eterm* reg, Uint live);
+Eterm erts_gc_mixed_times(Process* p, Eterm* reg, Uint live);
+Eterm erts_gc_mixed_div(Process* p, Eterm* reg, Uint live);
+Eterm erts_gc_int_div(Process* p, Eterm* reg, Uint live);
+Eterm erts_gc_int_rem(Process* p, Eterm* reg, Uint live);
+Eterm erts_gc_band(Process* p, Eterm* reg, Uint live);
+Eterm erts_gc_bor(Process* p, Eterm* reg, Uint live);
+Eterm erts_gc_bxor(Process* p, Eterm* reg, Uint live);
+Eterm erts_gc_bnot(Process* p, Eterm* reg, Uint live);
+
+Eterm erts_gc_length_1(Process* p, Eterm* reg, Uint live);
+Eterm erts_gc_size_1(Process* p, Eterm* reg, Uint live);
+Eterm erts_gc_bit_size_1(Process* p, Eterm* reg, Uint live);
+Eterm erts_gc_byte_size_1(Process* p, Eterm* reg, Uint live);
+Eterm erts_gc_abs_1(Process* p, Eterm* reg, Uint live);
+Eterm erts_gc_float_1(Process* p, Eterm* reg, Uint live);
+Eterm erts_gc_round_1(Process* p, Eterm* reg, Uint live);
+Eterm erts_gc_trunc_1(Process* p, Eterm* reg, Uint live);
+
+Uint erts_current_reductions(Process* current, Process *p);
+
+int erts_print_system_version(int to, void *arg, Process *c_p);
+
+/*
+ * Interface to erl_init
+ */
+void erl_init(void);
+void erts_first_process(Eterm modname, void* code, unsigned size, int argc, char** argv);
+
+#define seq_trace_output(token, msg, type, receiver, process) \
+seq_trace_output_generic((token), (msg), (type), (receiver), (process), NIL)
+#define seq_trace_output_exit(token, msg, type, receiver, exitfrom) \
+seq_trace_output_generic((token), (msg), (type), (receiver), NULL, (exitfrom))
+void seq_trace_output_generic(Eterm token, Eterm msg, Uint type,
+ Eterm receiver, Process *process, Eterm exitfrom);
+
+int seq_trace_update_send(Process *process);
+
+Eterm erts_seq_trace(Process *process,
+ Eterm atom_type, Eterm atom_true_or_false,
+ int build_result);
+
+struct trace_pattern_flags {
+ unsigned int breakpoint : 1; /* Set if any other is set */
+ unsigned int local : 1; /* Local call trace breakpoint */
+ unsigned int meta : 1; /* Metadata trace breakpoint */
+ unsigned int call_count : 1; /* Fast call count breakpoint */
+};
+extern const struct trace_pattern_flags erts_trace_pattern_flags_off;
+int erts_set_trace_pattern(Eterm* mfa, int specified,
+ Binary* match_prog_set, Binary *meta_match_prog_set,
+ int on, struct trace_pattern_flags,
+ Eterm meta_tracer_pid);
+void
+erts_get_default_trace_pattern(int *trace_pattern_is_on,
+ Binary **match_spec,
+ Binary **meta_match_spec,
+ struct trace_pattern_flags *trace_pattern_flags,
+ Eterm *meta_tracer_pid);
+void erts_bif_trace_init(void);
+
+/*
+** Call_trace uses this API for the parameter matching functions
+*/
+ struct erl_heap_fragment* saved_program_buf;
+
+#define MatchSetRef(MPSP) \
+do { \
+ if ((MPSP) != NULL) { \
+ erts_refc_inc(&(MPSP)->refc, 1); \
+ } \
+} while (0)
+
+#define MatchSetUnref(MPSP) \
+do { \
+ if (((MPSP) != NULL) && erts_refc_dectest(&(MPSP)->refc, 0) <= 0) { \
+ erts_bin_free(MPSP); \
+ } \
+} while(0)
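+/*
+ * Usage sketch (illustrative): take an extra reference before
+ * sharing a match program and drop it when done; the binary is
+ * freed when the last reference is dropped:
+ *
+ *     MatchSetRef(mpsp);
+ *     ... hand mpsp over to another owner ...
+ *     MatchSetUnref(mpsp);
+ */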
+
+#define MatchSetGetSource(MPSP) erts_match_set_get_source(MPSP)
+
+extern Binary *erts_match_set_compile(Process *p, Eterm matchexpr);
+Eterm erts_match_set_lint(Process *p, Eterm matchexpr);
+extern void erts_match_set_release_result(Process* p);
+extern Eterm erts_match_set_run(Process *p, Binary *mpsp,
+ Eterm *args, int num_args,
+ Uint32 *return_flags);
+extern Eterm erts_match_set_get_source(Binary *mpsp);
+extern void erts_match_prog_foreach_offheap(Binary *b,
+ void (*)(ErlOffHeap *, void *),
+ void *);
+
+#define MATCH_SET_RETURN_TRACE 0x1 /* return trace requested */
+#define MATCH_SET_RETURN_TO_TRACE 0x2 /* Misleading name; it is not actually
+ set by the match program, but by the
+ breakpoint functions */
+#define MATCH_SET_EXCEPTION_TRACE 0x4 /* exception trace requested */
+#define MATCH_SET_RX_TRACE (MATCH_SET_RETURN_TRACE|MATCH_SET_EXCEPTION_TRACE)
+/*
+ * Flag values when tracing BIFs
+ */
+#define BIF_TRACE_AS_LOCAL 0x1
+#define BIF_TRACE_AS_GLOBAL 0x2
+#define BIF_TRACE_AS_META 0x4
+
+extern erts_driver_t vanilla_driver;
+extern erts_driver_t spawn_driver;
+extern erts_driver_t fd_driver;
+
+/* Should maybe be placed in erl_message.h, but then we get an include mess. */
+
+ERTS_GLB_INLINE Eterm *
+erts_alloc_message_heap(Uint size,
+ ErlHeapFragment **bpp,
+ ErlOffHeap **ohpp,
+ Process *receiver,
+ ErtsProcLocks *receiver_locks);
+
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+
+/*
+ * NOTE: erts_alloc_message_heap() releases the message queue and
+ * status locks on the receiver without ensuring that other locks
+ * are held. The caller is responsible for ensuring that the
+ * receiver pointer cannot become invalid until after the message
+ * has been passed. This is normally done either by increasing the
+ * reference count on the process (preferred) or by holding the
+ * main or link lock over the whole message passing operation.
+ */
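+/*
+ * Minimal usage sketch (illustrative only; the term to copy and the
+ * queueing step are hypothetical):
+ *
+ *     ErlHeapFragment *bp;
+ *     ErlOffHeap *ohp;
+ *     ErtsProcLocks locks = ERTS_PROC_LOCKS_MSG_SEND;
+ *     Eterm *hp = erts_alloc_message_heap(sz, &bp, &ohp, rp, &locks);
+ *     Eterm msg = copy_struct(term, sz, &hp, ohp);
+ *     ... queue msg on rp (passing bp along), then release locks ...
+ */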
+
+ERTS_GLB_INLINE Eterm *
+erts_alloc_message_heap(Uint size,
+ ErlHeapFragment **bpp,
+ ErlOffHeap **ohpp,
+ Process *receiver,
+ ErtsProcLocks *receiver_locks)
+{
+ Eterm *hp;
+#ifdef ERTS_SMP
+ int locked_main = 0;
+ ErtsProcLocks ulocks = *receiver_locks & ERTS_PROC_LOCKS_MSG_SEND;
+#endif
+
+ if (size > (Uint) INT_MAX)
+ erl_exit(ERTS_ABORT_EXIT, "HUGE size (%bpu)\n", size);
+
+ if (
+#if defined(ERTS_SMP)
+ *receiver_locks & ERTS_PROC_LOCK_MAIN
+#else
+ 1
+#endif
+ ) {
+#ifdef ERTS_SMP
+ try_allocate_on_heap:
+#endif
+ if (ERTS_PROC_IS_EXITING(receiver)
+ || HEAP_LIMIT(receiver) - HEAP_TOP(receiver) <= size) {
+#ifdef ERTS_SMP
+ if (locked_main)
+ ulocks |= ERTS_PROC_LOCK_MAIN;
+#endif
+ goto allocate_in_mbuf;
+ }
+#ifdef ERTS_SMP
+ if (ulocks) {
+ erts_smp_proc_unlock(receiver, ulocks);
+ *receiver_locks &= ~ulocks;
+ }
+#endif
+ hp = HEAP_TOP(receiver);
+ HEAP_TOP(receiver) = hp + size;
+ *bpp = NULL;
+ *ohpp = &MSO(receiver);
+ }
+#ifdef ERTS_SMP
+ else if (erts_smp_proc_trylock(receiver, ERTS_PROC_LOCK_MAIN) == 0) {
+ locked_main = 1;
+ *receiver_locks |= ERTS_PROC_LOCK_MAIN;
+ goto try_allocate_on_heap;
+ }
+#endif
+ else {
+ ErlHeapFragment *bp;
+ allocate_in_mbuf:
+#ifdef ERTS_SMP
+ if (ulocks) {
+ *receiver_locks &= ~ulocks;
+ erts_smp_proc_unlock(receiver, ulocks);
+ }
+#endif
+ bp = new_message_buffer(size);
+ hp = bp->mem;
+ *bpp = bp;
+ *ohpp = &bp->off_heap;
+ }
+
+ return hp;
+}
+
+#endif /* #if ERTS_GLB_INLINE_INCL_FUNC_DEF */
+
+#endif
diff --git a/erts/emulator/beam/hash.c b/erts/emulator/beam/hash.c
new file mode 100644
index 0000000000..afaf32f8ce
--- /dev/null
+++ b/erts/emulator/beam/hash.c
@@ -0,0 +1,407 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 1996-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+** General hash functions
+**
+*/
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+
+#include "sys.h"
+#include "erl_vm.h"
+#include "global.h"
+#include "hash.h"
+
+/*
+** List of sizes (all are primes)
+*/
+static const int h_size_table[] = {
+ 2, 5, 11, 23, 47, 97, 197, 397, 797, /* double up to here */
+ 1201, 1597,
+ 2411, 3203,
+ 4813, 6421,
+ 9643, 12853,
+ 19289, 25717,
+ 51437,
+ 102877,
+ 205759,
+ 411527,
+ 823117,
+ 1646237,
+ 3292489,
+ 6584983,
+ 13169977,
+ 26339969,
+ 52679969,
+ -1
+};
+
+/*
+** Get info about hash
+**
+*/
+
+void hash_get_info(HashInfo *hi, Hash *h)
+{
+ int size = h->size;
+ int i;
+ int max_depth = 0;
+ int objects = 0;
+
+ for (i = 0; i < size; i++) {
+ int depth = 0;
+ HashBucket* b = h->bucket[i];
+
+ while (b != (HashBucket*) 0) {
+ objects++;
+ depth++;
+ b = b->next;
+ }
+ if (depth > max_depth)
+ max_depth = depth;
+ }
+
+ hi->name = h->name;
+ hi->size = h->size;
+ hi->used = h->used;
+ hi->objs = objects;
+ hi->depth = max_depth;
+}
+
+/*
+** Display info about hash
+**
+*/
+
+void hash_info(int to, void *arg, Hash* h)
+{
+ HashInfo hi;
+
+ hash_get_info(&hi, h);
+
+ erts_print(to, arg, "=hash_table:%s\n", hi.name);
+ erts_print(to, arg, "size: %d\n", hi.size);
+ erts_print(to, arg, "used: %d\n", hi.used);
+ erts_print(to, arg, "objs: %d\n", hi.objs);
+ erts_print(to, arg, "depth: %d\n", hi.depth);
+}
+
+
+/*
+ * Returns size of table in bytes. Stored objects not included.
+ */
+int
+hash_table_sz(Hash *h)
+{
+ int i;
+ for(i=0;h->name[i];i++); /* i = strlen(h->name) */
+ i++; /* +1 for the terminating NUL */
+ return sizeof(Hash) + h->size*sizeof(HashBucket*) + i;
+}
+
+
+/*
+** Initialize a preallocated or static hash structure
+** and allocate buckets.
+*/
+Hash* hash_init(ErtsAlcType_t type, Hash* h, char* name, int size, HashFunctions fun)
+{
+ int sz;
+ int ix = 0;
+
+ h->type = type;
+
+ while (h_size_table[ix] != -1 && h_size_table[ix] < size)
+ ix++;
+ if (h_size_table[ix] == -1)
+ erl_exit(1, "panic: too large hash table size (%d)\n", size);
+
+ size = h_size_table[ix];
+ sz = size*sizeof(HashBucket*);
+
+ h->bucket = (HashBucket**) erts_alloc(h->type, sz);
+
+ sys_memzero(h->bucket, sz);
+ h->is_allocated = 0;
+ h->name = name;
+ h->fun = fun;
+ h->size = size;
+ h->size20percent = h->size/5;
+ h->size80percent = (4*h->size)/5;
+ h->ix = ix;
+ h->used = 0;
+ return h;
+}
+
+/*
+** Create a new hash table
+*/
+Hash* hash_new(ErtsAlcType_t type, char* name, int size, HashFunctions fun)
+{
+ Hash* h;
+
+ h = erts_alloc(type, sizeof(Hash));
+
+ h = hash_init(type, h, name, size, fun);
+ h->is_allocated = 1;
+ return h;
+}
+
+/*
+** Delete hash table and all objects
+*/
+void hash_delete(Hash* h)
+{
+ int old_size = h->size;
+ int i;
+
+ for (i = 0; i < old_size; i++) {
+ HashBucket* b = h->bucket[i];
+ while (b != (HashBucket*) 0) {
+ HashBucket* b_next = b->next;
+
+ h->fun.free((void*) b);
+ b = b_next;
+ }
+ }
+ erts_free(h->type, h->bucket);
+ if (h->is_allocated)
+ erts_free(h->type, (void*) h);
+}
+
+/*
+** Rehash all objects
+*/
+static void rehash(Hash* h, int grow)
+{
+ int sz;
+ int old_size = h->size;
+ HashBucket** new_bucket;
+ int i;
+
+ if (grow) {
+ if ((h_size_table[h->ix+1]) == -1)
+ return;
+ h->ix++;
+ }
+ else {
+ if (h->ix == 0)
+ return;
+ h->ix--;
+ }
+ h->size = h_size_table[h->ix];
+ h->size20percent = h->size/5;
+ h->size80percent = (4*h->size)/5;
+ sz = h->size*sizeof(HashBucket*);
+
+ new_bucket = (HashBucket **) erts_alloc(h->type, sz);
+ sys_memzero(new_bucket, sz);
+
+ h->used = 0;
+
+ for (i = 0; i < old_size; i++) {
+ HashBucket* b = h->bucket[i];
+ while (b != (HashBucket*) 0) {
+ HashBucket* b_next = b->next;
+ int ix = b->hvalue % h->size;
+ if (new_bucket[ix] == NULL)
+ h->used++;
+ b->next = new_bucket[ix];
+ new_bucket[ix] = b;
+ b = b_next;
+ }
+ }
+ erts_free(h->type, (void *) h->bucket);
+ h->bucket = new_bucket;
+}
+
+/*
+** Find an object in the hash table
+**
+*/
+void* hash_get(Hash* h, void* tmpl)
+{
+ HashValue hval = h->fun.hash(tmpl);
+ int ix = hval % h->size;
+ HashBucket* b = h->bucket[ix];
+
+ while(b != (HashBucket*) 0) {
+ if ((b->hvalue == hval) && (h->fun.cmp(tmpl, (void*)b) == 0))
+ return (void*) b;
+ b = b->next;
+ }
+ return (void*) 0;
+}
+
+/*
+** Find or insert an object in the hash table
+*/
+void* hash_put(Hash* h, void* tmpl)
+{
+ HashValue hval = h->fun.hash(tmpl);
+ int ix = hval % h->size;
+ HashBucket* b = h->bucket[ix];
+
+ while(b != (HashBucket*) 0) {
+ if ((b->hvalue == hval) && (h->fun.cmp(tmpl, (void*)b) == 0))
+ return (void*) b;
+ b = b->next;
+ }
+ b = (HashBucket*) h->fun.alloc(tmpl);
+
+ if (h->bucket[ix] == NULL)
+ h->used++;
+
+ b->hvalue = hval;
+ b->next = h->bucket[ix];
+ h->bucket[ix] = b;
+
+ if (h->used > h->size80percent) /* rehash at 80% */
+ rehash(h, 1);
+ return (void*) b;
+}
+
+static void
+hash_insert_entry(Hash* h, HashBucket* entry)
+{
+ HashValue hval = entry->hvalue;
+ int ix = hval % h->size;
+ HashBucket* b = h->bucket[ix];
+
+ while (b != (HashBucket*) 0) {
+ if ((b->hvalue == hval) && (h->fun.cmp((void*)entry, (void*)b) == 0)) {
+ abort(); /* Should not happen */
+ }
+ b = b->next;
+ }
+
+ if (h->bucket[ix] == NULL)
+ h->used++;
+
+ entry->next = h->bucket[ix];
+ h->bucket[ix] = entry;
+
+ if (h->used > h->size80percent) /* rehash at 80% */
+ rehash(h, 1);
+}
+
+
+/*
+ * Move all entries in src into dst; empty src.
+ * Entries in src must not exist in dst.
+ */
+void
+erts_hash_merge(Hash* src, Hash* dst)
+{
+ int limit = src->size;
+ HashBucket** bucket = src->bucket;
+ int i;
+
+ src->used = 0;
+ for (i = 0; i < limit; i++) {
+ HashBucket* b = bucket[i];
+ HashBucket* next;
+
+ bucket[i] = NULL;
+ while (b) {
+ next = b->next;
+ hash_insert_entry(dst, b);
+ b = next;
+ }
+ }
+}
+
+/*
+** Erase a hash entry; return the template if erased,
+** 0 if not erased.
+*/
+void* hash_erase(Hash* h, void* tmpl)
+{
+ HashValue hval = h->fun.hash(tmpl);
+ int ix = hval % h->size;
+ HashBucket* b = h->bucket[ix];
+ HashBucket* prev = 0;
+
+ while(b != 0) {
+ if ((b->hvalue == hval) && (h->fun.cmp(tmpl, (void*)b) == 0)) {
+ if (prev != 0)
+ prev->next = b->next;
+ else
+ h->bucket[ix] = b->next;
+ h->fun.free((void*)b);
+ if (h->bucket[ix] == NULL)
+ h->used--;
+ if (h->used < h->size20percent) /* rehash at 20% */
+ rehash(h, 0);
+ return tmpl;
+ }
+ prev = b;
+ b = b->next;
+ }
+ return (void*)0;
+}
+
+/*
+** Remove a hash entry from the table; return the entry if removed,
+** NULL if not removed.
+** NOTE: hash_remove() differs from hash_erase() in that
+** it returns the entry (not the template) and does
+** *not* call the free() callback.
+*/
+void *
+hash_remove(Hash *h, void *tmpl)
+{
+ HashValue hval = h->fun.hash(tmpl);
+ int ix = hval % h->size;
+ HashBucket *b = h->bucket[ix];
+ HashBucket *prev = NULL;
+
+ while (b) {
+ if ((b->hvalue == hval) && (h->fun.cmp(tmpl, (void*)b) == 0)) {
+ if (prev)
+ prev->next = b->next;
+ else
+ h->bucket[ix] = b->next;
+ if (h->bucket[ix] == NULL)
+ h->used--;
+ if (h->used < h->size20percent) /* rehash at 20% */
+ rehash(h, 0);
+ return (void *) b;
+ }
+ prev = b;
+ b = b->next;
+ }
+ return NULL;
+}
+
+void hash_foreach(Hash* h, void (*func)(void *, void *), void *func_arg2)
+{
+ int i;
+
+ for (i = 0; i < h->size; i++) {
+ HashBucket* b = h->bucket[i];
+ while(b != (HashBucket*) 0) {
+ (*func)((void *) b, func_arg2);
+ b = b->next;
+ }
+ }
+}
+
diff --git a/erts/emulator/beam/hash.h b/erts/emulator/beam/hash.h
new file mode 100644
index 0000000000..6dd66fc9b3
--- /dev/null
+++ b/erts/emulator/beam/hash.h
@@ -0,0 +1,97 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 1996-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+** General hash functions
+**
+*/
+#ifndef __HASH_H__
+#define __HASH_H__
+
+#ifndef __SYS_H__
+#include "sys.h"
+#endif
+
+#include "erl_alloc.h"
+
+typedef unsigned long HashValue;
+
+typedef int (*HCMP_FUN)(void*, void*);
+typedef HashValue (*H_FUN)(void*);
+typedef void* (*HALLOC_FUN)(void*);
+typedef void (*HFREE_FUN)(void*);
+
+/*
+** This bucket must be placed at the top of
+** every object that uses hashing, so that
+** (Object*) == (Object*) &bucket holds.
+*/
+typedef struct hash_bucket
+{
+ struct hash_bucket* next; /* Next bucket */
+ HashValue hvalue; /* Store hash value for get, rehash */
+} HashBucket;
+
+typedef struct hash_functions
+{
+ H_FUN hash;
+ HCMP_FUN cmp;
+ HALLOC_FUN alloc;
+ HFREE_FUN free;
+} HashFunctions;
+
+typedef struct {
+ char *name;
+ int size;
+ int used;
+ int objs;
+ int depth;
+} HashInfo;
+
+typedef struct hash
+{
+ HashFunctions fun; /* Function block */
+ int is_allocated; /* 0 iff hash structure is on stack or is static */
+ ErtsAlcType_t type;
+ char* name; /* Table name (static string, for debugging) */
+ int size; /* Number of slots */
+ int size20percent; /* 20 percent of number of slots */
+ int size80percent; /* 80 percent of number of slots */
+ int ix; /* Size index in size table */
+ int used; /* Number of slots used */
+ HashBucket** bucket; /* Vector of bucket pointers (objects) */
+} Hash;
+
+Hash* hash_new(ErtsAlcType_t, char*, int, HashFunctions);
+Hash* hash_init(ErtsAlcType_t, Hash*, char*, int, HashFunctions);
+
+void hash_delete(Hash*);
+void hash_get_info(HashInfo*, Hash*);
+void hash_info(int, void *, Hash*);
+int hash_table_sz(Hash *);
+
+void* hash_get(Hash*, void*);
+void* hash_put(Hash*, void*);
+void* hash_erase(Hash*, void*);
+void* hash_remove(Hash*, void*);
+void hash_foreach(Hash*, void (*func)(void *, void *), void *);
+
+void erts_hash_merge(Hash* src, Hash* dst);
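+/*
+ * Usage sketch (illustrative; MyObj and its callbacks are
+ * hypothetical, and the allocator type is chosen arbitrarily).
+ * Note that the HashBucket must be the first member of the stored
+ * object:
+ *
+ *     typedef struct { HashBucket bucket; int key; } MyObj;
+ *
+ *     static HashValue my_hash(void *o) { return ((MyObj *) o)->key; }
+ *     static int my_cmp(void *a, void *b)
+ *     { return ((MyObj *) a)->key != ((MyObj *) b)->key; }
+ *     static void *my_alloc(void *tmpl)
+ *     {
+ *         MyObj *p = erts_alloc(ERTS_ALC_T_UNDEF, sizeof(MyObj));
+ *         *p = *(MyObj *) tmpl;
+ *         return p;
+ *     }
+ *     static void my_free(void *o) { erts_free(ERTS_ALC_T_UNDEF, o); }
+ *
+ *     HashFunctions f = {my_hash, my_cmp, my_alloc, my_free};
+ *     Hash *h = hash_new(ERTS_ALC_T_UNDEF, "my_table", 8, f);
+ *     MyObj tmpl = {{NULL, 0}, 42};
+ *     MyObj *obj = (MyObj *) hash_put(h, &tmpl);
+ */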
+
+#endif
diff --git a/erts/emulator/beam/index.c b/erts/emulator/beam/index.c
new file mode 100644
index 0000000000..a4a3007f93
--- /dev/null
+++ b/erts/emulator/beam/index.c
@@ -0,0 +1,137 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 1996-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+
+#include "sys.h"
+#include "erl_vm.h"
+#include "global.h"
+#include "index.h"
+
+void index_info(int to, void *arg, IndexTable *t)
+{
+ hash_info(to, arg, &t->htable);
+ erts_print(to, arg, "=index_table:%s\n", t->htable.name);
+ erts_print(to, arg, "size: %d\n", t->size);
+ erts_print(to, arg, "limit: %d\n", t->limit);
+ erts_print(to, arg, "entries: %d\n",t->entries);
+}
+
+
+/*
+ * Returns size of table in bytes. Stored objects not included.
+ */
+int
+index_table_sz(IndexTable *t)
+{
+ return (sizeof(IndexTable)
+ - sizeof(Hash)
+ + t->size*sizeof(IndexSlot*)
+ + hash_table_sz(&(t->htable)));
+}
+
+
+/*
+** Initialize a preallocated or static index table;
+** allocate the underlying hash buckets and the index segment table.
+*/
+IndexTable*
+erts_index_init(ErtsAlcType_t type, IndexTable* t, char* name,
+ int size, int limit, HashFunctions fun)
+{
+ Uint base_size = ((limit+INDEX_PAGE_SIZE-1)/INDEX_PAGE_SIZE)*sizeof(IndexSlot*);
+ hash_init(type, &t->htable, name, 3*size/4, fun);
+
+ t->size = 0;
+ t->limit = limit;
+ t->entries = 0;
+ t->type = type;
+ t->seg_table = (IndexSlot***) erts_alloc(type, base_size);
+ return t;
+}
+
+int
+index_put(IndexTable* t, void* tmpl)
+{
+ int ix;
+ IndexSlot* p = (IndexSlot*) hash_put(&t->htable, tmpl);
+
+ if (p->index >= 0) {
+ return p->index;
+ }
+
+ ix = t->entries;
+ if (ix >= t->size) {
+ Uint sz;
+ if (ix >= t->limit) {
+ erl_exit(1, "no more index entries in %s (max=%d)\n",
+ t->htable.name, t->limit);
+ }
+ sz = INDEX_PAGE_SIZE*sizeof(IndexSlot*);
+ t->seg_table[ix>>INDEX_PAGE_SHIFT] = erts_alloc(t->type, sz);
+ t->size += INDEX_PAGE_SIZE;
+ }
+ t->entries++;
+ p->index = ix;
+ t->seg_table[ix>>INDEX_PAGE_SHIFT][ix&INDEX_PAGE_MASK] = p;
+ return ix;
+}
+
+int index_get(IndexTable* t, void* tmpl)
+{
+ IndexSlot* p = (IndexSlot*) hash_get(&t->htable, tmpl);
+
+ if (p != NULL) {
+ return p->index;
+ }
+ return -1;
+}
+
+void erts_index_merge(Hash* src, IndexTable* dst)
+{
+ int limit = src->size;
+ HashBucket** bucket = src->bucket;
+ int i;
+
+ for (i = 0; i < limit; i++) {
+ HashBucket* b = bucket[i];
+ IndexSlot* p;
+ int ix;
+
+ while (b) {
+ Uint sz;
+ ix = dst->entries++;
+ if (ix >= dst->size) {
+ if (ix >= dst->limit) {
+ erl_exit(1, "no more index entries in %s (max=%d)\n",
+ dst->htable.name, dst->limit);
+ }
+ sz = INDEX_PAGE_SIZE*sizeof(IndexSlot*);
+ dst->seg_table[ix>>INDEX_PAGE_SHIFT] = erts_alloc(dst->type, sz);
+ dst->size += INDEX_PAGE_SIZE;
+ }
+ p = (IndexSlot*) b;
+ p->index = ix;
+ dst->seg_table[ix>>INDEX_PAGE_SHIFT][ix&INDEX_PAGE_MASK] = p;
+ b = b->next;
+ }
+ }
+}
diff --git a/erts/emulator/beam/index.h b/erts/emulator/beam/index.h
new file mode 100644
index 0000000000..4eb9b1f992
--- /dev/null
+++ b/erts/emulator/beam/index.h
@@ -0,0 +1,71 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 1996-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+** General hash and index functions.
+** The idea behind this file was to capture the
+** way the Atom, Export, and Module tables were implemented.
+*/
+#ifndef __INDEX_H__
+#define __INDEX_H__
+
+#ifndef __HASH_H__
+#include "hash.h"
+#endif
+
+typedef struct index_slot
+{
+ HashBucket bucket;
+ int index;
+} IndexSlot;
+
+
+typedef struct index_table
+{
+ Hash htable; /* Mapping obj -> index */
+ ErtsAlcType_t type;
+ int size; /* Allocated size */
+ int limit; /* Max size */
+ int entries; /* Number of entries */
+ IndexSlot*** seg_table; /* Mapping index -> obj */
+} IndexTable;
+
+#define INDEX_PAGE_SHIFT 10
+#define INDEX_PAGE_SIZE (1 << INDEX_PAGE_SHIFT)
+#define INDEX_PAGE_MASK ((1 << INDEX_PAGE_SHIFT)-1)
+
+IndexTable *erts_index_init(ErtsAlcType_t,IndexTable*,char*,int,int,HashFunctions);
+void index_info(int, void *, IndexTable*);
+int index_table_sz(IndexTable *);
+
+int index_get(IndexTable*, void*);
+int index_put(IndexTable*, void*);
+void erts_index_merge(Hash*, IndexTable*);
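+/*
+ * Usage sketch (illustrative; MyEntry is hypothetical). Stored
+ * objects must begin with an IndexSlot, and the hash alloc callback
+ * is expected to set slot.index to -1 so that index_put() assigns a
+ * fresh index:
+ *
+ *     MyEntry tmpl = ...;
+ *     int ix = index_put(&table, &tmpl);
+ *     MyEntry *e = (MyEntry *) erts_index_lookup(&table, ix);
+ */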
+
+ERTS_GLB_INLINE IndexSlot* erts_index_lookup(IndexTable*, Uint);
+
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+ERTS_GLB_INLINE IndexSlot*
+erts_index_lookup(IndexTable* t, Uint ix)
+{
+ return t->seg_table[ix>>INDEX_PAGE_SHIFT][ix&INDEX_PAGE_MASK];
+}
+#endif
+
+#endif
diff --git a/erts/emulator/beam/io.c b/erts/emulator/beam/io.c
new file mode 100644
index 0000000000..61985271e6
--- /dev/null
+++ b/erts/emulator/beam/io.c
@@ -0,0 +1,4732 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 1996-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * I/O routines for manipulating ports.
+ */
+
+#define ERL_IO_C__
+
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+
+#include "sys.h"
+
+/* must be included BEFORE global.h (since it includes erl_driver.h) */
+#include "erl_sys_driver.h"
+
+#include "erl_vm.h"
+#include "global.h"
+#include "erl_process.h"
+#include "dist.h"
+#include "big.h"
+#include "erl_binary.h"
+#include "erl_bits.h"
+#include "erl_version.h"
+#include "error.h"
+
+extern ErlDrvEntry fd_driver_entry;
+extern ErlDrvEntry vanilla_driver_entry;
+extern ErlDrvEntry spawn_driver_entry;
+extern ErlDrvEntry *driver_tab[]; /* table of static drivers, only used during initialization */
+
+erts_driver_t *driver_list; /* List of all drivers, static and dynamic. */
+erts_smp_mtx_t erts_driver_list_lock; /* Mutex for driver list */
+static erts_smp_tsd_key_t driver_list_lock_status_key; /* stop recursive locks when calling
+ driver init */
+static erts_smp_tsd_key_t driver_list_last_error_key; /* Save last DDLL error on a
+ per thread basis (for BC interfaces) */
+
+Port* erts_port; /* The port table */
+erts_smp_atomic_t erts_ports_alive;
+erts_smp_atomic_t erts_bytes_out; /* Number of bytes sent out of the system */
+erts_smp_atomic_t erts_bytes_in; /* Number of bytes received by the system */
+
+Uint erts_max_ports;
+Uint erts_port_tab_index_mask;
+
+const ErlDrvTermData driver_term_nil = (ErlDrvTermData)NIL;
+
+erts_driver_t vanilla_driver;
+erts_driver_t spawn_driver;
+erts_driver_t fd_driver;
+
+static int init_driver(erts_driver_t *, ErlDrvEntry *, DE_Handle *);
+static void terminate_port(Port *p);
+static void pdl_init(void);
+
+static ERTS_INLINE ErlIOQueue*
+drvport2ioq(ErlDrvPort drvport)
+{
+ int ix = (int) drvport;
+ if (ix < 0 || erts_max_ports <= ix)
+ return NULL;
+ if (erts_port[ix].status & ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP)
+ return NULL;
+ ERTS_LC_ASSERT(!erts_port[ix].port_data_lock
+ || erts_lc_mtx_is_locked(&erts_port[ix].port_data_lock->mtx));
+ ERTS_SMP_LC_ASSERT(erts_port[ix].port_data_lock
+ || erts_lc_is_port_locked(&erts_port[ix]));
+ return &erts_port[ix].ioq;
+}
+
+static ERTS_INLINE int
+is_port_ioq_empty(Port *pp)
+{
+ int res;
+ ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(pp));
+ if (!pp->port_data_lock)
+ res = (pp->ioq.size == 0);
+ else {
+ ErlDrvPDL pdl = pp->port_data_lock;
+ erts_mtx_lock(&pdl->mtx);
+ res = (pp->ioq.size == 0);
+ erts_mtx_unlock(&pdl->mtx);
+ }
+ return res;
+}
+
+int
+erts_is_port_ioq_empty(Port *pp)
+{
+ return is_port_ioq_empty(pp);
+}
+
+Uint
+erts_port_ioq_size(Port *pp)
+{
+ int res;
+ ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(pp));
+ if (!pp->port_data_lock)
+ res = pp->ioq.size;
+ else {
+ ErlDrvPDL pdl = pp->port_data_lock;
+ erts_mtx_lock(&pdl->mtx);
+ res = pp->ioq.size;
+ erts_mtx_unlock(&pdl->mtx);
+ }
+ return (Uint) res;
+}
+
+/*
+ * Line buffered I/O.
+ */
+typedef struct line_buf_context {
+ LineBuf **b;
+ char *buf;
+ int left;
+ int retlen;
+} LineBufContext;
+
+#define LINEBUF_EMPTY 0
+#define LINEBUF_EOL 1
+#define LINEBUF_NOEOL 2
+#define LINEBUF_ERROR -1
+
+#define LINEBUF_STATE(LBC) ((*(LBC).b)->data[0])
+
+#define LINEBUF_DATA(LBC) (((*(LBC).b)->data) + 1)
+#define LINEBUF_DATALEN(LBC) ((LBC).retlen)
+
+#define LINEBUF_INITIAL 100
+
+
+/* The 'number' field in a port now has two parts: the lowest bits
+ contain the index in the port table, and the higher bits are a counter
+ which is incremented each time we look for a free port and start from
+ the beginning of the table. erts_max_ports is the number of file descriptors,
+ rounded up to a power of 2.
+ To get the index from a port, use the macro 'internal_port_index';
+ 'port_number' returns the whole number field.
+*/
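+/*
+ * Illustrative example (the numbers are hypothetical): with 1000
+ * file descriptors, erts_max_ports is rounded up to 1024, so
+ * port_extra_shift = 10 and erts_port_tab_index_mask = 0x3FF.
+ * A port number of 0x1403 then denotes table index
+ * 0x1403 & 0x3FF = 3, with wrap counter 0x1403 >> 10 = 5.
+ */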
+
+static erts_smp_spinlock_t get_free_port_lck;
+static Uint last_port_num;
+static Uint port_num_mask;
+erts_smp_atomic_t erts_ports_snapshot; /* Identifies the _next_ snapshot (not the ongoing) */
+
+
+static ERTS_INLINE void
+kill_port(Port *pp)
+{
+ ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(pp));
+ erts_port_task_free_port(pp);
+ ASSERT(pp->status & ERTS_PORT_SFLGS_DEAD);
+}
+
+#ifdef ERTS_SMP
+
+#ifdef ERTS_ENABLE_LOCK_CHECK
+int
+erts_lc_is_port_locked(Port *prt)
+{
+ if (!prt)
+ return 0;
+ return erts_smp_lc_mtx_is_locked(prt->lock);
+}
+#endif
+
+#endif /* #ifdef ERTS_SMP */
+
+static int
+get_free_port(void)
+{
+ Uint num;
+ Uint tries = erts_max_ports;
+ Port* port;
+
+ erts_smp_spin_lock(&get_free_port_lck);
+ num = last_port_num + 1;
+ for (;; ++num) {
+ port = &erts_port[num & erts_port_tab_index_mask];
+
+ erts_smp_port_state_lock(port);
+ if (port->status & ERTS_PORT_SFLG_FREE) {
+ last_port_num = num;
+ erts_smp_spin_unlock(&get_free_port_lck);
+ break;
+ }
+ erts_smp_port_state_unlock(port);
+
+ if (--tries == 0) {
+ erts_smp_spin_unlock(&get_free_port_lck);
+ return -1;
+ }
+ }
+ port->status = ERTS_PORT_SFLG_INITIALIZING;
+#ifdef ERTS_SMP
+ ERTS_SMP_LC_ASSERT(erts_smp_atomic_read(&port->refc) == 0);
+ erts_smp_atomic_set(&port->refc, 2); /* Port alive + lock */
+#endif
+ erts_smp_port_state_unlock(port);
+ return num & port_num_mask;
+}
+
+/*
+ * erts_test_next_port() is only used for testing.
+ */
+Sint
+erts_test_next_port(int set, Uint next)
+{
+ Uint i, num;
+ Sint res = -1;
+
+ erts_smp_spin_lock(&get_free_port_lck);
+ if (set) {
+ last_port_num = (next - 1) & port_num_mask;
+ }
+ num = last_port_num + 1;
+
+ for (i=0; i < erts_max_ports && res<0; ++i, ++num) {
+
+ Port* port = &erts_port[num & erts_port_tab_index_mask];
+
+ erts_smp_port_state_lock(port);
+
+ if (port->status & ERTS_PORT_SFLG_FREE) {
+ last_port_num = num - 1;
+ res = num & port_num_mask;
+ }
+ erts_smp_port_state_unlock(port);
+ }
+ erts_smp_spin_unlock(&get_free_port_lck);
+ return res;
+}
+
+void
+erts_port_cleanup(Port *prt)
+{
+#ifdef ERTS_SMP
+ Uint32 port_specific;
+ erts_smp_mtx_t *mtx;
+#endif
+ erts_driver_t *driver;
+
+ erts_smp_port_state_lock(prt);
+
+ ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
+ driver = prt->drv_ptr;
+ prt->drv_ptr = NULL;
+ ASSERT(driver);
+
+#ifdef ERTS_SMP
+
+ ASSERT(prt->status & ERTS_PORT_SFLG_FREE_SCHEDULED);
+ ERTS_SMP_LC_ASSERT(erts_smp_atomic_read(&prt->refc) == 0);
+
+ port_specific = (prt->status & ERTS_PORT_SFLG_PORT_SPECIFIC_LOCK);
+
+ mtx = prt->lock;
+ ASSERT(mtx);
+
+ prt->lock = NULL;
+
+ ASSERT(prt->status & ERTS_PORT_SFLG_PORT_DEBUG);
+ ASSERT(!(prt->status & ERTS_PORT_SFLG_FREE));
+ prt->status = ERTS_PORT_SFLG_FREE;
+
+ erts_smp_port_state_unlock(prt);
+ erts_smp_mtx_unlock(mtx);
+
+ if (port_specific) {
+ erts_smp_mtx_destroy(mtx);
+ erts_free(ERTS_ALC_T_PORT_LOCK, mtx);
+ }
+#endif
+
+ if (driver->handle)
+ erts_ddll_dereference_driver(driver->handle);
+}
+
+
+/*
+** Initialize v_start to point to the small fixed vector.
+** Once reallocated, we never reset the pointer back to the small
+** vector; doing so is a possible optimisation.
+*/
+static void initq(Port* prt)
+{
+ ErlIOQueue* q = &prt->ioq;
+
+ ERTS_LC_ASSERT(!prt->port_data_lock);
+
+ q->size = 0;
+ q->v_head = q->v_tail = q->v_start = q->v_small;
+ q->v_end = q->v_small + SMALL_IO_QUEUE;
+ q->b_head = q->b_tail = q->b_start = q->b_small;
+ q->b_end = q->b_small + SMALL_IO_QUEUE;
+}
+
+static void stopq(Port* prt)
+{
+ ErlIOQueue* q;
+ ErlDrvBinary** binp;
+
+ if (prt->port_data_lock)
+ driver_pdl_lock(prt->port_data_lock);
+
+ q = &prt->ioq;
+ binp = q->b_head;
+
+ if (q->v_start != q->v_small)
+ erts_free(ERTS_ALC_T_IOQ, (void *) q->v_start);
+
+ while(binp < q->b_tail) {
+ if (*binp != NULL)
+ driver_free_binary(*binp);
+ binp++;
+ }
+ if (q->b_start != q->b_small)
+ erts_free(ERTS_ALC_T_IOQ, (void *) q->b_start);
+ q->v_start = q->v_end = q->v_head = q->v_tail = NULL;
+ q->b_start = q->b_end = q->b_head = q->b_tail = NULL;
+ q->size = 0;
+
+ if (prt->port_data_lock) {
+ driver_pdl_unlock(prt->port_data_lock);
+ driver_pdl_dec_refc(prt->port_data_lock);
+ prt->port_data_lock = NULL;
+ }
+}
+
+
+
+static void
+setup_port(Port* prt, Eterm pid, erts_driver_t *driver,
+ ErlDrvData drv_data, char *name, Uint32 xstatus)
+{
+ ErtsRunQueue *runq = erts_get_runq_current(NULL);
+ char *new_name, *old_name;
+#ifdef DEBUG
+ /* Make sure the debug flags survives until port is freed */
+ xstatus |= ERTS_PORT_SFLG_PORT_DEBUG;
+#endif
+ ASSERT(runq);
+ ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
+
+
+ new_name = (char*) erts_alloc(ERTS_ALC_T_PORT_NAME, sys_strlen(name)+1);
+ sys_strcpy(new_name, name);
+ erts_smp_runq_lock(runq);
+ erts_smp_atomic_inc(&erts_ports_alive);
+ erts_smp_port_state_lock(prt);
+ prt->status = ERTS_PORT_SFLG_CONNECTED | xstatus;
+ prt->snapshot = (Uint32) erts_smp_atomic_read(&erts_ports_snapshot);
+ old_name = prt->name;
+ prt->name = new_name;
+#ifdef ERTS_SMP
+ erts_smp_atomic_set(&prt->run_queue, (long) runq);
+#endif
+ ASSERT(!prt->drv_ptr);
+ prt->drv_ptr = driver;
+ erts_smp_port_state_unlock(prt);
+ erts_smp_runq_unlock(runq);
+#ifdef ERTS_SMP
+ ASSERT(!prt->xports);
+#endif
+ if (old_name) {
+ erts_free(ERTS_ALC_T_PORT_NAME, (void *) old_name);
+ }
+
+ prt->control_flags = 0;
+ prt->connected = pid;
+ prt->drv_data = (long) drv_data;
+ prt->bytes_in = 0;
+ prt->bytes_out = 0;
+ prt->dist_entry = NULL;
+ prt->reg = NULL;
+#ifdef ERTS_SMP
+ prt->ptimer = NULL;
+#else
+ sys_memset(&prt->tm, 0, sizeof(ErlTimer));
+#endif
+ erts_port_task_handle_init(&prt->timeout_task);
+ prt->suspended = NULL;
+ sys_strcpy(prt->name, name);
+ prt->nlinks = NULL;
+ prt->monitors = NULL;
+ prt->linebuf = NULL;
+ prt->bp = NULL;
+ prt->data = am_undefined;
+ /* Set default tracing */
+ erts_get_default_tracing(&(prt->trace_flags), &(prt->tracer_proc));
+
+ prt->psd = NULL;
+
+ initq(prt);
+}
+
+void
+erts_wake_process_later(Port *prt, Process *process)
+{
+ ErtsProcList** p;
+ ErtsProcList* new_p;
+
+ ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
+
+ if (prt->status & ERTS_PORT_SFLGS_DEAD)
+ return;
+
+ for (p = &(prt->suspended); *p != NULL; p = &((*p)->next))
+ /* Empty loop body */;
+
+ new_p = erts_proclist_create(process);
+ new_p->next = NULL;
+ *p = new_p;
+}
+
+/*
+ Opens a driver.
+ Returns the non-negative port number, if successful.
+ If there is an error, -1, -2, or -3 is returned. -2 means that
+ there is valid error information in *error_number_ptr.
+ Returning -3 means that an error in the given options was detected
+ (*error_number_ptr must contain either BADARG or SYSTEM_LIMIT).
+ The driver start function must obey the same conventions.
+*/
+int
+erts_open_driver(erts_driver_t* driver, /* Pointer to driver. */
+ Eterm pid, /* Current process. */
+ char* name, /* Driver name. */
+ SysDriverOpts* opts, /* Options. */
+ int *error_number_ptr) /* errno in case -2 is returned */
+{
+ int port_num;
+ int port_ix;
+ ErlDrvData drv_data = 0;
+ Uint32 xstatus = 0;
+ Port *port;
+ int fpe_was_unmasked;
+
+ if (error_number_ptr)
+ *error_number_ptr = 0;
+
+ ERTS_SMP_CHK_NO_PROC_LOCKS;
+
+ if ((port_num = get_free_port()) < 0) {
+ if (error_number_ptr) {
+ *error_number_ptr = SYSTEM_LIMIT;
+ }
+ return -3;
+ }
+
+ port_ix = port_num & erts_port_tab_index_mask;
+ port = &erts_port[port_ix];
+ port->id = make_internal_port(port_num);
+
+ erts_smp_mtx_lock(&erts_driver_list_lock);
+ if (!driver) {
+ for (driver = driver_list; driver; driver = driver->next) {
+ if (sys_strcmp(driver->name, name) == 0)
+ break;
+ }
+ if (!driver) {
+ erts_smp_mtx_unlock(&erts_driver_list_lock);
+ if (error_number_ptr)
+ *error_number_ptr = BADARG;
+ return -3;
+ }
+ }
+ if (driver == &spawn_driver) {
+ char *p;
+ erts_driver_t *d;
+
+ /*
+ * Dig out the name of the driver or port program.
+ */
+
+ if (!(opts->spawn_type & ERTS_SPAWN_EXECUTABLE)) {
+ /* No default spawn driver; the driver must be found by name below */
+ driver = NULL;
+ }
+
+
+ if (opts->spawn_type != ERTS_SPAWN_EXECUTABLE) {
+ p = name;
+ while(*p != '\0' && *p != ' ')
+ p++;
+ if (*p == '\0')
+ p = NULL;
+ else
+ *p = '\0';
+
+ /*
+ * Search for a driver having this name. Defaults to spawn_driver
+ * if not found.
+ */
+
+ for (d = driver_list; d; d = d->next) {
+ if (strcmp(d->name, name) == 0 &&
+ erts_ddll_driver_ok(d->handle)) {
+ driver = d;
+ break;
+ }
+ }
+ if (p != NULL)
+ *p = ' ';
+ }
+ }
+
+ if (driver == NULL || (driver != &spawn_driver && opts->exit_status)) {
+ erts_smp_mtx_unlock(&erts_driver_list_lock);
+ if (error_number_ptr) {
+ *error_number_ptr = BADARG;
+ }
+ /* Need to mark the port as free again */
+ erts_smp_port_state_lock(port);
+ port->status = ERTS_PORT_SFLG_FREE;
+#ifdef ERTS_SMP
+ ERTS_SMP_LC_ASSERT(erts_smp_atomic_read(&port->refc) == 2);
+ erts_smp_atomic_set(&port->refc, 0);
+#endif
+ erts_smp_port_state_unlock(port);
+ return -3;
+ }
+
+ /*
+ * We'll set up the port before calling the start function,
+ * to allow message sending and setting timers in the start function.
+ */
+
+#ifdef ERTS_SMP
+ ASSERT(!port->lock);
+ port->lock = driver->lock;
+ if (!port->lock) {
+ port->lock = erts_alloc(ERTS_ALC_T_PORT_LOCK,
+ sizeof(erts_smp_mtx_t));
+ erts_smp_mtx_init_x(port->lock,
+ "port_lock",
+ port->id);
+ xstatus |= ERTS_PORT_SFLG_PORT_SPECIFIC_LOCK;
+ }
+#endif
+
+ if (driver->handle != NULL) {
+ erts_ddll_increment_port_count(driver->handle);
+ erts_ddll_reference_driver(driver->handle);
+ }
+ erts_smp_mtx_unlock(&erts_driver_list_lock);
+
+#ifdef ERTS_SMP
+ erts_smp_mtx_lock(port->lock);
+#endif
+
+ setup_port(port, pid, driver, drv_data, name, xstatus);
+
+ if (IS_TRACED_FL(port, F_TRACE_PORTS)) {
+ trace_port_open(port,
+ pid,
+ am_atom_put(port->name, strlen(port->name)));
+ }
+
+ if (driver->start) {
+ if (IS_TRACED_FL(port, F_TRACE_SCHED_PORTS)) {
+ trace_sched_ports_where(port, am_in, am_start);
+ }
+ port->caller = pid;
+ fpe_was_unmasked = erts_block_fpe();
+ drv_data = (*driver->start)((ErlDrvPort)(port_ix),
+ name, opts);
+ erts_unblock_fpe(fpe_was_unmasked);
+ port->caller = NIL;
+ if (IS_TRACED_FL(port, F_TRACE_SCHED_PORTS)) {
+ trace_sched_ports_where(port, am_out, am_start);
+ }
+ if (error_number_ptr && ((long) drv_data) == (long) -2)
+ *error_number_ptr = errno;
+#ifdef ERTS_SMP
+ if (port->xports)
+ erts_smp_xports_unlock(port);
+ ASSERT(!port->xports);
+#endif
+ }
+
+ if (((long)drv_data) == -1 ||
+ ((long)drv_data) == -2 ||
+ ((long)drv_data) == -3) {
+ int res = (int) ((long) drv_data);
+
+ if (res == -3 && error_number_ptr) {
+ *error_number_ptr = BADARG;
+ }
+
+ /*
+ * Must clean up the port.
+ */
+#ifdef ERTS_SMP
+ erts_cancel_smp_ptimer(port->ptimer);
+#else
+ erl_cancel_timer(&(port->tm));
+#endif
+ stopq(port);
+ kill_port(port);
+ if (port->linebuf != NULL) {
+ erts_free(ERTS_ALC_T_LINEBUF,
+ (void *) port->linebuf);
+ port->linebuf = NULL;
+ }
+ if (driver->handle != NULL) {
+ erts_smp_mtx_lock(&erts_driver_list_lock);
+ erts_ddll_decrement_port_count(driver->handle);
+ erts_smp_mtx_unlock(&erts_driver_list_lock);
+ }
+ erts_port_release(port);
+ return res;
+ }
+ port->drv_data = (long) drv_data;
+ return port_ix;
+}
+
+#ifdef ERTS_SMP
+
+struct ErtsXPortsList_ {
+ ErtsXPortsList *next;
+ Port *port;
+};
+
+ERTS_SCHED_PREF_QUICK_ALLOC_IMPL(xports_list, ErtsXPortsList, 50, ERTS_ALC_T_XPORTS_LIST)
+
+#endif
+
+/*
+ * Driver function to create new instances of a driver.
+ * Historical reason: used by inet_drv to create accept
+ * sockets in order to avoid a global table.
+ */
+ErlDrvPort
+driver_create_port(ErlDrvPort creator_port_ix, /* Creating port */
+ ErlDrvTermData pid, /* Owner/Caller */
+ char* name, /* Driver name */
+ ErlDrvData drv_data) /* Driver data */
+{
+ Port *creator_port;
+ Port* port;
+ erts_driver_t *driver;
+ Process *rp;
+ int port_num;
+ Eterm port_id;
+ Uint32 xstatus = 0;
+
+ ERTS_SMP_CHK_NO_PROC_LOCKS;
+
+ creator_port = erts_drvport2port(creator_port_ix);
+ if (!creator_port)
+ return (ErlDrvTermData) -1;
+
+ ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(creator_port));
+
+ driver = creator_port->drv_ptr;
+ erts_smp_mtx_lock(&erts_driver_list_lock);
+ if (!erts_ddll_driver_ok(driver->handle)) {
+ erts_smp_mtx_unlock(&erts_driver_list_lock);
+ return (ErlDrvTermData) -1;
+ }
+
+ rp = erts_pid2proc(NULL, 0, pid, ERTS_PROC_LOCK_LINK);
+ if (!rp) {
+ erts_smp_mtx_unlock(&erts_driver_list_lock);
+ return (ErlDrvTermData) -1; /* pid does not exist */
+ }
+ if ((port_num = get_free_port()) < 0) {
+ errno = ENFILE;
+ erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK);
+ erts_smp_mtx_unlock(&erts_driver_list_lock);
+ return (ErlDrvTermData) -1;
+ }
+
+ port_id = make_internal_port(port_num);
+ port = &erts_port[port_num & erts_port_tab_index_mask];
+
+#ifdef ERTS_SMP
+ ASSERT(!port->lock);
+ port->lock = driver->lock;
+ if (!port->lock) {
+ ErtsXPortsList *xplp = xports_list_alloc();
+ xplp->port = port;
+ xplp->next = creator_port->xports;
+ creator_port->xports = xplp;
+ port->lock = erts_alloc(ERTS_ALC_T_PORT_LOCK,
+ sizeof(erts_smp_mtx_t));
+ erts_smp_mtx_init_locked_x(port->lock, "port_lock", port_id);
+ xstatus |= ERTS_PORT_SFLG_PORT_SPECIFIC_LOCK;
+ }
+
+#endif
+
+ if (driver->handle != NULL) {
+ erts_ddll_increment_port_count(driver->handle);
+ erts_ddll_reference_referenced_driver(driver->handle);
+ }
+ erts_smp_mtx_unlock(&erts_driver_list_lock);
+
+ ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(port));
+
+ setup_port(port, pid, driver, drv_data, name, xstatus);
+ port->id = port_id;
+
+ erts_add_link(&(port->nlinks), LINK_PID, pid);
+ erts_add_link(&(rp->nlinks), LINK_PID, port_id);
+ erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK);
+ return port_num & erts_port_tab_index_mask;
+}
+
+#ifdef ERTS_SMP
+void
+erts_smp_xports_unlock(Port *prt)
+{
+ ErtsXPortsList *xplp;
+
+ ASSERT(prt);
+ xplp = prt->xports;
+ ASSERT(xplp);
+ while (xplp) {
+ ErtsXPortsList *free_xplp;
+ if (xplp->port->xports)
+ erts_smp_xports_unlock(xplp->port);
+ erts_port_release(xplp->port);
+ free_xplp = xplp;
+ xplp = xplp->next;
+ xports_list_free(free_xplp);
+ }
+ prt->xports = NULL;
+}
+#endif
+
+/* Fills a possibly deep list of chars and binaries into the io vector iov.
+** Small runs of characters are first stored in the character buffer cbin;
+** large binaries found are referenced from the binary vector binv.
+** Returns the vector length on success,
+** -1 on overflow,
+** -2 on type error.
+*/
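+/*
+ * For example (illustrative), the io list ["ab", Big, "cd"], where
+ * Big is a refc binary larger than bin_limit, yields three vector
+ * entries: "ab" taken from cbin, Big referenced in place, and "cd"
+ * taken from cbin again.
+ */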
+
+#define SET_VEC(iov, bv, bin, ptr, len, vlen) do { \
+ (iov)->iov_base = (ptr); \
+ (iov)->iov_len = (len); \
+ *(bv)++ = (bin); \
+ (iov)++; \
+ (vlen)++; \
+} while(0)
+
+static int
+io_list_to_vec(Eterm obj, /* io-list */
+ SysIOVec* iov, /* io vector */
+ ErlDrvBinary** binv, /* binary reference vector */
+ ErlDrvBinary* cbin, /* binary to store characters */
+ int bin_limit) /* small binaries limit */
+{
+ DECLARE_ESTACK(s);
+ Eterm* objp;
+ char *buf = cbin->orig_bytes;
+ int len = cbin->orig_size;
+ int csize = 0;
+ int vlen = 0;
+ char* cptr = buf;
+
+ goto L_jump_start; /* avoid push */
+
+ while (!ESTACK_ISEMPTY(s)) {
+ obj = ESTACK_POP(s);
+ L_jump_start:
+ if (is_list(obj)) {
+ L_iter_list:
+ objp = list_val(obj);
+ obj = CAR(objp);
+ if (is_byte(obj)) {
+ if (len == 0)
+ goto L_overflow;
+ *buf++ = unsigned_val(obj);
+ csize++;
+ len--;
+ } else if (is_binary(obj)) {
+ ESTACK_PUSH(s, CDR(objp));
+ goto handle_binary;
+ } else if (is_list(obj)) {
+ ESTACK_PUSH(s, CDR(objp));
+ goto L_iter_list; /* on head */
+ } else if (!is_nil(obj)) {
+ goto L_type_error;
+ }
+ obj = CDR(objp);
+ if (is_list(obj))
+ goto L_iter_list; /* on tail */
+ else if (is_binary(obj)) {
+ goto handle_binary;
+ } else if (!is_nil(obj)) {
+ goto L_type_error;
+ }
+ } else if (is_binary(obj)) {
+ Eterm real_bin;
+ Uint offset;
+ Eterm* bptr;
+ int size;
+ int bitoffs;
+ int bitsize;
+
+ handle_binary:
+ size = binary_size(obj);
+ ERTS_GET_REAL_BIN(obj, real_bin, offset, bitoffs, bitsize);
+ ASSERT(bitsize == 0);
+ bptr = binary_val(real_bin);
+ if (*bptr == HEADER_PROC_BIN) {
+ ProcBin* pb = (ProcBin *) bptr;
+ if (bitoffs != 0) {
+ if (len < size) {
+ goto L_overflow;
+ }
+ erts_copy_bits(pb->bytes+offset, bitoffs, 1,
+ (byte *) buf, 0, 1, size*8);
+ csize += size;
+ buf += size;
+ len -= size;
+ } else if (bin_limit && size < bin_limit) {
+ if (len < size) {
+ goto L_overflow;
+ }
+ sys_memcpy(buf, pb->bytes+offset, size);
+ csize += size;
+ buf += size;
+ len -= size;
+ } else {
+ if (csize != 0) {
+ SET_VEC(iov, binv, cbin, cptr, csize, vlen);
+ cptr = buf;
+ csize = 0;
+ }
+ if (pb->flags) {
+ erts_emasculate_writable_binary(pb);
+ }
+ SET_VEC(iov, binv, Binary2ErlDrvBinary(pb->val),
+ pb->bytes+offset, size, vlen);
+ }
+ } else {
+ ErlHeapBin* hb = (ErlHeapBin *) bptr;
+ if (len < size) {
+ goto L_overflow;
+ }
+ copy_binary_to_buffer(buf, 0,
+ ((byte *) hb->data)+offset, bitoffs,
+ 8*size);
+ csize += size;
+ buf += size;
+ len -= size;
+ }
+ } else if (!is_nil(obj)) {
+ goto L_type_error;
+ }
+ }
+
+ if (csize != 0) {
+ SET_VEC(iov, binv, cbin, cptr, csize, vlen);
+ }
+
+ DESTROY_ESTACK(s);
+ return vlen;
+
+ L_type_error:
+ DESTROY_ESTACK(s);
+ return -2;
+
+ L_overflow:
+ DESTROY_ESTACK(s);
+ return -1;
+}
+
+#define IO_LIST_VEC_COUNT(obj) \
+do { \
+ int _size = binary_size(obj); \
+ Eterm _real; \
+ Uint _offset; \
+ int _bitoffs; \
+ int _bitsize; \
+ ERTS_GET_REAL_BIN(obj, _real, _offset, _bitoffs, _bitsize); \
+ ASSERT(_bitsize == 0); \
+ if (thing_subtag(*binary_val(_real)) == REFC_BINARY_SUBTAG && \
+ _bitoffs == 0) { \
+ b_size += _size; \
+ in_clist = 0; \
+ v_size++; \
+ if (_size >= bin_limit) { \
+ p_in_clist = 0; \
+ p_v_size++; \
+ } else { \
+ p_c_size += _size; \
+ if (!p_in_clist) { \
+ p_in_clist = 1; \
+ p_v_size++; \
+ } \
+ } \
+ } else { \
+ c_size += _size; \
+ if (!in_clist) { \
+ in_clist = 1; \
+ v_size++; \
+ } \
+ p_c_size += _size; \
+ if (!p_in_clist) { \
+ p_in_clist = 1; \
+ p_v_size++; \
+ } \
+ } \
+} while (0)
+
+
+/*
+** Size of an io list in bytes.
+** Returns -1 on error, otherwise the total size of the io list.
+** Output parameters:
+** vsize - SysIOVec size needed for a writev
+** csize - Number of bytes not in a binary (stored in the common binary)
+** pvsize - SysIOVec size needed if packing small binaries
+** pcsize - Number of bytes in the common binary if packing
+*/
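+/*
+ * Worked example (illustrative): for the io list ["ab", Big, "cd"],
+ * where Big is a refc binary larger than bin_limit, the character
+ * runs around the binary each need their own iovec entry, so
+ * vsize = 3 and csize = 4, while Big adds its own size to the total.
+ * Packing changes nothing here (pvsize = 3, pcsize = 4), since Big
+ * is above bin_limit and is still sent by reference.
+ */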
+
+static int
+io_list_vec_len(Eterm obj, int* vsize, int* csize,
+ int bin_limit, /* small binaries limit */
+ int * pvsize, int * pcsize)
+{
+ DECLARE_ESTACK(s);
+ Eterm* objp;
+ int v_size = 0;
+ int c_size = 0;
+ int b_size = 0;
+ int in_clist = 0;
+ int p_v_size = 0;
+ int p_c_size = 0;
+ int p_in_clist = 0;
+
+ goto L_jump_start; /* avoid a push */
+
+ while (!ESTACK_ISEMPTY(s)) {
+ obj = ESTACK_POP(s);
+ L_jump_start:
+ if (is_list(obj)) {
+ L_iter_list:
+ objp = list_val(obj);
+ obj = CAR(objp);
+
+ if (is_byte(obj)) {
+ c_size++;
+ if (!in_clist) {
+ in_clist = 1;
+ v_size++;
+ }
+ p_c_size++;
+ if (!p_in_clist) {
+ p_in_clist = 1;
+ p_v_size++;
+ }
+ }
+ else if (is_binary(obj)) {
+ IO_LIST_VEC_COUNT(obj);
+ }
+ else if (is_list(obj)) {
+ ESTACK_PUSH(s, CDR(objp));
+ goto L_iter_list; /* on head */
+ }
+ else if (!is_nil(obj)) {
+ goto L_type_error;
+ }
+
+ obj = CDR(objp);
+ if (is_list(obj))
+ goto L_iter_list; /* on tail */
+ else if (is_binary(obj)) { /* binary tail is OK */
+ IO_LIST_VEC_COUNT(obj);
+ }
+ else if (!is_nil(obj)) {
+ goto L_type_error;
+ }
+ }
+ else if (is_binary(obj)) {
+ IO_LIST_VEC_COUNT(obj);
+ }
+ else if (!is_nil(obj)) {
+ goto L_type_error;
+ }
+ }
+
+ DESTROY_ESTACK(s);
+ if (vsize != NULL)
+ *vsize = v_size;
+ if (csize != NULL)
+ *csize = c_size;
+ if (pvsize != NULL)
+ *pvsize = p_v_size;
+ if (pcsize != NULL)
+ *pcsize = p_c_size;
+ return c_size + b_size;
+
+ L_type_error:
+ DESTROY_ESTACK(s);
+ return -1;
+}
+
+#define ERL_SMALL_IO_BIN_LIMIT (4*ERL_ONHEAP_BIN_LIMIT)
+#define SMALL_WRITE_VEC 16
+
+
+/* write data to a port */
+int erts_write_to_port(Eterm caller_id, Port *p, Eterm list)
+{
+ char *buf;
+ erts_driver_t *drv = p->drv_ptr;
+ int size;
+ int fpe_was_unmasked;
+
+ ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(p));
+ ERTS_SMP_CHK_NO_PROC_LOCKS;
+
+ p->caller = caller_id;
+ if (drv->outputv != NULL) {
+ int vsize;
+ int csize;
+ int pvsize;
+ int pcsize;
+ int blimit;
+ SysIOVec iv[SMALL_WRITE_VEC];
+ ErlDrvBinary* bv[SMALL_WRITE_VEC];
+ SysIOVec* ivp;
+ ErlDrvBinary** bvp;
+ ErlDrvBinary* cbin;
+ ErlIOVec ev;
+
+ if ((size = io_list_vec_len(list, &vsize, &csize,
+ ERL_SMALL_IO_BIN_LIMIT,
+ &pvsize, &pcsize)) < 0) {
+ goto bad_value;
+ }
+ /* To pack or not to pack (small binaries) ...? */
+ vsize++;
+ if (vsize <= SMALL_WRITE_VEC) {
+ /* Do NOT pack */
+ blimit = 0;
+ } else {
+ /* Do pack */
+ vsize = pvsize + 1;
+ csize = pcsize;
+ blimit = ERL_SMALL_IO_BIN_LIMIT;
+ }
+ /* Use vsize and csize from now on */
+ if (vsize <= SMALL_WRITE_VEC) {
+ ivp = iv;
+ bvp = bv;
+ } else {
+ ivp = (SysIOVec *) erts_alloc(ERTS_ALC_T_TMP,
+ vsize * sizeof(SysIOVec));
+ bvp = (ErlDrvBinary**) erts_alloc(ERTS_ALC_T_TMP,
+ vsize * sizeof(ErlDrvBinary*));
+ }
+ cbin = driver_alloc_binary(csize);
+ if (!cbin)
+ erts_alloc_enomem(ERTS_ALC_T_DRV_BINARY, sizeof(Binary) + csize);
+
+ /* Element 0 is for driver usage to add header block */
+ ivp[0].iov_base = NULL;
+ ivp[0].iov_len = 0;
+ bvp[0] = NULL;
+ ev.vsize = io_list_to_vec(list, ivp+1, bvp+1, cbin, blimit);
+ ev.vsize++;
+#if 0
+ /* This assertion may say something useful, but it can
+ be falsified during the emulator test suites. */
+ ASSERT((ev.vsize >= 0) && (ev.vsize == vsize));
+#endif
+ ev.size = size; /* total size */
+ ev.iov = ivp;
+ ev.binv = bvp;
+ fpe_was_unmasked = erts_block_fpe();
+ (*drv->outputv)((ErlDrvData)p->drv_data, &ev);
+ erts_unblock_fpe(fpe_was_unmasked);
+ if (ivp != iv) {
+ erts_free(ERTS_ALC_T_TMP, (void *) ivp);
+ }
+ if (bvp != bv) {
+ erts_free(ERTS_ALC_T_TMP, (void *) bvp);
+ }
+ driver_free_binary(cbin);
+ } else {
+ int r;
+
+ /* Try with an 8KB buffer first (it will often be enough). */
+ size = 8*1024;
+ /* See below why the extra byte is added. */
+ buf = erts_alloc(ERTS_ALC_T_TMP, size+1);
+ r = io_list_to_buf(list, buf, size);
+
+ if (r >= 0) {
+ size -= r;
+ fpe_was_unmasked = erts_block_fpe();
+ (*drv->output)((ErlDrvData)p->drv_data, buf, size);
+ erts_unblock_fpe(fpe_was_unmasked);
+ erts_free(ERTS_ALC_T_TMP, buf);
+ }
+ else if (r == -2) {
+ erts_free(ERTS_ALC_T_TMP, buf);
+ goto bad_value;
+ }
+ else {
+ ASSERT(r == -1); /* Overflow */
+ erts_free(ERTS_ALC_T_TMP, buf);
+ if ((size = io_list_len(list)) < 0) {
+ goto bad_value;
+ }
+
+ /*
+ * Some drivers pad the buffer with '\0'. This is clearly
+ * incorrect, but instead of fixing those drivers now, we
+ * add ONE extra byte.
+ */
+ buf = erts_alloc(ERTS_ALC_T_TMP, size+1);
+ r = io_list_to_buf(list, buf, size);
+ fpe_was_unmasked = erts_block_fpe();
+ (*drv->output)((ErlDrvData)p->drv_data, buf, size);
+ erts_unblock_fpe(fpe_was_unmasked);
+ erts_free(ERTS_ALC_T_TMP, buf);
+ }
+ }
+ p->bytes_out += size;
+ erts_smp_atomic_add(&erts_bytes_out, size);
+
+#ifdef ERTS_SMP
+ if (p->xports)
+ erts_smp_xports_unlock(p);
+ ASSERT(!p->xports);
+#endif
+ p->caller = NIL;
+ return 0;
+
+ bad_value:
+ p->caller = NIL;
+ {
+ erts_dsprintf_buf_t *dsbufp = erts_create_logger_dsbuf();
+ erts_dsprintf(dsbufp, "Bad value on output port '%s'\n", p->name);
+ erts_send_error_to_logger_nogl(dsbufp);
+ return 1;
+ }
+}
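+
+/*
+ * Illustrative sketch (hypothetical driver-side code, not part of this
+ * file): an outputv callback as it receives the ErlIOVec assembled by
+ * erts_write_to_port() above. Element 0 of ev->iov is deliberately left
+ * empty so the driver can prepend a protocol header without copying.
+ * The names example_outputv and hdr are assumptions for illustration.
+ */
+#if 0
+static void example_outputv(ErlDrvData handle, ErlIOVec *ev)
+{
+    static char hdr[4] = {0, 0, 0, 42};	/* hypothetical protocol header */
+    int i;
+
+    ev->iov[0].iov_base = hdr;		/* use the reserved slot */
+    ev->iov[0].iov_len = sizeof(hdr);
+    ev->binv[0] = NULL;			/* the header is not binary-backed */
+    ev->size += (int) sizeof(hdr);
+
+    for (i = 0; i < ev->vsize; i++) {
+	/* write ev->iov[i] to the underlying device, e.g. with writev() */
+    }
+}
+#endif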
+
+/* initialize the port array */
+void init_io(void)
+{
+ int i;
+ ErlDrvEntry** dp;
+ ErlDrvEntry* drv;
+ char maxports[21]; /* enough for any 64-bit integer */
+ size_t maxportssize = sizeof(maxports);
+ Uint ports_bits = ERTS_PORTS_BITS;
+ Sint port_extra_shift;
+
+#ifdef ERTS_SMP
+ init_xports_list_alloc();
+#endif
+
+ pdl_init();
+
+ if (erts_sys_getenv("ERL_MAX_PORTS", maxports, &maxportssize) == 0)
+ erts_max_ports = atoi(maxports);
+ else
+ erts_max_ports = sys_max_files();
+
+ if (erts_max_ports > ERTS_MAX_PORTS)
+ erts_max_ports = ERTS_MAX_PORTS;
+ if (erts_max_ports < 1024)
+ erts_max_ports = 1024;
+
+ if (erts_use_r9_pids_ports) {
+ ports_bits = ERTS_R9_PORTS_BITS;
+ if (erts_max_ports > ERTS_MAX_R9_PORTS)
+ erts_max_ports = ERTS_MAX_R9_PORTS;
+ }
+
+ port_extra_shift = erts_fit_in_bits(erts_max_ports - 1);
+ port_num_mask = (1 << ports_bits) - 1;
+
+ erts_port_tab_index_mask = ~(~((Uint) 0) << port_extra_shift);
+ erts_max_ports = 1 << port_extra_shift;
+
+ erts_smp_mtx_init(&erts_driver_list_lock,"driver_list");
+ driver_list = NULL;
+ erts_smp_tsd_key_create(&driver_list_lock_status_key);
+ erts_smp_tsd_key_create(&driver_list_last_error_key);
+
+ if (erts_max_ports * sizeof(Port) <= erts_max_ports) {
+ /* More memory needed than the whole address space. */
+ erts_alloc_enomem(ERTS_ALC_T_PORT_TABLE, ~((Uint) 0));
+ }
+
+ erts_port = (Port *) erts_alloc(ERTS_ALC_T_PORT_TABLE,
+ erts_max_ports * sizeof(Port));
+
+ erts_smp_atomic_init(&erts_bytes_out, 0);
+ erts_smp_atomic_init(&erts_bytes_in, 0);
+ erts_smp_atomic_init(&erts_ports_alive, 0);
+
+ for (i = 0; i < erts_max_ports; i++) {
+ erts_port_task_init_sched(&erts_port[i].sched);
+#ifdef ERTS_SMP
+ erts_smp_atomic_init(&erts_port[i].refc, 0);
+ erts_port[i].lock = NULL;
+ erts_port[i].xports = NULL;
+ erts_smp_spinlock_init(&erts_port[i].state_lck, "port_state");
+#endif
+ erts_port[i].tracer_proc = NIL;
+ erts_port[i].trace_flags = 0;
+
+ erts_port[i].drv_ptr = NULL;
+ erts_port[i].status = ERTS_PORT_SFLG_FREE;
+ erts_port[i].name = NULL;
+ erts_port[i].nlinks = NULL;
+ erts_port[i].monitors = NULL;
+ erts_port[i].linebuf = NULL;
+ erts_port[i].port_data_lock = NULL;
+ }
+
+ erts_smp_atomic_init(&erts_ports_snapshot, (long) 0);
+ last_port_num = 0;
+ erts_smp_spinlock_init(&get_free_port_lck, "get_free_port");
+
+ sys_init_io();
+
+ erts_smp_tsd_set(driver_list_lock_status_key, (void *) 1);
+ erts_smp_mtx_lock(&erts_driver_list_lock);
+
+ init_driver(&fd_driver, &fd_driver_entry, NULL);
+ init_driver(&vanilla_driver, &vanilla_driver_entry, NULL);
+ init_driver(&spawn_driver, &spawn_driver_entry, NULL);
+    for (dp = driver_tab; *dp != NULL; dp++) {
+	drv = *dp;
+	erts_add_driver_entry(drv, NULL, 1);
+    }
+
+ erts_smp_tsd_set(driver_list_lock_status_key, NULL);
+ erts_smp_mtx_unlock(&erts_driver_list_lock);
+}
+
+/*
+ * Buffering of data when using line oriented I/O on ports
+ */
+
+/*
+ * Buffer states
+ */
+#define LINEBUF_MAIN 0
+#define LINEBUF_FULL 1
+#define LINEBUF_CR_INSIDE 2
+#define LINEBUF_CR_AFTER 3
+
+/*
+ * Creates a LineBuf to be added to the port structure.
+ * Returns: Pointer to a newly allocated and initialized LineBuf.
+ * Parameters:
+ * bufsiz - The (maximum) size of the line buffer.
+ */
+LineBuf *allocate_linebuf(int bufsiz)
+{
+ int ovsiz = (bufsiz < LINEBUF_INITIAL) ? bufsiz : LINEBUF_INITIAL;
+ LineBuf *lb = (LineBuf *) erts_alloc(ERTS_ALC_T_LINEBUF,
+ sizeof(LineBuf)+ovsiz);
+ lb->ovsiz = ovsiz;
+ lb->bufsiz = bufsiz;
+ lb->ovlen = 0;
+ lb->data[0] = LINEBUF_MAIN; /* state */
+ return lb;
+}
+
+/*
+ * Initializes a LineBufContext to be used in calls to read_linebuf
+ * or flush_linebuf.
+ * Returns: 0 if ok, <0 on error.
+ * Parameters:
+ * lc - Pointer to an allocated LineBufContext.
+ * lb - Pointer to a LineBuf structure (probably from the Port structure).
+ * buf - A buffer containing the data to be read and split to lines.
+ * len - The number of bytes in buf.
+ */
+static int init_linebuf_context(LineBufContext *lc, LineBuf **lb, char *buf, int len)
+{
+ if(lc == NULL || lb == NULL)
+ return -1;
+ lc->b = lb;
+ lc->buf = buf;
+ lc->left = len;
+ return 0;
+}
+
+static void resize_linebuf(LineBuf **b)
+{
+ int newsiz = (((*b)->ovsiz * 2) > (*b)->bufsiz) ? (*b)->bufsiz :
+ (*b)->ovsiz * 2;
+ *b = (LineBuf *) erts_realloc(ERTS_ALC_T_LINEBUF,
+ (void *) *b,
+ sizeof(LineBuf)+newsiz);
+ (*b)->ovsiz = newsiz;
+}
+
+/*
+ * Delivers all data in the buffer regardless of newlines (always
+ * a LINEBUF_NOEOL). Has to be called until it returns LINEBUF_EMPTY.
+ * Return values and parameters as for read_linebuf (see below).
+ */
+static int flush_linebuf(LineBufContext *bp)
+{
+ bp->retlen = (*bp->b)->ovlen;
+ switch(LINEBUF_STATE(*bp)){
+ case LINEBUF_CR_INSIDE:
+ if((*bp->b)->ovlen >= (*bp->b)->ovsiz)
+ resize_linebuf(bp->b);
+ LINEBUF_DATA(*bp)[((*bp->b)->ovlen)++] = '\r';
+ ++bp->retlen; /* fall through instead of switching state... */
+ case LINEBUF_MAIN:
+ case LINEBUF_FULL:
+ (*bp->b)->ovlen = 0;
+ LINEBUF_STATE(*bp) = LINEBUF_MAIN;
+ if(!bp->retlen)
+ return LINEBUF_EMPTY;
+ return LINEBUF_NOEOL;
+ case LINEBUF_CR_AFTER:
+ LINEBUF_STATE(*bp) = LINEBUF_CR_INSIDE;
+ (*bp->b)->ovlen = 0;
+ if(!bp->retlen)
+ return LINEBUF_EMPTY;
+ return LINEBUF_NOEOL;
+ default:
+ return LINEBUF_ERROR;
+ }
+}
+
+/*
+ * Reads input from a buffer and "chops" it up in lines.
+ * Has to be called repeatedly until it returns LINEBUF_EMPTY
+ * to get all lines in buffer.
+ * Handles both <LF> and <CR><LF> style newlines.
+ * On Unix this is slightly incorrect, as <CR><LF> should NOT be regarded
+ * as a single newline, but we treat newlines the same on all systems
+ * to avoid putting this in sys.c or cluttering it with #ifdef's.
+ * Returns: LINEBUF_EMPTY if there is no more data that can be
+ * determined as a line (only part of a line left), LINEBUF_EOL if a whole
+ * line could be delivered and LINEBUF_NOEOL if the buffer size has been
+ * exceeded. The data and the data length can be accessed through the
+ * LINEBUF_DATA and the LINEBUF_DATALEN macros applied to the LineBufContext.
+ * Parameters:
+ * bp - A LineBufContext that is initialized with
+ * the init_linebuf_context call. The context has to be retained during
+ * all calls that return something other than LINEBUF_EMPTY. When LINEBUF_EMPTY
+ * is returned the context can be discarded and a new one created when new
+ * data arrives (the state is saved in the Port structure).
+ */
+static int read_linebuf(LineBufContext *bp)
+{
+ for(;;){
+ if(bp->left == 0)
+ return LINEBUF_EMPTY;
+ if(*bp->buf == '\n'){
+ LINEBUF_STATE(*bp) = LINEBUF_MAIN;
+ ++(bp->buf);
+ --(bp->left);
+ bp->retlen = (*bp->b)->ovlen;
+ (*bp->b)->ovlen = 0;
+ return LINEBUF_EOL;
+ }
+ switch(LINEBUF_STATE(*bp)){
+ case LINEBUF_MAIN:
+ if((*bp->b)->ovlen == (*bp->b)->bufsiz)
+ LINEBUF_STATE(*bp) = LINEBUF_FULL;
+ else if(*bp->buf == '\r'){
+ ++(bp->buf);
+ --(bp->left);
+ LINEBUF_STATE(*bp) = LINEBUF_CR_INSIDE;
+ } else {
+ if((*bp->b)->ovlen >= (*bp->b)->ovsiz)
+ resize_linebuf(bp->b);
+ LINEBUF_DATA(*bp)[((*bp->b)->ovlen)++] = *((bp->buf)++);
+ --(bp->left);
+ }
+ continue;
+ case LINEBUF_FULL:
+ if(*bp->buf == '\r'){
+ ++(bp->buf);
+ --(bp->left);
+ LINEBUF_STATE(*bp) = LINEBUF_CR_AFTER;
+ } else {
+ bp->retlen = (*bp->b)->ovlen;
+ (*bp->b)->ovlen = 0;
+ LINEBUF_STATE(*bp) = LINEBUF_MAIN;
+ return LINEBUF_NOEOL;
+ }
+ continue;
+ case LINEBUF_CR_INSIDE:
+ if((*bp->b)->ovlen >= (*bp->b)->ovsiz)
+ resize_linebuf(bp->b);
+ LINEBUF_DATA(*bp)[((*bp->b)->ovlen)++] = '\r';
+ LINEBUF_STATE(*bp) = LINEBUF_MAIN;
+ continue;
+ case LINEBUF_CR_AFTER:
+ bp->retlen = (*bp->b)->ovlen;
+ (*bp->b)->ovlen = 0;
+ LINEBUF_STATE(*bp) = LINEBUF_CR_INSIDE;
+ return LINEBUF_NOEOL;
+ default:
+ return LINEBUF_ERROR;
+ }
+ }
+}
+
+static void
+deliver_result(Eterm sender, Eterm pid, Eterm res)
+{
+ Process *rp;
+ ErtsProcLocks rp_locks = 0;
+
+ ERTS_SMP_CHK_NO_PROC_LOCKS;
+
+ ASSERT(is_internal_port(sender)
+ && is_internal_pid(pid)
+ && internal_pid_index(pid) < erts_max_processes);
+
+ rp = erts_pid2proc_opt(NULL, 0, pid, 0, ERTS_P2P_FLG_SMP_INC_REFC);
+
+ if (rp) {
+ Eterm tuple;
+ ErlHeapFragment *bp;
+ ErlOffHeap *ohp;
+ Eterm* hp;
+ Uint sz_res;
+ sz_res = size_object(res);
+ hp = erts_alloc_message_heap(sz_res + 3, &bp, &ohp, rp, &rp_locks);
+ res = copy_struct(res, sz_res, &hp, ohp);
+ tuple = TUPLE2(hp, sender, res);
+ erts_queue_message(rp, &rp_locks, bp, tuple, NIL);
+ erts_smp_proc_unlock(rp, rp_locks);
+ erts_smp_proc_dec_refc(rp);
+ }
+}
+
+
+/*
+ * Deliver a "read" message.
+ * hbuf -- bytes that are always formatted as a list
+ * hlen -- number of bytes in the header
+ * buf -- data
+ * len -- length of data
+ */
+
+static void deliver_read_message(Port* prt, Eterm to,
+ char *hbuf, int hlen,
+ char *buf, int len, int eol)
+{
+ int need;
+ Eterm listp;
+ Eterm tuple;
+ Process* rp;
+ Eterm* hp;
+ ErlHeapFragment *bp;
+ ErlOffHeap *ohp;
+ ErtsProcLocks rp_locks = 0;
+
+ ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
+ ERTS_SMP_CHK_NO_PROC_LOCKS;
+
+ need = 3 + 3 + 2*hlen;
+ if (prt->status & ERTS_PORT_SFLG_LINEBUF_IO) {
+ need += 3;
+ }
+ if (prt->status & ERTS_PORT_SFLG_BINARY_IO && buf != NULL) {
+ need += PROC_BIN_SIZE;
+ } else {
+ need += 2*len;
+ }
+
+ rp = erts_pid2proc_opt(NULL, 0, to, 0, ERTS_P2P_FLG_SMP_INC_REFC);
+ if (!rp)
+ return;
+
+ hp = erts_alloc_message_heap(need, &bp, &ohp, rp, &rp_locks);
+
+ listp = NIL;
+ if ((prt->status & ERTS_PORT_SFLG_BINARY_IO) == 0) {
+ listp = buf_to_intlist(&hp, buf, len, listp);
+ } else if (buf != NULL) {
+ ProcBin* pb;
+ Binary* bptr;
+
+ bptr = erts_bin_nrml_alloc(len);
+ bptr->flags = 0;
+ bptr->orig_size = len;
+ erts_refc_init(&bptr->refc, 1);
+ sys_memcpy(bptr->orig_bytes, buf, len);
+
+ pb = (ProcBin *) hp;
+ pb->thing_word = HEADER_PROC_BIN;
+ pb->size = len;
+ pb->next = ohp->mso;
+ ohp->mso = pb;
+ pb->val = bptr;
+ pb->bytes = (byte*) bptr->orig_bytes;
+ pb->flags = 0;
+ hp += PROC_BIN_SIZE;
+
+ ohp->overhead += pb->size / sizeof(Eterm);
+ listp = make_binary(pb);
+ }
+
+ /* Prepend the header */
+ if (hlen > 0) {
+ listp = buf_to_intlist(&hp, hbuf, hlen, listp);
+ }
+
+ if (prt->status & ERTS_PORT_SFLG_LINEBUF_IO){
+ listp = TUPLE2(hp, (eol) ? am_eol : am_noeol, listp);
+ hp += 3;
+ }
+ tuple = TUPLE2(hp, am_data, listp);
+ hp += 3;
+
+ tuple = TUPLE2(hp, prt->id, tuple);
+ hp += 3;
+
+ erts_queue_message(rp, &rp_locks, bp, tuple, am_undefined);
+ erts_smp_proc_unlock(rp, rp_locks);
+ erts_smp_proc_dec_refc(rp);
+}
+
+/*
+ * Deliver all lines in a line buffer, repeats calls to
+ * deliver_read_message, and takes the same parameters.
+ */
+static void deliver_linebuf_message(Port* prt, Eterm to,
+ char* hbuf, int hlen,
+ char *buf, int len)
+{
+ LineBufContext lc;
+ int ret;
+ if(init_linebuf_context(&lc,&(prt->linebuf), buf, len) < 0)
+ return;
+ while((ret = read_linebuf(&lc)) > LINEBUF_EMPTY)
+ deliver_read_message(prt, to, hbuf, hlen, LINEBUF_DATA(lc),
+ LINEBUF_DATALEN(lc), (ret == LINEBUF_EOL));
+}
+
+/*
+ * Deliver any nonterminated lines in the line buffer before the
+ * port gets closed.
+ * Has to be called before terminate_port.
+ * Parameters:
+ * prt - Pointer to a Port structure for this port.
+ */
+static void flush_linebuf_messages(Port *prt)
+{
+ LineBufContext lc;
+ int ret;
+
+ ERTS_SMP_LC_ASSERT(!prt || erts_lc_is_port_locked(prt));
+ if(prt == NULL || !(prt->status & ERTS_PORT_SFLG_LINEBUF_IO))
+ return;
+
+ if(init_linebuf_context(&lc,&(prt->linebuf), NULL, 0) < 0)
+ return;
+ while((ret = flush_linebuf(&lc)) > LINEBUF_EMPTY)
+ deliver_read_message(prt,
+ prt->connected,
+ NULL,
+ 0,
+ LINEBUF_DATA(lc),
+ LINEBUF_DATALEN(lc),
+ (ret == LINEBUF_EOL));
+}
+
+static void
+deliver_vec_message(Port* prt, /* Port */
+ Eterm to, /* Receiving pid */
+ char* hbuf, /* "Header" buffer... */
+ int hlen, /* ... and its length */
+ ErlDrvBinary** binv, /* Vector of binaries */
+ SysIOVec* iov, /* I/O vector */
+ int vsize, /* Size of binv & iov */
+ int csize) /* Size of characters in
+ iov (not hlen) */
+{
+ int need;
+ Eterm listp;
+ Eterm tuple;
+ Process* rp;
+ Eterm* hp;
+ ErlHeapFragment *bp;
+ ErlOffHeap *ohp;
+ ErtsProcLocks rp_locks = 0;
+
+ ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
+ ERTS_SMP_CHK_NO_PROC_LOCKS;
+
+ /*
+ * Check arguments for validity.
+ */
+
+ rp = erts_pid2proc_opt(NULL, 0, to, 0, ERTS_P2P_FLG_SMP_INC_REFC);
+ if (!rp)
+ return;
+
+ /*
+ * Calculate the exact number of heap words needed.
+ */
+
+ need = 3 + 3; /* Heap space for two tuples */
+ if (prt->status & ERTS_PORT_SFLG_BINARY_IO) {
+ need += (2+PROC_BIN_SIZE)*vsize - 2 + hlen*2;
+ } else {
+ need += (hlen+csize)*2;
+ }
+
+ hp = erts_alloc_message_heap(need, &bp, &ohp, rp, &rp_locks);
+
+ listp = NIL;
+ iov += vsize;
+
+ if ((prt->status & ERTS_PORT_SFLG_BINARY_IO) == 0) {
+ Eterm* thp = hp;
+ while (vsize--) {
+ iov--;
+ listp = buf_to_intlist(&thp, iov->iov_base, iov->iov_len, listp);
+ }
+ hp = thp;
+ } else {
+ binv += vsize;
+ while (vsize--) {
+ ErlDrvBinary* b;
+ ProcBin* pb = (ProcBin*) hp;
+ byte* base;
+
+ iov--;
+ binv--;
+ if ((b = *binv) == NULL) {
+ b = driver_alloc_binary(iov->iov_len);
+ sys_memcpy(b->orig_bytes, iov->iov_base, iov->iov_len);
+ base = (byte*) b->orig_bytes;
+ } else {
+ /* Must increment reference count, caller calls free */
+ driver_binary_inc_refc(b);
+ base = iov->iov_base;
+ }
+ pb->thing_word = HEADER_PROC_BIN;
+ pb->size = iov->iov_len;
+ pb->next = ohp->mso;
+ ohp->mso = pb;
+ pb->val = ErlDrvBinary2Binary(b);
+ pb->bytes = base;
+ pb->flags = 0;
+ hp += PROC_BIN_SIZE;
+
+ ohp->overhead += iov->iov_len / sizeof(Eterm);
+
+ if (listp == NIL) { /* compatible with deliver_bin_message */
+ listp = make_binary(pb);
+ } else {
+ listp = CONS(hp, make_binary(pb), listp);
+ hp += 2;
+ }
+ }
+ }
+
+ if (hlen > 0) { /* Prepend the header */
+ Eterm* thp = hp;
+ listp = buf_to_intlist(&thp, hbuf, hlen, listp);
+ hp = thp;
+ }
+
+ tuple = TUPLE2(hp, am_data, listp);
+ hp += 3;
+ tuple = TUPLE2(hp, prt->id, tuple);
+ hp += 3;
+
+ erts_queue_message(rp, &rp_locks, bp, tuple, am_undefined);
+ erts_smp_proc_unlock(rp, rp_locks);
+ erts_smp_proc_dec_refc(rp);
+}
+
+
+static void deliver_bin_message(Port* prt, /* port */
+ Eterm to, /* receiving pid */
+ char* hbuf, /* "header" buffer */
+				int hlen,	    /* and its length */
+ ErlDrvBinary* bin, /* binary data */
+ int offs, /* offset into binary */
+ int len) /* length of binary */
+{
+ SysIOVec vec;
+
+ vec.iov_base = bin->orig_bytes+offs;
+ vec.iov_len = len;
+ deliver_vec_message(prt, to, hbuf, hlen, &bin, &vec, 1, len);
+}
+
+/* flush the port I/O queue and terminate if empty */
+/*
+ * Note.
+ *
+ * The test for (p->status & ERTS_PORT_SFLGS_DEAD) == 0 is important, since
+ * the driver's flush function might call driver_async. With no async
+ * threads, driver_async is short-circuited and will notice that the I/O
+ * queue is empty (after calling the driver's async_ready) and recursively
+ * call terminate_port. So when we get back here, the port may already be
+ * terminated.
+ */
+static void flush_port(Port *p)
+{
+ int fpe_was_unmasked;
+
+ ERTS_SMP_CHK_NO_PROC_LOCKS;
+ ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(p));
+
+ if (p->drv_ptr->flush != NULL) {
+ if (IS_TRACED_FL(p, F_TRACE_SCHED_PORTS)) {
+ trace_sched_ports_where(p, am_in, am_flush);
+ }
+ fpe_was_unmasked = erts_block_fpe();
+ (*p->drv_ptr->flush)((ErlDrvData)p->drv_data);
+ erts_unblock_fpe(fpe_was_unmasked);
+ if (IS_TRACED_FL(p, F_TRACE_SCHED_PORTS)) {
+ trace_sched_ports_where(p, am_out, am_flush);
+ }
+#ifdef ERTS_SMP
+ if (p->xports)
+ erts_smp_xports_unlock(p);
+ ASSERT(!p->xports);
+#endif
+ }
+ if ((p->status & ERTS_PORT_SFLGS_DEAD) == 0 && is_port_ioq_empty(p)) {
+ terminate_port(p);
+ }
+}
+
+/* stop and delete a port that is ERTS_PORT_SFLG_CLOSING */
+static void
+terminate_port(Port *prt)
+{
+ Eterm send_closed_port_id;
+    Eterm connected_id = NIL; /* Initialized only to silence a compiler warning */
+ erts_driver_t *drv;
+
+ ERTS_SMP_CHK_NO_PROC_LOCKS;
+ ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
+
+ ASSERT(!prt->nlinks);
+ ASSERT(!prt->monitors);
+
+ if (prt->status & ERTS_PORT_SFLG_SEND_CLOSED) {
+ erts_port_status_band_set(prt, ~ERTS_PORT_SFLG_SEND_CLOSED);
+ send_closed_port_id = prt->id;
+ connected_id = prt->connected;
+ }
+ else {
+ send_closed_port_id = NIL;
+ }
+
+#ifdef ERTS_SMP
+ erts_cancel_smp_ptimer(prt->ptimer);
+#else
+ erl_cancel_timer(&prt->tm);
+#endif
+
+ drv = prt->drv_ptr;
+ if ((drv != NULL) && (drv->stop != NULL)) {
+ int fpe_was_unmasked = erts_block_fpe();
+ (*drv->stop)((ErlDrvData)prt->drv_data);
+ erts_unblock_fpe(fpe_was_unmasked);
+#ifdef ERTS_SMP
+ if (prt->xports)
+ erts_smp_xports_unlock(prt);
+ ASSERT(!prt->xports);
+#endif
+ }
+ if(drv->handle != NULL) {
+ erts_smp_mtx_lock(&erts_driver_list_lock);
+ erts_ddll_decrement_port_count(drv->handle);
+ erts_smp_mtx_unlock(&erts_driver_list_lock);
+ }
+ stopq(prt); /* clear queue memory */
+ if(prt->linebuf != NULL){
+ erts_free(ERTS_ALC_T_LINEBUF, (void *) prt->linebuf);
+ prt->linebuf = NULL;
+ }
+ if (prt->bp != NULL) {
+ free_message_buffer(prt->bp);
+ prt->bp = NULL;
+ prt->data = am_undefined;
+ }
+
+ if (prt->psd)
+ erts_free(ERTS_ALC_T_PRTSD, prt->psd);
+
+ kill_port(prt);
+
+ /*
+ * We don't want to send the closed message until after the
+ * port has been removed from the port table (in kill_port()).
+ */
+ if (is_internal_port(send_closed_port_id))
+ deliver_result(send_closed_port_id, connected_id, am_closed);
+
+ ASSERT(prt->dist_entry == NULL);
+}
+
+void
+erts_terminate_port(Port *pp)
+{
+ terminate_port(pp);
+}
+
+static void sweep_one_monitor(ErtsMonitor *mon, void *vpsc)
+{
+ ErtsMonitor *rmon;
+ Process *rp;
+
+ ASSERT(mon->type == MON_ORIGIN);
+ ASSERT(is_internal_pid(mon->pid));
+ rp = erts_pid2proc(NULL, 0, mon->pid, ERTS_PROC_LOCK_LINK);
+ if (!rp) {
+ goto done;
+ }
+ rmon = erts_remove_monitor(&(rp->monitors),mon->ref);
+ erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK);
+ if (rmon == NULL) {
+ goto done;
+ }
+ erts_destroy_monitor(rmon);
+ done:
+ erts_destroy_monitor(mon);
+}
+
+
+
+typedef struct {
+ Eterm port;
+ Eterm reason;
+} SweepContext;
+
+static void sweep_one_link(ErtsLink *lnk, void *vpsc)
+{
+ SweepContext *psc = vpsc;
+ DistEntry *dep;
+ Process *rp;
+
+
+ ASSERT(lnk->type == LINK_PID);
+
+ if (is_external_pid(lnk->pid)) {
+ dep = external_pid_dist_entry(lnk->pid);
+ if(dep != erts_this_dist_entry) {
+ ErtsDistLinkData dld;
+ ErtsDSigData dsd;
+ int code;
+ code = erts_dsig_prepare(&dsd, dep, NULL, ERTS_DSP_NO_LOCK, 0);
+ switch (code) {
+ case ERTS_DSIG_PREP_NOT_ALIVE:
+ case ERTS_DSIG_PREP_NOT_CONNECTED:
+ break;
+ case ERTS_DSIG_PREP_CONNECTED:
+ erts_remove_dist_link(&dld, psc->port, lnk->pid, dep);
+ erts_destroy_dist_link(&dld);
+ code = erts_dsig_send_exit(&dsd, psc->port, lnk->pid,
+ psc->reason);
+ ASSERT(code == ERTS_DSIG_SEND_OK);
+ break;
+ default:
+ ASSERT(! "Invalid dsig prepare result");
+ break;
+ }
+ }
+ } else {
+ ErtsProcLocks rp_locks = ERTS_PROC_LOCK_LINK|ERTS_PROC_LOCKS_XSIG_SEND;
+ ASSERT(is_internal_pid(lnk->pid));
+ rp = erts_pid2proc(NULL, 0, lnk->pid, rp_locks);
+ if (rp) {
+ ErtsLink *rlnk = erts_remove_link(&(rp->nlinks), psc->port);
+
+ if (rlnk) {
+ int xres = erts_send_exit_signal(NULL,
+ psc->port,
+ rp,
+ &rp_locks,
+ psc->reason,
+ NIL,
+ NULL,
+ 0);
+		if (xres >= 0 && IS_TRACED_FL(rp, F_TRACE_PROCS)) {
+		    /* We didn't exit the process and it is traced */
+		    trace_proc(NULL, rp, am_getting_unlinked, psc->port);
+		}
+ erts_destroy_link(rlnk);
+ }
+
+ erts_smp_proc_unlock(rp, rp_locks);
+ }
+ }
+ erts_destroy_link(lnk);
+}
+
+/* 'from' is sending 'this_port' an exit signal (this_port must be internal).
+ * If the reason is normal we don't do anything, *unless* 'from' is our
+ * connected process, in which case we close the port. Any other reason
+ * kills the port. If 'from' is ourself we always die.
+ * When a driver has data in its ioq, the port is set to closing and
+ * becomes inaccessible to processes. The one exception is an exit with
+ * reason kill, which stops the port immediately.
+ */
+void
+erts_do_exit_port(Port *p, Eterm from, Eterm reason)
+{
+ ErtsLink *lnk;
+ Eterm rreason;
+
+ ERTS_SMP_CHK_NO_PROC_LOCKS;
+ ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(p));
+
+ rreason = (reason == am_kill) ? am_killed : reason;
+
+ if ((p->status & (ERTS_PORT_SFLGS_DEAD
+ | ERTS_PORT_SFLG_EXITING
+ | ERTS_PORT_SFLG_IMMORTAL))
+ || ((reason == am_normal) &&
+ ((from != p->connected) && (from != p->id)))) {
+ return;
+ }
+
+ if (IS_TRACED_FL(p, F_TRACE_PORTS)) {
+ trace_port(p, am_closed, reason);
+ }
+
+ erts_trace_check_exiting(p->id);
+
+ /*
+ * Setting the port to not busy here, frees the list of pending
+ * processes and makes them runnable.
+ */
+ set_busy_port((ErlDrvPort)internal_port_index(p->id), 0);
+
+ if (p->reg != NULL)
+ (void) erts_unregister_name(NULL, 0, p, p->reg->name);
+
+ erts_port_status_bor_set(p, ERTS_PORT_SFLG_EXITING);
+
+ {
+ SweepContext sc = {p->id, rreason};
+ lnk = p->nlinks;
+ p->nlinks = NULL;
+ erts_sweep_links(lnk, &sweep_one_link, &sc);
+ }
+ {
+ ErtsMonitor *moni = p->monitors;
+ p->monitors = NULL;
+ erts_sweep_monitors(moni, &sweep_one_monitor, NULL);
+ }
+
+
+ if ((p->status & ERTS_PORT_SFLG_DISTRIBUTION) && p->dist_entry) {
+ erts_do_net_exits(p->dist_entry, rreason);
+ erts_deref_dist_entry(p->dist_entry);
+ p->dist_entry = NULL;
+ erts_port_status_band_set(p, ~ERTS_PORT_SFLG_DISTRIBUTION);
+ }
+
+ if ((reason != am_kill) && !is_port_ioq_empty(p)) {
+ erts_port_status_bandor_set(p,
+ ~ERTS_PORT_SFLG_EXITING, /* must turn it off */
+ ERTS_PORT_SFLG_CLOSING);
+ flush_port(p);
+ }
+ else {
+ terminate_port(p);
+ }
+}
+
+/* About the states ERTS_PORT_SFLG_EXITING and ERTS_PORT_SFLG_CLOSING used above.
+**
+** ERTS_PORT_SFLG_EXITING is a recursion protection for erts_do_exit_port().
+** It is unclear whether this state is necessary or not; it might be possible
+** to merge it with ERTS_PORT_SFLG_CLOSING. ERTS_PORT_SFLG_EXITING only persists
+** over a section of sequential (but highly recursive) code.
+**
+** ERTS_PORT_SFLG_CLOSING is a state where the port is in limbo, waiting to
+** pass on. All links are removed and the port keeps receiving input/output
+** events, so that terminate_port() is called as soon as the port queue
+** becomes empty.
+*/
+
+
+
+/* Command should be of the form
+**   {PID, close}
+**   {PID, {command, io-list}}
+**   {PID, {connect, New_PID}}
+*/
+void erts_port_command(Process *proc,
+ Eterm caller_id,
+ Port *port,
+ Eterm command)
+{
+ Eterm *tp;
+ Eterm pid;
+
+ if (!port)
+ return;
+
+ erts_smp_proc_unlock(proc, ERTS_PROC_LOCK_MAIN);
+ ERTS_SMP_CHK_NO_PROC_LOCKS;
+ ASSERT(!INVALID_PORT(port, port->id));
+
+ if (is_tuple_arity(command, 2)) {
+ tp = tuple_val(command);
+ if ((pid = port->connected) == tp[1]) {
+ /* PID must be connected */
+ if (tp[2] == am_close) {
+ erts_port_status_bor_set(port, ERTS_PORT_SFLG_SEND_CLOSED);
+ erts_do_exit_port(port, pid, am_normal);
+ goto done;
+ } else if (is_tuple_arity(tp[2], 2)) {
+ tp = tuple_val(tp[2]);
+ if (tp[1] == am_command) {
+ if (erts_write_to_port(caller_id, port, tp[2]) == 0)
+ goto done;
+ } else if ((tp[1] == am_connect) && is_internal_pid(tp[2])) {
+ port->connected = tp[2];
+ deliver_result(port->id, pid, am_connected);
+ goto done;
+ }
+ }
+ }
+ }
+
+ {
+ ErtsProcLocks rp_locks = ERTS_PROC_LOCKS_XSIG_SEND;
+ Process* rp = erts_pid2proc_opt(NULL, 0,
+ port->connected, rp_locks,
+ ERTS_P2P_FLG_SMP_INC_REFC);
+ if (rp) {
+ (void) erts_send_exit_signal(NULL,
+ port->id,
+ rp,
+ &rp_locks,
+ am_badsig,
+ NIL,
+ NULL,
+ 0);
+ erts_smp_proc_unlock(rp, rp_locks);
+ erts_smp_proc_dec_refc(rp);
+ }
+
+ }
+ done:
+ erts_smp_proc_lock(proc, ERTS_PROC_LOCK_MAIN);
+}
+
+/*
+ * Control a port synchronously.
+ * Returns either a list or a binary.
+ */
+Eterm
+erts_port_control(Process* p, Port* prt, Uint command, Eterm iolist)
+{
+ byte* to_port = NULL; /* Buffer to write to port. */
+				/* Initialized only to silence a
+				   use-before-set warning. */
+ int to_len = 0; /* Length of buffer. */
+ int must_free = 0; /* True if the buffer should be freed. */
+ char port_result[ERL_ONHEAP_BIN_LIMIT]; /* Default buffer for result from port. */
+ char* port_resp; /* Pointer to result buffer. */
+ int n;
+ int (*control)(ErlDrvData, unsigned, char*, int, char**, int);
+ int fpe_was_unmasked;
+
+ ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
+
+ if ((control = prt->drv_ptr->control) == NULL) {
+ return THE_NON_VALUE;
+ }
+
+ /*
+ * Convert the iolist to a buffer, pointed to by to_port,
+ * and with its length in to_len.
+ */
+ if (is_binary(iolist) && binary_bitoffset(iolist) == 0) {
+ Uint bitoffs;
+ Uint bitsize;
+ ERTS_GET_BINARY_BYTES(iolist, to_port, bitoffs, bitsize);
+ to_len = binary_size(iolist);
+ } else {
+ int r;
+
+	/* Try with an 8KB buffer first; it will often be enough. */
+ to_len = 8*1024;
+ to_port = erts_alloc(ERTS_ALC_T_TMP, to_len);
+ must_free = 1;
+
+ /*
+ * In versions before R10B, we used to reserve random
+ * amounts of extra memory. From R10B, we allocate the
+ * exact amount.
+ */
+ r = io_list_to_buf(iolist, (char*) to_port, to_len);
+ if (r >= 0) {
+ to_len -= r;
+ } else if (r == -2) { /* Type error */
+ erts_free(ERTS_ALC_T_TMP, (void *) to_port);
+ return THE_NON_VALUE;
+ } else {
+ ASSERT(r == -1); /* Overflow */
+ erts_free(ERTS_ALC_T_TMP, (void *) to_port);
+ if ((to_len = io_list_len(iolist)) < 0) { /* Type error */
+ return THE_NON_VALUE;
+ }
+ must_free = 1;
+ to_port = erts_alloc(ERTS_ALC_T_TMP, to_len);
+ r = io_list_to_buf(iolist, (char*) to_port, to_len);
+ ASSERT(r == 0);
+ }
+ }
+
+ prt->caller = p->id; /* Internal pid */
+
+ erts_smp_proc_unlock(p, ERTS_PROC_LOCK_MAIN);
+ ERTS_SMP_CHK_NO_PROC_LOCKS;
+
+ /*
+ * Call the port's control routine.
+ */
+
+ port_resp = port_result;
+ fpe_was_unmasked = erts_block_fpe();
+ n = control((ErlDrvData)prt->drv_data, command, (char*)to_port, to_len,
+ &port_resp, sizeof(port_result));
+ erts_unblock_fpe(fpe_was_unmasked);
+ if (must_free) {
+ erts_free(ERTS_ALC_T_TMP, (void *) to_port);
+ }
+ prt->caller = NIL;
+#ifdef ERTS_SMP
+ if (prt->xports)
+ erts_smp_xports_unlock(prt);
+ ASSERT(!prt->xports);
+#endif
+
+ erts_smp_proc_lock(p, ERTS_PROC_LOCK_MAIN);
+ /*
+ * Handle the result.
+ */
+
+ if (n < 0) {
+ return THE_NON_VALUE;
+ }
+
+ if ((prt->control_flags & PORT_CONTROL_FLAG_BINARY) == 0) { /* List result */
+ Eterm ret;
+ Eterm* hp = HAlloc(p, 2*n);
+ ret = buf_to_intlist(&hp, port_resp, n, NIL);
+ if (port_resp != port_result) {
+ driver_free(port_resp);
+ }
+ return ret;
+ }
+ else if (port_resp == NULL) {
+ return NIL;
+ }
+ else { /* Binary result */
+ ErlDrvBinary *dbin;
+ ErlHeapBin *hbin;
+ if (port_resp != port_result) {
+ dbin = (ErlDrvBinary *) port_resp;
+ if (dbin->orig_size > ERL_ONHEAP_BIN_LIMIT) {
+ ProcBin* pb = (ProcBin *) HAlloc(p, PROC_BIN_SIZE);
+ pb->thing_word = HEADER_PROC_BIN;
+ pb->size = dbin->orig_size;
+ pb->next = MSO(p).mso;
+ MSO(p).mso = pb;
+ pb->val = ErlDrvBinary2Binary(dbin);
+ pb->bytes = (byte*) dbin->orig_bytes;
+ pb->flags = 0;
+ MSO(p).overhead += dbin->orig_size / sizeof(Eterm);
+ return make_binary(pb);
+ }
+ port_resp = dbin->orig_bytes;
+ n = dbin->orig_size;
+ } else {
+ dbin = NULL;
+ }
+ hbin = (ErlHeapBin*) HAlloc(p, heap_bin_size(n));
+ ASSERT(n <= ERL_ONHEAP_BIN_LIMIT);
+ hbin->thing_word = header_heap_bin(n);
+ hbin->size = n;
+ sys_memcpy(hbin->data, port_resp, n);
+ if (dbin != NULL) {
+ driver_free_binary(dbin);
+ }
+ return make_binary(hbin);
+ }
+}
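+
+/*
+ * Hedged sketch (hypothetical driver code, not part of this file): a
+ * control callback matching the conventions handled above. A reply that
+ * fits in the preallocated rlen bytes is copied into *rbuf; a larger
+ * reply from a port with PORT_CONTROL_FLAG_BINARY set must be returned
+ * as an ErlDrvBinary. example_control and the "pong" reply are
+ * assumptions for illustration.
+ */
+#if 0
+static int example_control(ErlDrvData handle, unsigned int command,
+			   char *buf, int len, char **rbuf, int rlen)
+{
+    static const char reply[] = "pong";
+    int n = sizeof(reply) - 1;
+
+    if (n <= rlen) {
+	memcpy(*rbuf, reply, n);	/* fits in the provided buffer */
+    } else {
+	ErlDrvBinary *bin = driver_alloc_binary(n);
+	if (bin == NULL)
+	    return -1;			/* mapped to THE_NON_VALUE above */
+	memcpy(bin->orig_bytes, reply, n);
+	*rbuf = (char *) bin;		/* binary-mode reply */
+    }
+    return n;
+}
+#endif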
+
+typedef struct {
+ int to;
+ void *arg;
+} prt_one_lnk_data;
+
+static void prt_one_monitor(ErtsMonitor *mon, void *vprtd)
+{
+ prt_one_lnk_data *prtd = (prt_one_lnk_data *) vprtd;
+ erts_print(prtd->to, prtd->arg, "(%T,%T)", mon->pid,mon->ref);
+}
+
+static void prt_one_lnk(ErtsLink *lnk, void *vprtd)
+{
+ prt_one_lnk_data *prtd = (prt_one_lnk_data *) vprtd;
+ erts_print(prtd->to, prtd->arg, "%T", lnk->pid);
+}
+
+void
+print_port_info(int to, void *arg, int i)
+{
+ Port* p = &erts_port[i];
+
+ if (p->status & ERTS_PORT_SFLGS_DEAD)
+ return;
+
+ erts_print(to, arg, "=port:%T\n", p->id);
+ erts_print(to, arg, "Slot: %d\n", i);
+ if (p->status & ERTS_PORT_SFLG_CONNECTED) {
+ erts_print(to, arg, "Connected: %T", p->connected);
+ erts_print(to, arg, "\n");
+ }
+
+ if (p->nlinks != NULL) {
+ prt_one_lnk_data prtd;
+ prtd.to = to;
+ prtd.arg = arg;
+ erts_print(to, arg, "Links: ");
+ erts_doforall_links(p->nlinks, &prt_one_lnk, &prtd);
+ erts_print(to, arg, "\n");
+ }
+ if (p->monitors != NULL) {
+ prt_one_lnk_data prtd;
+ prtd.to = to;
+ prtd.arg = arg;
+ erts_print(to, arg, "Monitors: ");
+ erts_doforall_monitors(p->monitors, &prt_one_monitor, &prtd);
+ erts_print(to, arg, "\n");
+ }
+
+ if (p->reg != NULL)
+ erts_print(to, arg, "Registered as: %T\n", p->reg->name);
+
+ if (p->drv_ptr == &fd_driver) {
+ erts_print(to, arg, "Port is UNIX fd not opened by emulator: %s\n", p->name);
+ } else if (p->drv_ptr == &vanilla_driver) {
+ erts_print(to, arg, "Port is a file: %s\n",p->name);
+ } else if (p->drv_ptr == &spawn_driver) {
+ erts_print(to, arg, "Port controls external process: %s\n",p->name);
+ } else {
+ erts_print(to, arg, "Port controls linked-in driver: %s\n",p->name);
+ }
+}
+
+void
+set_busy_port(ErlDrvPort port_num, int on)
+{
+ ERTS_SMP_CHK_NO_PROC_LOCKS;
+
+ ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(&erts_port[port_num]));
+
+ if (on) {
+ erts_port_status_bor_set(&erts_port[port_num],
+ ERTS_PORT_SFLG_PORT_BUSY);
+ } else {
+ ErtsProcList* plp = erts_port[port_num].suspended;
+ erts_port_status_band_set(&erts_port[port_num],
+ ~ERTS_PORT_SFLG_PORT_BUSY);
+ erts_port[port_num].suspended = NULL;
+
+ if (erts_port[port_num].dist_entry) {
+ /*
+ * Processes suspended on distribution ports are
+ * normally queued on the dist entry.
+ */
+ erts_dist_port_not_busy(&erts_port[port_num]);
+ }
+
+ /*
+ * Resume, in a round-robin fashion, all processes waiting on the port.
+ *
+ * This version submitted by Tony Rogvall. The earlier version used
+ * to resume the processes in order, which caused starvation of all but
+ * the first process.
+ */
+
+ if (plp) {
+ /* First proc should be resumed last */
+ if (plp->next) {
+ erts_resume_processes(plp->next);
+ plp->next = NULL;
+ }
+ erts_resume_processes(plp);
+ }
+ }
+}
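+
+/*
+ * Hedged sketch (hypothetical driver code): using set_busy_port() for
+ * flow control. The queued-byte threshold and the helper name are
+ * assumptions for illustration.
+ */
+#if 0
+#define EXAMPLE_HIGH_WATERMARK (64*1024)
+
+static void example_check_busy(ErlDrvPort port, int queued_bytes)
+{
+    if (queued_bytes > EXAMPLE_HIGH_WATERMARK)
+	set_busy_port(port, 1);		/* suspend sending processes */
+    else
+	set_busy_port(port, 0);		/* resume them (round-robin) */
+}
+#endif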
+
+void set_port_control_flags(ErlDrvPort port_num, int flags)
+{
+
+ ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(&erts_port[port_num]));
+
+ erts_port[port_num].control_flags = flags;
+}
+
+int get_port_flags(ErlDrvPort ix) {
+    Port* prt = erts_drvport2port(ix);
+
+    if (prt == NULL)
+	return 0;
+
+    ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
+
+ return (prt->status & ERTS_PORT_SFLG_BINARY_IO ? PORT_FLAG_BINARY : 0)
+ | (prt->status & ERTS_PORT_SFLG_LINEBUF_IO ? PORT_FLAG_LINE : 0);
+}
+
+
+void erts_raw_port_command(Port* p, byte* buf, Uint len)
+{
+ int fpe_was_unmasked;
+
+ ERTS_SMP_CHK_NO_PROC_LOCKS;
+ ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(p));
+
+ if (len > (Uint) INT_MAX)
+ erl_exit(ERTS_ABORT_EXIT,
+		 "Absurdly large data buffer (%bpu bytes) passed to "
+ "output callback of %s driver.\n",
+ len,
+ p->drv_ptr->name ? p->drv_ptr->name : "unknown");
+
+ p->caller = NIL;
+ fpe_was_unmasked = erts_block_fpe();
+ (*p->drv_ptr->output)((ErlDrvData)p->drv_data, (char*) buf, (int) len);
+ erts_unblock_fpe(fpe_was_unmasked);
+}
+
+int async_ready(Port *p, void* data)
+{
+ int need_free = 1;
+
+ ERTS_SMP_CHK_NO_PROC_LOCKS;
+
+ if (p) {
+ ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(p));
+ ASSERT(!(p->status & ERTS_PORT_SFLGS_DEAD));
+ if (p->drv_ptr->ready_async != NULL) {
+ (*p->drv_ptr->ready_async)((ErlDrvData)p->drv_data, data);
+ need_free = 0;
+#ifdef ERTS_SMP
+ if (p->xports)
+ erts_smp_xports_unlock(p);
+ ASSERT(!p->xports);
+#endif
+ }
+ if ((p->status & ERTS_PORT_SFLG_CLOSING) && is_port_ioq_empty(p)) {
+ terminate_port(p);
+ }
+ }
+ return need_free;
+}
+
+static void
+report_missing_drv_callback(Port *p, char *drv_type, char *callback)
+{
+ ErtsPortNames *pnp = erts_get_port_names(p->id);
+ char *unknown = "<unknown>";
+ char *drv_name = pnp->driver_name ? pnp->driver_name : unknown;
+ char *prt_name = pnp->name ? pnp->name : unknown;
+ erts_dsprintf_buf_t *dsbufp = erts_create_logger_dsbuf();
+ erts_dsprintf(dsbufp, "%T: %s driver '%s' ", p->id, drv_type, drv_name);
+ if (sys_strcmp(drv_name, prt_name) != 0)
+ erts_dsprintf(dsbufp, "(%s) ", prt_name);
+ erts_dsprintf(dsbufp, "does not implement the %s callback!\n", callback);
+ erts_free_port_names(pnp);
+ erts_send_error_to_logger_nogl(dsbufp);
+}
+
+void
+erts_stale_drv_select(Eterm port,
+ ErlDrvEvent hndl,
+ int mode,
+ int deselect)
+{
+ char *type;
+ ErlDrvPort drv_port = internal_port_index(port);
+ ErtsPortNames *pnp = erts_get_port_names(port);
+ erts_dsprintf_buf_t *dsbufp;
+
+ switch (mode) {
+ case ERL_DRV_READ | ERL_DRV_WRITE:
+ type = "Input/Output";
+ goto deselect;
+ case ERL_DRV_WRITE:
+ type = "Output";
+ goto deselect;
+ case ERL_DRV_READ:
+ type = "Input";
+ deselect:
+ if (deselect) {
+ driver_select(drv_port, hndl,
+ mode | ERL_DRV_USE_NO_CALLBACK,
+ 0);
+ }
+ break;
+ default:
+ type = "Event";
+ if (deselect)
+ driver_event(drv_port, hndl, NULL);
+ break;
+ }
+
+ dsbufp = erts_create_logger_dsbuf();
+ erts_dsprintf(dsbufp,
+ "%T: %s: %s driver gone away without deselecting!\n",
+ port,
+ pnp->name ? pnp->name : "<unknown>",
+ type);
+ erts_free_port_names(pnp);
+ erts_send_error_to_logger_nogl(dsbufp);
+}
+
+ErtsPortNames *
+erts_get_port_names(Eterm id)
+{
+ ErtsPortNames *pnp;
+ ASSERT(is_nil(id) || is_internal_port(id));
+
+ if (is_not_internal_port(id)) {
+ pnp = erts_alloc(ERTS_ALC_T_PORT_NAMES, sizeof(ErtsPortNames));
+ pnp->name = NULL;
+ pnp->driver_name = NULL;
+ }
+ else {
+ Port* prt = &erts_port[internal_port_index(id)];
+ int do_realloc = 1;
+ int len = -1;
+ size_t pnp_len = sizeof(ErtsPortNames);
+#ifndef DEBUG
+ pnp_len += 100; /* In most cases 100 characters will be enough... */
+#endif
+ pnp = erts_alloc(ERTS_ALC_T_PORT_NAMES, pnp_len);
+ do {
+ int nlen;
+ char *name, *driver_name;
+ if (len > 0) {
+ erts_free(ERTS_ALC_T_PORT_NAMES, pnp);
+ pnp_len = sizeof(ErtsPortNames) + len;
+ pnp = erts_alloc(ERTS_ALC_T_PORT_NAMES, pnp_len);
+ }
+ erts_smp_port_state_lock(prt);
+ if (id != prt->id) {
+ len = nlen = 0;
+ name = driver_name = NULL;
+ }
+ else {
+ name = prt->name;
+ len = nlen = name ? sys_strlen(name) + 1 : 0;
+ driver_name = (prt->drv_ptr ? prt->drv_ptr->name : NULL);
+ len += driver_name ? sys_strlen(driver_name) + 1 : 0;
+ }
+ if (len <= pnp_len - sizeof(ErtsPortNames)) {
+ if (!name)
+ pnp->name = NULL;
+ else {
+ pnp->name = ((char *) pnp) + sizeof(ErtsPortNames);
+ sys_strcpy(pnp->name, name);
+ }
+ if (!driver_name)
+ pnp->driver_name = NULL;
+ else {
+ pnp->driver_name = (((char *) pnp)
+ + sizeof(ErtsPortNames)
+ + nlen);
+ sys_strcpy(pnp->driver_name, driver_name);
+ }
+ do_realloc = 0;
+ }
+ erts_smp_port_state_unlock(prt);
+ } while (do_realloc);
+ }
+ return pnp;
+}
+
+void
+erts_free_port_names(ErtsPortNames *pnp)
+{
+ erts_free(ERTS_ALC_T_PORT_NAMES, pnp);
+}
+
+static void schedule_port_timeout(Port *p)
+{
+ /*
+ * Scheduling of port timeouts can be done without port locking, but
+ * since the task handle is stored in the port structure and the ptimer
+ * structure is protected by the port lock we require the port to be
+ * locked for now...
+ *
+ * TODO: Implement scheduling of port timeouts without locking
+ * the port.
+ * /Rickard
+ */
+ ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(p));
+ (void) erts_port_task_schedule(p->id,
+ &p->timeout_task,
+ ERTS_PORT_TASK_TIMEOUT,
+ (ErlDrvEvent) -1,
+ NULL);
+}
+
+ErlDrvTermData driver_mk_term_nil(void)
+{
+ return driver_term_nil;
+}
+
+void driver_report_exit(int ix, int status)
+{
+ Port* prt = erts_drvport2port(ix);
+ Eterm* hp;
+ Eterm tuple;
+ Process *rp;
+ Eterm pid;
+ ErlHeapFragment *bp = NULL;
+ ErlOffHeap *ohp;
+ ErtsProcLocks rp_locks = 0;
+
+ ERTS_SMP_CHK_NO_PROC_LOCKS;
+ ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
+
+ pid = prt->connected;
+ ASSERT(is_internal_pid(pid));
+ rp = erts_pid2proc_opt(NULL, 0, pid, 0, ERTS_P2P_FLG_SMP_INC_REFC);
+ if (!rp)
+ return;
+
+ hp = erts_alloc_message_heap(3+3, &bp, &ohp, rp, &rp_locks);
+
+ tuple = TUPLE2(hp, am_exit_status, make_small(status));
+ hp += 3;
+ tuple = TUPLE2(hp, prt->id, tuple);
+
+ erts_queue_message(rp, &rp_locks, bp, tuple, am_undefined);
+
+ erts_smp_proc_unlock(rp, rp_locks);
+ erts_smp_proc_dec_refc(rp);
+}
+
+
+static ERTS_INLINE int
+deliver_term_check_port(ErlDrvPort drvport)
+{
+ int res;
+ int ix = (int) drvport;
+ if (ix < 0 || erts_max_ports <= ix)
+ res = -1; /* invalid */
+ else {
+ Port* prt = &erts_port[ix];
+ erts_smp_port_state_lock(prt);
+ if (!(prt->status & ERTS_PORT_SFLGS_INVALID_LOOKUP))
+ res = 1; /* ok */
+ else if (prt->status & ERTS_PORT_SFLG_CLOSING)
+ res = 0; /* closing */
+ else
+ res = -1; /* invalid (dead) */
+ erts_smp_port_state_unlock(prt);
+ }
+ return res;
+}
+
+#define ERTS_B2T_STATES_DEF_STATES_SZ 5
+#define ERTS_B2T_STATES_DEF_STATES_INC 100
+
+struct b2t_states__ {
+ int len;
+ int ix;
+ int used;
+ ErtsBinary2TermState *state;
+ ErtsBinary2TermState def_states[ERTS_B2T_STATES_DEF_STATES_SZ];
+#ifdef DEBUG
+ byte **org_ext;
+ byte *def_org_ext[ERTS_B2T_STATES_DEF_STATES_SZ];
+#endif
+};
+
+static ERTS_INLINE void
+init_b2t_states(struct b2t_states__ *b2tsp)
+{
+ b2tsp->len = ERTS_B2T_STATES_DEF_STATES_SZ;
+ b2tsp->ix = 0;
+ b2tsp->used = 0;
+ b2tsp->state = &b2tsp->def_states[0];
+#ifdef DEBUG
+ b2tsp->org_ext = &b2tsp->def_org_ext[0];
+#endif
+}
+
+static ERTS_INLINE void
+grow_b2t_states(struct b2t_states__ *b2tsp)
+{
+ if (b2tsp->state != &b2tsp->def_states[0]) {
+ b2tsp->len += ERTS_B2T_STATES_DEF_STATES_INC;
+ b2tsp->state = erts_realloc(ERTS_ALC_T_TMP,
+ b2tsp->state,
+ sizeof(ErtsBinary2TermState)*b2tsp->len);
+#ifdef DEBUG
+ b2tsp->org_ext = erts_realloc(ERTS_ALC_T_TMP,
+ b2tsp->org_ext,
+ sizeof(char *)*b2tsp->len);
+#endif
+ }
+ else {
+ ErtsBinary2TermState *new_states;
+ new_states = erts_alloc(ERTS_ALC_T_TMP,
+ (sizeof(ErtsBinary2TermState)
+ *ERTS_B2T_STATES_DEF_STATES_INC));
+ sys_memcpy((void *) new_states,
+ (void *) b2tsp->state,
+ sizeof(ErtsBinary2TermState)*ERTS_B2T_STATES_DEF_STATES_SZ);
+ b2tsp->state = new_states;
+ b2tsp->len = ERTS_B2T_STATES_DEF_STATES_INC;
+#ifdef DEBUG
+ {
+ byte **new_org_ext = erts_alloc(ERTS_ALC_T_TMP,
+ (sizeof(char *)
+ *ERTS_B2T_STATES_DEF_STATES_INC));
+ sys_memcpy((void *) new_org_ext,
+ (void *) b2tsp->org_ext,
+ sizeof(char *)*ERTS_B2T_STATES_DEF_STATES_SZ);
+ b2tsp->org_ext = new_org_ext;
+ }
+#endif
+ }
+}
+
+static ERTS_INLINE void
+cleanup_b2t_states(struct b2t_states__ *b2tsp)
+{
+ if (b2tsp->state != &b2tsp->def_states[0]) {
+ erts_free(ERTS_ALC_T_TMP, b2tsp->state);
+#ifdef DEBUG
+ erts_free(ERTS_ALC_T_TMP, b2tsp->org_ext);
+#endif
+ }
+}
+
+
+/*
+ * Generate an Erlang term from data in an array (representing a simple stack
+ * machine to build terms).
+ * Returns:
+ * -1 on error in input data
+ *    0 if the message was not delivered (bad 'to' pid or closed port)
+ * 1 if the message was delivered successfully
+ */
+
+static int
+driver_deliver_term(ErlDrvPort port,
+ Eterm to,
+ ErlDrvTermData* data,
+ int len)
+{
+#define ERTS_DDT_FAIL do { res = -1; goto done; } while (0)
+ Uint need = 0;
+ int depth = 0;
+ int res;
+ Eterm *hp = NULL, *hp_start = NULL, *hp_end = NULL;
+ ErlDrvTermData* ptr;
+ ErlDrvTermData* ptr_end;
+ DECLARE_ESTACK(stack);
+ Eterm mess = NIL; /* keeps compiler happy */
+ Process* rp = NULL;
+ ErlHeapFragment *bp = NULL;
+ ErlOffHeap *ohp;
+ ErtsProcLocks rp_locks = 0;
+ struct b2t_states__ b2t;
+
+ init_b2t_states(&b2t);
+
+    /*
+     * We used to check the port and process here. In the SMP enabled
+     * emulator, however, we don't want to do that until we have verified
+     * the term.
+     */
+
+ /*
+ * Check ErlDrvTermData for consistency and calculate needed heap size
+ * and stack depth.
+ */
+ ptr = data;
+ ptr_end = ptr + len;
+
+ while (ptr < ptr_end) {
+ ErlDrvTermData tag = *ptr++;
+
+#define ERTS_DDT_CHK_ENOUGH_ARGS(NEED) \
+ if (ptr+((NEED)-1) >= ptr_end) ERTS_DDT_FAIL;
+
+ switch(tag) {
+ case ERL_DRV_NIL: /* no arguments */
+ depth++;
+ break;
+ case ERL_DRV_ATOM: /* atom argument */
+ ERTS_DDT_CHK_ENOUGH_ARGS(1);
+ if (is_not_atom(ptr[0])) ERTS_DDT_FAIL;
+ ptr++;
+ depth++;
+ break;
+ case ERL_DRV_INT: /* signed int argument */
+ ERTS_DDT_CHK_ENOUGH_ARGS(1);
+ /* check for bignum */
+ if (!IS_SSMALL((Sint)ptr[0]))
+ need += BIG_UINT_HEAP_SIZE; /* use small_to_big */
+ ptr++;
+ depth++;
+ break;
+ case ERL_DRV_UINT: /* unsigned int argument */
+ ERTS_DDT_CHK_ENOUGH_ARGS(1);
+ /* check for bignum */
+ if (!IS_USMALL(0, (Uint)ptr[0]))
+ need += BIG_UINT_HEAP_SIZE; /* use small_to_big */
+ ptr++;
+ depth++;
+ break;
+ case ERL_DRV_INT64: /* pointer to signed 64-bit int argument */
+ ERTS_DDT_CHK_ENOUGH_ARGS(1);
+ erts_bld_sint64(NULL, &need, *((Sint64 *) ptr[0]));
+ ptr++;
+ depth++;
+ break;
+ case ERL_DRV_UINT64: /* pointer to unsigned 64-bit int argument */
+ ERTS_DDT_CHK_ENOUGH_ARGS(1);
+ erts_bld_uint64(NULL, &need, *((Uint64 *) ptr[0]));
+ ptr++;
+ depth++;
+ break;
+ case ERL_DRV_PORT: /* port argument */
+ ERTS_DDT_CHK_ENOUGH_ARGS(1);
+ if (is_not_internal_port(ptr[0])) ERTS_DDT_FAIL;
+ ptr++;
+ depth++;
+ break;
+ case ERL_DRV_BINARY: { /* ErlDrvBinary*, size, offs */
+ ErlDrvBinary* b;
+ Uint size;
+ Uint offset;
+ ERTS_DDT_CHK_ENOUGH_ARGS(3);
+ b = (ErlDrvBinary*) ptr[0];
+ size = ptr[1];
+ offset = ptr[2];
+ if (!b || size + offset > b->orig_size)
+ ERTS_DDT_FAIL; /* No binary or outside the binary */
+ need += (size <= ERL_ONHEAP_BIN_LIMIT
+ ? heap_bin_size(size)
+ : PROC_BIN_SIZE);
+ ptr += 3;
+ depth++;
+ break;
+ }
+ case ERL_DRV_BUF2BINARY: { /* char*, size */
+ byte *bufp;
+ Uint size;
+ ERTS_DDT_CHK_ENOUGH_ARGS(2);
+ bufp = (byte *) ptr[0];
+ size = (Uint) ptr[1];
+ if (!bufp && size > 0) ERTS_DDT_FAIL;
+ need += (size <= ERL_ONHEAP_BIN_LIMIT
+ ? heap_bin_size(size)
+ : PROC_BIN_SIZE);
+ ptr += 2;
+ depth++;
+ break;
+ }
+ case ERL_DRV_STRING: /* char*, length */
+ ERTS_DDT_CHK_ENOUGH_ARGS(2);
+ if ((char *) ptr[0] == NULL || (int) ptr[1] < 0) ERTS_DDT_FAIL;
+ need += ptr[1] * 2;
+ ptr += 2;
+ depth++;
+ break;
+ case ERL_DRV_STRING_CONS: /* char*, length */
+ ERTS_DDT_CHK_ENOUGH_ARGS(2);
+ if ((char *) ptr[0] == NULL || (int) ptr[1] < 0) ERTS_DDT_FAIL;
+ need += ptr[1] * 2;
+ if (depth < 1) ERTS_DDT_FAIL;
+ ptr += 2;
+ break;
+ case ERL_DRV_LIST: /* int */
+ ERTS_DDT_CHK_ENOUGH_ARGS(1);
+ if ((int) ptr[0] <= 0) ERTS_DDT_FAIL;
+ need += (ptr[0]-1)*2; /* list cells */
+ depth -= ptr[0];
+ if (depth < 0) ERTS_DDT_FAIL;
+ ptr++;
+ depth++;
+ break;
+ case ERL_DRV_TUPLE: { /* int */
+ ERTS_DDT_CHK_ENOUGH_ARGS(1);
+ if ((int) ptr[0] < 0) ERTS_DDT_FAIL;
+ need += ptr[0]+1; /* vector positions + arityval */
+ depth -= ptr[0];
+ if (depth < 0) ERTS_DDT_FAIL;
+ ptr++;
+ depth++;
+ break;
+ }
+ case ERL_DRV_PID: /* pid argument */
+ ERTS_DDT_CHK_ENOUGH_ARGS(1);
+ if (is_not_internal_pid(ptr[0])) ERTS_DDT_FAIL;
+ ptr++;
+ depth++;
+ break;
+ case ERL_DRV_FLOAT: /* double * */
+ ERTS_DDT_CHK_ENOUGH_ARGS(1);
+ need += FLOAT_SIZE_OBJECT;
+ ptr++;
+ depth++;
+ break;
+ case ERL_DRV_EXT2TERM: { /* char *ext, int size */
+ byte* ext;
+ Sint size;
+ Sint hsz;
+
+ ERTS_DDT_CHK_ENOUGH_ARGS(2);
+ ext = (byte *) ptr[0];
+ size = (Sint) ptr[1];
+ if (!ext || size <= 0)
+ ERTS_DDT_FAIL;
+ if (b2t.len <= b2t.ix)
+ grow_b2t_states(&b2t);
+#ifdef DEBUG
+ b2t.org_ext[b2t.ix] = ext;
+#endif
+ hsz = erts_binary2term_prepare(&b2t.state[b2t.ix++], ext, size);
+ if (hsz < 0)
+ ERTS_DDT_FAIL; /* Invalid data */
+ need += hsz;
+ ptr += 2;
+ depth++;
+ break;
+ }
+ default:
+ ERTS_DDT_FAIL;
+ }
+#undef ERTS_DDT_CHK_ENOUGH_ARGS
+ }
+
+ if ((depth != 1) || (ptr != ptr_end))
+ ERTS_DDT_FAIL;
+
+ b2t.used = b2t.ix;
+ b2t.ix = 0;
+
+ /*
+ * The term is OK. Go ahead and validate the port and process.
+ */
+ res = deliver_term_check_port(port);
+ if (res <= 0)
+ goto done;
+
+ rp = erts_pid2proc_opt(NULL, 0, to, rp_locks, ERTS_P2P_FLG_SMP_INC_REFC);
+ if (!rp) {
+ res = 0;
+ goto done;
+ }
+
+ hp_start = hp = erts_alloc_message_heap(need, &bp, &ohp, rp, &rp_locks);
+ hp_end = hp + need;
+
+ /*
+ * Interpret the instructions and build the term.
+ */
+ ptr = data;
+ while (ptr < ptr_end) {
+ ErlDrvTermData tag = *ptr++;
+
+ switch(tag) {
+ case ERL_DRV_NIL: /* no arguments */
+ mess = NIL;
+ break;
+
+ case ERL_DRV_ATOM: /* atom argument */
+ mess = ptr[0];
+ ptr++;
+ break;
+
+ case ERL_DRV_INT: /* signed int argument */
+ if (IS_SSMALL((Sint)ptr[0]))
+ mess = make_small((Sint)ptr[0]);
+ else {
+ mess = small_to_big((Sint)ptr[0], hp);
+ hp += BIG_UINT_HEAP_SIZE;
+ }
+ ptr++;
+ break;
+
+ case ERL_DRV_UINT: /* unsigned int argument */
+ if (IS_USMALL(0, (Uint)ptr[0]))
+ mess = make_small((Uint)ptr[0]);
+ else {
+ mess = uint_to_big((Uint)ptr[0], hp);
+ hp += BIG_UINT_HEAP_SIZE;
+ }
+ ptr++;
+ break;
+
+	case ERL_DRV_INT64: /* pointer to signed 64-bit int argument */
+ mess = erts_bld_sint64(&hp, NULL, *((Sint64 *) ptr[0]));
+ ptr++;
+ break;
+
+ case ERL_DRV_UINT64: /* pointer to unsigned 64-bit int argument */
+ mess = erts_bld_uint64(&hp, NULL, *((Uint64 *) ptr[0]));
+ ptr++;
+ break;
+
+ case ERL_DRV_PORT: /* port argument */
+ mess = ptr[0];
+ ptr++;
+ break;
+
+ case ERL_DRV_BINARY: { /* ErlDrvBinary*, size, offs */
+ ErlDrvBinary* b = (ErlDrvBinary*) ptr[0];
+ Uint size = ptr[1];
+ Uint offset = ptr[2];
+
+ if (size <= ERL_ONHEAP_BIN_LIMIT) {
+ ErlHeapBin* hbp = (ErlHeapBin *) hp;
+ hp += heap_bin_size(size);
+ hbp->thing_word = header_heap_bin(size);
+ hbp->size = size;
+ if (size > 0) {
+ sys_memcpy((void *) hbp->data, (void *) (((byte*) b->orig_bytes) + offset), size);
+ }
+ mess = make_binary(hbp);
+ }
+ else {
+ ProcBin* pb = (ProcBin *) hp;
+ driver_binary_inc_refc(b); /* caller will free binary */
+ pb->thing_word = HEADER_PROC_BIN;
+ pb->size = size;
+ pb->next = ohp->mso;
+ ohp->mso = pb;
+ pb->val = ErlDrvBinary2Binary(b);
+ pb->bytes = ((byte*) b->orig_bytes) + offset;
+ pb->flags = 0;
+ mess = make_binary(pb);
+ hp += PROC_BIN_SIZE;
+ ohp->overhead += pb->size / sizeof(Eterm);
+ }
+ ptr += 3;
+ break;
+ }
+
+ case ERL_DRV_BUF2BINARY: { /* char*, size */
+ byte *bufp = (byte *) ptr[0];
+ Uint size = (Uint) ptr[1];
+ if (size <= ERL_ONHEAP_BIN_LIMIT) {
+ ErlHeapBin* hbp = (ErlHeapBin *) hp;
+ hp += heap_bin_size(size);
+ hbp->thing_word = header_heap_bin(size);
+ hbp->size = size;
+ if (size > 0) {
+ ASSERT(bufp);
+ sys_memcpy((void *) hbp->data, (void *) bufp, size);
+ }
+ mess = make_binary(hbp);
+ }
+ else {
+ ProcBin* pbp;
+ Binary* bp = erts_bin_nrml_alloc(size);
+ ASSERT(bufp);
+ bp->flags = 0;
+ bp->orig_size = (long) size;
+ erts_refc_init(&bp->refc, 1);
+ sys_memcpy((void *) bp->orig_bytes, (void *) bufp, size);
+ pbp = (ProcBin *) hp;
+ hp += PROC_BIN_SIZE;
+ pbp->thing_word = HEADER_PROC_BIN;
+ pbp->size = size;
+ pbp->next = ohp->mso;
+ ohp->mso = pbp;
+ pbp->val = bp;
+ pbp->bytes = (byte*) bp->orig_bytes;
+ pbp->flags = 0;
+ ohp->overhead += (pbp->size / sizeof(Eterm));
+ mess = make_binary(pbp);
+ }
+ ptr += 2;
+ break;
+ }
+
+ case ERL_DRV_STRING: /* char*, length */
+ mess = buf_to_intlist(&hp, (char*)ptr[0], ptr[1], NIL);
+ ptr += 2;
+ break;
+
+ case ERL_DRV_STRING_CONS: /* char*, length */
+ mess = ESTACK_POP(stack);
+ mess = buf_to_intlist(&hp, (char*)ptr[0], ptr[1], mess);
+ ptr += 2;
+ break;
+
+ case ERL_DRV_LIST: { /* unsigned */
+ Uint i = (int) ptr[0]; /* i > 0 */
+
+ mess = ESTACK_POP(stack);
+ i--;
+ while(i > 0) {
+ Eterm hd = ESTACK_POP(stack);
+
+ mess = CONS(hp, hd, mess);
+ hp += 2;
+ i--;
+ }
+ ptr++;
+ break;
+ }
+
+ case ERL_DRV_TUPLE: { /* int */
+ int size = (int)ptr[0];
+ Eterm* tp = hp;
+
+ *tp = make_arityval(size);
+ mess = make_tuple(tp);
+
+ tp += size; /* point at last element */
+ hp = tp+1; /* advance "heap" pointer */
+
+ while(size--) {
+ *tp-- = ESTACK_POP(stack);
+ }
+ ptr++;
+ break;
+ }
+
+ case ERL_DRV_PID: /* pid argument */
+ mess = ptr[0];
+ ptr++;
+ break;
+
+ case ERL_DRV_FLOAT: { /* double * */
+ FloatDef f;
+
+ mess = make_float(hp);
+ f.fd = *((double *) ptr[0]);
+ PUT_DOUBLE(f, hp);
+ hp += FLOAT_SIZE_OBJECT;
+ ptr++;
+ break;
+ }
+
+ case ERL_DRV_EXT2TERM: /* char *ext, int size */
+ ASSERT(b2t.org_ext[b2t.ix] == (byte *) ptr[0]);
+ mess = erts_binary2term_create(&b2t.state[b2t.ix++], &hp, ohp);
+ if (mess == THE_NON_VALUE)
+ ERTS_DDT_FAIL;
+ ptr += 2;
+ break;
+
+ }
+ ESTACK_PUSH(stack, mess);
+ }
+
+ res = 1;
+
+ done:
+
+ if (res > 0) {
+ mess = ESTACK_POP(stack); /* get resulting value */
+ if (bp)
+ bp = erts_resize_message_buffer(bp, hp - hp_start, &mess, 1);
+ else {
+ ASSERT(hp);
+ HRelease(rp, hp_end, hp);
+ }
+ /* send message */
+ erts_queue_message(rp, &rp_locks, bp, mess, am_undefined);
+ }
+ else {
+ if (b2t.ix > b2t.used)
+ b2t.used = b2t.ix;
+ for (b2t.ix = 0; b2t.ix < b2t.used; b2t.ix++)
+ erts_binary2term_abort(&b2t.state[b2t.ix]);
+ if (bp)
+ free_message_buffer(bp);
+ else if (hp) {
+ HRelease(rp, hp_end, hp);
+ }
+ }
+#ifdef ERTS_SMP
+ if (rp) {
+ if (rp_locks)
+ erts_smp_proc_unlock(rp, rp_locks);
+ erts_smp_proc_dec_refc(rp);
+ }
+#endif
+ cleanup_b2t_states(&b2t);
+ DESTROY_ESTACK(stack);
+ return res;
+#undef ERTS_DDT_FAIL
+}
+
+
+int
+driver_output_term(ErlDrvPort ix, ErlDrvTermData* data, int len)
+{
+ Port* prt = erts_drvport2port(ix);
+
+    ERTS_SMP_CHK_NO_PROC_LOCKS;
+
+    if (prt == NULL)
+	return -1;
+
+    ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
+    return driver_deliver_term(ix, prt->connected, data, len);
+}
+
+
+int
+driver_send_term(ErlDrvPort ix, ErlDrvTermData to, ErlDrvTermData* data, int len)
+{
+ return driver_deliver_term(ix, to, data, len);
+}
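+
+/*
+ * Hedged sketch (hypothetical driver code): building the term
+ * {Port, {data, "hello"}} with the "stack machine" instructions
+ * interpreted by driver_deliver_term() above. Terms are pushed
+ * bottom-up; each ERL_DRV_TUPLE pops its elements off the stack.
+ * example_send_data is an assumed name.
+ */
+#if 0
+static void example_send_data(ErlDrvPort drvport)
+{
+    ErlDrvTermData spec[] = {
+	ERL_DRV_PORT, driver_mk_port(drvport),
+	    ERL_DRV_ATOM, driver_mk_atom("data"),
+	    ERL_DRV_STRING, (ErlDrvTermData) "hello", 5,
+	ERL_DRV_TUPLE, 2,		/* {data, "hello"} */
+	ERL_DRV_TUPLE, 2		/* {Port, {data, "hello"}} */
+    };
+    driver_output_term(drvport, spec, sizeof(spec) / sizeof(spec[0]));
+}
+#endif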
+
+
+/*
+ * Output a binary with hlen bytes from hbuf as list header
+ * and data is len length of bin starting from offset offs.
+ */
+
+int driver_output_binary(ErlDrvPort ix, char* hbuf, int hlen,
+ ErlDrvBinary* bin, int offs, int len)
+{
+ Port* prt = erts_drvport2port(ix);
+
+ ERTS_SMP_CHK_NO_PROC_LOCKS;
+
+ if (prt == NULL)
+ return -1;
+ ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
+ if (prt->status & ERTS_PORT_SFLG_CLOSING)
+ return 0;
+
+ prt->bytes_in += (hlen + len);
+ erts_smp_atomic_add(&erts_bytes_in, (long) (hlen + len));
+ if (prt->status & ERTS_PORT_SFLG_DISTRIBUTION) {
+ return erts_net_message(prt,
+ prt->dist_entry,
+ (byte*) hbuf, hlen,
+ (byte*) (bin->orig_bytes+offs), len);
+ }
+ else
+ deliver_bin_message(prt, prt->connected,
+ hbuf, hlen, bin, offs, len);
+ return 0;
+}
+
+/* driver_output2:
+** Delivers hlen bytes from hbuf to the port owner as a list;
+** after that the port settings apply: buf is sent as binary or list.
+**
+** Example: if hlen = 3 then the port owner will receive the data
+** [H1,H2,H3 | T]
+*/
+int driver_output2(ErlDrvPort ix, char* hbuf, int hlen, char* buf, int len)
+{
+ Port* prt = erts_drvport2port(ix);
+
+ ERTS_SMP_CHK_NO_PROC_LOCKS;
+
+ if (prt == NULL)
+ return -1;
+
+ ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
+
+ if (prt->status & ERTS_PORT_SFLG_CLOSING)
+ return 0;
+
+ prt->bytes_in += (hlen + len);
+ erts_smp_atomic_add(&erts_bytes_in, (long) (hlen + len));
+ if (prt->status & ERTS_PORT_SFLG_DISTRIBUTION) {
+ if (len == 0)
+ return erts_net_message(prt,
+ prt->dist_entry,
+ NULL, 0,
+ (byte*) hbuf, hlen);
+ else
+ return erts_net_message(prt,
+ prt->dist_entry,
+ (byte*) hbuf, hlen,
+ (byte*) buf, len);
+ }
+ else if(prt->status & ERTS_PORT_SFLG_LINEBUF_IO)
+ deliver_linebuf_message(prt, prt->connected, hbuf, hlen, buf, len);
+ else
+ deliver_read_message(prt, prt->connected, hbuf, hlen, buf, len, 0);
+ return 0;
+}
+
+/* Interface functions available to driver writers */
+
+int driver_output(ErlDrvPort ix, char* buf, int len)
+{
+ ERTS_SMP_CHK_NO_PROC_LOCKS;
+ return driver_output2(ix, NULL, 0, buf, len);
+}
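+
+/*
+ * Hedged sketch (hypothetical driver code): the header semantics of
+ * driver_output2(). With hlen = 3 the port owner receives
+ * [1,2,3 | Payload], where Payload is a list or a binary depending on
+ * the port's mode. example_reply is an assumed name.
+ */
+#if 0
+static void example_reply(ErlDrvPort port)
+{
+    char hdr[3] = {1, 2, 3};
+    char body[] = "payload";
+    driver_output2(port, hdr, sizeof(hdr), body, sizeof(body) - 1);
+}
+#endif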
+
+int driver_outputv(ErlDrvPort ix, char* hbuf, int hlen, ErlIOVec* vec, int skip)
+{
+ int n;
+ int len;
+ int size;
+ SysIOVec* iov;
+ ErlDrvBinary** binv;
+ Port* prt;
+
+ ERTS_SMP_CHK_NO_PROC_LOCKS;
+
+ size = vec->size - skip; /* Size of remaining bytes in vector */
+ ASSERT(size >= 0);
+ if (size <= 0)
+ return driver_output2(ix, hbuf, hlen, NULL, 0);
+ ASSERT(hlen >= 0); /* debug only */
+ if (hlen < 0)
+ hlen = 0;
+
+ prt = erts_drvport2port(ix);
+ if (prt == NULL)
+ return -1;
+
+ ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
+
+ if (prt->status & ERTS_PORT_SFLG_CLOSING)
+ return 0;
+
+ /* size > 0 ! */
+ iov = vec->iov;
+ binv = vec->binv;
+ n = vec->vsize;
+    /* the do-while loop strips iov_len == 0 entries from the beginning */
+ do {
+ len = iov->iov_len;
+ if (len <= skip) {
+ skip -= len;
+ iov++;
+ binv++;
+ n--;
+ } else {
+ iov->iov_base += skip;
+ iov->iov_len -= skip;
+ skip = 0;
+ }
+ } while (skip > 0);
+
+ /* XXX handle distribution !!! */
+ prt->bytes_in += (hlen + size);
+ erts_smp_atomic_add(&erts_bytes_in, (long) (hlen + size));
+ deliver_vec_message(prt, prt->connected, hbuf, hlen, binv, iov, n, size);
+ return 0;
+}
+
+/* Copy bytes from a vector into a buffer.
+** Input is a vector, a buffer, and a maximum length.
+** Returns the number of bytes copied.
+*/
+int driver_vec_to_buf(ErlIOVec* vec, char* buf, int len)
+{
+ SysIOVec* iov = vec->iov;
+ int n = vec->vsize;
+ int orig_len = len;
+
+ while(n--) {
+ int ilen = iov->iov_len;
+ if (ilen < len) {
+ sys_memcpy(buf, iov->iov_base, ilen);
+ len -= ilen;
+ buf += ilen;
+ iov++;
+ }
+ else {
+ sys_memcpy(buf, iov->iov_base, len);
+ return orig_len;
+ }
+ }
+ return (orig_len - len);
+}
+
+
+/*
+ * - driver_alloc_binary() is thread safe (the efile driver depends on it).
+ * - driver_realloc_binary() and driver_free_binary() are *not* thread safe.
+ */
+
+/*
+ * reference count on driver binaries...
+ */
+
+long
+driver_binary_get_refc(ErlDrvBinary *dbp)
+{
+ Binary* bp = ErlDrvBinary2Binary(dbp);
+ return erts_refc_read(&bp->refc, 1);
+}
+
+long
+driver_binary_inc_refc(ErlDrvBinary *dbp)
+{
+ Binary* bp = ErlDrvBinary2Binary(dbp);
+ return erts_refc_inctest(&bp->refc, 2);
+}
+
+long
+driver_binary_dec_refc(ErlDrvBinary *dbp)
+{
+ Binary* bp = ErlDrvBinary2Binary(dbp);
+ return erts_refc_dectest(&bp->refc, 1);
+}
+
+
+/*
+** Allocation/Deallocation of binary objects
+*/
+
+ErlDrvBinary*
+driver_alloc_binary(int size)
+{
+ Binary* bin;
+
+ if (size < 0)
+ return NULL;
+
+ bin = erts_bin_drv_alloc_fnf((Uint) size);
+ if (!bin)
+	return NULL; /* The driver writer must take action */
+ bin->flags = BIN_FLAG_DRV;
+ erts_refc_init(&bin->refc, 1);
+ bin->orig_size = (long) size;
+ return Binary2ErlDrvBinary(bin);
+}
+
+/* Reallocate space held by a binary */
+
+ErlDrvBinary* driver_realloc_binary(ErlDrvBinary* bin, int size)
+{
+ Binary* oldbin;
+ Binary* newbin;
+
+ if (!bin || size < 0) {
+ erts_dsprintf_buf_t *dsbufp = erts_create_logger_dsbuf();
+ erts_dsprintf(dsbufp,
+ "Bad use of driver_realloc_binary(%p, %d): "
+ "called with ",
+ bin, size);
+ if (!bin) {
+ erts_dsprintf(dsbufp, "NULL pointer as first argument");
+ if (size < 0)
+ erts_dsprintf(dsbufp, ", and ");
+ }
+ if (size < 0) {
+ erts_dsprintf(dsbufp, "negative size as second argument");
+ size = 0;
+ }
+ erts_send_warning_to_logger_nogl(dsbufp);
+ if (!bin)
+ return driver_alloc_binary(size);
+ }
+
+ oldbin = ErlDrvBinary2Binary(bin);
+ newbin = (Binary *) erts_bin_realloc_fnf(oldbin, size);
+ if (!newbin)
+ return NULL;
+
+ newbin->orig_size = size;
+ return Binary2ErlDrvBinary(newbin);
+}
+
+
+void driver_free_binary(ErlDrvBinary* dbin)
+{
+ Binary *bin;
+ if (!dbin) {
+ erts_dsprintf_buf_t *dsbufp = erts_create_logger_dsbuf();
+ erts_dsprintf(dsbufp,
+ "Bad use of driver_free_binary(%p): called with "
+ "NULL pointer as argument", dbin);
+ erts_send_warning_to_logger_nogl(dsbufp);
+ return;
+ }
+
+ bin = ErlDrvBinary2Binary(dbin);
+ if (erts_refc_dectest(&bin->refc, 0) == 0)
+ erts_bin_free(bin);
+}
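+
+/*
+ * Hedged sketch (hypothetical driver code): the usual lifecycle of a
+ * driver binary handed to driver_output_binary(). The emulator takes
+ * its own reference on delivery (see deliver_vec_message() above), so
+ * the driver drops its reference afterwards. example_output_binary is
+ * an assumed name.
+ */
+#if 0
+static void example_output_binary(ErlDrvPort port, char *src, int len)
+{
+    ErlDrvBinary *bin = driver_alloc_binary(len);
+    if (bin == NULL)
+	return;				/* allocation failed */
+    memcpy(bin->orig_bytes, src, len);
+    driver_output_binary(port, NULL, 0, bin, 0, len);
+    driver_free_binary(bin);		/* drop the driver's reference */
+}
+#endif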
+
+
+/*
+ * Allocation/deallocation of memory for drivers
+ */
+
+void *driver_alloc(size_t size)
+{
+ return erts_alloc_fnf(ERTS_ALC_T_DRV, (Uint) size);
+}
+
+void *driver_realloc(void *ptr, size_t size)
+{
+ return erts_realloc_fnf(ERTS_ALC_T_DRV, ptr, (Uint) size);
+}
+
+void driver_free(void *ptr)
+{
+ erts_free(ERTS_ALC_T_DRV, ptr);
+}
+
+/*
+ * Port Data Lock
+ */
+
+static void
+pdl_init(void)
+{
+}
+
+static ERTS_INLINE void
+pdl_init_refc(ErlDrvPDL pdl)
+{
+ erts_atomic_init(&pdl->refc, 1);
+}
+
+static ERTS_INLINE long
+pdl_read_refc(ErlDrvPDL pdl)
+{
+ long refc = erts_atomic_read(&pdl->refc);
+ ERTS_LC_ASSERT(refc >= 0);
+ return refc;
+}
+
+static ERTS_INLINE void
+pdl_inc_refc(ErlDrvPDL pdl)
+{
+ erts_atomic_inc(&pdl->refc);
+ ERTS_LC_ASSERT(driver_pdl_get_refc(pdl) > 1);
+}
+
+static ERTS_INLINE long
+pdl_inctest_refc(ErlDrvPDL pdl)
+{
+ long refc = erts_atomic_inctest(&pdl->refc);
+ ERTS_LC_ASSERT(refc > 1);
+ return refc;
+}
+
+#if 0 /* unused */
+static ERTS_INLINE void
+pdl_dec_refc(ErlDrvPDL pdl)
+{
+ erts_atomic_dec(&pdl->refc);
+ ERTS_LC_ASSERT(driver_pdl_get_refc(pdl) > 0);
+}
+#endif
+
+static ERTS_INLINE long
+pdl_dectest_refc(ErlDrvPDL pdl)
+{
+ long refc = erts_atomic_dectest(&pdl->refc);
+ ERTS_LC_ASSERT(refc >= 0);
+ return refc;
+}
+
+static ERTS_INLINE void pdl_destroy(ErlDrvPDL pdl)
+{
+ ERTS_LC_ASSERT(driver_pdl_get_refc(pdl) == 0);
+ erts_mtx_destroy(&pdl->mtx);
+ erts_free(ERTS_ALC_T_PORT_DATA_LOCK, pdl);
+}
+
+/*
+ * exported driver_pdl_* functions ...
+ */
+
+ErlDrvPDL
+driver_pdl_create(ErlDrvPort dp)
+{
+ ErlDrvPDL pdl;
+ Port *pp = erts_drvport2port(dp);
+ if (!pp || pp->port_data_lock)
+ return NULL;
+ pdl = erts_alloc(ERTS_ALC_T_PORT_DATA_LOCK,
+ sizeof(struct erl_drv_port_data_lock));
+ erts_mtx_init(&pdl->mtx, "port_data_lock");
+ pdl_init_refc(pdl);
+ pp->port_data_lock = pdl;
+#ifdef HARDDEBUG
+ erts_fprintf(stderr, "driver_pdl_create(%T) -> 0x%08X\r\n",pp->id,(unsigned) pdl);
+#endif
+ return pdl;
+}
+
+void
+driver_pdl_lock(ErlDrvPDL pdl)
+{
+#ifdef HARDDEBUG
+ erts_fprintf(stderr, "driver_pdl_lock(0x%08X)\r\n",(unsigned) pdl);
+#endif
+ pdl_inc_refc(pdl);
+ erts_mtx_lock(&pdl->mtx);
+}
+
+void
+driver_pdl_unlock(ErlDrvPDL pdl)
+{
+ long refc;
+#ifdef HARDDEBUG
+ erts_fprintf(stderr, "driver_pdl_unlock(0x%08X)\r\n",(unsigned) pdl);
+#endif
+ erts_mtx_unlock(&pdl->mtx);
+ refc = pdl_dectest_refc(pdl);
+ if (!refc)
+ pdl_destroy(pdl);
+}
+
+long
+driver_pdl_get_refc(ErlDrvPDL pdl)
+{
+ return pdl_read_refc(pdl);
+}
+
+long
+driver_pdl_inc_refc(ErlDrvPDL pdl)
+{
+ long refc = pdl_inctest_refc(pdl);
+#ifdef HARDDEBUG
+ erts_fprintf(stderr, "driver_pdl_inc_refc(0x%08X) -> %ld\r\n",(unsigned) pdl, refc);
+#endif
+ return refc;
+}
+
+long
+driver_pdl_dec_refc(ErlDrvPDL pdl)
+{
+ long refc = pdl_dectest_refc(pdl);
+#ifdef HARDDEBUG
+ erts_fprintf(stderr, "driver_pdl_dec_refc(0x%08X) -> %ld\r\n",(unsigned) pdl, refc);
+#endif
+ if (!refc)
+ pdl_destroy(pdl);
+ return refc;
+}
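+
+/*
+ * Illustrative sketch (not part of the emulator sources): a driver that
+ * touches its I/O queue from a non-emulator thread creates a port data
+ * lock once in its start callback and brackets every queue operation
+ * with lock/unlock. 'state' is hypothetical driver data.
+ *
+ *    // in the start callback:
+ *    state->pdl = driver_pdl_create(port);
+ *
+ *    // later, e.g. from an async thread:
+ *    driver_pdl_lock(state->pdl);
+ *    // ... operate on the port's I/O queue ...
+ *    driver_pdl_unlock(state->pdl);
+ */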
+
+/* Expand the queue to hold n more elements at the tail or the head. */
+static int expandq(ErlIOQueue* q, int n, int tail)
+/* tail: 0 to make room at the head; nonzero to make room at the tail */
+{
+ int h_sz; /* room before header */
+ int t_sz; /* room after tail */
+ int q_sz; /* occupied */
+ int nvsz;
+ SysIOVec* niov;
+ ErlDrvBinary** nbinv;
+
+ h_sz = q->v_head - q->v_start;
+ t_sz = q->v_end - q->v_tail;
+ q_sz = q->v_tail - q->v_head;
+
+ if (tail && (n <= t_sz)) /* do we need to expand tail? */
+ return 0;
+ else if (!tail && (n <= h_sz)) /* do we need to expand head? */
+ return 0;
+ else if (n > (h_sz + t_sz)) { /* need to allocate */
+	/* we may get a little extra room, but that is ok */
+ nvsz = (q->v_end - q->v_start) + n;
+
+ niov = erts_alloc_fnf(ERTS_ALC_T_IOQ, nvsz * sizeof(SysIOVec));
+ if (!niov)
+ return -1;
+	nbinv = erts_alloc_fnf(ERTS_ALC_T_IOQ, nvsz * sizeof(ErlDrvBinary*));
+ if (!nbinv) {
+ erts_free(ERTS_ALC_T_IOQ, (void *) niov);
+ return -1;
+ }
+ if (tail) {
+ sys_memcpy(niov, q->v_head, q_sz*sizeof(SysIOVec));
+ if (q->v_start != q->v_small)
+ erts_free(ERTS_ALC_T_IOQ, (void *) q->v_start);
+ q->v_start = niov;
+ q->v_end = niov + nvsz;
+ q->v_head = q->v_start;
+ q->v_tail = q->v_head + q_sz;
+
+ sys_memcpy(nbinv, q->b_head, q_sz*sizeof(ErlDrvBinary*));
+ if (q->b_start != q->b_small)
+ erts_free(ERTS_ALC_T_IOQ, (void *) q->b_start);
+ q->b_start = nbinv;
+ q->b_end = nbinv + nvsz;
+ q->b_head = q->b_start;
+ q->b_tail = q->b_head + q_sz;
+ }
+ else {
+ sys_memcpy(niov+nvsz-q_sz, q->v_head, q_sz*sizeof(SysIOVec));
+ if (q->v_start != q->v_small)
+ erts_free(ERTS_ALC_T_IOQ, (void *) q->v_start);
+ q->v_start = niov;
+ q->v_end = niov + nvsz;
+ q->v_tail = q->v_end;
+ q->v_head = q->v_tail - q_sz;
+
+ sys_memcpy(nbinv+nvsz-q_sz, q->b_head, q_sz*sizeof(ErlDrvBinary*));
+ if (q->b_start != q->b_small)
+ erts_free(ERTS_ALC_T_IOQ, (void *) q->b_start);
+ q->b_start = nbinv;
+ q->b_end = nbinv + nvsz;
+ q->b_tail = q->b_end;
+ q->b_head = q->b_tail - q_sz;
+ }
+ }
+ else if (tail) { /* move to beginning to make room in tail */
+ sys_memmove(q->v_start, q->v_head, q_sz*sizeof(SysIOVec));
+ q->v_head = q->v_start;
+ q->v_tail = q->v_head + q_sz;
+ sys_memmove(q->b_start, q->b_head, q_sz*sizeof(ErlDrvBinary*));
+ q->b_head = q->b_start;
+ q->b_tail = q->b_head + q_sz;
+ }
+ else { /* move to end to make room */
+ sys_memmove(q->v_end-q_sz, q->v_head, q_sz*sizeof(SysIOVec));
+ q->v_tail = q->v_end;
+ q->v_head = q->v_tail-q_sz;
+ sys_memmove(q->b_end-q_sz, q->b_head, q_sz*sizeof(ErlDrvBinary*));
+ q->b_tail = q->b_end;
+ q->b_head = q->b_tail-q_sz;
+ }
+
+ return 0;
+}
+
+
+
+/* Put elements from vec at q tail */
+int driver_enqv(ErlDrvPort ix, ErlIOVec* vec, int skip)
+{
+ int n;
+ int len;
+ int size;
+ SysIOVec* iov;
+ ErlDrvBinary** binv;
+ ErlDrvBinary* b;
+ ErlIOQueue* q = drvport2ioq(ix);
+
+ if (q == NULL)
+ return -1;
+
+ size = vec->size - skip;
+ ASSERT(size >= 0); /* debug only */
+ if (size <= 0)
+ return 0;
+
+ iov = vec->iov;
+ binv = vec->binv;
+ n = vec->vsize;
+
+    /* use a do-while loop to strip zero-length entries from the beginning */
+ do {
+ len = iov->iov_len;
+ if (len <= skip) {
+ skip -= len;
+ iov++;
+ binv++;
+ n--;
+ }
+ else {
+ iov->iov_base += skip;
+ iov->iov_len -= skip;
+ skip = 0;
+ }
+ } while(skip > 0);
+
+    if (q->v_tail + n >= q->v_end) {
+	if (expandq(q, n, 1))
+	    return -1;		/* could not make room in the queue */
+    }
+
+ /* Queue and reference all binaries (remove zero length items) */
+ while(n--) {
+ if ((len = iov->iov_len) > 0) {
+	    if ((b = *binv) == NULL) { /* special case: create a binary */
+ b = driver_alloc_binary(len);
+ sys_memcpy(b->orig_bytes, iov->iov_base, len);
+ *q->b_tail++ = b;
+ q->v_tail->iov_len = len;
+ q->v_tail->iov_base = b->orig_bytes;
+ q->v_tail++;
+ }
+ else {
+ driver_binary_inc_refc(b);
+ *q->b_tail++ = b;
+ *q->v_tail++ = *iov;
+ }
+ }
+ iov++;
+ binv++;
+ }
+ q->size += size; /* update total size in queue */
+ return 0;
+}
+
+/* Put elements from vec at q head */
+int driver_pushqv(ErlDrvPort ix, ErlIOVec* vec, int skip)
+{
+ int n;
+ int len;
+ int size;
+ SysIOVec* iov;
+ ErlDrvBinary** binv;
+ ErlDrvBinary* b;
+ ErlIOQueue* q = drvport2ioq(ix);
+
+ if (q == NULL)
+ return -1;
+
+ if ((size = vec->size - skip) <= 0)
+ return 0;
+ iov = vec->iov;
+ binv = vec->binv;
+ n = vec->vsize;
+
+    /* use a do-while loop to strip zero-length entries from the beginning */
+ do {
+ len = iov->iov_len;
+ if (len <= skip) {
+ skip -= len;
+ iov++;
+ binv++;
+ n--;
+ }
+ else {
+ iov->iov_base += skip;
+ iov->iov_len -= skip;
+ skip = 0;
+ }
+ } while(skip > 0);
+
+    if (q->v_head - n < q->v_start) {
+	if (expandq(q, n, 0))
+	    return -1;		/* could not make room in the queue */
+    }
+
+ /* Queue and reference all binaries (remove zero length items) */
+ iov += (n-1); /* move to end */
+ binv += (n-1); /* move to end */
+ while(n--) {
+ if ((len = iov->iov_len) > 0) {
+	    if ((b = *binv) == NULL) { /* special case: create a binary */
+ b = driver_alloc_binary(len);
+ sys_memcpy(b->orig_bytes, iov->iov_base, len);
+ *--q->b_head = b;
+ q->v_head--;
+ q->v_head->iov_len = len;
+ q->v_head->iov_base = b->orig_bytes;
+ }
+ else {
+ driver_binary_inc_refc(b);
+ *--q->b_head = b;
+ *--q->v_head = *iov;
+ }
+ }
+ iov--;
+ binv--;
+ }
+ q->size += size; /* update total size in queue */
+ return 0;
+}
+
+
+/*
+** Remove size bytes from queue head
+** Return number of bytes that remain in queue
+*/
+int driver_deq(ErlDrvPort ix, int size)
+{
+ ErlIOQueue* q = drvport2ioq(ix);
+ int len;
+ int sz;
+
+ if ((q == NULL) || (sz = (q->size - size)) < 0)
+ return -1;
+ q->size = sz;
+ while (size > 0) {
+ ASSERT(q->v_head != q->v_tail);
+
+ len = q->v_head->iov_len;
+ if (len <= size) {
+ size -= len;
+ driver_free_binary(*q->b_head);
+ *q->b_head++ = NULL;
+ q->v_head++;
+ }
+ else {
+ q->v_head->iov_base += size;
+ q->v_head->iov_len -= size;
+ size = 0;
+ }
+ }
+
+ /* restart pointers (optimised for enq) */
+ if (q->v_head == q->v_tail) {
+ q->v_head = q->v_tail = q->v_start;
+ q->b_head = q->b_tail = q->b_start;
+ }
+ return sz;
+}
+
+
+int driver_peekqv(ErlDrvPort ix, ErlIOVec *ev) {
+ ErlIOQueue *q = drvport2ioq(ix);
+ ASSERT(ev);
+
+ if (! q) {
+ return -1;
+ } else {
+ if ((ev->vsize = q->v_tail - q->v_head) == 0) {
+ ev->size = 0;
+ ev->iov = NULL;
+ ev->binv = NULL;
+ } else {
+ ev->size = q->size;
+ ev->iov = q->v_head;
+ ev->binv = q->b_head;
+ }
+ return q->size;
+ }
+}
+
+SysIOVec* driver_peekq(ErlDrvPort ix, int* vlenp) /* length of io-vector */
+{
+ ErlIOQueue* q = drvport2ioq(ix);
+
+ if (q == NULL) {
+ *vlenp = -1;
+ return NULL;
+ }
+ if ((*vlenp = (q->v_tail - q->v_head)) == 0)
+ return NULL;
+ return q->v_head;
+}
+
+
+int driver_sizeq(ErlDrvPort ix)
+{
+ ErlIOQueue* q = drvport2ioq(ix);
+
+ if (q == NULL)
+ return -1;
+ return q->size;
+}
+
+
+/* Utils */
+
+/* Enqueue a binary */
+int driver_enq_bin(ErlDrvPort ix, ErlDrvBinary* bin, int offs, int len)
+{
+ SysIOVec iov;
+ ErlIOVec ev;
+
+ ASSERT(len >= 0);
+ if (len == 0)
+ return 0;
+ iov.iov_base = bin->orig_bytes + offs;
+ iov.iov_len = len;
+ ev.vsize = 1;
+ ev.size = len;
+ ev.iov = &iov;
+ ev.binv = &bin;
+ return driver_enqv(ix, &ev, 0);
+}
+
+int driver_enq(ErlDrvPort ix, char* buffer, int len)
+{
+ int code;
+ ErlDrvBinary* bin;
+
+ ASSERT(len >= 0);
+ if (len == 0)
+ return 0;
+ if ((bin = driver_alloc_binary(len)) == NULL)
+ return -1;
+ sys_memcpy(bin->orig_bytes, buffer, len);
+ code = driver_enq_bin(ix, bin, 0, len);
+ driver_free_binary(bin); /* dereference */
+ return code;
+}
+
+int driver_pushq_bin(ErlDrvPort ix, ErlDrvBinary* bin, int offs, int len)
+{
+ SysIOVec iov;
+ ErlIOVec ev;
+
+ ASSERT(len >= 0);
+ if (len == 0)
+ return 0;
+ iov.iov_base = bin->orig_bytes + offs;
+ iov.iov_len = len;
+ ev.vsize = 1;
+ ev.size = len;
+ ev.iov = &iov;
+ ev.binv = &bin;
+ return driver_pushqv(ix, &ev, 0);
+}
+
+int driver_pushq(ErlDrvPort ix, char* buffer, int len)
+{
+ int code;
+ ErlDrvBinary* bin;
+
+ ASSERT(len >= 0);
+ if (len == 0)
+ return 0;
+
+ if ((bin = driver_alloc_binary(len)) == NULL)
+ return -1;
+ sys_memcpy(bin->orig_bytes, buffer, len);
+ code = driver_pushq_bin(ix, bin, 0, len);
+ driver_free_binary(bin); /* dereference */
+ return code;
+}
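+
+/*
+ * Illustrative sketch (not part of the emulator sources): a typical
+ * write path enqueues outgoing data, peeks at the queued io-vector when
+ * the descriptor becomes writable, and dequeues what was actually
+ * written. 'port', 'buf', 'len' and 'written' are hypothetical.
+ *
+ *    driver_enq(port, buf, len);       // copies buf into the queue
+ *    // ... on ready_output:
+ *    int vlen;
+ *    SysIOVec *iov = driver_peekq(port, &vlen);
+ *    // ... writev(fd, iov, vlen) or similar, giving 'written' ...
+ *    driver_deq(port, written);        // drop the bytes that went out
+ *    if (driver_sizeq(port) == 0)
+ *        ;                             // queue drained; stop selecting
+ */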
+
+static ERTS_INLINE void
+drv_cancel_timer(Port *prt)
+{
+#ifdef ERTS_SMP
+ erts_cancel_smp_ptimer(prt->ptimer);
+#else
+ erl_cancel_timer(&prt->tm);
+#endif
+ if (erts_port_task_is_scheduled(&prt->timeout_task))
+ erts_port_task_abort(prt->id, &prt->timeout_task);
+}
+
+int driver_set_timer(ErlDrvPort ix, Uint t)
+{
+ Port* prt = erts_drvport2port(ix);
+
+ ERTS_SMP_CHK_NO_PROC_LOCKS;
+
+ if (prt == NULL)
+ return -1;
+ ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
+ if (prt->drv_ptr->timeout == NULL)
+ return -1;
+ drv_cancel_timer(prt);
+#ifdef ERTS_SMP
+ erts_create_smp_ptimer(&prt->ptimer,
+ prt->id,
+ (ErlTimeoutProc) schedule_port_timeout,
+ t);
+#else
+ erl_set_timer(&prt->tm,
+ (ErlTimeoutProc) schedule_port_timeout,
+ NULL,
+ prt,
+ t);
+#endif
+ return 0;
+}
+
+int driver_cancel_timer(ErlDrvPort ix)
+{
+ Port* prt = erts_drvport2port(ix);
+ if (prt == NULL)
+ return -1;
+ ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
+ drv_cancel_timer(prt);
+ return 0;
+}
+
+
+int
+driver_read_timer(ErlDrvPort ix, unsigned long* t)
+{
+ Port* prt = erts_drvport2port(ix);
+
+ ERTS_SMP_CHK_NO_PROC_LOCKS;
+
+ if (prt == NULL)
+ return -1;
+ ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
+#ifdef ERTS_SMP
+ *t = prt->ptimer ? time_left(&prt->ptimer->timer.tm) : 0;
+#else
+ *t = time_left(&prt->tm);
+#endif
+ return 0;
+}
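+
+/*
+ * Illustrative sketch (not part of the emulator sources): a driver arms
+ * a one-shot timer in milliseconds and is later called back through the
+ * timeout entry of its ErlDrvEntry; it can inspect or cancel the timer
+ * before it fires.
+ *
+ *    driver_set_timer(port, 5000);     // call timeout() in ~5 seconds
+ *    // ...
+ *    unsigned long left;
+ *    if (driver_read_timer(port, &left) == 0 && left > 0)
+ *        driver_cancel_timer(port);    // still pending; abort it
+ */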
+
+int
+driver_get_now(ErlDrvNowData *now_data)
+{
+ ERTS_SMP_CHK_NO_PROC_LOCKS;
+
+ if (now_data == NULL) {
+ return -1;
+ }
+ get_now(&(now_data->megasecs),&(now_data->secs),&(now_data->microsecs));
+ return 0;
+}
+
+static void ref_to_driver_monitor(Eterm ref, ErlDrvMonitor *mon)
+{
+ RefThing *refp;
+ ASSERT(is_internal_ref(ref));
+ ASSERT(sizeof(RefThing) <= sizeof(ErlDrvMonitor));
+ refp = ref_thing_ptr(ref);
+ memset(mon,0,sizeof(ErlDrvMonitor));
+ memcpy(mon,refp,sizeof(RefThing));
+}
+
+int driver_monitor_process(ErlDrvPort port,
+ ErlDrvTermData process,
+ ErlDrvMonitor *monitor)
+{
+ Port *prt = erts_drvport2port(port);
+ Process *rp;
+ Eterm ref;
+ Eterm buf[REF_THING_SIZE];
+ if (prt->drv_ptr->process_exit == NULL) {
+ return -1;
+ }
+ rp = erts_pid2proc_opt(NULL, 0,
+ (Eterm) process, ERTS_PROC_LOCK_LINK,
+ ERTS_P2P_FLG_ALLOW_OTHER_X);
+ if (!rp) {
+ return 1;
+ }
+ ref = erts_make_ref_in_buffer(buf);
+ erts_add_monitor(&(prt->monitors), MON_ORIGIN, ref, rp->id, NIL);
+ erts_add_monitor(&(rp->monitors), MON_TARGET, ref, prt->id, NIL);
+
+ erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK);
+ ref_to_driver_monitor(ref,monitor);
+ return 0;
+}
+
+int driver_demonitor_process(ErlDrvPort port,
+ const ErlDrvMonitor *monitor)
+{
+ Port *prt = erts_drvport2port(port);
+ Process *rp;
+ Eterm ref;
+ Eterm buf[REF_THING_SIZE];
+ ErtsMonitor *mon;
+ Eterm to;
+
+ memcpy(buf,monitor,sizeof(Eterm)*REF_THING_SIZE);
+ ref = make_internal_ref(buf);
+ mon = erts_lookup_monitor(prt->monitors, ref);
+ if (mon == NULL) {
+ return 1;
+ }
+ ASSERT(mon->type == MON_ORIGIN);
+ to = mon->pid;
+ ASSERT(is_internal_pid(to));
+ rp = erts_pid2proc_opt(NULL,
+ 0,
+ to,
+ ERTS_PROC_LOCK_LINK,
+ ERTS_P2P_FLG_ALLOW_OTHER_X);
+ mon = erts_remove_monitor(&(prt->monitors), ref);
+ if (mon) {
+ erts_destroy_monitor(mon);
+ }
+ if (rp) {
+ ErtsMonitor *rmon;
+ rmon = erts_remove_monitor(&(rp->monitors), ref);
+ erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK);
+ if (rmon != NULL) {
+ erts_destroy_monitor(rmon);
+ }
+ }
+ return 0;
+}
+
+ErlDrvTermData driver_get_monitored_process(ErlDrvPort port,
+ const ErlDrvMonitor *monitor)
+{
+ Port *prt = erts_drvport2port(port);
+ Eterm ref;
+ Eterm buf[REF_THING_SIZE];
+ ErtsMonitor *mon;
+ Eterm to;
+
+ memcpy(buf,monitor,sizeof(Eterm)*REF_THING_SIZE);
+ ref = make_internal_ref(buf);
+ mon = erts_lookup_monitor(prt->monitors, ref);
+ if (mon == NULL) {
+ return driver_term_nil;
+ }
+ ASSERT(mon->type == MON_ORIGIN);
+ to = mon->pid;
+ ASSERT(is_internal_pid(to));
+ return (ErlDrvTermData) to;
+}
+
+int driver_compare_monitors(const ErlDrvMonitor *monitor1,
+ const ErlDrvMonitor *monitor2)
+{
+ return memcmp(monitor1,monitor2,sizeof(ErlDrvMonitor));
+}
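+
+/*
+ * Illustrative sketch (not part of the emulator sources): a driver that
+ * wants to know when its client dies monitors the calling process; when
+ * that process exits, the driver's process_exit callback is invoked with
+ * an equal monitor value. 'state' is hypothetical driver data.
+ *
+ *    ErlDrvMonitor mon;
+ *    if (driver_monitor_process(port, driver_caller(port), &mon) == 0)
+ *        state->client_mon = mon;      // 0 means the monitor is in place
+ *
+ *    // in process_exit(drv_data, monitor):
+ *    //    driver_compare_monitors(&state->client_mon, monitor) == 0
+ *    //    tells us which monitor fired.
+ */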
+
+void erts_fire_port_monitor(Port *prt, Eterm ref)
+{
+ ErtsMonitor *rmon;
+ void (*callback)(ErlDrvData drv_data, ErlDrvMonitor *monitor);
+ ErlDrvMonitor drv_monitor;
+ int fpe_was_unmasked;
+
+ ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
+ ASSERT(prt->drv_ptr != NULL);
+
+ if (erts_lookup_monitor(prt->monitors,ref) == NULL) {
+ return;
+ }
+ callback = prt->drv_ptr->process_exit;
+ ASSERT(callback != NULL);
+ ref_to_driver_monitor(ref,&drv_monitor);
+ fpe_was_unmasked = erts_block_fpe();
+ (*callback)((ErlDrvData) (prt->drv_data), &drv_monitor);
+ erts_unblock_fpe(fpe_was_unmasked);
+ /* remove monitor *after* callback */
+ rmon = erts_remove_monitor(&(prt->monitors),ref);
+ if (rmon) {
+ erts_destroy_monitor(rmon);
+ }
+}
+
+
+static int
+driver_failure_term(ErlDrvPort ix, Eterm term, int eof)
+{
+ Port* prt = erts_drvport2port(ix);
+
+ ERTS_SMP_CHK_NO_PROC_LOCKS;
+
+ if (prt == NULL)
+ return -1;
+ ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
+ if (eof)
+ flush_linebuf_messages(prt);
+ if (prt->status & ERTS_PORT_SFLG_CLOSING) {
+ terminate_port(prt);
+ } else if (eof && (prt->status & ERTS_PORT_SFLG_SOFT_EOF)) {
+ deliver_result(prt->id, prt->connected, am_eof);
+ } else {
+	/* XXX Ugly workaround: let do_exit_port terminate the port */
+ if (prt->port_data_lock)
+ driver_pdl_lock(prt->port_data_lock);
+ prt->ioq.size = 0;
+ if (prt->port_data_lock)
+ driver_pdl_unlock(prt->port_data_lock);
+ erts_do_exit_port(prt, prt->id, eof ? am_normal : term);
+ }
+ return 0;
+}
+
+
+
+/*
+** Do a (soft) exit. Unlink the connected process before reporting
+** a driver posix error or a normal exit.
+*/
+int driver_exit(ErlDrvPort ix, int err)
+{
+ Port* prt = erts_drvport2port(ix);
+ Process* rp;
+ ErtsLink *lnk, *rlnk = NULL;
+
+ ERTS_SMP_CHK_NO_PROC_LOCKS;
+
+ if (prt == NULL)
+ return -1;
+
+ ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
+
+ rp = erts_pid2proc(NULL, 0, prt->connected, ERTS_PROC_LOCK_LINK);
+ if (rp) {
+ rlnk = erts_remove_link(&(rp->nlinks),prt->id);
+ }
+
+ lnk = erts_remove_link(&(prt->nlinks),prt->connected);
+
+#ifdef ERTS_SMP
+ if (rp)
+ erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK);
+#endif
+
+ if (rlnk != NULL) {
+ erts_destroy_link(rlnk);
+ }
+
+ if (lnk != NULL) {
+ erts_destroy_link(lnk);
+ }
+
+ if (err == 0)
+ return driver_failure_term(ix, am_normal, 0);
+ else {
+ char* err_str = erl_errno_id(err);
+ Eterm am_err = am_atom_put(err_str, sys_strlen(err_str));
+ return driver_failure_term(ix, am_err, 0);
+ }
+}
+
+
+int driver_failure(ErlDrvPort ix, int code)
+{
+ return driver_failure_term(ix, make_small(code), code == 0);
+}
+
+int driver_failure_atom(ErlDrvPort ix, char* string)
+{
+ Eterm am = am_atom_put(string, strlen(string));
+ return driver_failure_term(ix, am, 0);
+}
+
+int driver_failure_posix(ErlDrvPort ix, int err)
+{
+ return driver_failure_atom(ix, erl_errno_id(err));
+}
+
+int driver_failure_eof(ErlDrvPort ix)
+{
+ return driver_failure_term(ix, NIL, 1);
+}
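+
+/*
+ * Illustrative sketch (not part of the emulator sources): on a fatal
+ * read error a driver typically terminates the port with the posix
+ * error name as the exit reason, while end of input is reported with
+ * driver_failure_eof(). 'fd' and 'buf' are hypothetical.
+ *
+ *    n = read(fd, buf, sizeof(buf));
+ *    if (n < 0)
+ *        driver_failure_posix(port, errno);  // e.g. exit reason 'ebadf'
+ *    else if (n == 0)
+ *        driver_failure_eof(port);
+ */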
+
+
+
+ErlDrvTermData driver_mk_atom(char* string)
+{
+ Eterm am = am_atom_put(string, sys_strlen(string));
+ ERTS_SMP_CHK_NO_PROC_LOCKS;
+ return (ErlDrvTermData) am;
+}
+
+ErlDrvTermData driver_mk_port(ErlDrvPort ix)
+{
+ Port* prt = erts_drvport2port(ix);
+ ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
+ return (ErlDrvTermData) prt->id;
+}
+
+ErlDrvTermData driver_connected(ErlDrvPort ix)
+{
+ Port* prt = erts_drvport2port(ix);
+ ERTS_SMP_CHK_NO_PROC_LOCKS;
+ if (prt == NULL)
+ return NIL;
+ ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
+ return prt->connected;
+}
+
+ErlDrvTermData driver_caller(ErlDrvPort ix)
+{
+ Port* prt = erts_drvport2port(ix);
+ ERTS_SMP_CHK_NO_PROC_LOCKS;
+ if (prt == NULL)
+ return NIL;
+ ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
+ return prt->caller;
+}
+
+int driver_lock_driver(ErlDrvPort ix)
+{
+ Port* prt = erts_drvport2port(ix);
+ DE_Handle* dh;
+
+ ERTS_SMP_CHK_NO_PROC_LOCKS;
+
+ erts_smp_mtx_lock(&erts_driver_list_lock);
+
+ if (prt == NULL) return -1;
+
+ ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
+ if ((dh = (DE_Handle*)prt->drv_ptr->handle ) == NULL) {
+ erts_smp_mtx_unlock(&erts_driver_list_lock);
+ return -1;
+ }
+ erts_ddll_lock_driver(dh, prt->drv_ptr->name);
+ erts_smp_mtx_unlock(&erts_driver_list_lock);
+ return 0;
+}
+
+
+static int maybe_lock_driver_list(void)
+{
+ void *rec_lock;
+ rec_lock = erts_smp_tsd_get(driver_list_lock_status_key);
+ if (rec_lock == 0) {
+ erts_smp_mtx_lock(&erts_driver_list_lock);
+ return 1;
+ }
+ return 0;
+}
+static void maybe_unlock_driver_list(int doit)
+{
+ if (doit) {
+ erts_smp_mtx_unlock(&erts_driver_list_lock);
+ }
+}
+/*
+  These old interfaces are certainly not MT friendly. Hopefully they are
+  only used internally, but you never know, so they are kept for backward
+  compatibility. As the sys ddll code has no notion of locking, the driver
+  list lock is used to mutex this against the code in erl_bif_ddll.c.
+  To allow dynamic code loading in the init functions of a driver,
+  recursive locking is handled as in add_driver_entry etc.
+  A TSD variable holds the last error for a thread, so that code like
+  ...
+  x = driver_dl_open(...);
+  if (x == NULL)
+      y = driver_dl_error();
+  ...
+  works as long as execution happens in one driver callback, even in an
+  SMP emulator. Writing code that uses these interfaces across several
+  driver callbacks, between loading/lookup and error handling, may give
+  undesired results.
+*/
+void *driver_dl_open(char * path)
+{
+ void *ptr;
+ int res;
+ int *last_error_p = erts_smp_tsd_get(driver_list_last_error_key);
+ int locked = maybe_lock_driver_list();
+ if ((res = erts_sys_ddll_open(path, &ptr)) == 0) {
+ maybe_unlock_driver_list(locked);
+ return ptr;
+ } else {
+ if (!last_error_p) {
+ last_error_p = erts_alloc(ERTS_ALC_T_DDLL_ERRCODES, sizeof(int));
+ erts_smp_tsd_set(driver_list_last_error_key,last_error_p);
+ }
+ *last_error_p = res;
+ maybe_unlock_driver_list(locked);
+ return NULL;
+ }
+}
+
+void *driver_dl_sym(void * handle, char *func_name)
+{
+ void *ptr;
+ int res;
+    int *last_error_p = erts_smp_tsd_get(driver_list_last_error_key);
+ int locked = maybe_lock_driver_list();
+ if ((res = erts_sys_ddll_sym(handle, func_name, &ptr)) == 0) {
+ maybe_unlock_driver_list(locked);
+ return ptr;
+ } else {
+ if (!last_error_p) {
+ last_error_p = erts_alloc(ERTS_ALC_T_DDLL_ERRCODES, sizeof(int));
+	    erts_smp_tsd_set(driver_list_last_error_key,last_error_p);
+ }
+ *last_error_p = res;
+ maybe_unlock_driver_list(locked);
+ return NULL;
+ }
+}
+
+int driver_dl_close(void *handle)
+{
+ int res;
+ int locked = maybe_lock_driver_list();
+ res = erts_sys_ddll_close(handle);
+ maybe_unlock_driver_list(locked);
+ return res;
+}
+
+char *driver_dl_error(void)
+{
+ char *res;
+    int *last_error_p = erts_smp_tsd_get(driver_list_last_error_key);
+ int locked = maybe_lock_driver_list();
+ res = erts_ddll_error((last_error_p != NULL) ? (*last_error_p) : ERL_DE_ERROR_UNSPECIFIED);
+ maybe_unlock_driver_list(locked);
+ return res;
+}
+
+
+#define ERL_DRV_SYS_INFO_SIZE(LAST_FIELD) \
+ (((size_t) &((ErlDrvSysInfo *) 0)->LAST_FIELD) \
+ + sizeof(((ErlDrvSysInfo *) 0)->LAST_FIELD))
+
+void
+driver_system_info(ErlDrvSysInfo *sip, size_t si_size)
+{
+ /*
+ * When adding fields in the ErlDrvSysInfo struct
+ * remember to increment ERL_DRV_EXTENDED_MINOR_VERSION
+ */
+
+ /*
+ * 'smp_support' is the last field in the first version
+ * of ErlDrvSysInfo (introduced in driver version 1.0).
+ */
+ if (!sip || si_size < ERL_DRV_SYS_INFO_SIZE(smp_support))
+ erl_exit(1,
+ "driver_system_info(%p, %ld) called with invalid arguments\n",
+ sip, si_size);
+
+ if (si_size >= ERL_DRV_SYS_INFO_SIZE(smp_support)) {
+ sip->driver_major_version = ERL_DRV_EXTENDED_MAJOR_VERSION;
+ sip->driver_minor_version = ERL_DRV_EXTENDED_MINOR_VERSION;
+ sip->erts_version = ERLANG_VERSION;
+ sip->otp_release = ERLANG_OTP_RELEASE;
+ sip->thread_support =
+#ifdef USE_THREADS
+ 1
+#else
+ 0
+#endif
+ ;
+ sip->smp_support =
+#ifdef ERTS_SMP
+ 1
+#else
+ 0
+#endif
+ ;
+
+ }
+
+ /*
+ * 'scheduler_threads' is the last field in the second version
+ * of ErlDrvSysInfo (introduced in driver version 1.1).
+ */
+ if (si_size >= ERL_DRV_SYS_INFO_SIZE(scheduler_threads)) {
+ sip->async_threads = erts_async_max_threads;
+ sip->scheduler_threads = erts_no_schedulers;
+ }
+
+}
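+
+/*
+ * Illustrative sketch (not part of the emulator sources): a driver asks
+ * for as much of the struct as its headers know about by passing
+ * sizeof(); the emulator fills in no more than si_size bytes, so old
+ * drivers keep working against newer emulators.
+ *
+ *    ErlDrvSysInfo si;
+ *    driver_system_info(&si, sizeof(si));
+ *    if (si.smp_support)
+ *        ;                             // account for per-port locking
+ */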
+
+
+static ERTS_INLINE Port *
+get_current_port(void)
+{
+ ErtsSchedulerData *esdp = erts_get_scheduler_data();
+ ASSERT(esdp);
+ ASSERT(esdp->current_port);
+ return esdp->current_port;
+}
+
+/*
+ * Default callbacks used if not supplied by the driver.
+ */
+
+static void
+no_output_callback(ErlDrvData drv_data, char *buf, int len)
+{
+
+}
+
+static void
+no_event_callback(ErlDrvData drv_data, ErlDrvEvent event, ErlDrvEventData event_data)
+{
+ Port *prt = get_current_port();
+ report_missing_drv_callback(prt, "Event", "event()");
+ driver_event((ErlDrvPort) internal_port_index(prt->id), event, NULL);
+}
+
+static void
+no_ready_input_callback(ErlDrvData drv_data, ErlDrvEvent event)
+{
+ Port *prt = get_current_port();
+ report_missing_drv_callback(prt, "Input", "ready_input()");
+ driver_select((ErlDrvPort) internal_port_index(prt->id), event,
+ (ERL_DRV_READ | ERL_DRV_USE_NO_CALLBACK), 0);
+}
+
+static void
+no_ready_output_callback(ErlDrvData drv_data, ErlDrvEvent event)
+{
+ Port *prt = get_current_port();
+ report_missing_drv_callback(prt, "Output", "ready_output()");
+ driver_select((ErlDrvPort) internal_port_index(prt->id), event,
+ (ERL_DRV_WRITE | ERL_DRV_USE_NO_CALLBACK), 0);
+}
+
+static void
+no_timeout_callback(ErlDrvData drv_data)
+{
+
+}
+
+static void
+no_stop_select_callback(ErlDrvEvent event, void* private)
+{
+ erts_dsprintf_buf_t *dsbufp = erts_create_logger_dsbuf();
+ erts_dsprintf(dsbufp, "Driver does not implement stop_select callback "
+ "(event=%ld, private=%p)!\n", (long)event, private);
+ erts_send_error_to_logger_nogl(dsbufp);
+}
+
+
+static int
+init_driver(erts_driver_t *drv, ErlDrvEntry *de, DE_Handle *handle)
+{
+ drv->name = de->driver_name;
+ if (de->extended_marker == ERL_DRV_EXTENDED_MARKER) {
+ drv->version.major = de->major_version;
+ drv->version.minor = de->minor_version;
+ drv->flags = de->driver_flags;
+ }
+ else {
+ drv->version.major = 0;
+ drv->version.minor = 0;
+ drv->flags = 0;
+ }
+ drv->handle = handle;
+#ifdef ERTS_SMP
+ if (drv->flags & ERL_DRV_FLAG_USE_PORT_LOCKING)
+ drv->lock = NULL;
+ else {
+ drv->lock = erts_alloc(ERTS_ALC_T_DRIVER_LOCK,
+ sizeof(erts_smp_mtx_t));
+ erts_smp_mtx_init_x(drv->lock,
+ "driver_lock",
+#if defined(ERTS_ENABLE_LOCK_CHECK) || defined(ERTS_ENABLE_LOCK_COUNT)
+ am_atom_put(drv->name, sys_strlen(drv->name))
+#else
+ NIL
+#endif
+ );
+ }
+#endif
+ drv->entry = de;
+
+ drv->start = de->start;
+ drv->stop = de->stop;
+ drv->finish = de->finish;
+ drv->flush = de->flush;
+ drv->output = de->output ? de->output : no_output_callback;
+ drv->outputv = de->outputv;
+ drv->control = de->control;
+ drv->call = de->call;
+ drv->event = de->event ? de->event : no_event_callback;
+ drv->ready_input = de->ready_input ? de->ready_input : no_ready_input_callback;
+ drv->ready_output = de->ready_output ? de->ready_output : no_ready_output_callback;
+ drv->timeout = de->timeout ? de->timeout : no_timeout_callback;
+ drv->ready_async = de->ready_async;
+ if (de->extended_marker == ERL_DRV_EXTENDED_MARKER)
+ drv->process_exit = de->process_exit;
+ else
+ drv->process_exit = NULL;
+ if (de->minor_version >= 3/*R13A*/ && de->stop_select)
+ drv->stop_select = de->stop_select;
+ else
+ drv->stop_select = no_stop_select_callback;
+
+ if (!de->init)
+ return 0;
+ else {
+ int res;
+ int fpe_was_unmasked = erts_block_fpe();
+ res = (*de->init)();
+ erts_unblock_fpe(fpe_was_unmasked);
+ return res;
+ }
+}
+
+void
+erts_destroy_driver(erts_driver_t *drv)
+{
+#ifdef ERTS_SMP
+ if (drv->lock) {
+ erts_smp_mtx_destroy(drv->lock);
+ erts_free(ERTS_ALC_T_DRIVER_LOCK, drv->lock);
+ }
+#endif
+ erts_free(ERTS_ALC_T_DRIVER, drv);
+}
+
+/*
+ * Functions for maintaining a list of driver_entry structs.
+ * Exposed in the driver interface, and may therefore take locks directly.
+ */
+
+void add_driver_entry(ErlDrvEntry *drv){
+ void *rec_lock;
+ rec_lock = erts_smp_tsd_get(driver_list_lock_status_key);
+ /*
+ * Ignore result of erts_add_driver_entry, the init is not
+ * allowed to fail when drivers are added by drivers.
+ */
+ erts_add_driver_entry(drv, NULL, rec_lock != NULL);
+}
+
+int erts_add_driver_entry(ErlDrvEntry *de, DE_Handle *handle, int driver_list_locked)
+{
+ erts_driver_t *dp = erts_alloc(ERTS_ALC_T_DRIVER, sizeof(erts_driver_t));
+ int res;
+
+ if (!driver_list_locked) {
+ erts_smp_mtx_lock(&erts_driver_list_lock);
+ }
+
+ dp->next = driver_list;
+ dp->prev = NULL;
+ if (driver_list != NULL) {
+ driver_list->prev = dp;
+ }
+ driver_list = dp;
+
+ if (!driver_list_locked) {
+ erts_smp_tsd_set(driver_list_lock_status_key, (void *) 1);
+ }
+
+ res = init_driver(dp, de, handle);
+
+ if (res != 0) {
+ /*
+ * Remove it all again...
+ */
+ driver_list = dp->next;
+ if (driver_list != NULL) {
+ driver_list->prev = NULL;
+ }
+ erts_destroy_driver(dp);
+ }
+
+ if (!driver_list_locked) {
+ erts_smp_tsd_set(driver_list_lock_status_key, NULL);
+ erts_smp_mtx_unlock(&erts_driver_list_lock);
+ }
+ return res;
+}
+
+/* Not allowed for dynamic drivers */
+int remove_driver_entry(ErlDrvEntry *drv)
+{
+ erts_driver_t *dp;
+ void *rec_lock;
+
+ rec_lock = erts_smp_tsd_get(driver_list_lock_status_key);
+ if (rec_lock == NULL) {
+ erts_smp_mtx_lock(&erts_driver_list_lock);
+ }
+ dp = driver_list;
+ while (dp && dp->entry != drv)
+ dp = dp->next;
+ if (dp) {
+ if (dp->handle) {
+ if (rec_lock == NULL) {
+ erts_smp_mtx_unlock(&erts_driver_list_lock);
+ }
+ return -1;
+ }
+ if (dp->prev == NULL) {
+ driver_list = dp->next;
+ } else {
+ dp->prev->next = dp->next;
+ }
+ if (dp->next != NULL) {
+ dp->next->prev = dp->prev;
+ }
+ erts_destroy_driver(dp);
+ if (rec_lock == NULL) {
+ erts_smp_mtx_unlock(&erts_driver_list_lock);
+ }
+ return 1;
+ }
+ if (rec_lock == NULL) {
+ erts_smp_mtx_unlock(&erts_driver_list_lock);
+ }
+ return 0;
+}
+
+/* Utility function that can be used for driver-entry callbacks that are
+ * not used, so that every driver writer need not supply a private version.
+ */
+int null_func(void)
+{
+ return 0;
+}
+
+int
+erl_drv_putenv(char *key, char *value)
+{
+ return erts_write_env(key, value);
+}
+
+int
+erl_drv_getenv(char *key, char *value, size_t *value_size)
+{
+ return erts_sys_getenv(key, value, value_size);
+}
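+
+/*
+ * Illustrative sketch (not part of the emulator sources): on input
+ * *value_size holds the buffer size and on return the size needed, so
+ * a too-small buffer can be detected and the call retried.
+ *
+ *    char buf[64];
+ *    size_t sz = sizeof(buf);
+ *    if (erl_drv_getenv("HOME", buf, &sz) == 0)
+ *        ;                             // buf now holds the value
+ *    // a non-zero result means the variable is absent or buf too small
+ */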
diff --git a/erts/emulator/beam/module.c b/erts/emulator/beam/module.c
new file mode 100644
index 0000000000..57a43c89f4
--- /dev/null
+++ b/erts/emulator/beam/module.c
@@ -0,0 +1,134 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 1996-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+
+#include "sys.h"
+#include "erl_vm.h"
+#include "global.h"
+#include "module.h"
+
+#define MODULE_SIZE 50
+#define MODULE_LIMIT (64*1024)
+
+static IndexTable module_table;
+
+/*
+ * SMP note: We don't need to lock accesses to the module table because
+ * there is only one scheduler thread when we update it.
+ */
+
+#include "erl_smp.h"
+
+void module_info(int to, void *to_arg)
+{
+ index_info(to, to_arg, &module_table);
+}
+
+
+static HashValue module_hash(Module* x)
+{
+ return (HashValue) x->module;
+}
+
+
+static int module_cmp(Module* tmpl, Module* obj)
+{
+ return tmpl->module != obj->module;
+}
+
+
+static Module* module_alloc(Module* tmpl)
+{
+ Module* obj = (Module*) erts_alloc(ERTS_ALC_T_MODULE, sizeof(Module));
+
+ obj->module = tmpl->module;
+ obj->code = 0;
+ obj->old_code = 0;
+ obj->code_length = 0;
+ obj->old_code_length = 0;
+ obj->slot.index = -1;
+ obj->nif.handle = NULL;
+ obj->old_nif.handle = NULL;
+ obj->nif.entry = NULL;
+ obj->old_nif.entry = NULL;
+ obj->nif.data = NULL;
+ obj->old_nif.data = NULL;
+ return obj;
+}
+
+
+void init_module_table(void)
+{
+ HashFunctions f;
+
+ f.hash = (H_FUN) module_hash;
+ f.cmp = (HCMP_FUN) module_cmp;
+ f.alloc = (HALLOC_FUN) module_alloc;
+ f.free = 0;
+
+ erts_index_init(ERTS_ALC_T_MODULE_TABLE, &module_table, "module_code",
+ MODULE_SIZE, MODULE_LIMIT, f);
+}
+
+Module*
+erts_get_module(Eterm mod)
+{
+ Module e;
+ int index;
+
+ ASSERT(is_atom(mod));
+ e.module = atom_val(mod);
+ index = index_get(&module_table, (void*) &e);
+ if (index == -1) {
+ return NULL;
+ } else {
+ return (Module*) erts_index_lookup(&module_table, index);
+ }
+}
+
+Module*
+erts_put_module(Eterm mod)
+{
+ Module e;
+ int index;
+
+ ASSERT(is_atom(mod));
+ ERTS_SMP_LC_ASSERT(erts_initialized == 0 || erts_smp_is_system_blocked(0));
+ e.module = atom_val(mod);
+ index = index_put(&module_table, (void*) &e);
+ return (Module*) erts_index_lookup(&module_table, index);
+}
+
+Module *module_code(int i)
+{
+ return (Module*) erts_index_lookup(&module_table, i);
+}
+
+int module_code_size(void)
+{
+ return module_table.entries;
+}
+
+int module_table_sz(void)
+{
+ return index_table_sz(&module_table);
+}
diff --git a/erts/emulator/beam/module.h b/erts/emulator/beam/module.h
new file mode 100644
index 0000000000..314be8e2ee
--- /dev/null
+++ b/erts/emulator/beam/module.h
@@ -0,0 +1,56 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 1996-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+#ifndef __MODULE_H__
+#define __MODULE_H__
+
+#ifndef __INDEX_H__
+#include "index.h"
+#endif
+
+struct erl_module_nif {
+ void* handle;
+ struct enif_entry_t* entry;
+ void* data;
+};
+
+typedef struct erl_module {
+ IndexSlot slot; /* Must be located at top of struct! */
+ int module; /* Atom index for module (not tagged). */
+
+ Eterm* code;
+ Eterm* old_code;
+ int code_length; /* Length of loaded code in bytes. */
+ int old_code_length; /* Length of old loaded code in bytes */
+ unsigned catches, old_catches;
+ struct erl_module_nif nif;
+ struct erl_module_nif old_nif;
+} Module;
+
+Module* erts_get_module(Eterm mod);
+Module* erts_put_module(Eterm mod);
+
+void init_module_table(void);
+void module_info(int, void *);
+
+Module *module_code(int);
+int module_code_size(void);
+int module_table_sz(void);
+
+#endif
diff --git a/erts/emulator/beam/ops.tab b/erts/emulator/beam/ops.tab
new file mode 100644
index 0000000000..ce1df74f03
--- /dev/null
+++ b/erts/emulator/beam/ops.tab
@@ -0,0 +1,1430 @@
+#
+# %CopyrightBegin%
+#
+# Copyright Ericsson AB 1997-2009. All Rights Reserved.
+#
+# The contents of this file are subject to the Erlang Public License,
+# Version 1.1, (the "License"); you may not use this file except in
+# compliance with the License. You should have received a copy of the
+# Erlang Public License along with this software. If not, it can be
+# retrieved online at http://www.erlang.org/.
+#
+# Software distributed under the License is distributed on an "AS IS"
+# basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+# the License for the specific language governing rights and limitations
+# under the License.
+#
+# %CopyrightEnd%
+#
+
+#
+# The instructions that follow are known only to the loader and the emulator.
+# They can be changed without recompiling old Beam files.
+#
+# Instructions starting with an "i_" prefix are instructions produced by
+# instruction transformations; thus, they never occur in BEAM files.
+#
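+
+# Illustrative note (a reading aid, not a rule of this table): entries
+# of the form "pattern => replacement", for example
+# "wait_timeout Fail Src => i_wait_timeout Fail Src" further down,
+# rewrite a generic instruction into its internal i_-prefixed form at
+# load time, while lines without "=>" declare instructions that the
+# emulator implements directly.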
+
+# Special instruction used to generate an error message when
+# trying to load a module compiled by the V1 compiler (R5 & R6).
+# (Specially treated in beam_load.c.)
+
+too_old_compiler/0
+too_old_compiler
+
+#
+# Obsolete instruction usage follows. (Nowadays we use f with
+# a zero label instead of p.)
+#
+
+is_list p S => too_old_compiler
+is_nonempty_list p R => too_old_compiler
+is_nil p R => too_old_compiler
+
+is_tuple p S => too_old_compiler
+test_arity p S Arity => too_old_compiler
+
+is_integer p R => too_old_compiler
+is_float p R => too_old_compiler
+is_atom p R => too_old_compiler
+
+is_eq_exact p S1 S2 => too_old_compiler
+
+# In R9C and earlier, the loader used to insert special instructions inside
+# the module_info/0,1 functions. (In R10B and later, the compiler inserts
+# an explicit call to an undocumented BIF, so that no loader trickery is
+# necessary.) Since the instructions don't work correctly in R12B, simply
+# refuse to load the module.
+
+func_info M=a a==am_module_info A=u==0 | label L | move n r => too_old_compiler
+func_info M=a a==am_module_info A=u==1 | label L | move n r => too_old_compiler
+
+# The undocumented and unsupported guard BIF is_constant/1 was removed
+# in R13. The is_constant/2 operation is marked as obsolete in genop.tab,
+# so the loader will automatically generate a too_old_compiler message
+# if it is used, but we need to handle the is_constant/1 BIF specially here.
+
+bif1 Fail u$func:erlang:is_constant/1 Src Dst => too_old_compiler
+
+
+#
+# All the other instructions.
+#
+
+label L
+i_func_info I a a I
+int_code_end
+
+i_trace_breakpoint
+i_mtrace_breakpoint
+i_debug_breakpoint
+i_count_breakpoint
+i_return_to_trace
+i_yield
+i_global_cons
+i_global_tuple
+i_global_copy
+
+return
+
+%macro: allocate Allocate -pack
+%macro: allocate_zero AllocateZero -pack
+%macro: allocate_heap AllocateHeap -pack
+%macro: allocate_heap_zero AllocateHeapZero -pack
+%macro: test_heap TestHeap -pack
+
+allocate t t
+allocate_heap I I I
+deallocate I
+init y
+allocate_zero t t
+allocate_heap_zero I I I
+
+trim N Remaining => i_trim N
+i_trim I
+
+test_heap I I
+
+allocate_heap S u==0 R => allocate S R
+allocate_heap_zero S u==0 R => allocate_zero S R
+
+init2 y y
+init3 y y y
+init Y1 | init Y2 | init Y3 => init3 Y1 Y2 Y3
+init Y1 | init Y2 => init2 Y1 Y2
+%macro: init2 Init2 -pack
+%macro: init3 Init3 -pack
+
+#
+# Warning: The put_string instruction is specially treated in the loader.
+# Don't change the instruction format unless you change the loader too.
+#
+put_string I I d
+
+# Selecting values
+
+select_val S=q Fail=f Size=u Rest=* => const_select_val(S, Fail, Size, Rest)
+
+select_val S=s Fail=f Size=u Rest=* | use_jump_tab(Size, Rest) => \
+ gen_jump_tab(S, Fail, Size, Rest)
+
+is_integer Fail=f S | select_val S=s Fail=f Size=u Rest=* | use_jump_tab(Size, Rest) => \
+ gen_jump_tab(S, Fail, Size, Rest)
+
+select_val S=s Fail=f Size=u Rest=* | mixed_types(Size, Rest) => \
+ gen_split_values(S, Fail, Size, Rest)
+
+is_integer Fail=f S | select_val S=s Fail=f Size=u Rest=* | \
+ fixed_size_values(Size, Rest) => gen_select_val(S, Fail, Size, Rest)
+
+is_atom Fail=f S | select_val S=s Fail=f Size=u Rest=* | \
+ fixed_size_values(Size, Rest) => gen_select_val(S, Fail, Size, Rest)
+
+select_val S=s Fail=f Size=u Rest=* | fixed_size_values(Size, Rest) => \
+ gen_select_val(S, Fail, Size, Rest)
+
+select_val S=s Fail=f Size=u Rest=* | all_values_are_big(Size, Rest) => \
+ gen_select_big(S, Fail, Size, Rest)
+
+is_tuple Fail=f S | select_tuple_arity S=s Fail=f Size=u Rest=* => \
+ gen_select_tuple_arity(S, Fail, Size, Rest)
+
+select_tuple_arity S=s Fail=f Size=u Rest=* => \
+ gen_select_tuple_arity(S, Fail, Size, Rest)
+
+i_select_val s f I
+i_select_tuple_arity s f I
+i_select_big s f
+i_select_float s f I
+
+i_jump_on_val_zero s f I
+i_jump_on_val s f I I
+
+%macro: get_list GetList -pack
+get_list x x x
+get_list x x y
+get_list x x r
+get_list x y x
+get_list x y y
+get_list x y r
+get_list x r x
+get_list x r y
+
+get_list y x x
+get_list y x y
+get_list y x r
+get_list y y x
+get_list y y y
+get_list y y r
+get_list y r x
+get_list y r y
+
+get_list r x x
+get_list r x y
+get_list r x r
+get_list r y x
+get_list r y y
+get_list r y r
+get_list r r x
+get_list r r y
+
+# Old-style catch.
+catch y f
+catch_end y
+
+# Try/catch.
+try Y F => catch Y F
+try_case Y => try_end Y
+try_end y
+
+try_case_end Literal=q => move Literal x | try_case_end x
+try_case_end s
+
+# Destructive set tuple element
+
+set_tuple_element Lit=q Tuple Pos => move Lit x | set_tuple_element x Tuple Pos
+set_tuple_element s d P
+
+# Get tuple element
+
+%macro: i_get_tuple_element GetTupleElement -pack
+i_get_tuple_element x P x
+i_get_tuple_element r P x
+i_get_tuple_element y P x
+i_get_tuple_element x P r
+i_get_tuple_element y P r
+
+%cold
+i_get_tuple_element r P r
+i_get_tuple_element x P y
+i_get_tuple_element r P y
+i_get_tuple_element y P y
+%hot
+
+%macro: is_number IsNumber -fail_action
+%cold
+is_number f r
+is_number f x
+is_number f y
+%hot
+is_number Fail=f i =>
+is_number Fail=f na => jump Fail
+is_number Fail Literal=q => move Literal x | is_number Fail x
+
+jump f
+
+case_end Literal=q => move Literal x | case_end x
+badmatch Literal=q => move Literal x | badmatch x
+
+case_end s
+badmatch s
+if_end
+raise s s
+
+# Internal now, but could be useful to make known to the compiler.
+badarg j
+system_limit j
+
+move R R =>
+
+move X1=x Y1=y | move X2=x Y2=y => move2 X1 Y1 X2 Y2
+move Y1=y X1=x | move Y2=y X2=x => move2 Y1 X1 Y2 X2
+
+%macro: move2 Move2 -pack
+move2 x y x y
+move2 y x y x
+
+%macro:move Move -pack -gen_dest
+move x x
+move x y
+move x r
+move y x
+move y r
+move r x
+move r y
+move c r
+move c x
+move c y
+move n x
+move n r
+move y y
+
+%cold
+move s d
+%hot
+
+# Receive operations.
+
+loop_rec Fail Src | smp_mark_target_label(Fail) => i_loop_rec Fail Src
+
+label L | wait_timeout Fail Src | smp_already_locked(L) => label L | i_wait_timeout_locked Fail Src
+wait_timeout Fail Src => i_wait_timeout Fail Src
+i_wait_timeout Fail Src=aiq => gen_literal_timeout(Fail, Src)
+i_wait_timeout_locked Fail Src=aiq => gen_literal_timeout_locked(Fail, Src)
+
+label L | wait Fail | smp_already_locked(L) => label L | wait_locked Fail
+wait Fail | smp() => wait_unlocked Fail
+
+label L | timeout | smp_already_locked(L) => label L | timeout_locked
+
+remove_message
+timeout
+timeout_locked
+i_loop_rec f r
+loop_rec_end f
+wait f
+wait_locked f
+wait_unlocked f
+i_wait_timeout f I
+i_wait_timeout f s
+i_wait_timeout_locked f I
+i_wait_timeout_locked f s
+i_wait_error
+i_wait_error_locked
+
+send
+
+#
+# Comparisons.
+#
+
+is_eq_exact Lbl=f R=rxy C=ian => i_is_eq_immed Lbl R C
+is_eq Lbl=f R=rxy C=an => i_is_eq_immed Lbl R C
+
+is_ge Lbl S1 S2 => i_fetch S1 S2 | i_is_ge Lbl
+is_lt Lbl S1 S2 => i_fetch S1 S2 | i_is_lt Lbl
+is_eq Lbl S1 S2 => i_fetch S1 S2 | i_is_eq Lbl
+is_ne Lbl S1 S2 => i_fetch S1 S2 | i_is_ne Lbl
+
+is_eq_exact Lbl=f S1 S2 => i_fetch S1 S2 | i_is_eq_exact Lbl
+is_ne_exact Lbl S1 S2 => i_fetch S1 S2 | i_is_ne_exact Lbl
+
+i_is_lt f
+i_is_ge f
+i_is_eq f
+i_is_ne f
+i_is_eq_exact f
+i_is_ne_exact f
+
+%macro: i_is_eq_immed EqualImmed -fail_action
+i_is_eq_immed f r c
+i_is_eq_immed f x c
+i_is_eq_immed f y c
+
+#
+# Putting things.
+#
+
+put_tuple u==0 Dst => i_put_tuple_only u Dst
+put_tuple Arity Dst | put V => i_put_tuple Arity V Dst
+
+i_put_tuple_only A d
+
+%macro: i_put_tuple PutTuple -pack
+i_put_tuple A x x
+i_put_tuple A y x
+i_put_tuple A r x
+i_put_tuple A n x
+i_put_tuple A c x
+i_put_tuple A x y
+i_put_tuple A x r
+i_put_tuple A y r
+i_put_tuple A n r
+i_put_tuple A c r
+
+%cold
+i_put_tuple A r y
+i_put_tuple A y y
+i_put_tuple A c y
+%hot
+
+%macro:put_list PutList -pack -gen_dest
+
+put_list x n x
+put_list y n x
+put_list x x x
+put_list y x x
+put_list c n x
+put_list x x r
+put_list y r r
+put_list c n r
+
+put_list y y x
+put_list x y x
+put_list r x x
+put_list r y x
+put_list r x r
+put_list y y r
+put_list y r x
+put_list r n x
+
+# put_list SrcReg Constant Dst
+put_list r c r
+put_list r c x
+put_list r c y
+
+put_list x c r
+put_list x c x
+put_list x c y
+
+put_list y c r
+put_list y c x
+put_list y c y
+
+# put_list Constant SrcReg Dst
+put_list c r r
+put_list c r x
+put_list c r y
+
+put_list c x r
+put_list c x x
+put_list c x y
+
+put_list c y r
+put_list c y x
+put_list c y y
+
+%cold
+put_list x r r
+put_list s s d
+%hot
+
+%macro: put Put
+put x
+put r
+put y
+put c
+put n
+
+%macro: i_fetch FetchArgs -pack
+i_fetch c c
+i_fetch c r
+i_fetch c x
+i_fetch c y
+i_fetch r c
+i_fetch r x
+i_fetch r y
+i_fetch x c
+i_fetch x r
+i_fetch x x
+i_fetch x y
+i_fetch y c
+i_fetch y r
+i_fetch y x
+i_fetch y y
+
+%cold
+i_fetch s s
+%hot
+
+#
+# Some more only used by the emulator
+#
+
+normal_exit
+continue_exit
+apply_bif
+call_nif
+call_error_handler
+error_action_code
+call_traced_function
+return_trace
+
+#
+# Instruction transformations & folded instructions.
+#
+
+# Note: There is no 'move_return y r', since there never are any y registers
+# when we do move_return (if we have y registers, we must do move_deallocate_return).
+
+move S r | return => move_return S r
+
+%macro: move_return MoveReturn -nonext
+move_return x r
+move_return c r
+move_return n r
+
+move S r | deallocate D | return => move_deallocate_return S r D
+
+%macro: move_deallocate_return MoveDeallocateReturn -nonext
+move_deallocate_return x r P
+move_deallocate_return y r P
+move_deallocate_return c r P
+move_deallocate_return n r P
+
+deallocate D | return => deallocate_return D
+
+%macro: deallocate_return DeallocateReturn -nonext
+deallocate_return P
+
+test_heap Need u==1 | put_list Y=y r r => test_heap_1_put_list Need Y
+
+test_heap_1_put_list I y
+
+# Test tuple & arity (head)
+
+is_tuple Fail Literal=q => move Literal x | is_tuple Fail x
+is_tuple Fail=f c => jump Fail
+is_tuple Fail=f S=rxy | test_arity Fail=f S=rxy Arity => is_tuple_of_arity Fail S Arity
+
+%macro:is_tuple_of_arity IsTupleOfArity -fail_action
+
+is_tuple_of_arity f x A
+is_tuple_of_arity f y A
+is_tuple_of_arity f r A
+
+%macro: is_tuple IsTuple -fail_action
+is_tuple f x
+is_tuple f y
+is_tuple f r
+
+test_arity Fail Literal=q Arity => move Literal x | test_arity Fail x Arity
+test_arity Fail=f c Arity => jump Fail
+
+%macro: test_arity IsArity -fail_action
+test_arity f x A
+test_arity f y A
+test_arity f r A
+
+is_tuple_of_arity Fail=f Reg Arity | get_tuple_element Reg P=u==0 Dst=xy => \
+ is_tuple_of_arity Fail Reg Arity | extract_next_element Dst | original_reg Reg P
+
+test_arity Fail Reg Arity | get_tuple_element Reg P=u==0 Dst=xy => \
+ test_arity Fail Reg Arity | extract_next_element Dst | original_reg Reg P
+
+original_reg Reg P1 | get_tuple_element Reg P2 Dst=xy | succ(P1, P2) => \
+ extract_next_element Dst | original_reg Reg P2
+
+get_tuple_element Reg P Dst => i_get_tuple_element Reg P Dst | original_reg Reg P
+
+original_reg Reg Pos =>
+
+get_tuple_element Reg P Dst => i_get_tuple_element Reg P Dst
+
+original_reg/2
+
+extract_next_element D1=xy | original_reg Reg P1 | get_tuple_element Reg P2 D2=xy | \
+succ(P1, P2) | succ(D1, D2) => \
+ extract_next_element2 D1 | original_reg Reg P2
+
+extract_next_element2 D1=xy | original_reg Reg P1 | get_tuple_element Reg P2 D2=xy | \
+succ(P1, P2) | succ2(D1, D2) => \
+ extract_next_element3 D1 | original_reg Reg P2
+
+#extract_next_element3 D1=xy | original_reg Reg P1 | get_tuple_element Reg P2 D2=xy | \
+#succ(P1, P2) | succ3(D1, D2) => \
+# extract_next_element4 D1 | original_reg Reg P2
+
+%macro: extract_next_element ExtractNextElement -pack
+extract_next_element x
+extract_next_element y
+
+%macro: extract_next_element2 ExtractNextElement2 -pack
+extract_next_element2 x
+extract_next_element2 y
+
+%macro: extract_next_element3 ExtractNextElement3 -pack
+extract_next_element3 x
+extract_next_element3 y
+
+#%macro: extract_next_element4 ExtractNextElement4 -pack
+#extract_next_element4 x
+#extract_next_element4 y
+
+is_integer Fail=f i =>
+is_integer Fail=f an => jump Fail
+is_integer Fail Literal=q => move Literal x | is_integer Fail x
+
+is_integer Fail=f S=rx | allocate Need Regs => is_integer_allocate Fail S Need Regs
+
+%macro: is_integer_allocate IsIntegerAllocate -fail_action
+is_integer_allocate f x I I
+is_integer_allocate f r I I
+
+%macro: is_integer IsInteger -fail_action
+is_integer f x
+is_integer f y
+is_integer f r
+
+is_list Fail=f n =>
+is_list Fail Literal=q => move Literal x | is_list Fail x
+is_list Fail=f c => jump Fail
+%macro: is_list IsList -fail_action
+is_list f r
+is_list f x
+%cold
+is_list f y
+%hot
+
+is_nonempty_list Fail=f S=rx | allocate Need Rs => is_nonempty_list_allocate Fail S Need Rs
+
+%macro:is_nonempty_list_allocate IsNonemptyListAllocate -fail_action
+is_nonempty_list_allocate f x I I
+is_nonempty_list_allocate f r I I
+
+is_nonempty_list F=f r | test_heap I1 I2 => is_non_empty_list_test_heap F r I1 I2
+
+%macro: is_non_empty_list_test_heap IsNonemptyListTestHeap -fail_action
+is_non_empty_list_test_heap f r I I
+
+%macro: is_nonempty_list IsNonemptyList -fail_action
+is_nonempty_list f x
+is_nonempty_list f y
+is_nonempty_list f r
+
+%macro: is_atom IsAtom -fail_action
+is_atom f x
+is_atom f r
+%cold
+is_atom f y
+%hot
+is_atom Fail=f a =>
+is_atom Fail=f niq => jump Fail
+
+%macro: is_float IsFloat -fail_action
+is_float f r
+is_float f x
+%cold
+is_float f y
+%hot
+is_float Fail=f nai => jump Fail
+is_float Fail Literal=q => move Literal x | is_float Fail x
+
+is_nil Fail=f n =>
+is_nil Fail=f qia => jump Fail
+
+%macro: is_nil IsNil -fail_action
+is_nil f x
+is_nil f y
+is_nil f r
+
+is_binary Fail Literal=q => move Literal x | is_binary Fail x
+is_binary Fail=f c => jump Fail
+%macro: is_binary IsBinary -fail_action
+is_binary f r
+is_binary f x
+%cold
+is_binary f y
+%hot
+
+# XXX Deprecated.
+is_bitstr Fail Term => is_bitstring Fail Term
+
+is_bitstring Fail Literal=q => move Literal x | is_bitstring Fail x
+is_bitstring Fail=f c => jump Fail
+%macro: is_bitstring IsBitstring -fail_action
+is_bitstring f r
+is_bitstring f x
+%cold
+is_bitstring f y
+%hot
+
+is_reference Fail=f cq => jump Fail
+%macro: is_reference IsRef -fail_action
+is_reference f r
+is_reference f x
+%cold
+is_reference f y
+%hot
+
+is_pid Fail=f cq => jump Fail
+%macro: is_pid IsPid -fail_action
+is_pid f r
+is_pid f x
+%cold
+is_pid f y
+%hot
+
+is_port Fail=f cq => jump Fail
+%macro: is_port IsPort -fail_action
+is_port f r
+is_port f x
+%cold
+is_port f y
+%hot
+
+is_boolean Fail=f a==am_true =>
+is_boolean Fail=f a==am_false =>
+is_boolean Fail=f ac => jump Fail
+
+%cold
+%macro: is_boolean IsBoolean -fail_action
+is_boolean f r
+is_boolean f x
+is_boolean f y
+%hot
+
+is_function2 Fail=f acq Arity => jump Fail
+is_function2 Fail=f Fun a => jump Fail
+is_function2 Fail Fun Literal=q => move Literal x | is_function2 Fail Fun x
+
+is_function2 f s s
+%macro: is_function2 IsFunction2 -fail_action
+
+# Allocating & initializing.
+allocate Need Regs | init Y => allocate_init Need Regs Y
+init Y1 | init Y2 => init2 Y1 Y2
+
+%macro: allocate_init AllocateInit -pack
+allocate_init t I y
+
+#################################################################
+# External function and bif calls.
+#################################################################
+
+#
+# The BIF erlang:check_process_code/2 must be called like a function,
+# to ensure that c_p->i (program counter) is set correctly (an ordinary
+# BIF call doesn't set it).
+#
+
+call_ext u==2 Bif=u$bif:erlang:check_process_code/2 => i_call_ext Bif
+call_ext_last u==2 Bif=u$bif:erlang:check_process_code/2 D => i_call_ext_last Bif D
+call_ext_only u==2 Bif=u$bif:erlang:check_process_code/2 => i_call_ext_only Bif
+
+#
+# The BIFs erlang:garbage_collect/0,1 must be called like functions,
+# to allow them to invoke the garbage collector. (The stack pointer must
+# be saved and p->arity must be zeroed, which is not done on ordinary BIF calls.)
+#
+
+call_ext u==0 Bif=u$bif:erlang:garbage_collect/0 => i_call_ext Bif
+call_ext_last u==0 Bif=u$bif:erlang:garbage_collect/0 D => i_call_ext_last Bif D
+call_ext_only u==0 Bif=u$bif:erlang:garbage_collect/0 => i_call_ext_only Bif
+
+call_ext u==1 Bif=u$bif:erlang:garbage_collect/1 => i_call_ext Bif
+call_ext_last u==1 Bif=u$bif:erlang:garbage_collect/1 D => i_call_ext_last Bif D
+call_ext_only u==1 Bif=u$bif:erlang:garbage_collect/1 => i_call_ext_only Bif
+
+#
+# put/2 and erase/1 must be able to do garbage collection, so we must call
+# them like functions.
+#
+
+call_ext u==2 Bif=u$bif:erlang:put/2 => i_call_ext Bif
+call_ext_last u==2 Bif=u$bif:erlang:put/2 D => i_call_ext_last Bif D
+call_ext_only u==2 Bif=u$bif:erlang:put/2 => i_call_ext_only Bif
+
+call_ext u==1 Bif=u$bif:erlang:erase/1 => i_call_ext Bif
+call_ext_last u==1 Bif=u$bif:erlang:erase/1 D => i_call_ext_last Bif D
+call_ext_only u==1 Bif=u$bif:erlang:erase/1 => i_call_ext_only Bif
+
+#
+# The process_info/1,2 BIF should be called like a function, to force
+# the emulator to set c_p->current before calling it (a BIF call doesn't
+# set it).
+#
+# In addition, we force the use of a non-tail-recursive call. This will ensure
+# that c_p->cp points into the function making the call.
+#
+
+call_ext u==1 Bif=u$bif:erlang:process_info/1 => i_call_ext Bif
+call_ext_last u==1 Bif=u$bif:erlang:process_info/1 D => i_call_ext Bif | deallocate_return D
+call_ext_only Ar=u==1 Bif=u$bif:erlang:process_info/1 => allocate u Ar | i_call_ext Bif | deallocate_return u
+
+call_ext u==2 Bif=u$bif:erlang:process_info/2 => i_call_ext Bif
+call_ext_last u==2 Bif=u$bif:erlang:process_info/2 D => i_call_ext Bif | deallocate_return D
+call_ext_only Ar=u==2 Bif=u$bif:erlang:process_info/2 => allocate u Ar | i_call_ext Bif | deallocate_return u
+
+#
+# load_nif/2 also needs to know the calling function, like process_info
+#
+call_ext u==2 Bif=u$bif:erlang:load_nif/2 => i_call_ext Bif
+call_ext_last u==2 Bif=u$bif:erlang:load_nif/2 D => i_call_ext Bif | deallocate_return D
+call_ext_only Ar=u==2 Bif=u$bif:erlang:load_nif/2 => allocate u Ar | i_call_ext Bif | deallocate_return u
+
+
+#
+# The apply/2 and apply/3 BIFs are instructions.
+#
+
+call_ext u==2 u$func:erlang:apply/2 => i_apply_fun
+call_ext_last u==2 u$func:erlang:apply/2 D => i_apply_fun_last D
+call_ext_only u==2 u$func:erlang:apply/2 => i_apply_fun_only
+
+call_ext u==3 u$func:erlang:apply/3 => i_apply
+call_ext_last u==3 u$func:erlang:apply/3 D => i_apply_last D
+call_ext_only u==3 u$func:erlang:apply/3 => i_apply_only
+
+#
+# The exit/1 and throw/1 BIFs never execute the instruction following them;
+# thus there is no need to generate any return instruction.
+#
+
+call_ext_last u==1 Bif=u$bif:erlang:exit/1 D => call_bif1 Bif
+call_ext_last u==1 Bif=u$bif:erlang:throw/1 D => call_bif1 Bif
+
+call_ext_only u==1 Bif=u$bif:erlang:exit/1 => call_bif1 Bif
+call_ext_only u==1 Bif=u$bif:erlang:throw/1 => call_bif1 Bif
+
+#
+# The error/1 and error/2 BIFs never execute the instruction following them;
+# thus there is no need to generate any return instruction.
+# However, they generate stack backtraces, so if the call instruction
+# is a call_ext_only/2 instruction, we explicitly do an allocate/2 to store
+# the continuation pointer on the stack.
+#
+
+call_ext_last u==1 Bif=u$bif:erlang:error/1 D => call_bif1 Bif
+call_ext_last u==2 Bif=u$bif:erlang:error/2 D => call_bif2 Bif
+
+call_ext_only Ar=u==1 Bif=u$bif:erlang:error/1 => \
+ allocate u Ar | call_bif1 Bif
+call_ext_only Ar=u==2 Bif=u$bif:erlang:error/2 => \
+ allocate u Ar | call_bif2 Bif
+
+#
+# The yield/0 BIF is an instruction
+#
+
+call_ext u==0 u$func:erlang:yield/0 => i_yield
+call_ext_last u==0 u$func:erlang:yield/0 D => i_yield | deallocate_return D
+call_ext_only u==0 u$func:erlang:yield/0 => i_yield | return
+
+#
+# The hibernate/3 BIF is an instruction.
+#
+call_ext u==3 u$func:erlang:hibernate/3 => i_hibernate
+call_ext_last u==3 u$func:erlang:hibernate/3 D => i_hibernate
+call_ext_only u==3 u$func:erlang:hibernate/3 => i_hibernate
+
+#
+# The hybrid memory architecture needs special cons and tuple instructions
+# that allocate in the message area. These look like BIFs in the BEAM code.
+#
+
+call_ext u==2 u$func:hybrid:cons/2 => i_global_cons
+call_ext_last u==2 u$func:hybrid:cons/2 D => i_global_cons | deallocate_return D
+call_ext_only Ar=u==2 u$func:hybrid:cons/2 => i_global_cons | return
+
+call_ext u==1 u$func:hybrid:tuple/1 => i_global_tuple
+call_ext_last u==1 u$func:hybrid:tuple/1 D => i_global_tuple | deallocate_return D
+call_ext_only Ar=u==1 u$func:hybrid:tuple/1 => i_global_tuple | return
+
+call_ext u==1 u$func:hybrid:copy/1 => i_global_copy
+call_ext_last u==1 u$func:hybrid:copy/1 D => i_global_copy | deallocate_return D
+call_ext_only u==1 Ar=u$func:hybrid:copy/1 => i_global_copy | return
+
+#
+# The general case for BIFs that have no special instructions.
+# A BIF used in the tail must be followed by a return instruction.
+#
+# To make trapping and stack backtraces work correctly, we make sure that
+# the continuation pointer is always stored on the stack.
+
+call_ext u==0 Bif=u$is_bif => call_bif0 Bif
+call_ext u==1 Bif=u$is_bif => call_bif1 Bif
+call_ext u==2 Bif=u$is_bif => call_bif2 Bif
+call_ext u==3 Bif=u$is_bif => call_bif3 Bif
+
+call_ext_last u==0 Bif=u$is_bif D => call_bif0 Bif | deallocate_return D
+call_ext_last u==1 Bif=u$is_bif D => call_bif1 Bif | deallocate_return D
+call_ext_last u==2 Bif=u$is_bif D => call_bif2 Bif | deallocate_return D
+call_ext_last u==3 Bif=u$is_bif D => call_bif3 Bif | deallocate_return D
+
+call_ext_only Ar=u==0 Bif=u$is_bif => \
+ allocate u Ar | call_bif0 Bif | deallocate_return u
+call_ext_only Ar=u==1 Bif=u$is_bif => \
+ allocate u Ar | call_bif1 Bif | deallocate_return u
+call_ext_only Ar=u==2 Bif=u$is_bif => \
+ allocate u Ar | call_bif2 Bif | deallocate_return u
+call_ext_only Ar=u==3 Bif=u$is_bif => \
+ allocate u Ar | call_bif3 Bif | deallocate_return u
+
+#
+# Any remaining calls are calls to Erlang functions, not BIFs.
+# We rename the instructions to internal names. This is necessary
+# to avoid an endless loop, because we want to call a few BIFs
+# with call instructions.
+#
+
+move S=c r | call_ext Ar=u Func=u$is_not_bif => i_move_call_ext S r Func
+move S=c r | call_ext_last Ar=u Func=u$is_not_bif D => i_move_call_ext_last Func D S r
+move S=c r | call_ext_only Ar=u Func=u$is_not_bif => i_move_call_ext_only Func S r
+
+call_ext Ar=u Func => i_call_ext Func
+call_ext_last Ar=u Func D => i_call_ext_last Func D
+call_ext_only Ar=u Func => i_call_ext_only Func
+
+i_apply
+i_apply_last P
+i_apply_only
+
+i_apply_fun
+i_apply_fun_last P
+i_apply_fun_only
+
+i_hibernate
+
+call_bif0 e
+call_bif1 e
+call_bif2 e
+call_bif3 e
+
+#
+# Calls to non-building and guard BIFs.
+#
+
+bif0 u$bif:erlang:self/0 Dst=d => self Dst
+bif0 u$bif:erlang:node/0 Dst=d => node Dst
+
+bif1 Fail Bif=u$bif:erlang:get/1 Src=s Dst=d => i_get Src Dst
+
+bif2 Jump=j u$bif:erlang:element/2 S1=s S2=s Dst=d => gen_element(Jump, S1, S2, Dst)
+
+bif1 Fail Bif Literal=q Dst => move Literal x | bif1 Fail Bif x Dst
+bif1 p Bif S1 Dst => bif1_body Bif S1 Dst
+
+bif1_body Bif Literal=q Dst => move Literal x | bif1_body Bif x Dst
+
+bif2 p Bif S1 S2 Dst => i_fetch S1 S2 | i_bif2_body Bif Dst
+bif2 Fail=f Bif S1 S2 Dst => i_fetch S1 S2 | i_bif2 Fail Bif Dst
+
+i_get s d
+
+%macro: self Self
+self r
+self x
+self y
+
+%macro: node Node
+node r
+node x
+%cold
+node y
+%hot
+
+i_fast_element j I s d
+i_element j s s d
+
+bif1 f b s d
+bif1_body b s d
+i_bif2 f b d
+i_bif2_body b d
+
+#
+# Internal calls.
+#
+
+move S=c r | call Ar P=f => i_move_call S r P
+move S=s r | call Ar P=f => move_call S r P
+
+i_move_call c r f
+
+%macro:move_call MoveCall -arg_f -size -nonext
+move_call/3
+
+move_call x r f
+move_call y r f
+
+move S=c r | call_last Ar P=f D => i_move_call_last P D S r
+move S r | call_last Ar P=f D => move_call_last S r P D
+
+i_move_call_last f P c r
+
+%macro:move_call_last MoveCallLast -arg_f -nonext
+
+move_call_last/4
+move_call_last x r f P
+move_call_last y r f P
+
+move S=c r | call_only Ar P=f => i_move_call_only P S r
+move S=x r | call_only Ar P=f => move_call_only S r P
+
+i_move_call_only f c r
+
+%macro:move_call_only MoveCallOnly -arg_f -nonext
+move_call_only/3
+
+move_call_only x r f
+
+call Ar Func => i_call Func
+call_last Ar Func D => i_call_last Func D
+call_only Ar Func => i_call_only Func
+
+i_call f
+i_call_last f P
+i_call_only f
+
+i_call_ext e
+i_call_ext_last e P
+i_call_ext_only e
+
+i_move_call_ext c r e
+i_move_call_ext_last e P c r
+i_move_call_ext_only e c r
+
+# Fun calls.
+
+call_fun Arity=u | deallocate D | return => i_call_fun_last Arity D
+call_fun Arity=u => i_call_fun Arity
+
+i_call_fun I
+i_call_fun_last I P
+
+make_fun2 OldIndex=u => gen_make_fun2(OldIndex)
+
+%macro: i_make_fun MakeFun -pack
+%cold
+i_make_fun I t
+%hot
+
+%macro: is_function IsFunction -fail_action
+is_function f x
+is_function f y
+is_function f r
+is_function Fail=f c => jump Fail
+
+func_info M=a F=a A=u | label L => gen_func_info(M, F, A, L)
+
+# ================================================================
+# New bit syntax matching (R11B).
+# ================================================================
+
+%cold
+bs_start_match2 Fail=f ica X Y D => jump Fail
+bs_start_match2 Fail Bin X Y D => i_bs_start_match2 Bin Fail X Y D
+i_bs_start_match2 r f I I d
+i_bs_start_match2 x f I I d
+i_bs_start_match2 y f I I d
+
+bs_save2 Reg Index => gen_bs_save(Reg, Index)
+i_bs_save2 r I
+i_bs_save2 x I
+
+bs_restore2 Reg Index => gen_bs_restore(Reg, Index)
+i_bs_restore2 r I
+i_bs_restore2 x I
+
+# Matching integers
+bs_match_string Fail Ms Bits Val => i_bs_match_string Ms Fail Bits Val
+
+i_bs_match_string r f I I
+i_bs_match_string x f I I
+
+# Fetching integers from binaries.
+bs_get_integer2 Fail=f Ms=rx Live=u Sz=sq Unit=u Flags=u Dst=d => \
+ gen_get_integer2(Fail, Ms, Live, Sz, Unit, Flags, Dst)
+
+i_bs_get_integer_small_imm r I f I d
+i_bs_get_integer_small_imm x I f I d
+i_bs_get_integer_imm r I I f I d
+i_bs_get_integer_imm x I I f I d
+i_bs_get_integer f I I d
+i_bs_get_integer_8 r f d
+i_bs_get_integer_8 x f d
+i_bs_get_integer_16 r f d
+i_bs_get_integer_16 x f d
+i_bs_get_integer_32 r f I d
+i_bs_get_integer_32 x f I d
+
+# Fetching binaries from binaries.
+bs_get_binary2 Fail=f Ms=rx Live=u Sz=sq Unit=u Flags=u Dst=d => \
+ gen_get_binary2(Fail, Ms, Live, Sz, Unit, Flags, Dst)
+
+%macro: i_bs_get_binary_imm2 BsGetBinaryImm_2 -fail_action -gen_dest
+%macro: i_bs_get_binary2 BsGetBinary_2 -fail_action -gen_dest
+%macro: i_bs_get_binary_all2 BsGetBinaryAll_2 -fail_action -gen_dest
+
+i_bs_get_binary_imm2 f r I I I d
+i_bs_get_binary_imm2 f x I I I d
+i_bs_get_binary2 f r I s I d
+i_bs_get_binary2 f x I s I d
+i_bs_get_binary_all2 f r I I d
+i_bs_get_binary_all2 f x I I d
+i_bs_get_binary_all_reuse r f I
+i_bs_get_binary_all_reuse x f I
+
+# Fetching float from binaries.
+bs_get_float2 Fail=f Ms=rx Live=u Sz=s Unit=u Flags=u Dst=d => \
+ gen_get_float2(Fail, Ms, Live, Sz, Unit, Flags, Dst)
+
+bs_get_float2 Fail=f Ms=rx Live=u Sz=q Unit=u Flags=u Dst=d => jump Fail
+
+%macro: i_bs_get_float2 BsGetFloat2 -fail_action -gen_dest
+i_bs_get_float2 f r I s I d
+i_bs_get_float2 f x I s I d
+
+# Miscellaneous
+
+bs_skip_bits2 Fail=f Ms=rx Sz=s Unit=u Flags=u => \
+ gen_skip_bits2(Fail, Ms, Sz, Unit, Flags)
+bs_skip_bits2 Fail=f Ms=rx Sz=q Unit=u Flags=u => \
+ gen_skip_bits2(Fail, Ms, Sz, Unit, Flags)
+
+%macro: i_bs_skip_bits_imm2 BsSkipBitsImm2 -fail_action
+i_bs_skip_bits_imm2 f r I
+i_bs_skip_bits_imm2 f x I
+
+%macro: i_bs_skip_bits2 BsSkipBits2 -fail_action
+i_bs_skip_bits2 f r x I
+i_bs_skip_bits2 f r y I
+i_bs_skip_bits2 f x x I
+i_bs_skip_bits2 f x r I
+i_bs_skip_bits2 f x y I
+
+%macro: i_bs_skip_bits_all2 BsSkipBitsAll2 -fail_action
+i_bs_skip_bits_all2 f r I
+i_bs_skip_bits_all2 f x I
+
+bs_test_tail2 Fail=f Ms=rx Bits=u==0 => bs_test_zero_tail2 Fail Ms
+bs_test_tail2 Fail=f Ms=rx Bits=u => bs_test_tail_imm2 Fail Ms Bits
+bs_test_zero_tail2 f r
+bs_test_zero_tail2 f x
+bs_test_tail_imm2 f r I
+bs_test_tail_imm2 f x I
+
+bs_test_unit F Ms Unit=u==8 => bs_test_unit8 F Ms
+bs_test_unit f r I
+bs_test_unit f x I
+bs_test_unit8 f r
+bs_test_unit8 f x
+
+bs_context_to_binary r
+bs_context_to_binary x
+bs_context_to_binary y
+
+#
+# Utf8/utf16/utf32 support. (R12B-5)
+#
+bs_get_utf8 Fail=f Ms=rx u u Dst=d => i_bs_get_utf8 Ms Fail Dst
+i_bs_get_utf8 r f d
+i_bs_get_utf8 x f d
+
+bs_skip_utf8 Fail=f Ms=rx u u => i_bs_get_utf8 Ms Fail x
+
+bs_get_utf16 Fail=f Ms=rx u Flags=u Dst=d => i_bs_get_utf16 Ms Fail Flags Dst
+bs_skip_utf16 Fail=f Ms=rx u Flags=u => i_bs_get_utf16 Ms Fail Flags x
+
+i_bs_get_utf16 r f I d
+i_bs_get_utf16 x f I d
+
+bs_get_utf32 Fail=f Ms=rx Live=u Flags=u Dst=d => \
+ bs_get_integer2 Fail Ms Live i=32 u=1 Flags Dst | \
+ i_fetch Dst Ms | \
+ i_bs_validate_unicode_retract Fail
+bs_skip_utf32 Fail=f Ms=rx Live=u Flags=u => \
+ bs_get_integer2 Fail Ms Live i=32 u=1 Flags x | \
+ i_fetch x Ms | \
+ i_bs_validate_unicode_retract Fail
+
+i_bs_validate_unicode_retract j
+%hot
+
+#
+# Constructing binaries
+#
+%cold
+
+bs_init2 Fail Sz Words Regs Flags Dst | binary_too_big(Sz) => system_limit Fail
+
+bs_init2 Fail Sz=u Words=u==0 Regs Flags Dst | should_gen_heap_bin(Sz) => \
+ i_bs_init_heap_bin Sz Regs Dst
+bs_init2 Fail Sz=u Words=u==0 Regs Flags Dst => i_bs_init Sz Regs Dst
+
+bs_init2 Fail Sz=u Words Regs Flags Dst | should_gen_heap_bin(Sz) => \
+ i_bs_init_heap_bin_heap Sz Words Regs Dst
+bs_init2 Fail Sz=u Words Regs Flags Dst => \
+ i_bs_init_heap Sz Words Regs Dst
+
+bs_init2 Fail Sz Words=u==0 Regs Flags Dst => \
+ i_bs_init_fail Sz Fail Regs Dst
+bs_init2 Fail Sz Words Regs Flags Dst => \
+ i_fetch Sz r | i_bs_init_fail_heap Words Fail Regs Dst
+
+i_bs_init_fail r j I d
+i_bs_init_fail x j I d
+i_bs_init_fail y j I d
+
+i_bs_init_fail_heap I j I d
+
+i_bs_init I I d
+i_bs_init_heap_bin I I d
+
+i_bs_init_heap I I I d
+i_bs_init_heap_bin_heap I I I d
+
+
+bs_init_bits Fail Sz Words Regs Flags Dst | binary_too_big_bits(Sz) => system_limit Fail
+
+bs_init_bits Fail Sz=u Words=u==0 Regs Flags Dst => i_bs_init_bits Sz Regs Dst
+bs_init_bits Fail Sz=u Words Regs Flags Dst => i_bs_init_bits_heap Sz Words Regs Dst
+
+bs_init_bits Fail Sz Words=u==0 Regs Flags Dst => \
+ i_bs_init_bits_fail Sz Fail Regs Dst
+bs_init_bits Fail Sz Words Regs Flags Dst => \
+ i_fetch Sz r | i_bs_init_bits_fail_heap Words Fail Regs Dst
+
+i_bs_init_bits_fail r j I d
+i_bs_init_bits_fail x j I d
+i_bs_init_bits_fail y j I d
+
+i_bs_init_bits_fail_heap I j I d
+
+i_bs_init_bits I I d
+i_bs_init_bits_heap I I I d
+
+bs_bits_to_bytes Fail Src Dst => i_bs_bits_to_bytes Src Fail Dst
+
+i_bs_bits_to_bytes r j d
+i_bs_bits_to_bytes x j d
+i_bs_bits_to_bytes y j d
+
+bs_add Fail S1=i==0 S2 Unit=u==1 D => move S2 D
+bs_add Fail S1 S2 Unit D => i_fetch S1 S2 | i_bs_add Fail Unit D
+
+i_bs_add j I d
+
+bs_append Fail Size Extra Live Unit Bin Flags Dst => \
+ i_fetch Size Bin | i_bs_append Fail Extra Live Unit Dst
+
+bs_private_append Fail Size Unit Bin Flags Dst => \
+ i_fetch Size Bin | i_bs_private_append Fail Unit Dst
+
+bs_init_writable
+
+i_bs_append j I I I d
+i_bs_private_append j I d
+
+#
+# Storing integers into binaries.
+#
+
+bs_put_integer Fail=j Sz=s Unit=u Flags=u Literal=q => \
+ move Literal x | bs_put_integer Fail Sz Unit Flags x
+bs_put_integer Fail=j Sz=sq Unit=u Flags=u Src=s => \
+ gen_put_integer(Fail, Sz, Unit, Flags, Src)
+
+%macro: i_new_bs_put_integer NewBsPutInteger
+%macro: i_new_bs_put_integer_imm NewBsPutIntegerImm
+
+i_new_bs_put_integer j s I s
+i_new_bs_put_integer_imm j I I s
+
+#
+# Utf8/utf16/utf32 support. (R12B-5)
+#
+
+bs_utf8_size Fail Literal=q Dst=d => \
+ move Literal x | bs_utf8_size Fail x Dst
+bs_utf8_size j Src=s Dst=d => i_bs_utf8_size Src Dst
+
+i_bs_utf8_size s d
+
+bs_utf16_size Fail Literal=q Dst=d => \
+ move Literal x | bs_utf16_size Fail x Dst
+bs_utf16_size j Src=s Dst=d => i_bs_utf16_size Src Dst
+
+i_bs_utf16_size s d
+
+bs_put_utf8 Fail=j Flags=u Literal=q => \
+ move Literal x | bs_put_utf8 Fail Flags x
+bs_put_utf8 Fail=j u Src=s => i_bs_put_utf8 Fail Src
+
+i_bs_put_utf8 j s
+
+bs_put_utf16 Fail=j Flags=u Literal=q => \
+ move Literal x | bs_put_utf16 Fail Flags x
+bs_put_utf16 Fail=j Flags=u Src=s => i_bs_put_utf16 Fail Flags Src
+
+i_bs_put_utf16 j I s
+
+bs_put_utf32 Fail=j Flags=u Literal=q => \
+ move Literal x | bs_put_utf32 Fail Flags x
+bs_put_utf32 Fail=j Flags=u Src=s => \
+ i_bs_validate_unicode Fail Src | bs_put_integer Fail i=32 u=1 Flags Src
+
+i_bs_validate_unicode j s
+
+#
+# Storing floats into binaries.
+#
+bs_put_float Fail Sz=q Unit Flags Val => badarg Fail
+
+bs_put_float Fail=j Sz Unit=u Flags=u Literal=q => \
+ move Literal x | bs_put_float Fail Sz Unit Flags x
+
+bs_put_float Fail=j Sz=s Unit=u Flags=u Src=s => \
+ gen_put_float(Fail, Sz, Unit, Flags, Src)
+
+%macro: i_new_bs_put_float NewBsPutFloat
+%macro: i_new_bs_put_float_imm NewBsPutFloatImm
+
+i_new_bs_put_float j s I s
+i_new_bs_put_float_imm j I I s
+
+#
+# Storing binaries into binaries.
+#
+
+bs_put_binary Fail Sz Unit Flags Literal=q => \
+ move Literal x | bs_put_binary Fail Sz Unit Flags x
+bs_put_binary Fail=j Sz=s Unit=u Flags=u Src=s => \
+ gen_put_binary(Fail, Sz, Unit, Flags, Src)
+
+%macro: i_new_bs_put_binary NewBsPutBinary
+i_new_bs_put_binary j s I s
+
+%macro: i_new_bs_put_binary_imm NewBsPutBinaryImm
+i_new_bs_put_binary_imm j I s
+
+%macro: i_new_bs_put_binary_all NewBsPutBinaryAll
+i_new_bs_put_binary_all j s I
+
+#
+# Warning: The i_bs_put_string and i_new_bs_put_string instructions
+# are specially treated in the loader.
+# Don't change the instruction format unless you change the loader too.
+#
+
+bs_put_string I I
+
+%hot
+
+#
+# New floating point instructions (R8).
+#
+
+fadd p FR1 FR2 FR3 => i_fadd FR1 FR2 FR3
+fsub p FR1 FR2 FR3 => i_fsub FR1 FR2 FR3
+fmul p FR1 FR2 FR3 => i_fmul FR1 FR2 FR3
+fdiv p FR1 FR2 FR3 => i_fdiv FR1 FR2 FR3
+fnegate p FR1 FR2 => i_fnegate FR1 FR2
+
+fconv Int=iq Dst=l => move Int x | fconv x Dst
+
+fmove q l
+fmove d l
+fconv d l
+
+i_fadd l l l
+i_fsub l l l
+i_fmul l l l
+i_fdiv l l l
+i_fnegate l l
+
+fclearerror | no_fpe_signals() =>
+fcheckerror p | no_fpe_signals() =>
+fcheckerror p => i_fcheckerror
+
+i_fcheckerror
+fclearerror
+
+fmove FR=l Dst=d | new_float_allocation() => fmove_new FR Dst
+
+# The new instruction for moving a float out of a floating point register.
+# (No allocation.)
+fmove_new l d
+
+#
+# New apply instructions in R10B.
+#
+
+apply I
+apply_last I P
+
+#
+# New GCing arithmetic instructions.
+#
+
+gc_bif2 Fail I u$bif:erlang:splus/2 S1 S2 Dst=d => i_fetch S1 S2 | i_plus Fail I Dst
+gc_bif2 Fail I u$bif:erlang:sminus/2 S1 S2 Dst=d => i_fetch S1 S2 | i_minus Fail I Dst
+gc_bif2 Fail I u$bif:erlang:stimes/2 S1 S2 Dst=d => i_fetch S1 S2 | i_times Fail I Dst
+gc_bif2 Fail I u$bif:erlang:div/2 S1 S2 Dst=d => i_fetch S1 S2 | i_m_div Fail I Dst
+
+gc_bif2 Fail I u$bif:erlang:intdiv/2 S1 S2 Dst=d => i_fetch S1 S2 | i_int_div Fail I Dst
+gc_bif2 Fail I u$bif:erlang:rem/2 S1 S2 Dst=d => i_fetch S1 S2 | i_rem Fail I Dst
+
+gc_bif2 Fail I u$bif:erlang:bsl/2 S1 S2 Dst=d => i_fetch S1 S2 | i_bsl Fail I Dst
+gc_bif2 Fail I u$bif:erlang:bsr/2 S1 S2 Dst=d => i_fetch S1 S2 | i_bsr Fail I Dst
+
+gc_bif2 Fail I u$bif:erlang:band/2 S1 S2 Dst=d => i_fetch S1 S2 | i_band Fail I Dst
+gc_bif2 Fail I u$bif:erlang:bor/2 S1 S2 Dst=d => i_fetch S1 S2 | i_bor Fail I Dst
+gc_bif2 Fail I u$bif:erlang:bxor/2 S1 S2 Dst=d => i_fetch S1 S2 | i_bxor Fail I Dst
+
+gc_bif1 Fail I u$bif:erlang:bnot/1 Src Dst=d => i_int_bnot Fail Src I Dst
+
+gc_bif1 Fail I u$bif:erlang:sminus/1 Src Dst=d => i_fetch i Src | i_minus Fail I Dst
+gc_bif1 Fail I u$bif:erlang:splus/1 Src Dst=d => i_fetch i Src | i_plus Fail I Dst
+
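+# (Illustration, not a rule: the compiler emits gc_bif2 for an expression
+# such as '+', which the rules above split into an i_fetch of both operands
+# followed by i_plus; the Live operand allows the instruction to garbage
+# collect if the result needs heap space.)
+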
+i_plus j I d
+i_minus j I d
+i_times j I d
+i_m_div j I d
+i_int_div j I d
+i_rem j I d
+
+i_bsl j I d
+i_bsr j I d
+
+i_band j I d
+i_bor j I d
+i_bxor j I d
+
+i_int_bnot j s I d
+
+#
+# Old guard BIFs that create heap fragments are no longer allowed.
+#
+bif1 Fail u$bif:erlang:length/1 s d => too_old_compiler
+bif1 Fail u$bif:erlang:size/1 s d => too_old_compiler
+bif1 Fail u$bif:erlang:abs/1 s d => too_old_compiler
+bif1 Fail u$bif:erlang:float/1 s d => too_old_compiler
+bif1 Fail u$bif:erlang:round/1 s d => too_old_compiler
+bif1 Fail u$bif:erlang:trunc/1 s d => too_old_compiler
+
+#
+# Guard BIFs.
+#
+gc_bif1 Fail I Bif=u$bif:erlang:length/1 Src Dst=d => \
+ gen_guard_bif(Fail, I, Bif, Src, Dst)
+
+gc_bif1 Fail I Bif=u$bif:erlang:size/1 Src Dst=d => \
+ gen_guard_bif(Fail, I, Bif, Src, Dst)
+
+gc_bif1 Fail I Bif=u$bif:erlang:bit_size/1 Src Dst=d => \
+ gen_guard_bif(Fail, I, Bif, Src, Dst)
+
+gc_bif1 Fail I Bif=u$bif:erlang:byte_size/1 Src Dst=d => \
+ gen_guard_bif(Fail, I, Bif, Src, Dst)
+
+gc_bif1 Fail I Bif=u$bif:erlang:abs/1 Src Dst=d => \
+ gen_guard_bif(Fail, I, Bif, Src, Dst)
+
+gc_bif1 Fail I Bif=u$bif:erlang:float/1 Src Dst=d => \
+ gen_guard_bif(Fail, I, Bif, Src, Dst)
+
+gc_bif1 Fail I Bif=u$bif:erlang:round/1 Src Dst=d => \
+ gen_guard_bif(Fail, I, Bif, Src, Dst)
+
+gc_bif1 Fail I Bif=u$bif:erlang:trunc/1 Src Dst=d => \
+ gen_guard_bif(Fail, I, Bif, Src, Dst)
+
+i_gc_bif1 Fail Bif V=q Live D => move V x | i_gc_bif1 Fail Bif x Live D
+
+i_gc_bif1 j I s I d
+
+#
+# R13B03
+#
+on_load
diff --git a/erts/emulator/beam/packet_parser.c b/erts/emulator/beam/packet_parser.c
new file mode 100644
index 0000000000..8c8029d450
--- /dev/null
+++ b/erts/emulator/beam/packet_parser.c
@@ -0,0 +1,847 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2008-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/* A protocol decoder. Simple packet length extraction as well as packet
+ * body parsing with protocol specific callback interfaces (http and ssl).
+ *
+ * Code ripped out from inet_drv.c so that it can also be used by the
+ * decode_packet BIF.
+ */
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+
+#include "packet_parser.h"
+
+#include <ctype.h>
+#include "sys.h"
+
+/* #define INET_DRV_DEBUG 1 */
+#ifdef INET_DRV_DEBUG
+# define DEBUG 1
+# undef DEBUGF
+# define DEBUGF(X) printf X
+#endif
+
+#define get_int24(s) ((((unsigned char*) (s))[0] << 16) | \
+ (((unsigned char*) (s))[1] << 8) | \
+ (((unsigned char*) (s))[2]))
+
+#define get_little_int32(s) ((((unsigned char*) (s))[3] << 24) | \
+ (((unsigned char*) (s))[2] << 16) | \
+ (((unsigned char*) (s))[1] << 8) | \
+ (((unsigned char*) (s))[0]))
+
+#define put_int24(s, x) ((((unsigned char*)(s))[0] = ((x) >> 16) & 0xff), \
+ (((unsigned char*)(s))[1] = ((x) >> 8) & 0xff), \
+ (((unsigned char*)(s))[2] = (x) & 0xff))
+
+
+#if !defined(__WIN32__) && !defined(HAVE_STRNCASECMP)
+#define STRNCASECMP my_strncasecmp
+
+static int my_strncasecmp(const char *s1, const char *s2, size_t n)
+{
+ int i;
+
+ for (i=0;i<n-1 && s1[i] && s2[i] && toupper(s1[i]) == toupper(s2[i]);++i)
+ ;
+ return (toupper(s1[i]) - toupper(s2[i]));
+}
+
+
+#else
+#define STRNCASECMP strncasecmp
+#endif
+
+
+#define HTTP_HDR_HASH_SIZE 53
+#define HTTP_METH_HASH_SIZE 13
+#define HTTP_MAX_NAME_LEN 20
+
+static char tspecial[128];
+
+static const char* http_hdr_strings[] = {
+ "Cache-Control",
+ "Connection",
+ "Date",
+ "Pragma",
+ "Transfer-Encoding",
+ "Upgrade",
+ "Via",
+ "Accept",
+ "Accept-Charset",
+ "Accept-Encoding",
+ "Accept-Language",
+ "Authorization",
+ "From",
+ "Host",
+ "If-Modified-Since",
+ "If-Match",
+ "If-None-Match",
+ "If-Range",
+ "If-Unmodified-Since",
+ "Max-Forwards",
+ "Proxy-Authorization",
+ "Range",
+ "Referer",
+ "User-Agent",
+ "Age",
+ "Location",
+ "Proxy-Authenticate",
+ "Public",
+ "Retry-After",
+ "Server",
+ "Vary",
+ "Warning",
+ "Www-Authenticate",
+ "Allow",
+ "Content-Base",
+ "Content-Encoding",
+ "Content-Language",
+ "Content-Length",
+ "Content-Location",
+ "Content-Md5",
+ "Content-Range",
+ "Content-Type",
+ "Etag",
+ "Expires",
+ "Last-Modified",
+ "Accept-Ranges",
+ "Set-Cookie",
+ "Set-Cookie2",
+ "X-Forwarded-For",
+ "Cookie",
+ "Keep-Alive",
+ "Proxy-Connection",
+ NULL
+};
+
+
+static const char* http_meth_strings[] = {
+ "OPTIONS",
+ "GET",
+ "HEAD",
+ "POST",
+ "PUT",
+ "DELETE",
+ "TRACE",
+ NULL
+};
+
+static http_atom_t http_hdr_table[sizeof(http_hdr_strings)/sizeof(char*)];
+static http_atom_t http_meth_table[sizeof(http_meth_strings)/sizeof(char*)];
+
+static http_atom_t* http_hdr_hash[HTTP_HDR_HASH_SIZE];
+static http_atom_t* http_meth_hash[HTTP_METH_HASH_SIZE];
+
+#define CRNL(ptr) (((ptr)[0] == '\r') && ((ptr)[1] == '\n'))
+#define NL(ptr) ((ptr)[0] == '\n')
+#define SP(ptr) (((ptr)[0] == ' ') || ((ptr)[0] == '\t'))
+#define is_tspecial(x) ((((x) > 32) && ((x) < 128)) ? tspecial[(x)] : 1)
+
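+/* Incremental string hash, essentially the classic PJW/ELF hash: shift
+ * in each character and fold any overflow from the top nibble back into
+ * the low bits.
+ */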
+#define hash_update(h,c) do { \
+ unsigned long __g; \
+ (h) = ((h) << 4) + (c); \
+ if ((__g = (h) & 0xf0000000)) { \
+ (h) ^= (__g >> 24); \
+ (h) ^= __g; \
+ } \
+ } while(0)
+
+static void http_hash_insert(const char* name, http_atom_t* entry,
+ http_atom_t** hash, int hsize)
+{
+ unsigned long h = 0;
+ const unsigned char* ptr = (const unsigned char*) name;
+ int ix;
+ int len = 0;
+
+ while (*ptr != '\0') {
+ hash_update(h, *ptr);
+ ptr++;
+ len++;
+ }
+ ix = h % hsize;
+
+ entry->next = hash[ix];
+ entry->h = h;
+ entry->name = name;
+ entry->len = len;
+ entry->atom = driver_mk_atom((char*)name);
+
+ hash[ix] = entry;
+}
+
+
+static int http_init(void)
+{
+ int i;
+ unsigned char* ptr;
+
+ for (i = 0; i < 33; i++)
+ tspecial[i] = 1;
+ for (i = 33; i < 127; i++)
+ tspecial[i] = 0;
+ for (ptr = (unsigned char*)"()<>@,;:\\\"/[]?={} \t"; *ptr != '\0'; ptr++)
+ tspecial[*ptr] = 1;
+
+ for (i = 0; i < HTTP_HDR_HASH_SIZE; i++)
+ http_hdr_hash[i] = NULL;
+ for (i = 0; http_hdr_strings[i] != NULL; i++) {
+ ASSERT(strlen(http_hdr_strings[i]) <= HTTP_MAX_NAME_LEN);
+ http_hdr_table[i].index = i;
+ http_hash_insert(http_hdr_strings[i],
+ &http_hdr_table[i],
+ http_hdr_hash, HTTP_HDR_HASH_SIZE);
+ }
+
+ for (i = 0; i < HTTP_METH_HASH_SIZE; i++)
+ http_meth_hash[i] = NULL;
+ for (i = 0; http_meth_strings[i] != NULL; i++) {
+ http_meth_table[i].index = i;
+ http_hash_insert(http_meth_strings[i],
+ &http_meth_table[i],
+ http_meth_hash, HTTP_METH_HASH_SIZE);
+ }
+ return 0;
+}
+
+
+#define CDR_MAGIC "GIOP"
+
+struct cdr_head {
+ unsigned char magic[4]; /* 4 bytes must be 'GIOP' */
+ unsigned char major; /* major version */
+ unsigned char minor; /* minor version */
+ unsigned char flags; /* bit 0: 0 == big endian, 1 == little endian
+ bit 1: 1 == more fragments follow */
+ unsigned char message_type; /* message type ... */
+ unsigned char message_size[4]; /* size in (flags bit 0 byte order) */
+};
+
+#define TPKT_VRSN 3
+
+struct tpkt_head {
+ unsigned char vrsn; /* contains TPKT_VRSN */
+ unsigned char reserved;
+ unsigned char packet_length[2]; /* size incl header, big-endian (?) */
+};
+
+void packet_parser_init()
+{
+ static int done = 0;
+ if (!done) {
+ done = 1;
+ http_init();
+ }
+}
+
+/* Return > 0 Total packet length in bytes.
+ * = 0 Length unknown, need more data.
+ * < 0 Error, invalid format.
+ */
+int packet_get_length(enum PacketParseType htype,
+ const char* ptr, unsigned n, /* Bytes read so far */
+ unsigned max_plen, /* Max packet length, 0=no limit */
+ unsigned trunc_len, /* Truncate (lines) if longer, 0=no limit */
+ int* statep) /* Protocol specific state */
+{
+ unsigned hlen, plen;
+
+ switch (htype) {
+ case TCP_PB_RAW:
+ if (n == 0) goto more;
+ else {
+ DEBUGF((" => nothing remain packet=%d\r\n", n));
+ return n;
+ }
+
+ case TCP_PB_1:
+ /* TCP_PB_1: [L0 | Data] */
+ hlen = 1;
+ if (n < hlen) goto more;
+ plen = get_int8(ptr);
+ goto remain;
+
+ case TCP_PB_2:
+ /* TCP_PB_2: [L1,L0 | Data] */
+ hlen = 2;
+ if (n < hlen) goto more;
+ plen = get_int16(ptr);
+ goto remain;
+
+ case TCP_PB_4:
+ /* TCP_PB_4: [L3,L2,L1,L0 | Data] */
+ hlen = 4;
+ if (n < hlen) goto more;
+ plen = get_int32(ptr);
+ goto remain;
+
+ case TCP_PB_RM:
+ /* TCP_PB_RM: [L3,L2,L1,L0 | Data]
+ ** where MSB (bit) is used to signal end of record
+ */
+ hlen = 4;
+ if (n < hlen) goto more;
+ plen = get_int32(ptr) & 0x7fffffff;
+ goto remain;
+
+ case TCP_PB_LINE_LF: {
+ /* TCP_PB_LINE_LF: [Data ... \n] */
+ const char* ptr2;
+ if ((ptr2 = memchr(ptr, '\n', n)) == NULL) {
+ if (n >= trunc_len && trunc_len!=0) { /* buffer full */
+ DEBUGF((" => line buffer full (no NL)=%d\r\n", n));
+ return trunc_len;
+ }
+ goto more;
+ }
+ else {
+ int len = (ptr2 - ptr) + 1; /* including newline */
+ if (len > trunc_len && trunc_len!=0) {
+ DEBUGF((" => truncated line=%d\r\n", trunc_len));
+ return trunc_len;
+ }
+ DEBUGF((" => nothing remain packet=%d\r\n", len));
+ return len;
+ }
+ }
+
+ case TCP_PB_ASN1: {
+ /* TCP_PB_ASN1: handles long (4 bytes) or short length format */
+ const char* tptr = ptr;
+ int length;
+ int nn = n;
+
+ if (n < 2) goto more;
+ nn--;
+ if ((*tptr++ & 0x1f) == 0x1f) { /* Long tag format */
+ while (nn && ((*tptr & 0x80) == 0x80)) {
+ tptr++;
+ nn--;
+ }
+ if (nn < 2) goto more;
+ tptr++;
+ nn--;
+ }
+
+        /* tptr now points to the length field and nn characters remain */
+ length = *tptr & 0x7f;
+ if ((*tptr & 0x80) == 0x80) { /* Long length format */
+ tptr++;
+ nn--;
+ if (nn < length) goto more;
+ switch (length) {
+ case 0: plen = 0; break;
+ case 1: plen = get_int8(tptr); tptr += 1; break;
+ case 2: plen = get_int16(tptr); tptr += 2; break;
+ case 3: plen = get_int24(tptr); tptr += 3; break;
+ case 4: plen = get_int32(tptr); tptr += 4; break;
+ default: goto error; /* error */
+ }
+ }
+ else {
+ tptr++;
+ plen = length;
+ }
+ hlen = (tptr-ptr);
+ goto remain;
+ }
+
+ case TCP_PB_CDR: {
+ const struct cdr_head* hp;
+ hlen = sizeof(struct cdr_head);
+ if (n < hlen) goto more;
+ hp = (struct cdr_head*) ptr;
+ if (sys_memcmp(hp->magic, CDR_MAGIC, 4) != 0)
+ goto error;
+ if (hp->flags & 0x01) /* Byte ordering flag */
+ plen = get_little_int32(hp->message_size);
+ else
+ plen = get_int32(hp->message_size);
+ goto remain;
+ }
+
+ case TCP_PB_FCGI: {
+ const struct fcgi_head* hp;
+ hlen = sizeof(struct fcgi_head);
+ if (n < hlen) goto more;
+ hp = (struct fcgi_head*) ptr;
+ if (hp->version != FCGI_VERSION_1)
+ goto error;
+ plen = ((hp->contentLengthB1 << 8) | hp->contentLengthB0)
+ + hp->paddingLength;
+ goto remain;
+ }
+ case TCP_PB_HTTPH:
+ case TCP_PB_HTTPH_BIN:
+ *statep = !0;
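+        /* fall through */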
+ case TCP_PB_HTTP:
+ case TCP_PB_HTTP_BIN:
+ /* TCP_PB_HTTP: data \r\n(SP data\r\n)* */
+ plen = n;
+ if (((plen == 1) && NL(ptr)) || ((plen == 2) && CRNL(ptr)))
+ goto done;
+ else {
+ const char* ptr1 = ptr;
+ int len = plen;
+
+ while (1) {
+ const char* ptr2 = memchr(ptr1, '\n', len);
+
+ if (ptr2 == NULL) {
+ if (n >= trunc_len && trunc_len!=0) { /* buffer full */
+ plen = trunc_len;
+ goto done;
+ }
+ goto more;
+ }
+ else {
+ plen = (ptr2 - ptr) + 1;
+
+ if (*statep == 0)
+ goto done;
+
+ if (plen < n) {
+ if (SP(ptr2+1) && plen>2) {
+ /* header field value continue on next line */
+ ptr1 = ptr2+1;
+ len = n - plen;
+ }
+ else
+ goto done;
+ }
+ else
+ goto more;
+ }
+ }
+ }
+ case TCP_PB_TPKT: {
+ const struct tpkt_head* hp;
+ hlen = sizeof(struct tpkt_head);
+ if (n < hlen)
+ goto more;
+ hp = (struct tpkt_head*) ptr;
+        if (hp->vrsn == TPKT_VRSN) {
+            /* plen is unsigned; reject a length smaller than the
+               header before subtracting */
+            if (get_int16(hp->packet_length) < hlen)
+                goto error;
+            plen = get_int16(hp->packet_length) - hlen;
+        }
+ else
+ goto error;
+ goto remain;
+ }
+
+ case TCP_PB_SSL_TLS:
+ hlen = 5;
+ if (n < hlen) goto more;
+ if ((ptr[0] & 0x80) && ptr[2] == 1) {
+ /* Ssl-v2 Client hello <<1:1, Len:15, 1:8, Version:16>> */
+ plen = (get_int16(&ptr[0]) & 0x7fff) - 3;
+ }
+ else {
+ /* <<ContentType:8, Version:16, Length:16>> */
+ plen = get_int16(&ptr[3]);
+ }
+ goto remain;
+
+ default:
+ DEBUGF((" => case error\r\n"));
+ return -1;
+ }
+
+more:
+ return 0;
+
+remain:
+ {
+ int tlen = hlen + plen;
+ if ((max_plen != 0 && plen > max_plen)
+ || tlen < (int)hlen) { /* wrap-around protection */
+ return -1;
+ }
+ return tlen;
+ }
+
+done:
+ return plen;
+
+error:
+ return -1;
+}
+
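+/*
+ * Illustrative sketch (not part of the original source): how a caller
+ * might slice complete packets out of a receive buffer with
+ * packet_get_length(). The function name and the TCP_PB_4 framing are
+ * assumptions made for the example; it is compiled out by default.
+ */
+#ifdef PACKET_PARSER_EXAMPLE
+static int example_split_packets(const char* buf, unsigned n)
+{
+    int state = 0;
+    unsigned offset = 0;
+    while (offset < n) {
+        int plen = packet_get_length(TCP_PB_4, buf + offset, n - offset,
+                                     0 /* no max length */,
+                                     0 /* no line truncation */,
+                                     &state);
+        if (plen < 0)
+            return -1;              /* invalid format */
+        if (plen == 0)
+            break;                  /* need more data */
+        /* buf[offset .. offset+plen-1] is one complete packet,
+         * header included */
+        offset += plen;
+    }
+    return (int) offset;            /* bytes consumed so far */
+}
+#endif
+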
+
+static http_atom_t* http_hash_lookup(const char* name, int len,
+ unsigned long h,
+ http_atom_t** hash, int hsize)
+{
+ int ix = h % hsize;
+ http_atom_t* ap = hash[ix];
+
+ while (ap != NULL) {
+ if ((ap->h == h) && (ap->len == len) &&
+ (strncmp(ap->name, name, len) == 0))
+ return ap;
+ ap = ap->next;
+ }
+ return NULL;
+}
+
+static void
+http_parse_absoluteURI(PacketHttpURI* uri, const char* uri_ptr, int uri_len)
+{
+ const char* p;
+
+ if ((p = memchr(uri_ptr, '/', uri_len)) == NULL) {
+ /* host [":" port] */
+ uri->s2_ptr = "/";
+ uri->s2_len = 1;
+ }
+ else {
+ int n = (p - uri_ptr);
+ uri->s2_ptr = p;
+ uri->s2_len = uri_len - n;
+ uri_len = n;
+ }
+
+ uri->s1_ptr = uri_ptr;
+ uri->port = 0; /* undefined */
+ /* host[:port] */
+ if ((p = memchr(uri_ptr, ':', uri_len)) == NULL) {
+ uri->s1_len = uri_len;
+ }
+ else {
+ int n = (p - uri_ptr);
+ int port = 0;
+ uri->s1_len = n;
+ n = uri_len - (n+1);
+ p++;
+ while(n && isdigit((int) *p)) {
+ port = port*10 + (*p - '0');
+ n--;
+ p++;
+ }
+ if (n==0 && port!=0)
+ uri->port = port;
+ }
+}
+
+/*
+** Handle URI syntax:
+**
+** Request-URI = "*" | absoluteURI | abs_path
+** absoluteURI = scheme ":" *( uchar | reserved )
+** net_path = "//" net_loc [ abs_path ]
+** abs_path = "/" rel_path
+** rel_path = [ path ] [ ";" params ] [ "?" query ]
+** path = fsegment *( "/" segment )
+** fsegment = 1*pchar
+** segment = *pchar
+** params = param *( ";" param )
+** param = *( pchar | "/" )
+** query = *( uchar | reserved )
+**
+** http_URL = "http:" "//" host [ ":" port ] [ abs_path ]
+**
+** host = <A legal Internet host domain name
+** or IP address (in dotted-decimal form),
+** as defined by Section 2.1 of RFC 1123>
+** port = *DIGIT
+**
+** {absoluteURI, <scheme>, <host>, <port>, <path+params+query>}
+** when <scheme> = http | https
+** {scheme, <scheme>, <chars>}
+** when <scheme> is something other than http or https
+** {abs_path, <path>}
+**
+** <string> (unknown form)
+**
+*/
+static void http_parse_uri(PacketHttpURI* uri, const char* uri_ptr, int uri_len)
+{
+ if ((uri_len == 1) && (uri_ptr[0] == '*'))
+ uri->type = URI_STAR;
+ else if ((uri_len <= 1) || (uri_ptr[0] == '/')) {
+ uri->type = URI_ABS_PATH;
+ uri->s1_ptr = uri_ptr;
+ uri->s1_len = uri_len;
+ }
+ else if ((uri_len>=7) && (STRNCASECMP(uri_ptr, "http://", 7) == 0)) {
+ uri_len -= 7;
+ uri_ptr += 7;
+ uri->type = URI_HTTP;
+ http_parse_absoluteURI(uri, uri_ptr, uri_len);
+ }
+ else if ((uri_len>=8) && (STRNCASECMP(uri_ptr, "https://", 8) == 0)) {
+ uri_len -= 8;
+ uri_ptr += 8;
+ uri->type = URI_HTTPS;
+ http_parse_absoluteURI(uri, uri_ptr, uri_len);
+ }
+ else {
+ char* ptr;
+ if ((ptr = memchr(uri_ptr, ':', uri_len)) == NULL) {
+ uri->type = URI_STRING;
+ uri->s1_ptr = uri_ptr;
+ uri->s1_len = uri_len;
+ }
+ else {
+ int slen = ptr - uri_ptr;
+ uri->type = URI_SCHEME;
+ uri->s1_ptr = uri_ptr;
+ uri->s1_len = slen;
+ uri->s2_ptr = uri_ptr + (slen+1);
+ uri->s2_len = uri_len - (slen+1);
+ }
+ }
+}
+
+/*
+** parse http message:
+** http_eoh - end of headers
+** {http_header, Key, Value} - Key = atom() | string()
+** {http_request, Method,Url,Version}
+** {http_response, Version, Status, Message}
+** {http_error, Error-Line}
+*/
+int packet_parse_http(const char* buf, int len, int* statep,
+ PacketCallbacks* pcb, void* arg)
+{
+ const char* ptr = buf;
+ const char* p0;
+ int n = len;
+
+ /* remove trailing CRNL (accept NL as well) */
+ if ((n >= 2) && (buf[n-2] == '\r'))
+ n -= 2;
+ else if ((n >= 1) && (buf[n-1] == '\n'))
+ n -= 1;
+
+ if (*statep == 0) {
+ /* start-line = Request-Line | Status-Line */
+
+ if (n >= 5 && (strncmp(buf, "HTTP/", 5) == 0)) {
+ int major = 0;
+ int minor = 0;
+ int status = 0;
+ /* Status-Line = HTTP-Version SP
+ * Status-Code SP Reason-Phrase
+ * CRNL
+ * HTTP-Version = "HTTP" "/" 1*DIGIT "." 1*DIGIT
+ */
+ ptr += 5;
+ n -= 5;
+ p0 = ptr;
+ while (n && isdigit((int) *ptr)) {
+ major = 10*major + (*ptr - '0');
+ ptr++;
+ n--;
+ }
+ if (ptr==p0 || !n || (*ptr != '.'))
+ return -1;
+ ptr++;
+ n--;
+ p0 = ptr;
+ while (n && isdigit((int) *ptr)) {
+ minor = 10*minor + (*ptr - '0');
+ ptr++;
+ n--;
+ }
+ if (ptr==p0) return -1;
+ p0 = ptr;
+ while (n && SP(ptr)) {
+ ptr++; n--;
+ }
+ if (ptr==p0) return -1;
+
+ while (n && isdigit((int) *ptr)) {
+ status = 10*status + (*ptr - '0');
+ ptr++;
+ n--;
+ }
+ p0 = ptr;
+ while (n && SP(ptr)) {
+ ptr++; n--;
+ }
+ if (ptr==p0) return -1;
+
+ /* NOTE: the syntax allows empty reason phrases */
+ (*statep) = !0;
+
+ return pcb->http_response(arg, major, minor, status,
+ ptr, n);
+ }
+ else {
+ /* Request-Line = Method SP Request-URI SP HTTP-Version CRLF */
+ http_atom_t* meth;
+ const char* meth_ptr = buf;
+ int meth_len;
+ PacketHttpURI uri;
+ const char* uri_ptr;
+ int uri_len;
+ int major = 0;
+ int minor = 0;
+ unsigned long h = 0;
+
+ while (n && !is_tspecial((unsigned char)*ptr)) {
+ hash_update(h, (int)*ptr);
+ ptr++;
+ n--;
+ }
+ meth_len = ptr - meth_ptr;
+ if (n == 0 || meth_len == 0 || !SP(ptr)) return -1;
+
+ meth = http_hash_lookup(meth_ptr, meth_len, h,
+ http_meth_hash, HTTP_METH_HASH_SIZE);
+
+ while (n && SP(ptr)) {
+ ptr++; n--;
+ }
+ uri_ptr = ptr;
+ while (n && !SP(ptr)) {
+ ptr++; n--;
+ }
+ if ((uri_len = (ptr - uri_ptr)) == 0)
+ return -1;
+ while (n && SP(ptr)) {
+ ptr++; n--;
+ }
+ if (n == 0) {
+ (*statep) = !0;
+ http_parse_uri(&uri, uri_ptr, uri_len);
+ return pcb->http_request(arg, meth, meth_ptr, meth_len,
+ &uri, 0, 9);
+ }
+ if (n < 8)
+ return -1;
+ if (strncmp(ptr, "HTTP/", 5) != 0)
+ return -1;
+ ptr += 5;
+ n -= 5;
+
+ p0 = ptr;
+ while (n && isdigit((int) *ptr)) {
+ major = 10*major + (*ptr - '0');
+ ptr++;
+ n--;
+ }
+ if (ptr==p0 || !n || (*ptr != '.'))
+ return -1;
+ ptr++;
+ n--;
+ p0 = ptr;
+ while (n && isdigit((int) *ptr)) {
+ minor = 10*minor + (*ptr - '0');
+ ptr++;
+ n--;
+ }
+ if (ptr==p0) return -1;
+
+ (*statep) = !0;
+ http_parse_uri(&uri, uri_ptr, uri_len);
+ return pcb->http_request(arg, meth, meth_ptr, meth_len,
+ &uri, major, minor);
+ }
+ }
+ else {
+ int up = 1; /* make next char uppercase */
+ http_atom_t* name;
+ char name_buf[HTTP_MAX_NAME_LEN];
+ const char* name_ptr = name_buf;
+ int name_len;
+ unsigned long h;
+
+ if (n == 0) {
+ /* end of headers */
+ *statep = 0; /* reset state (for next request) */
+ return pcb->http_eoh(arg);
+ }
+ h = 0;
+ name_len = 0;
+ while (!is_tspecial((unsigned char)*ptr)) {
+ if (name_len < HTTP_MAX_NAME_LEN) {
+ int c = *ptr;
+ if (up) {
+ if (islower(c)) {
+ c = toupper(c);
+ }
+ up = 0;
+ }
+ else {
+ if (isupper(c))
+ c = tolower(c);
+ else if (c == '-')
+ up = 1;
+ }
+ name_buf[name_len] = c;
+ hash_update(h, c);
+ }
+ name_len++;
+ ptr++;
+ if (--n == 0) return -1;
+ }
+ while (n && SP(ptr)) { /* Skip white space before ':' */
+ ptr++; n--;
+ }
+ if (*ptr != ':') {
+ return -1;
+ }
+ if (name_len <= HTTP_MAX_NAME_LEN) {
+ name = http_hash_lookup(name_buf, name_len, h,
+ http_hdr_hash, HTTP_HDR_HASH_SIZE);
+ }
+ else {
+ /* Is it ok to return original name without case adjustments? */
+ name_ptr = buf;
+ name = NULL;
+ }
+ ptr++;
+ n--;
+ /* Skip white space after ':' */
+ while (n && SP(ptr)) {
+ ptr++; n--;
+ }
+ return pcb->http_header(arg, name, name_ptr, name_len,
+ ptr, n);
+ }
+ return -1;
+}
+
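+/*
+ * Illustrative sketch (not part of the original source): a minimal
+ * PacketCallbacks table driving packet_parse_http() for one request
+ * line. All "ex_" names are assumptions made for the example; the
+ * block is compiled out by default.
+ */
+#ifdef PACKET_PARSER_EXAMPLE
+#include <stdio.h>
+
+static int ex_response(void* arg, int major, int minor, int status,
+                       const char* phrase, int phrase_len)
+{
+    (void)arg;
+    printf("response HTTP/%d.%d %d %.*s\n",
+           major, minor, status, phrase_len, phrase);
+    return 0;
+}
+static int ex_request(void* arg, const http_atom_t* meth,
+                      const char* meth_ptr, int meth_len,
+                      const PacketHttpURI* uri, int major, int minor)
+{
+    (void)arg; (void)meth; (void)uri;
+    printf("request %.*s HTTP/%d.%d\n", meth_len, meth_ptr, major, minor);
+    return 0;
+}
+static int ex_eoh(void* arg) { (void)arg; return 0; }
+static int ex_header(void* arg, const http_atom_t* name,
+                     const char* name_ptr, int name_len,
+                     const char* value_ptr, int value_len)
+{
+    (void)arg; (void)name;
+    printf("header %.*s: %.*s\n", name_len, name_ptr, value_len, value_ptr);
+    return 0;
+}
+
+static int example_parse_request_line(void)
+{
+    static const char line[] = "GET /index.html HTTP/1.1\r\n";
+    PacketCallbacks pcb = {0};
+    int state = 0;                  /* 0 => expecting a start-line */
+
+    pcb.http_response = ex_response;
+    pcb.http_request  = ex_request;
+    pcb.http_eoh      = ex_eoh;
+    pcb.http_header   = ex_header;
+    return packet_parse_http(line, sizeof(line) - 1, &state, &pcb, NULL);
+}
+#endif
+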
+int packet_parse_ssl(const char* buf, int len,
+ PacketCallbacks* pcb, void* arg)
+{
+ /* Check for ssl-v2 client hello */
+ if ((buf[0] & 0x80) && buf[2] == 1) {
+ unsigned major = (unsigned char) buf[3];
+ unsigned minor = (unsigned char) buf[4];
+ char prefix[4];
+ /* <<1:8,Length:24,Data/binary>> */
+ prefix[0] = 1;
+ put_int24(&prefix[1],len-3);
+ return pcb->ssl_tls(arg, 22, major, minor, buf+3, len-3, prefix, sizeof(prefix));
+ }
+ else {
+ /* ContentType (1 byte), ProtocolVersion (2 bytes), Length (2 bytes big-endian) */
+ unsigned type = (unsigned char) buf[0];
+ unsigned major = (unsigned char) buf[1];
+ unsigned minor = (unsigned char) buf[2];
+ return pcb->ssl_tls(arg, type, major, minor, buf+5, len-5, NULL, 0);
+ }
+}
+
diff --git a/erts/emulator/beam/packet_parser.h b/erts/emulator/beam/packet_parser.h
new file mode 100644
index 0000000000..1c3a9aa3da
--- /dev/null
+++ b/erts/emulator/beam/packet_parser.h
@@ -0,0 +1,181 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2008-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/* A protocol decoder. Simple packet length extraction as well as packet
+ * body parsing with protocol specific callback interfaces (http and ssl).
+ */
+#ifndef __PACKET_PARSER_H__
+#define __PACKET_PARSER_H__
+
+#include <erl_driver.h>
+#include "sys.h"
+
+
+/* INET_LOPT_PACKET options */
+enum PacketParseType {
+ TCP_PB_RAW = 0,
+ TCP_PB_1 = 1,
+ TCP_PB_2 = 2,
+ TCP_PB_4 = 3,
+ TCP_PB_ASN1 = 4,
+ TCP_PB_RM = 5,
+ TCP_PB_CDR = 6,
+ TCP_PB_FCGI = 7,
+ TCP_PB_LINE_LF = 8,
+ TCP_PB_TPKT = 9,
+ TCP_PB_HTTP = 10,
+ TCP_PB_HTTPH = 11,
+ TCP_PB_SSL_TLS = 12,
+ TCP_PB_HTTP_BIN = 13,
+ TCP_PB_HTTPH_BIN = 14
+};
+
+typedef struct http_atom {
+ struct http_atom* next; /* next in bucket */
+ unsigned long h; /* stored hash value */
+ const char* name;
+ int len;
+ int index; /* index in table + bit-pos */
+ ErlDrvTermData atom; /* erlang atom rep */
+} http_atom_t;
+
+typedef struct {
+ enum {
+ URI_STAR, /* '*' */
+ URI_STRING, /* "string(s1)" */
+ URI_ABS_PATH,/* {abs_path, "path(s1)"} */
+ URI_SCHEME, /* {scheme, "scheme(s1)", "string(s2)"} */
+ URI_HTTP, /* {absoluteURI, http, "host(s1)", Port, "path(s2)"} */
+ URI_HTTPS /* {absoluteURI, https, ... */
+ } type;
+ const char* s1_ptr;
+ int s1_len;
+ const char* s2_ptr;
+ int s2_len;
+ int port; /* 0=undefined */
+} PacketHttpURI;
+
+typedef int HttpResponseMessageFn(void* arg, int major, int minor, int status,
+ const char* phrase, int phrase_len);
+typedef int HttpRequestMessageFn(void* arg, const http_atom_t* meth, const char* meth_ptr,
+ int meth_len, const PacketHttpURI*, int major, int minor);
+typedef int HttpEohMessageFn(void *arg);
+typedef int HttpHeaderMessageFn(void* arg, const http_atom_t* name, const char* name_ptr,
+ int name_len, const char* value_ptr, int value_len);
+typedef int HttpErrorMessageFn(void* arg, const char* buf, int len);
+typedef int SslTlsFn(void* arg, unsigned type, unsigned major, unsigned minor,
+ const char* data, int len, const char* prefix, int plen);
+
+typedef struct {
+ HttpResponseMessageFn* http_response;
+ HttpRequestMessageFn* http_request;
+ HttpEohMessageFn* http_eoh;
+ HttpHeaderMessageFn* http_header;
+ HttpErrorMessageFn* http_error;
+ SslTlsFn* ssl_tls;
+} PacketCallbacks;
+
+
+/* Called once at emulator start
+ */
+void packet_parser_init(void);
+
+/* Returns > 0 Total packet length.
+ * = 0 Length unknown, need more data.
+ * < 0 Error, invalid format.
+ */
+int packet_get_length(enum PacketParseType htype,
+ const char* ptr, unsigned n, /* Bytes read so far */
+ unsigned max_plen, /* Packet max length, 0=no limit */
+ unsigned trunc_len, /* Truncate (lines) if longer, 0=no limit */
+ int* statep); /* Internal protocol state */
+
+ERTS_GLB_INLINE
+void packet_get_body(enum PacketParseType htype,
+ const char** bufp, /* In: Packet header, Out: Packet body */
+ int* lenp); /* In: Packet length, Out: Body length */
+
+/* Returns 1 = Packet parsed and handled by callbacks.
+** 0 = No parsing support for this packet type
+** -1 = Error
+*/
+ERTS_GLB_INLINE
+int packet_parse(enum PacketParseType htype,
+ const char* buf, int len, /* Total packet */
+ int* statep, PacketCallbacks* pcb, void* arg);
+
+
+
+/* Internals for the inlines below: */
+
+#define FCGI_VERSION_1 1
+struct fcgi_head {
+ unsigned char version;
+ unsigned char type;
+ unsigned char requestIdB1;
+ unsigned char requestIdB0;
+ unsigned char contentLengthB1;
+ unsigned char contentLengthB0;
+ unsigned char paddingLength;
+ unsigned char reserved;
+ /* char data[] */
+ /* char padding[paddingLength] */
+};
+int packet_parse_http(const char*, int, int*, PacketCallbacks*, void*);
+int packet_parse_ssl(const char*, int, PacketCallbacks*, void*);
+
+
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+ERTS_GLB_INLINE
+void packet_get_body(enum PacketParseType htype, const char** bufp, int* lenp)
+{
+ switch (htype) {
+ case TCP_PB_1: *bufp += 1; *lenp -= 1; break;
+ case TCP_PB_2: *bufp += 2; *lenp -= 2; break;
+ case TCP_PB_4: *bufp += 4; *lenp -= 4; break;
+ case TCP_PB_FCGI:
+ *lenp -= ((struct fcgi_head*)*bufp)->paddingLength;
+ break;
+ default:
+ ;/* Return other packets "as is" */
+ }
+}
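+
+/* Example (illustrative, not original text): with TCP_PB_2 and
+ * buf = "\x00\x03abc", len = 5, packet_get_body() advances buf to
+ * "abc" and sets len to 3.
+ */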
+
+ERTS_GLB_INLINE
+int packet_parse(enum PacketParseType htype, const char* buf, int len,
+ int* statep, PacketCallbacks* pcb, void* arg)
+{
+ switch (htype) {
+ case TCP_PB_HTTP:
+ case TCP_PB_HTTPH:
+ case TCP_PB_HTTP_BIN:
+ case TCP_PB_HTTPH_BIN:
+ if (packet_parse_http(buf, len, statep, pcb, arg) < 0)
+ pcb->http_error(arg, buf, len);
+ return 1;
+ case TCP_PB_SSL_TLS:
+ return packet_parse_ssl(buf, len, pcb, arg);
+ default:;
+ }
+ return 0;
+}
+#endif /* ERTS_GLB_INLINE_INCL_FUNC_DEF */
+
+#endif /* !__PACKET_PARSER_H__ */
+
diff --git a/erts/emulator/beam/register.c b/erts/emulator/beam/register.c
new file mode 100644
index 0000000000..7ba097382a
--- /dev/null
+++ b/erts/emulator/beam/register.c
@@ -0,0 +1,655 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 1996-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Manage registered processes.
+ */
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+
+#include "sys.h"
+#include "erl_vm.h"
+#include "global.h"
+#include "hash.h"
+#include "atom.h"
+#include "register.h"
+
+static Hash process_reg;
+
+#define PREG_HASH_SIZE 10
+
+#define REG_HASH(term) ((HashValue) atom_val(term))
+
+static erts_smp_rwmtx_t regtab_rwmtx;
+
+#define reg_lock_init() erts_smp_rwmtx_init(&regtab_rwmtx, \
+ "reg_tab")
+#define reg_try_read_lock() erts_smp_rwmtx_tryrlock(&regtab_rwmtx)
+#define reg_try_write_lock() erts_smp_rwmtx_tryrwlock(&regtab_rwmtx)
+#define reg_read_lock() erts_smp_rwmtx_rlock(&regtab_rwmtx)
+#define reg_write_lock() erts_smp_rwmtx_rwlock(&regtab_rwmtx)
+#define reg_read_unlock() erts_smp_rwmtx_runlock(&regtab_rwmtx)
+#define reg_write_unlock() erts_smp_rwmtx_rwunlock(&regtab_rwmtx)
+
+#ifdef ERTS_SMP
+static ERTS_INLINE void
+reg_safe_read_lock(Process *c_p, ErtsProcLocks *c_p_locks)
+{
+ if (*c_p_locks) {
+ ASSERT(c_p);
+ ASSERT(c_p_locks);
+ ASSERT(*c_p_locks);
+
+ if (reg_try_read_lock() != EBUSY) {
+#ifdef ERTS_ENABLE_LOCK_CHECK
+ erts_proc_lc_might_unlock(c_p, *c_p_locks);
+#endif
+ return;
+ }
+
+ /* Release process locks in order to avoid deadlock */
+ erts_smp_proc_unlock(c_p, *c_p_locks);
+ *c_p_locks = 0;
+ }
+
+ reg_read_lock();
+}
+
+static ERTS_INLINE void
+reg_safe_write_lock(Process *c_p, ErtsProcLocks *c_p_locks)
+{
+ if (*c_p_locks) {
+ ASSERT(c_p);
+ ASSERT(c_p_locks);
+ ASSERT(*c_p_locks);
+
+ if (reg_try_write_lock() != EBUSY) {
+#ifdef ERTS_ENABLE_LOCK_CHECK
+ erts_proc_lc_might_unlock(c_p, *c_p_locks);
+#endif
+ return;
+ }
+
+ /* Release process locks in order to avoid deadlock */
+ erts_smp_proc_unlock(c_p, *c_p_locks);
+ *c_p_locks = 0;
+ }
+
+ reg_write_lock();
+}
+
+static ERTS_INLINE int
+is_proc_alive(Process *p)
+{
+ int res;
+ erts_pix_lock_t *pixlck = ERTS_PID2PIXLOCK(p->id);
+ erts_pix_lock(pixlck);
+ res = !p->is_exiting;
+ erts_pix_unlock(pixlck);
+ return res;
+}
+
+#endif
+
+void register_info(int to, void *to_arg)
+{
+ int lock = !ERTS_IS_CRASH_DUMPING;
+ if (lock)
+ reg_read_lock();
+ hash_info(to, to_arg, &process_reg);
+ if (lock)
+ reg_read_unlock();
+}
+
+static HashValue reg_hash(RegProc* obj)
+{
+ return REG_HASH(obj->name);
+}
+
+static int reg_cmp(RegProc *tmpl, RegProc *obj) {
+ return tmpl->name != obj->name;
+}
+
+static RegProc* reg_alloc(RegProc *tmpl)
+{
+ RegProc* obj = (RegProc*) erts_alloc(ERTS_ALC_T_REG_PROC, sizeof(RegProc));
+ if (!obj) {
+ erl_exit(1, "Can't allocate %d bytes of memory\n", sizeof(RegProc));
+ }
+ obj->name = tmpl->name;
+ obj->p = tmpl->p;
+ obj->pt = tmpl->pt;
+ return obj;
+}
+
+static void reg_free(RegProc *obj)
+{
+ erts_free(ERTS_ALC_T_REG_PROC, (void*) obj);
+}
+
+void init_register_table(void)
+{
+ HashFunctions f;
+
+ reg_lock_init();
+
+ f.hash = (H_FUN) reg_hash;
+ f.cmp = (HCMP_FUN) reg_cmp;
+ f.alloc = (HALLOC_FUN) reg_alloc;
+ f.free = (HFREE_FUN) reg_free;
+
+ hash_init(ERTS_ALC_T_REG_TABLE, &process_reg, "process_reg",
+ PREG_HASH_SIZE, f);
+}
+
+/*
+ * Register a process or port (can't be registered twice).
+ * Returns 0 if the name, process, or port is already registered.
+ *
+ * When smp support is enabled:
+ * * Assumes that main lock is locked (and only main lock)
+ * on c_p.
+ *
+ */
+int erts_register_name(Process *c_p, Eterm name, Eterm id)
+{
+ int res = 0;
+ Process *proc = NULL;
+ Port *port = NULL;
+ RegProc r, *rp;
+ ERTS_SMP_CHK_HAVE_ONLY_MAIN_PROC_LOCK(c_p);
+
+ if (is_not_atom(name) || name == am_undefined)
+ return res;
+
+ if (c_p->id == id) /* A very common case I think... */
+ proc = c_p;
+ else {
+ if (is_not_internal_pid(id) && is_not_internal_port(id))
+ return res;
+ erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN);
+ if (is_internal_port(id)) {
+ port = erts_id2port(id, NULL, 0);
+ if (!port)
+ goto done;
+ }
+ }
+
+#ifdef ERTS_SMP
+ {
+ ErtsProcLocks proc_locks = proc ? ERTS_PROC_LOCK_MAIN : 0;
+ reg_safe_write_lock(proc, &proc_locks);
+
+ if (proc && !proc_locks)
+ erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MAIN);
+ }
+#endif
+
+ if (is_internal_pid(id)) {
+ if (!proc)
+ proc = erts_pid2proc(NULL, 0, id, ERTS_PROC_LOCK_MAIN);
+ r.p = proc;
+ if (!proc)
+ goto done;
+ if (proc->reg)
+ goto done;
+ r.pt = NULL;
+ }
+ else {
+ ASSERT(!INVALID_PORT(port, id));
+ ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(port));
+ r.pt = port;
+ if (r.pt->reg)
+ goto done;
+ r.p = NULL;
+ }
+
+ r.name = name;
+
+ rp = (RegProc*) hash_put(&process_reg, (void*) &r);
+ if (proc && rp->p == proc) {
+ if (IS_TRACED_FL(proc, F_TRACE_PROCS)) {
+ trace_proc(c_p, proc, am_register, name);
+ }
+ proc->reg = rp;
+ }
+ else if (port && rp->pt == port) {
+ if (IS_TRACED_FL(port, F_TRACE_PORTS)) {
+ trace_port(port, am_register, name);
+ }
+ port->reg = rp;
+ }
+
+ if ((rp->p && rp->p->id == id) || (rp->pt && rp->pt->id == id)) {
+ res = 1;
+ }
+
+ done:
+ reg_write_unlock();
+ if (port)
+ erts_smp_port_unlock(port);
+ if (c_p != proc) {
+ if (proc)
+ erts_smp_proc_unlock(proc, ERTS_PROC_LOCK_MAIN);
+ erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MAIN);
+ }
+ return res;
+}
+
+/*
+ *
+ * When smp support is enabled:
+ * * Assumes that main lock is locked (and only main lock)
+ * on c_p.
+ *
+ * * am_undefined is returned if c_p became exiting.
+ */
+
+Eterm
+erts_whereis_name_to_id(Process *c_p, Eterm name)
+{
+ Eterm res = am_undefined;
+ HashValue hval;
+ int ix;
+ HashBucket* b;
+#ifdef ERTS_SMP
+ ErtsProcLocks c_p_locks = c_p ? ERTS_PROC_LOCK_MAIN : 0;
+
+ ERTS_SMP_CHK_HAVE_ONLY_MAIN_PROC_LOCK(c_p);
+ reg_safe_read_lock(c_p, &c_p_locks);
+ if (c_p && !c_p_locks)
+ erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MAIN);
+#endif
+
+ hval = REG_HASH(name);
+ ix = hval % process_reg.size;
+ b = process_reg.bucket[ix];
+
+ /*
+ * Note: We have inlined the code from hash.c for speed.
+ */
+
+ while (b) {
+ RegProc* rp = (RegProc *) b;
+ if (rp->name == name) {
+ /*
+ * SMP NOTE: No need to lock registered entity since it cannot
+ * be removed without acquiring write reg lock and id on entity
+ * is read only.
+ */
+ if (rp->p)
+ res = rp->p->id;
+ else if (rp->pt)
+ res = rp->pt->id;
+ break;
+ }
+ b = b->next;
+ }
+
+ reg_read_unlock();
+
+ ASSERT(is_internal_pid(res) || is_internal_port(res) || res==am_undefined);
+
+ return res;
+}
+
+
+void
+erts_whereis_name(Process *c_p,
+ ErtsProcLocks c_p_locks,
+ Eterm name,
+ Process** proc,
+ ErtsProcLocks need_locks,
+ int flags,
+ Port** port)
+{
+ RegProc* rp = NULL;
+ HashValue hval;
+ int ix;
+ HashBucket* b;
+#ifdef ERTS_SMP
+ ErtsProcLocks current_c_p_locks;
+ Port *pending_port = NULL;
+
+ if (!c_p)
+ c_p_locks = 0;
+ current_c_p_locks = c_p_locks;
+
+ restart:
+
+ reg_safe_read_lock(c_p, &current_c_p_locks);
+
+ /* Locked locks:
+ * - port lock on pending_port if pending_port != NULL
+ * - read reg lock
+ * - current_c_p_locks (either c_p_locks or 0) on c_p
+ */
+#endif
+
+ hval = REG_HASH(name);
+ ix = hval % process_reg.size;
+ b = process_reg.bucket[ix];
+
+ /*
+ * Note: We have inlined the code from hash.c for speed.
+ */
+
+ while (b) {
+ if (((RegProc *) b)->name == name) {
+ rp = (RegProc *) b;
+ break;
+ }
+ b = b->next;
+ }
+
+ if (proc) {
+ if (!rp)
+ *proc = NULL;
+ else {
+#ifdef ERTS_SMP
+ if (!rp->p)
+ *proc = NULL;
+ else {
+ if (need_locks) {
+ erts_proc_safelock(c_p,
+ current_c_p_locks,
+ c_p_locks,
+ rp->p,
+ 0,
+ need_locks);
+ current_c_p_locks = c_p_locks;
+ }
+ if ((flags & ERTS_P2P_FLG_ALLOW_OTHER_X) || is_proc_alive(rp->p))
+ *proc = rp->p;
+ else {
+ if (need_locks)
+ erts_smp_proc_unlock(rp->p, need_locks);
+ *proc = NULL;
+ }
+ if (*proc && (flags & ERTS_P2P_FLG_SMP_INC_REFC))
+ erts_smp_proc_inc_refc(rp->p);
+ }
+#else
+ if (rp->p
+ && ((flags & ERTS_P2P_FLG_ALLOW_OTHER_X)
+ || rp->p->status != P_EXITING))
+ *proc = rp->p;
+ else
+ *proc = NULL;
+#endif
+ }
+ }
+
+ if (port) {
+ if (!rp || !rp->pt)
+ *port = NULL;
+ else {
+#ifdef ERTS_SMP
+ if (pending_port == rp->pt)
+ pending_port = NULL;
+ else {
+ if (pending_port) {
+ /* Ahh! Registered port changed while reg lock
+ was unlocked... */
+ erts_smp_port_unlock(pending_port);
+ pending_port = NULL;
+ }
+
+ if (erts_smp_port_trylock(rp->pt) == EBUSY) {
+ Eterm id = rp->pt->id; /* id read only... */
+ /* Unlock all locks, acquire port lock, and restart... */
+ if (current_c_p_locks) {
+ erts_smp_proc_unlock(c_p, current_c_p_locks);
+ current_c_p_locks = 0;
+ }
+ reg_read_unlock();
+ pending_port = erts_id2port(id, NULL, 0);
+ goto restart;
+ }
+ }
+#endif
+ *port = rp->pt;
+ ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(*port));
+ }
+ }
+
+#ifdef ERTS_SMP
+ if (c_p && !current_c_p_locks)
+ erts_smp_proc_lock(c_p, c_p_locks);
+ if (pending_port)
+ erts_smp_port_unlock(pending_port);
+#endif
+
+ reg_read_unlock();
+}
+
+Process *
+erts_whereis_process(Process *c_p,
+ ErtsProcLocks c_p_locks,
+ Eterm name,
+ ErtsProcLocks need_locks,
+ int flags)
+{
+ Process *proc;
+ erts_whereis_name(c_p, c_p_locks, name, &proc, need_locks, flags, NULL);
+ return proc;
+}
+
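+/*
+ * Illustrative sketch (not part of the original source): a typical
+ * lookup from a BIF context where only the caller's main lock is held.
+ * The function name is an assumption; the block is compiled out.
+ */
+#ifdef REGISTER_EXAMPLE
+static Eterm example_whereis(Process* c_p, Eterm name)
+{
+    Eterm res = am_undefined;
+    Process* rp = erts_whereis_process(c_p, ERTS_PROC_LOCK_MAIN,
+                                       name, ERTS_PROC_LOCK_MAIN, 0);
+    if (rp) {
+        res = rp->id;               /* id is read-only, safe to copy */
+        if (rp != c_p)
+            erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_MAIN);
+    }
+    return res;
+}
+#endif
+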
+
+/*
+ * Unregister a name
+ * Return 0 if not registered
+ * Otherwise returns 1
+ *
+ */
+int erts_unregister_name(Process *c_p,
+ ErtsProcLocks c_p_locks,
+ Port *c_prt,
+ Eterm name)
+{
+ int res = 0;
+ RegProc r, *rp;
+ Port *port = c_prt;
+#ifdef ERTS_SMP
+ ErtsProcLocks current_c_p_locks;
+
+ /*
+ * SMP note: If 'c_prt != NULL' and 'c_prt->reg->name == name',
+ * we are *not* allowed to temporarily release the lock
+ * on c_prt.
+ */
+
+ if (!c_p)
+ c_p_locks = 0;
+ current_c_p_locks = c_p_locks;
+
+ restart:
+
+ reg_safe_write_lock(c_p, &current_c_p_locks);
+#endif
+
+ r.name = name;
+ if (is_non_value(name)) {
+ /* Unregister current process name */
+ ASSERT(c_p);
+ if (c_p->reg)
+ r.name = c_p->reg->name;
+ else {
+ /* Name got unregistered while main lock was released */
+ res = 0;
+ goto done;
+ }
+ }
+
+ if ((rp = (RegProc*) hash_get(&process_reg, (void*) &r)) != NULL) {
+ if (rp->pt) {
+#ifdef ERTS_SMP
+ if (port != rp->pt) {
+ if (port) {
+ ERTS_SMP_LC_ASSERT(port != c_prt);
+ erts_smp_port_unlock(port);
+ port = NULL;
+ }
+
+ if (erts_smp_port_trylock(rp->pt) == EBUSY) {
+ Eterm id = rp->pt->id; /* id read only... */
+ /* Unlock all locks, acquire port lock, and restart... */
+ if (current_c_p_locks) {
+ erts_smp_proc_unlock(c_p, current_c_p_locks);
+ current_c_p_locks = 0;
+ }
+ reg_write_unlock();
+ port = erts_id2port(id, NULL, 0);
+ goto restart;
+ }
+ port = rp->pt;
+ }
+#endif
+ ERTS_SMP_LC_ASSERT(rp->pt == port && erts_lc_is_port_locked(port));
+ rp->pt->reg = NULL;
+
+ if (IS_TRACED_FL(port, F_TRACE_PORTS)) {
+ trace_port(port, am_unregister, r.name);
+ }
+
+ } else if (rp->p) {
+ Process* p = rp->p;
+#ifdef ERTS_SMP
+ erts_proc_safelock(c_p,
+ current_c_p_locks,
+ c_p_locks,
+ rp->p,
+ 0,
+ ERTS_PROC_LOCK_MAIN);
+ current_c_p_locks = c_p_locks;
+#endif
+ p->reg = NULL;
+#ifdef ERTS_SMP
+ if (rp->p != c_p)
+ erts_smp_proc_unlock(rp->p, ERTS_PROC_LOCK_MAIN);
+#endif
+ if (IS_TRACED_FL(p, F_TRACE_PROCS)) {
+ trace_proc(c_p, p, am_unregister, r.name);
+ }
+ }
+ hash_erase(&process_reg, (void*) &r);
+ res = 1;
+ }
+
+ done:
+
+ reg_write_unlock();
+ if (c_prt != port) {
+ if (port)
+ erts_smp_port_unlock(port);
+ if (c_prt)
+ erts_smp_port_lock(c_prt);
+ }
+#ifdef ERTS_SMP
+ if (c_p && !current_c_p_locks)
+ erts_smp_proc_lock(c_p, c_p_locks);
+#endif
+ return res;
+}
+
+int process_reg_size(void)
+{
+ int size;
+ int lock = !ERTS_IS_CRASH_DUMPING;
+ if (lock)
+ reg_read_lock();
+ size = process_reg.size;
+ if (lock)
+ reg_read_unlock();
+ return size;
+}
+
+int process_reg_sz(void)
+{
+ int sz;
+ int lock = !ERTS_IS_CRASH_DUMPING;
+ if (lock)
+ reg_read_lock();
+ sz = hash_table_sz(&process_reg);
+ if (lock)
+ reg_read_unlock();
+ return sz;
+}
+
+/**********************************************************************/
+
+#include "bif.h"
+
+/* return a list of the registered processes */
+
+BIF_RETTYPE registered_0(BIF_ALIST_0)
+{
+ int i;
+ Eterm res;
+ Uint need;
+ Eterm* hp;
+ HashBucket **bucket;
+#ifdef ERTS_SMP
+ ErtsProcLocks proc_locks = ERTS_PROC_LOCK_MAIN;
+
+ ERTS_SMP_CHK_HAVE_ONLY_MAIN_PROC_LOCK(BIF_P);
+ reg_safe_read_lock(BIF_P, &proc_locks);
+ if (!proc_locks)
+ erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
+#endif
+
+ bucket = process_reg.bucket;
+
+ /* work out how much heap we need & maybe garb, by scanning through
+ the registered process table */
+ need = 0;
+ for (i = 0; i < process_reg.size; i++) {
+ HashBucket *b = bucket[i];
+ while (b != NULL) {
+ need += 2;
+ b = b->next;
+ }
+ }
+
+ if (need == 0) {
+ reg_read_unlock();
+ BIF_RET(NIL);
+ }
+
+ hp = HAlloc(BIF_P, need);
+
+ /* scan through again and make the list */
+ res = NIL;
+
+ for (i = 0; i < process_reg.size; i++) {
+ HashBucket *b = bucket[i];
+ while (b != NULL) {
+ RegProc *reg = (RegProc *) b;
+
+ res = CONS(hp, reg->name, res);
+ hp += 2;
+ b = b->next;
+ }
+ }
+
+ reg_read_unlock();
+
+ BIF_RET(res);
+}
diff --git a/erts/emulator/beam/register.h b/erts/emulator/beam/register.h
new file mode 100644
index 0000000000..97bab3ab71
--- /dev/null
+++ b/erts/emulator/beam/register.h
@@ -0,0 +1,66 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 1996-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+** Registered processes
+*/
+
+#ifndef __REGPROC_H__
+#define __REGPROC_H__
+
+#ifndef __SYS_H__
+#include "sys.h"
+#endif
+
+#ifndef __HASH_H__
+#include "hash.h"
+#endif
+
+#ifndef __PROCESS_H__
+#include "erl_process.h"
+#endif
+
+struct port;
+
+typedef struct reg_proc
+{
+ HashBucket bucket; /* MUST BE LOCATED AT TOP OF STRUCT!!! */
+    Process *p;               /* The process registered (only one of
+                                 this and 'pt' is non-NULL) */
+ struct port *pt; /* The port registered */
+ Eterm name; /* Atom name */
+} RegProc;
+
+int process_reg_size(void);
+int process_reg_sz(void);
+void init_register_table(void);
+void register_info(int, void *);
+int erts_register_name(Process *, Eterm, Eterm);
+Eterm erts_whereis_name_to_id(Process *, Eterm);
+void erts_whereis_name(Process *, ErtsProcLocks,
+ Eterm, Process**, ErtsProcLocks, int,
+ struct port**);
+Process *erts_whereis_process(Process *,
+ ErtsProcLocks,
+ Eterm,
+ ErtsProcLocks,
+ int);
+int erts_unregister_name(Process *, ErtsProcLocks, struct port *, Eterm);
+
+#endif
diff --git a/erts/emulator/beam/safe_hash.c b/erts/emulator/beam/safe_hash.c
new file mode 100644
index 0000000000..21d6ce9304
--- /dev/null
+++ b/erts/emulator/beam/safe_hash.c
@@ -0,0 +1,276 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2008-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+** General thread-safe hash table. Similar interface to hash.h.
+**
+** Author: Sverker Eriksson
+*/
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+
+#include "safe_hash.h"
+
+/* Currently only used by erl_check_io on Windows */
+#ifndef ERTS_SYS_CONTINOUS_FD_NUMBERS
+
+
+static ERTS_INLINE void set_size(SafeHash* h, int size)
+{
+ ASSERT(size % SAFE_HASH_LOCK_CNT == 0);
+ /* This important property allows us to lock the right mutex
+ ** without reading the table size (that can change without the lock) */
+
+ h->size_mask = size - 1;
+ ASSERT((size & h->size_mask) == 0);
+ /* An even power of 2 is just for fast bit masking */
+
+ h->grow_limit = size; /* grow table at 100% load */
+}
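+
+/* Worked example of the property asserted in set_size() above (sizes here
+** are illustrative): with size = 64 and SAFE_HASH_LOCK_CNT = 16,
+** slot = hval & 63 and lock index = hval % 16. Since 16 divides 64,
+** (hval & 63) % 16 == hval % 16, so the lock picked from the hash value
+** alone always guards the slot the value hashes to, even while another
+** thread doubles the table size concurrently. */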
+
+static ERTS_INLINE int align_up_pow2(int val)
+{
+ int x = val & (val-1);
+ if (x==0) return val ? val : 1;
+ do {
+ val = x;
+ x &= x - 1;
+ }while (x);
+ return val << 1;
+}
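+
+/* Examples: align_up_pow2(5) == 8, align_up_pow2(8) == 8 and
+** align_up_pow2(0) == 1. Clearing the lowest set bit repeatedly
+** isolates the highest one, which is then doubled unless 'val'
+** already was a power of two. */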
+
+/*
+** Rehash all objects
+*/
+static void rehash(SafeHash* h, int grow_limit)
+{
+ if (erts_smp_atomic_xchg(&h->is_rehashing, 1) != 0) {
+ return; /* already in progress */
+ }
+ if (h->grow_limit == grow_limit) {
+ int i, size, bytes;
+ SafeHashBucket** new_tab;
+ SafeHashBucket** old_tab = h->tab;
+ int old_size = h->size_mask + 1;
+
+ size = old_size * 2; /* double table size */
+ bytes = size * sizeof(SafeHashBucket*);
+ new_tab = (SafeHashBucket **) erts_alloc(h->type, bytes);
+ sys_memzero(new_tab, bytes);
+
+ for (i=0; i<SAFE_HASH_LOCK_CNT; i++) { /* stop all traffic */
+ erts_smp_mtx_lock(&h->lock_vec[i].mtx);
+ }
+
+ h->tab = new_tab;
+ set_size(h, size);
+
+ for (i = 0; i < old_size; i++) {
+ SafeHashBucket* b = old_tab[i];
+ while (b != NULL) {
+ SafeHashBucket* b_next = b->next;
+ int ix = b->hvalue & h->size_mask;
+ b->next = new_tab[ix];
+ new_tab[ix] = b;
+ b = b_next;
+ }
+ }
+
+ for (i=0; i<SAFE_HASH_LOCK_CNT; i++) {
+ erts_smp_mtx_unlock(&h->lock_vec[i].mtx);
+ }
+ erts_free(h->type, (void *) old_tab);
+ }
+    /* else: rehash already done by another thread */
+ erts_smp_atomic_set(&h->is_rehashing, 0);
+}
+
+
+/*
+** Get info about hash
+*/
+void safe_hash_get_info(SafeHashInfo *hi, SafeHash *h)
+{
+ int size;
+ int i, lock_ix;
+ int max_depth = 0;
+ int objects = 0;
+
+ for (lock_ix=0; lock_ix<SAFE_HASH_LOCK_CNT; lock_ix++) {
+ erts_smp_mtx_lock(&h->lock_vec[lock_ix].mtx);
+ size = h->size_mask + 1;
+ for (i = lock_ix; i < size; i += SAFE_HASH_LOCK_CNT) {
+ int depth = 0;
+ SafeHashBucket* b = h->tab[i];
+ while (b != NULL) {
+ objects++;
+ depth++;
+ b = b->next;
+ }
+ if (depth > max_depth)
+ max_depth = depth;
+ }
+ erts_smp_mtx_unlock(&h->lock_vec[lock_ix].mtx);
+ }
+
+ hi->name = h->name;
+ hi->size = size;
+ hi->objs = objects;
+ hi->depth = max_depth;
+}
+
+/*
+** Returns size of table in bytes. Stored objects not included.
+**/
+int safe_hash_table_sz(SafeHash *h)
+{
+ int i, size;
+    for(i=0; h->name[i]; i++); /* i = strlen(h->name) */
+    i++;                       /* + 1 for the terminating NUL */
+ erts_smp_mtx_lock(&h->lock_vec[0].mtx); /* any lock will do to read size */
+ size = h->size_mask + 1;
+ erts_smp_mtx_unlock(&h->lock_vec[0].mtx);
+ return sizeof(SafeHash) + size*sizeof(SafeHashBucket*) + i;
+}
+
+/*
+** Init a preallocated or static hash structure
+** and allocate buckets. NOT thread safe.
+*/
+SafeHash* safe_hash_init(ErtsAlcType_t type, SafeHash* h, char* name, int size, SafeHashFunctions fun)
+{
+ int i, bytes;
+
+ size = align_up_pow2(size);
+ bytes = size * sizeof(SafeHashBucket*);
+ h->type = type;
+ h->tab = (SafeHashBucket**) erts_alloc(h->type, bytes);
+ sys_memzero(h->tab, bytes);
+ h->name = name;
+ h->fun = fun;
+ set_size(h,size);
+ erts_smp_atomic_init(&h->is_rehashing, 0);
+ erts_smp_atomic_init(&h->nitems, 0);
+ for (i=0; i<SAFE_HASH_LOCK_CNT; i++) {
+ erts_smp_mtx_init(&h->lock_vec[i].mtx,"safe_hash");
+ }
+ return h;
+}
+
+
+/*
+** Find an object in the hash table
+*/
+void* safe_hash_get(SafeHash* h, void* tmpl)
+{
+ SafeHashValue hval = h->fun.hash(tmpl);
+ SafeHashBucket* b;
+ erts_smp_mtx_t* lock = &h->lock_vec[hval % SAFE_HASH_LOCK_CNT].mtx;
+ erts_smp_mtx_lock(lock);
+ b = h->tab[hval & h->size_mask];
+
+ while(b != NULL) {
+ if ((b->hvalue == hval) && (h->fun.cmp(tmpl, (void*)b) == 0))
+ break;
+ b = b->next;
+ }
+ erts_smp_mtx_unlock(lock);
+ return (void*) b;
+}
+
+/*
+** Find or insert an object in the hash table
+*/
+void* safe_hash_put(SafeHash* h, void* tmpl)
+{
+ int grow_limit;
+ SafeHashValue hval = h->fun.hash(tmpl);
+ SafeHashBucket* b;
+ SafeHashBucket** head;
+ erts_smp_mtx_t* lock = &h->lock_vec[hval % SAFE_HASH_LOCK_CNT].mtx;
+ erts_smp_mtx_lock(lock);
+ head = &h->tab[hval & h->size_mask];
+ b = *head;
+ while(b != NULL) {
+ if ((b->hvalue == hval) && (h->fun.cmp(tmpl, (void*)b) == 0)) {
+ erts_smp_mtx_unlock(lock);
+ return b;
+ }
+ b = b->next;
+ }
+
+ b = (SafeHashBucket*) h->fun.alloc(tmpl);
+ b->hvalue = hval;
+ b->next = *head;
+ *head = b;
+ grow_limit = h->grow_limit;
+ erts_smp_mtx_unlock(lock);
+ if (erts_smp_atomic_inctest(&h->nitems) > grow_limit) {
+ rehash(h, grow_limit);
+ }
+ return (void*) b;
+}
+
+/*
+** Erase a hash entry. Returns the template if the entry
+** was erased, NULL if it was not found.
+*/
+void* safe_hash_erase(SafeHash* h, void* tmpl)
+{
+ SafeHashValue hval = h->fun.hash(tmpl);
+ SafeHashBucket* b;
+ SafeHashBucket** prevp;
+ erts_smp_mtx_t* lock = &h->lock_vec[hval % SAFE_HASH_LOCK_CNT].mtx;
+ erts_smp_mtx_lock(lock);
+ prevp = &h->tab[hval & h->size_mask];
+ b = *prevp;
+ while(b != NULL) {
+ if ((b->hvalue == hval) && (h->fun.cmp(tmpl, (void*)b) == 0)) {
+ *prevp = b->next;
+ erts_smp_mtx_unlock(lock);
+ erts_smp_atomic_dec(&h->nitems);
+ h->fun.free((void*)b);
+ return tmpl;
+ }
+ prevp = &b->next;
+ b = b->next;
+ }
+ erts_smp_mtx_unlock(lock);
+ return NULL;
+}
+
+/*
+** Call 'func(obj,func_arg2)' for all objects in table. NOT SAFE!!!
+*/
+void safe_hash_for_each(SafeHash* h, void (*func)(void *, void *), void *func_arg2)
+{
+ int i;
+
+ for (i = 0; i <= h->size_mask; i++) {
+ SafeHashBucket* b = h->tab[i];
+ while (b != NULL) {
+ (*func)((void *) b, func_arg2);
+ b = b->next;
+ }
+ }
+}
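+
+/* Hypothetical usage sketch (callback and table names are illustrative);
+** only valid while no other thread touches the table:
+**
+**   static void count_obj(void* obj, void* arg) { (void)obj; ++*(int*)arg; }
+**   ...
+**   int n = 0;
+**   safe_hash_for_each(&my_hash, count_obj, &n);
+*/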
+
+#endif /* !ERTS_SYS_CONTINOUS_FD_NUMBERS */
+
diff --git a/erts/emulator/beam/safe_hash.h b/erts/emulator/beam/safe_hash.h
new file mode 100644
index 0000000000..c691126ef9
--- /dev/null
+++ b/erts/emulator/beam/safe_hash.h
@@ -0,0 +1,104 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2008-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+** General thread safe hash table. Similar interface to hash.h
+**
+** Author: Sverker Eriksson
+*/
+#ifndef __SAFE_HASH_H__
+#define __SAFE_HASH_H__
+
+
+#ifndef __SYS_H__
+#include "sys.h"
+#endif
+
+#include "erl_alloc.h"
+
+
+typedef unsigned long SafeHashValue;
+
+typedef int (*SHCMP_FUN)(void*, void*);
+typedef SafeHashValue (*SH_FUN)(void*);
+typedef void* (*SHALLOC_FUN)(void*);
+typedef void (*SHFREE_FUN)(void*);
+
+/*
+** This bucket must be placed at the top of
+** every object that uses hashing!!!
+** (Object*) == (Object*) &bucket
+*/
+typedef struct safe_hashbucket
+{
+ struct safe_hashbucket* next; /* Next bucket */
+ SafeHashValue hvalue; /* Store hash value for get, rehash */
+} SafeHashBucket;
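+
+/* A user object therefore embeds the bucket as its first member,
+** e.g. (illustrative only):
+**
+**   typedef struct my_obj {
+**       SafeHashBucket bucket;  // MUST be first
+**       int key;
+**       // ...payload...
+**   } MyObj;
+**
+** so a (MyObj*) and a pointer to its bucket are the same address.
+*/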
+
+typedef struct safe_hashfunctions
+{
+ SH_FUN hash;
+ SHCMP_FUN cmp;
+ SHALLOC_FUN alloc;
+ SHFREE_FUN free;
+} SafeHashFunctions;
+
+typedef struct {
+ char *name;
+ int size;
+ int used;
+ int objs;
+ int depth;
+} SafeHashInfo;
+
+#define SAFE_HASH_LOCK_CNT 16
+typedef struct
+{
+ SafeHashFunctions fun; /* (C) Function block */
+ ErtsAlcType_t type; /* (C) */
+ char* name; /* (C) Table name (static, for debugging) */
+ int size_mask; /* (RW) Number of slots - 1 */
+ SafeHashBucket** tab; /* (RW) Vector of bucket pointers (objects) */
+ int grow_limit; /* (RW) Threshold for growing table */
+ erts_smp_atomic_t nitems; /* (A) Number of items in table */
+ erts_smp_atomic_t is_rehashing; /* (A) Table rehashing in progress */
+
+ union {
+ erts_smp_mtx_t mtx;
+ byte __cache_line__[64];
+ }lock_vec[SAFE_HASH_LOCK_CNT];
+
+ /* C: Constants initialized once */
+ /* RW: One lock (or is_rehashing) to read and _all_ locks to write */
+ /* A: Lockless atomics */
+} SafeHash;
+
+SafeHash* safe_hash_init(ErtsAlcType_t, SafeHash*, char*, int, SafeHashFunctions);
+
+void safe_hash_get_info(SafeHashInfo*, SafeHash*);
+int safe_hash_table_sz(SafeHash *);
+
+void* safe_hash_get(SafeHash*, void*);
+void* safe_hash_put(SafeHash*, void*);
+void* safe_hash_erase(SafeHash*, void*);
+
+void safe_hash_for_each(SafeHash*, void (*func)(void *, void *), void *);
+
+#endif /* __SAFE_HASH_H__ */
+
diff --git a/erts/emulator/beam/sys.h b/erts/emulator/beam/sys.h
new file mode 100644
index 0000000000..71cb6a36cc
--- /dev/null
+++ b/erts/emulator/beam/sys.h
@@ -0,0 +1,1257 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 1996-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+#ifndef __SYS_H__
+#define __SYS_H__
+
+#if defined(VALGRIND) && !defined(NO_FPE_SIGNALS)
+# define NO_FPE_SIGNALS
+#endif
+
+/* Never use elib-malloc when purify-memory-tracing */
+#if defined(PURIFY)
+#undef ENABLE_ELIB_MALLOC
+#undef ELIB_HEAP_SBRK
+#undef ELIB_ALLOC_IS_CLIB
+#endif
+
+
+/* xxxP __VXWORKS__ */
+#ifdef VXWORKS
+#include <vxWorks.h>
+#endif
+
+#ifdef DISABLE_CHILD_WAITER_THREAD
+#undef ENABLE_CHILD_WAITER_THREAD
+#endif
+
+#if defined(ERTS_SMP) && !defined(DISABLE_CHILD_WAITER_THREAD)
+#undef ENABLE_CHILD_WAITER_THREAD
+#define ENABLE_CHILD_WAITER_THREAD 1
+#endif
+
+/* The ERTS_TIMER_THREAD #define must be visible to the
+ erl_${OS}_sys.h #include files: it controls whether
+ certain optional facilities should be defined or not. */
+#if defined(ERTS_SMP) && 0
+#define ERTS_TIMER_THREAD
+#endif
+
+#if defined (__WIN32__)
+# include "erl_win_sys.h"
+#elif defined (VXWORKS)
+# include "erl_vxworks_sys.h"
+#elif defined (_OSE_)
+# include "erl_ose_sys.h"
+#else
+# include "erl_unix_sys.h"
+#ifndef UNIX
+# define UNIX 1
+#endif
+#endif
+
+#include "erl_misc_utils.h"
+
+/*
+ * To allow building of Universal Binaries for Mac OS X,
+ * we must not depend on the endian detected by the configure script.
+ */
+#if defined(__APPLE__)
+# if defined(__BIG_ENDIAN__) && !defined(WORDS_BIGENDIAN)
+# define WORDS_BIGENDIAN 1
+# elif !defined(__BIG_ENDIAN__) && defined(WORDS_BIGENDIAN)
+# undef WORDS_BIGENDIAN
+# endif
+#endif
+
+/*
+ * Make sure we have a type for FD's (used by erl_check_io)
+ */
+
+#ifndef ERTS_SYS_FD_TYPE
+typedef int ErtsSysFdType;
+#else
+typedef ERTS_SYS_FD_TYPE ErtsSysFdType;
+#endif
+
+#ifdef ERTS_INLINE
+# ifndef ERTS_CAN_INLINE
+# define ERTS_CAN_INLINE 1
+# endif
+#else
+# if defined(__GNUC__)
+# define ERTS_CAN_INLINE 1
+# define ERTS_INLINE __inline__
+# elif defined(__WIN32__)
+# define ERTS_CAN_INLINE 1
+# define ERTS_INLINE __inline
+# else
+# define ERTS_CAN_INLINE 0
+# define ERTS_INLINE
+# endif
+#endif
+
+#ifdef __GNUC__
+# if __GNUC__ < 3 && (__GNUC__ != 2 || __GNUC_MINOR__ < 96)
+# define ERTS_LIKELY(BOOL) (BOOL)
+# define ERTS_UNLIKELY(BOOL) (BOOL)
+# else
+# define ERTS_LIKELY(BOOL) __builtin_expect((BOOL), !0)
+# define ERTS_UNLIKELY(BOOL) __builtin_expect((BOOL), 0)
+# endif
+#else
+# define ERTS_LIKELY(BOOL) (BOOL)
+# define ERTS_UNLIKELY(BOOL) (BOOL)
+#endif
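+
+/* Typical (illustrative) use of the hint macros above, on a hot path:
+**
+**   if (ERTS_UNLIKELY(ptr == NULL))
+**       handle_rare_error();   // hypothetical slow path
+*/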
+
+#if defined(DEBUG) || defined(ERTS_ENABLE_LOCK_CHECK)
+# undef ERTS_CAN_INLINE
+# define ERTS_CAN_INLINE 0
+# undef ERTS_INLINE
+# define ERTS_INLINE
+#endif
+
+#if ERTS_CAN_INLINE
+#define ERTS_GLB_INLINE static ERTS_INLINE
+#else
+#define ERTS_GLB_INLINE
+#endif
+
+#if ERTS_CAN_INLINE || defined(ERTS_DO_INCL_GLB_INLINE_FUNC_DEF)
+# define ERTS_GLB_INLINE_INCL_FUNC_DEF 1
+#else
+# define ERTS_GLB_INLINE_INCL_FUNC_DEF 0
+#endif
+
+#ifndef ERTS_EXIT_AFTER_DUMP
+# define ERTS_EXIT_AFTER_DUMP exit
+#endif
+
+#ifdef DEBUG
+# define ASSERT(e) \
+ if (e) { \
+ ; \
+ } else { \
+ erl_assert_error(#e, __FILE__, __LINE__); \
+ }
+# define ASSERT_EXPR(e) \
+ ((void) ((e) ? 1 : (erl_assert_error(#e, __FILE__, __LINE__), 0)))
+void erl_assert_error(char* expr, char* file, int line);
+#else
+# define ASSERT(e)
+# define ASSERT_EXPR(e) ((void) 1)
+#endif
+
+/*
+ * Microsoft C/C++: We certainly want to use stdarg.h and prototypes.
+ * But MSC doesn't define __STDC__, unless we compile with the -Za
+ * flag (strict ANSI C, no Microsoft extension). Compiling with -Za
+ * doesn't work: some Microsoft headers fail to compile...
+ *
+ * Solution: Test if __STDC__ or _MSC_VER is defined.
+ *
+ * Note: Simply defining __STDC__ doesn't work, as some Microsoft
+ * headers will fail to compile!
+ */
+
+#include <stdarg.h>
+
+#if defined(__STDC__) || defined(_MSC_VER)
+# define EXTERN_FUNCTION(t, f, x) extern t f x
+# define FUNCTION(t, f, x) t f x
+# define _DOTS_ ...
+# define _VOID_ void
+#elif defined(__cplusplus)
+# define EXTERN_FUNCTION(f, x) extern "C" { f x }
+# define FUNCTION(t, f, x) t f x
+# define _DOTS_ ...
+# define _VOID_ void
+#else
+# define EXTERN_FUNCTION(t, f, x) extern t f (/*x*/)
+# define FUNCTION(t, f, x) t f (/*x*/)
+# define _DOTS_
+# define _VOID_
+#endif
+
+/* This isn't sys-dependent, but putting it here benefits sys.c and drivers
+ - allow use of 'const' regardless of compiler */
+
+#if !defined(__STDC__) && !defined(_MSC_VER)
+# define const
+#endif
+
+#ifdef VXWORKS
+/* Replace VxWorks' printf with a real one that does fprintf(stdout, ...) */
+EXTERN_FUNCTION(int, real_printf, (const char *fmt, ...));
+# define printf real_printf
+#endif
+
+/* In VC++, noreturn is a declspec that has to be before the types,
+ * but in GNUC it is an attribute to be placed between return type
+ * and function name, hence __decl_noreturn <types> __noreturn <function name>
+ */
+#if __GNUC__
+# define __decl_noreturn
+# define __noreturn __attribute__((noreturn))
+# undef __deprecated
+# if __GNUC__ >= 3
+# define __deprecated __attribute__((deprecated))
+# else
+# define __deprecated
+# endif
+#else
+# if defined(__WIN32__) && defined(_MSC_VER)
+# define __noreturn
+# define __decl_noreturn __declspec(noreturn)
+# else
+# define __noreturn
+# define __decl_noreturn
+# endif
+# define __deprecated
+#endif
+
+/*
+** Data types:
+**
+** Eterm: A tagged erlang term (possibly 64 bits)
+** UInt: An unsigned integer exactly as large as an Eterm.
+** SInt: A signed integer exactly as large as an Eterm and therefore large
+** enough to hold the return value of the signed_val() macro.
+** Uint32: An unsigned integer of 32 bits exactly
+** Sint32: A signed integer of 32 bits exactly
+** Uint16: An unsigned integer of 16 bits exactly
+** Sint16: A signed integer of 16 bits exactly.
+*/
+
+#if SIZEOF_VOID_P == 8
+#undef ARCH_32
+#define ARCH_64
+#elif SIZEOF_VOID_P == 4
+#define ARCH_32
+#undef ARCH_64
+#else
+#error Neither 32 nor 64 bit architecture
+#endif
+
+#if SIZEOF_VOID_P != SIZEOF_SIZE_T
+#error sizeof(void*) != sizeof(size_t)
+#endif
+
+#if SIZEOF_VOID_P == SIZEOF_LONG
+typedef unsigned long Eterm;
+typedef unsigned long Uint;
+typedef long Sint;
+#define ERTS_SIZEOF_ETERM SIZEOF_LONG
+#elif SIZEOF_VOID_P == SIZEOF_INT
+typedef unsigned int Eterm;
+typedef unsigned int Uint;
+typedef int Sint;
+#define ERTS_SIZEOF_ETERM SIZEOF_INT
+#else
+#error Found no appropriate type to use for 'Eterm', 'Uint' and 'Sint'
+#endif
+
+#ifndef HAVE_INT64
+#if SIZEOF_LONG == 8
+#define HAVE_INT64 1
+typedef unsigned long Uint64;
+typedef long Sint64;
+#elif SIZEOF_LONG_LONG == 8
+#define HAVE_INT64 1
+typedef unsigned long long Uint64;
+typedef long long Sint64;
+#else
+#define HAVE_INT64 0
+#endif
+#endif
+
+#if SIZEOF_LONG == 4
+typedef unsigned long Uint32;
+typedef long Sint32;
+#elif SIZEOF_INT == 4
+typedef unsigned int Uint32;
+typedef int Sint32;
+#else
+#error Found no appropriate type to use for 'Uint32' and 'Sint32'
+#endif
+
+#if SIZEOF_INT == 2
+typedef unsigned int Uint16;
+typedef int Sint16;
+#elif SIZEOF_SHORT == 2
+typedef unsigned short Uint16;
+typedef short Sint16;
+#else
+#error Found no appropriate type to use for 'Uint16' and 'Sint16'
+#endif
+
+#if CHAR_BIT == 8
+typedef unsigned char byte;
+#else
+#error Found no appropriate type to use for 'byte'
+#endif
+
+#if defined(ARCH_64) && !HAVE_INT64
+#error 64-bit architecture, but no appropriate type to use for Uint64 and Sint64 found
+#endif
+
+#if defined(ARCH_64)
+# define ERTS_WORD_ALIGN_PAD_SZ(X) \
+ (((size_t) 8) - (((size_t) (X)) & ((size_t) 7)))
+#elif defined(ARCH_32)
+# define ERTS_WORD_ALIGN_PAD_SZ(X) \
+ (((size_t) 4) - (((size_t) (X)) & ((size_t) 3)))
+#else
+#error "Not supported..."
+#endif
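+
+/* Note the edge case: on a 64-bit machine ERTS_WORD_ALIGN_PAD_SZ(13) == 3,
+ * but ERTS_WORD_ALIGN_PAD_SZ(16) == 8, not 0: an already aligned value
+ * still gets a full word of padding. */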
+
+#include "erl_lock_check.h"
+#include "erl_smp.h"
+
+#ifdef ERTS_WANT_BREAK_HANDLING
+# ifdef ERTS_SMP
+extern erts_smp_atomic_t erts_break_requested;
+# define ERTS_BREAK_REQUESTED ((int) erts_smp_atomic_read(&erts_break_requested))
+# else
+extern volatile int erts_break_requested;
+# define ERTS_BREAK_REQUESTED erts_break_requested
+# endif
+void erts_do_break_handling(void);
+#endif
+
+#ifdef ERTS_WANT_GOT_SIGUSR1
+# ifndef UNIX
+# define ERTS_GOT_SIGUSR1 0
+# else
+# ifdef ERTS_SMP
+extern erts_smp_atomic_t erts_got_sigusr1;
+# define ERTS_GOT_SIGUSR1 ((int) erts_smp_atomic_read(&erts_got_sigusr1))
+# else
+extern volatile int erts_got_sigusr1;
+# define ERTS_GOT_SIGUSR1 erts_got_sigusr1
+# endif
+# endif
+#endif
+
+#ifdef ERTS_SMP
+extern erts_smp_atomic_t erts_writing_erl_crash_dump;
+#define ERTS_IS_CRASH_DUMPING \
+ ((int) erts_smp_atomic_read(&erts_writing_erl_crash_dump))
+#else
+extern volatile int erts_writing_erl_crash_dump;
+#define ERTS_IS_CRASH_DUMPING erts_writing_erl_crash_dump
+#endif
+
+/* Deal with memcpy() vs bcopy() etc. We want to use the mem*() functions,
+ but be able to fall back on bcopy() etc on systems that don't have
+   mem*(), but this doesn't work too well with memset()/bzero() - thus the
+ memzero() macro.
+*/
+
+/* xxxP */
+#if defined(USE_BCOPY)
+# define memcpy(a, b, c) bcopy((b), (a), (c))
+# define memcmp(a, b, c) bcmp((a), (b), (c))
+# define memzero(buf, len) bzero((buf), (len))
+#else
+# define memzero(buf, len) memset((buf), '\0', (len))
+#endif
+
+/* Stuff that is useful for port programs, drivers, etc */
+
+#ifdef ISC32 /* Too much for the Makefile... */
+# define signal sigset
+# define NO_ASINH
+# define NO_ACOSH
+# define NO_ATANH
+# define NO_FTRUNCATE
+# define SIG_SIGHOLD
+# define _POSIX_SOURCE
+# define _XOPEN_SOURCE
+#endif
+
+#ifdef QNX /* Too much for the Makefile... */
+# define SYS_SELECT_H
+# define NO_ERF
+# define NO_ERFC
+/* This definition doesn't take NaN into account, but matherr() gets those */
+# define finite(x) (fabs(x) != HUGE_VAL)
+# define USE_MATHERR
+# define HAVE_FINITE
+#endif
+
+
+#ifdef WANT_NONBLOCKING /* must define this to pull in fcntl.h/ioctl.h */
+
+/* This is really a mess... We used to use fcntl O_NDELAY, but that seems
+ to only work on SunOS 4 - in particular, on SysV-based systems
+ (including Solaris 2), it does set non-blocking mode, but causes
+ read() to return 0!! fcntl O_NONBLOCK is specified by POSIX, and
+ seems to work on most systems, with the notable exception of AIX,
+ where the old ioctl FIONBIO is the *only* one that will set a *socket*
+ in non-blocking mode - and ioctl FIONBIO on AIX *doesn't* work for
+ pipes or ttys (O_NONBLOCK does)!!! For now, we'll use FIONBIO for AIX. */
+
+# ifdef _OSE_
+static const int zero_value = 0, one_value = 1;
+# define SET_BLOCKING(fd) ioctl((fd), FIONBIO, (char*)&zero_value)
+# define SET_NONBLOCKING(fd) ioctl((fd), FIONBIO, (char*)&one_value)
+# define ERRNO_BLOCK EWOULDBLOCK
+# else
+
+# ifdef __WIN32__
+
+static unsigned long zero_value = 0, one_value = 1;
+#    define SET_BLOCKING(fd)	{ if (ioctlsocket((fd), FIONBIO, &zero_value) != 0) fprintf(stderr, "Error setting socket to blocking: %d\n", WSAGetLastError()); }
+# define SET_NONBLOCKING(fd) ioctlsocket((fd), FIONBIO, &one_value)
+
+# else
+# ifdef VXWORKS
+# include <fcntl.h> /* xxxP added for O_WRONLY etc ... macro:s ... */
+# include <ioLib.h>
+static const int zero_value = 0, one_value = 1;
+# define SET_BLOCKING(fd) ioctl((fd), FIONBIO, (int)&zero_value)
+# define SET_NONBLOCKING(fd) ioctl((fd), FIONBIO, (int)&one_value)
+# define ERRNO_BLOCK EWOULDBLOCK
+
+# else
+# ifdef NB_FIONBIO /* Old BSD */
+# include <sys/ioctl.h>
+ static const int zero_value = 0, one_value = 1;
+# define SET_BLOCKING(fd) ioctl((fd), FIONBIO, &zero_value)
+# define SET_NONBLOCKING(fd) ioctl((fd), FIONBIO, &one_value)
+# define ERRNO_BLOCK EWOULDBLOCK
+# else /* !NB_FIONBIO */
+# include <fcntl.h>
+# ifdef NB_O_NDELAY /* Nothing needs this? */
+# define NB_FLAG O_NDELAY
+# ifndef ERRNO_BLOCK /* allow override (e.g. EAGAIN) via Makefile */
+# define ERRNO_BLOCK EWOULDBLOCK
+# endif
+# else /* !NB_O_NDELAY */ /* The True Way - POSIX!:-) */
+# define NB_FLAG O_NONBLOCK
+# define ERRNO_BLOCK EAGAIN
+# endif /* !NB_O_NDELAY */
+# define SET_BLOCKING(fd) fcntl((fd), F_SETFL, \
+ fcntl((fd), F_GETFL, 0) & ~NB_FLAG)
+# define SET_NONBLOCKING(fd) fcntl((fd), F_SETFL, \
+ fcntl((fd), F_GETFL, 0) | NB_FLAG)
+# endif /* !NB_FIONBIO */
+#   endif /* !VXWORKS */
+# endif /* !__WIN32__ */
+# endif /* _OSE_ */
+#endif /* WANT_NONBLOCKING */
+
+extern erts_cpu_info_t *erts_cpuinfo; /* erl_init.c */
+
+__decl_noreturn void __noreturn erl_exit(int n, char*, ...);
+
+/* Some special erl_exit() codes: */
+#define ERTS_INTR_EXIT INT_MIN /* called from signal handler */
+#define ERTS_ABORT_EXIT (INT_MIN + 1) /* no crash dump; only abort() */
+#define ERTS_DUMP_EXIT (127) /* crash dump; then exit() */
+
+
+#ifndef ERTS_SMP
+int check_async_ready(void);
+#ifdef USE_THREADS
+void sys_async_ready(int hndl);
+int erts_register_async_ready_callback(void (*funcp)(void));
+#endif
+#endif
+
+Eterm erts_check_io_info(void *p);
+
+/* Size of misc memory allocated from system dependent code */
+Uint erts_sys_misc_mem_sz(void);
+
+/* print stuff is declared here instead of in global.h, so sys stuff won't
+ have to include global.h */
+#include "erl_printf.h"
+
+/* I/O constants for erts_print and erts_putc */
+#define ERTS_PRINT_STDERR (2)
+#define ERTS_PRINT_STDOUT (1)
+#define ERTS_PRINT_INVALID (0) /* Don't want to use 0 since CBUF was 0 */
+#define ERTS_PRINT_FILE (-1)
+#define ERTS_PRINT_SBUF (-2)
+#define ERTS_PRINT_SNBUF (-3)
+#define ERTS_PRINT_DSBUF (-4)
+
+#define ERTS_PRINT_MIN ERTS_PRINT_DSBUF
+
+typedef struct {
+ char *buf;
+ size_t size;
+} erts_print_sn_buf;
+
+int erts_print(int to, void *arg, char *format, ...); /* in utils.c */
+int erts_putc(int to, void *arg, char); /* in utils.c */
+
+/* logger stuff is declared here instead of in global.h, so sys files
+ won't have to include global.h */
+
+erts_dsprintf_buf_t *erts_create_logger_dsbuf(void);
+int erts_send_info_to_logger(Eterm, erts_dsprintf_buf_t *);
+int erts_send_warning_to_logger(Eterm, erts_dsprintf_buf_t *);
+int erts_send_error_to_logger(Eterm, erts_dsprintf_buf_t *);
+int erts_send_info_to_logger_str(Eterm, char *);
+int erts_send_warning_to_logger_str(Eterm, char *);
+int erts_send_error_to_logger_str(Eterm, char *);
+int erts_send_info_to_logger_nogl(erts_dsprintf_buf_t *);
+int erts_send_warning_to_logger_nogl(erts_dsprintf_buf_t *);
+int erts_send_error_to_logger_nogl(erts_dsprintf_buf_t *);
+int erts_send_info_to_logger_str_nogl(char *);
+int erts_send_warning_to_logger_str_nogl(char *);
+int erts_send_error_to_logger_str_nogl(char *);
+
+typedef struct preload {
+ char *name; /* Name of module */
+ int size; /* Size of code */
+ unsigned char* code; /* Code pointer */
+} Preload;
+
+
+/*
+ * This structure contains options for all built-in drivers.
+ * None of the drivers uses all of the fields.
+ */
+
+/* OSE: Want process_type and priority in here as well! Needs updates in erl_bif_ports.c! */
+
+typedef struct _SysDriverOpts {
+ int ifd; /* Input file descriptor (fd driver). */
+    int ofd;			/* Output file descriptor (fd driver). */
+ int packet_bytes; /* Number of bytes in packet header. */
+ int read_write; /* Read and write bits. */
+ int use_stdio; /* Use standard I/O: TRUE or FALSE. */
+ int redir_stderr; /* Redirect stderr to stdout: TRUE/FALSE. */
+    int hide_window;		/* Hide the window (Windows). */
+    int exit_status;		/* Report exit status of subprocess. */
+    int overlapped_io;          /* Only has effect on Windows NT and later. */
+ char *envir; /* Environment of the port process, */
+ /* in Windows format. */
+ char **argv; /* Argument vector in Unix'ish format. */
+ char *wd; /* Working directory. */
+ unsigned spawn_type; /* Bitfield of ERTS_SPAWN_DRIVER |
+ ERTS_SPAWN_EXTERNAL | both*/
+
+#ifdef _OSE_
+ enum PROCESS_TYPE process_type;
+ OSPRIORITY priority;
+#endif /* _OSE_ */
+
+} SysDriverOpts;
+
+extern char *erts_default_arg0;
+
+extern char os_type[];
+
+extern int sys_init_time(void);
+#if defined(ERTS_TIMER_THREAD)
+#define erts_deliver_time()
+#else
+extern void erts_deliver_time(void);
+#endif
+extern void erts_time_remaining(SysTimeval *);
+extern int erts_init_time_sup(void);
+extern void erts_sys_init_float(void);
+extern void erts_thread_init_float(void);
+extern void erts_thread_disable_fpe(void);
+
+ERTS_GLB_INLINE int erts_block_fpe(void);
+ERTS_GLB_INLINE void erts_unblock_fpe(int);
+
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+
+ERTS_GLB_INLINE int erts_block_fpe(void)
+{
+ return erts_sys_block_fpe();
+}
+
+ERTS_GLB_INLINE void erts_unblock_fpe(int unmasked)
+{
+ erts_sys_unblock_fpe(unmasked);
+}
+
+#endif /* #if ERTS_GLB_INLINE_INCL_FUNC_DEF */
+
+
+/* Dynamic library/driver loading */
+typedef struct {
+    char* str;
+} ErtsSysDdllError;
+#define ERTS_SYS_DDLL_ERROR_INIT {NULL}
+extern void erts_sys_ddll_free_error(ErtsSysDdllError*);
+extern void erl_sys_ddll_init(void); /* to initialize mutexes etc */
+extern int erts_sys_ddll_open2(char *path, void **handle, ErtsSysDdllError*);
+#define erts_sys_ddll_open(P,H) erts_sys_ddll_open2(P,H,NULL)
+extern int erts_sys_ddll_open_noext(char *path, void **handle, ErtsSysDdllError*);
+extern int erts_sys_ddll_load_driver_init(void *handle, void **function);
+extern int erts_sys_ddll_load_nif_init(void *handle, void **function,ErtsSysDdllError*);
+extern int erts_sys_ddll_close2(void *handle, ErtsSysDdllError*);
+#define erts_sys_ddll_close(H) erts_sys_ddll_close2(H,NULL)
+extern void *erts_sys_ddll_call_init(void *function);
+extern void *erts_sys_ddll_call_nif_init(void *function);
+extern int erts_sys_ddll_sym2(void *handle, char *name, void **function, ErtsSysDdllError*);
+#define erts_sys_ddll_sym(H,N,F) erts_sys_ddll_sym2(H,N,F,NULL)
+extern char *erts_sys_ddll_error(int code);
+
+
+
+/*
+ * System interfaces for startup/sae code (functions found in respective sys.c)
+ */
+
+
+#ifdef ERTS_SMP
+void erts_sys_schedule_interrupt(int set);
+void erts_sys_schedule_interrupt_timed(int set, long msec);
+void erts_sys_main_thread(void);
+#else
+#define erts_sys_schedule_interrupt(Set)
+#endif
+
+extern void erts_sys_prepare_crash_dump(void);
+extern void erts_sys_pre_init(void);
+extern void erl_sys_init(void);
+extern void erl_sys_args(int *argc, char **argv);
+extern void erl_sys_schedule(int);
+#ifdef _OSE_
+extern void erl_sys_init_final(void);
+#else
+void sys_tty_reset(void);
+#endif
+
+EXTERN_FUNCTION(int, sys_max_files, (_VOID_));
+void sys_init_io(void);
+Preload* sys_preloaded(void);
+EXTERN_FUNCTION(unsigned char*, sys_preload_begin, (Preload*));
+EXTERN_FUNCTION(void, sys_preload_end, (Preload*));
+EXTERN_FUNCTION(int, sys_get_key, (int));
+void elapsed_time_both(unsigned long *ms_user, unsigned long *ms_sys,
+ unsigned long *ms_user_diff, unsigned long *ms_sys_diff);
+void wall_clock_elapsed_time_both(unsigned long *ms_total,
+ unsigned long *ms_diff);
+void get_time(int *hour, int *minute, int *second);
+void get_date(int *year, int *month, int *day);
+void get_localtime(int *year, int *month, int *day,
+ int *hour, int *minute, int *second);
+void get_universaltime(int *year, int *month, int *day,
+ int *hour, int *minute, int *second);
+int univ_to_local(Sint *year, Sint *month, Sint *day,
+ Sint *hour, Sint *minute, Sint *second);
+int local_to_univ(Sint *year, Sint *month, Sint *day,
+ Sint *hour, Sint *minute, Sint *second, int isdst);
+void get_now(Uint*, Uint*, Uint*);
+void get_sys_now(Uint*, Uint*, Uint*);
+EXTERN_FUNCTION(void, set_break_quit, (void (*)(void), void (*)(void)));
+
+void os_flavor(char*, unsigned);
+void os_version(int*, int*, int*);
+void init_getenv_state(GETENV_STATE *);
+char * getenv_string(GETENV_STATE *);
+void fini_getenv_state(GETENV_STATE *);
+
+/* xxxP */
+void init_sys_float(void);
+int sys_chars_to_double(char*, double*);
+int sys_double_to_chars(double, char*);
+void sys_get_pid(char *);
+
+/* erts_sys_putenv() returns 0 on success and a value != 0 on failure. */
+int erts_sys_putenv(char *key_value, int sep_ix);
+/* erts_sys_getenv() returns 0 on success (length of value string in
+ *size), a value > 0 if value buffer is too small (*size is set to needed
+ size), and a value < 0 on failure. */
+int erts_sys_getenv(char *key, char *value, size_t *size);
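+
+/* Sketch of the grow-and-retry pattern the contract above implies
+   (allocator type and variable names are illustrative; erts_alloc and
+   erts_realloc are the helpers declared in erl_alloc.h):
+
+     size_t sz = 64;
+     char *val = erts_alloc(ERTS_ALC_T_TMP, sz);
+     while (erts_sys_getenv("PATH", val, &sz) > 0) {
+         val = erts_realloc(ERTS_ALC_T_TMP, val, sz); // sz now holds needed size
+     }
+*/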
+
+/* Easier to use, but not as efficient, environment functions */
+char *erts_read_env(char *key);
+void erts_free_read_env(void *value);
+int erts_write_env(char *key, char *value);
+
+/* utils.c */
+
+/* Options to sys_alloc_opt */
+#define SYS_ALLOC_OPT_TRIM_THRESHOLD 0
+#define SYS_ALLOC_OPT_TOP_PAD 1
+#define SYS_ALLOC_OPT_MMAP_THRESHOLD 2
+#define SYS_ALLOC_OPT_MMAP_MAX 3
+
+/* Default values to sys_alloc_opt options */
+#define ERTS_DEFAULT_TRIM_THRESHOLD (128 * 1024)
+#define ERTS_DEFAULT_TOP_PAD 0
+#define ERTS_DEFAULT_MMAP_THRESHOLD (128 * 1024)
+#define ERTS_DEFAULT_MMAP_MAX 64
+
+EXTERN_FUNCTION(int, sys_alloc_opt, (int, int));
+
+typedef struct {
+ Sint trim_threshold;
+ Sint top_pad;
+ Sint mmap_threshold;
+ Sint mmap_max;
+} SysAllocStat;
+
+EXTERN_FUNCTION(void, sys_alloc_stat, (SysAllocStat *));
+
+/* Block the whole system... */
+
+#define ERTS_BS_FLG_ALLOW_GC (((Uint32) 1) << 0)
+#define ERTS_BS_FLG_ALLOW_IO (((Uint32) 1) << 1)
+
+/* Activities... */
+typedef enum {
+ ERTS_ACTIVITY_UNDEFINED, /* Undefined activity */
+ ERTS_ACTIVITY_WAIT, /* Waiting */
+ ERTS_ACTIVITY_GC, /* Garbage collecting */
+ ERTS_ACTIVITY_IO /* I/O including message passing to erl procs */
+} erts_activity_t;
+
+#ifdef ERTS_SMP
+
+typedef enum {
+ ERTS_ACT_ERR_LEAVE_WAIT_UNLOCKED,
+ ERTS_ACT_ERR_LEAVE_UNKNOWN_ACTIVITY,
+ ERTS_ACT_ERR_ENTER_UNKNOWN_ACTIVITY
+} erts_activity_error_t;
+
+typedef struct {
+ erts_smp_atomic_t do_block;
+ struct {
+ erts_smp_atomic_t wait;
+ erts_smp_atomic_t gc;
+ erts_smp_atomic_t io;
+ } in_activity;
+} erts_system_block_state_t;
+
+extern erts_system_block_state_t erts_system_block_state;
+
+int erts_is_system_blocked(erts_activity_t allowed_activities);
+void erts_block_me(void (*prepare)(void *), void (*resume)(void *), void *arg);
+void erts_register_blockable_thread(void);
+void erts_unregister_blockable_thread(void);
+void erts_note_activity_begin(erts_activity_t activity);
+void
+erts_check_block(erts_activity_t old_activity,
+ erts_activity_t new_activity,
+ int locked,
+ void (*prepare)(void *),
+ void (*resume)(void *),
+ void *arg);
+void erts_block_system(Uint32 allowed_activities);
+int erts_emergency_block_system(long timeout, Uint32 allowed_activities);
+void erts_release_system(void);
+void erts_system_block_init(void);
+void erts_set_activity_error(erts_activity_error_t, char *, int);
+#ifdef ERTS_ENABLE_LOCK_CHECK
+void erts_lc_activity_change_begin(void);
+void erts_lc_activity_change_end(void);
+int erts_lc_is_blocking(void);
+#define ERTS_LC_IS_BLOCKING \
+ (erts_smp_pending_system_block() && erts_lc_is_blocking())
+#endif
+#endif
+
+#define erts_smp_activity_begin(NACT, PRP, RSM, ARG) \
+ erts_smp_set_activity(ERTS_ACTIVITY_UNDEFINED, \
+ (NACT), \
+ 0, \
+ (PRP), \
+ (RSM), \
+ (ARG), \
+ __FILE__, \
+ __LINE__)
+#define erts_smp_activity_change(OACT, NACT, PRP, RSM, ARG) \
+ erts_smp_set_activity((OACT), \
+ (NACT), \
+ 0, \
+ (PRP), \
+ (RSM), \
+ (ARG), \
+ __FILE__, \
+ __LINE__)
+#define erts_smp_activity_end(OACT, PRP, RSM, ARG) \
+ erts_smp_set_activity((OACT), \
+ ERTS_ACTIVITY_UNDEFINED, \
+ 0, \
+ (PRP), \
+ (RSM), \
+ (ARG), \
+ __FILE__, \
+ __LINE__)
+
+#define erts_smp_locked_activity_begin(NACT) \
+ erts_smp_set_activity(ERTS_ACTIVITY_UNDEFINED, \
+ (NACT), \
+ 1, \
+ NULL, \
+ NULL, \
+ NULL, \
+ __FILE__, \
+ __LINE__)
+#define erts_smp_locked_activity_change(OACT, NACT) \
+ erts_smp_set_activity((OACT), \
+ (NACT), \
+ 1, \
+ NULL, \
+ NULL, \
+ NULL, \
+ __FILE__, \
+ __LINE__)
+#define erts_smp_locked_activity_end(OACT) \
+ erts_smp_set_activity((OACT), \
+ ERTS_ACTIVITY_UNDEFINED, \
+ 1, \
+ NULL, \
+ NULL, \
+ NULL, \
+ __FILE__, \
+ __LINE__)
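+
+/* Typical blocking-wait pattern built from the macros above (a sketch;
+   compare timer_thread_start() in time.c, which passes NULL callbacks):
+
+     erts_smp_activity_begin(ERTS_ACTIVITY_WAIT, NULL, NULL, NULL);
+     ... sleep on a condition variable ...
+     erts_smp_activity_end(ERTS_ACTIVITY_WAIT, NULL, NULL, NULL);
+*/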
+
+
+ERTS_GLB_INLINE int erts_smp_is_system_blocked(erts_activity_t allowed_activities);
+ERTS_GLB_INLINE void erts_smp_block_system(Uint32 allowed_activities);
+ERTS_GLB_INLINE int erts_smp_emergency_block_system(long timeout,
+ Uint32 allowed_activities);
+ERTS_GLB_INLINE void erts_smp_release_system(void);
+ERTS_GLB_INLINE int erts_smp_pending_system_block(void);
+ERTS_GLB_INLINE void erts_smp_chk_system_block(void (*prepare)(void *),
+ void (*resume)(void *),
+ void *arg);
+ERTS_GLB_INLINE void
+erts_smp_set_activity(erts_activity_t old_activity,
+ erts_activity_t new_activity,
+ int locked,
+ void (*prepare)(void *),
+ void (*resume)(void *),
+ void *arg,
+ char *file,
+ int line);
+
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+
+
+ERTS_GLB_INLINE int
+erts_smp_is_system_blocked(erts_activity_t allowed_activities)
+{
+#ifdef ERTS_SMP
+ return erts_is_system_blocked(allowed_activities);
+#else
+ return 1;
+#endif
+}
+
+ERTS_GLB_INLINE void
+erts_smp_block_system(Uint32 allowed_activities)
+{
+#ifdef ERTS_SMP
+ erts_block_system(allowed_activities);
+#endif
+}
+
+ERTS_GLB_INLINE int
+erts_smp_emergency_block_system(long timeout, Uint32 allowed_activities)
+{
+#ifdef ERTS_SMP
+ return erts_emergency_block_system(timeout, allowed_activities);
+#else
+ return 0;
+#endif
+}
+
+ERTS_GLB_INLINE void
+erts_smp_release_system(void)
+{
+#ifdef ERTS_SMP
+ erts_release_system();
+#endif
+}
+
+ERTS_GLB_INLINE int
+erts_smp_pending_system_block(void)
+{
+#ifdef ERTS_SMP
+ return erts_smp_atomic_read(&erts_system_block_state.do_block);
+#else
+ return 0;
+#endif
+}
+
+
+ERTS_GLB_INLINE void
+erts_smp_chk_system_block(void (*prepare)(void *),
+ void (*resume)(void *),
+ void *arg)
+{
+#ifdef ERTS_SMP
+ if (erts_smp_pending_system_block())
+ erts_block_me(prepare, resume, arg);
+#endif
+}
+
+ERTS_GLB_INLINE void
+erts_smp_set_activity(erts_activity_t old_activity,
+ erts_activity_t new_activity,
+ int locked,
+ void (*prepare)(void *),
+ void (*resume)(void *),
+ void *arg,
+ char *file,
+ int line)
+{
+#ifdef ERTS_SMP
+#ifdef ERTS_ENABLE_LOCK_CHECK
+ erts_lc_activity_change_begin();
+#endif
+ switch (old_activity) {
+ case ERTS_ACTIVITY_UNDEFINED:
+ break;
+ case ERTS_ACTIVITY_WAIT:
+ erts_smp_atomic_dec(&erts_system_block_state.in_activity.wait);
+ if (locked) {
+ /* You are not allowed to leave activity waiting
+ * without supplying the possibility to block
+ * unlocked.
+ */
+ erts_set_activity_error(ERTS_ACT_ERR_LEAVE_WAIT_UNLOCKED,
+ file, line);
+ }
+ break;
+ case ERTS_ACTIVITY_GC:
+ erts_smp_atomic_dec(&erts_system_block_state.in_activity.gc);
+ break;
+ case ERTS_ACTIVITY_IO:
+ erts_smp_atomic_dec(&erts_system_block_state.in_activity.io);
+ break;
+ default:
+ erts_set_activity_error(ERTS_ACT_ERR_LEAVE_UNKNOWN_ACTIVITY,
+ file, line);
+ break;
+ }
+
+ /* We are not allowed to block when going to activity waiting... */
+ if (new_activity != ERTS_ACTIVITY_WAIT && erts_smp_pending_system_block())
+ erts_check_block(old_activity,new_activity,locked,prepare,resume,arg);
+
+ switch (new_activity) {
+ case ERTS_ACTIVITY_UNDEFINED:
+ break;
+ case ERTS_ACTIVITY_WAIT:
+ erts_smp_atomic_inc(&erts_system_block_state.in_activity.wait);
+ break;
+ case ERTS_ACTIVITY_GC:
+ erts_smp_atomic_inc(&erts_system_block_state.in_activity.gc);
+ break;
+ case ERTS_ACTIVITY_IO:
+ erts_smp_atomic_inc(&erts_system_block_state.in_activity.io);
+ break;
+ default:
+ erts_set_activity_error(ERTS_ACT_ERR_ENTER_UNKNOWN_ACTIVITY,
+ file, line);
+ break;
+ }
+
+ switch (new_activity) {
+ case ERTS_ACTIVITY_WAIT:
+ case ERTS_ACTIVITY_GC:
+ case ERTS_ACTIVITY_IO:
+ if (erts_smp_pending_system_block())
+ erts_note_activity_begin(new_activity);
+ break;
+ default:
+ break;
+ }
+
+#ifdef ERTS_ENABLE_LOCK_CHECK
+ erts_lc_activity_change_end();
+#endif
+
+#endif
+}
+
+#endif /* #if ERTS_GLB_INLINE_INCL_FUNC_DEF */
+
+#if defined(DEBUG) || defined(ERTS_ENABLE_LOCK_CHECK)
+#undef ERTS_REFC_DEBUG
+#define ERTS_REFC_DEBUG
+#endif
+
+typedef erts_smp_atomic_t erts_refc_t;
+
+ERTS_GLB_INLINE void erts_refc_init(erts_refc_t *refcp, long val);
+ERTS_GLB_INLINE void erts_refc_inc(erts_refc_t *refcp, long min_val);
+ERTS_GLB_INLINE long erts_refc_inctest(erts_refc_t *refcp, long min_val);
+ERTS_GLB_INLINE void erts_refc_dec(erts_refc_t *refcp, long min_val);
+ERTS_GLB_INLINE long erts_refc_dectest(erts_refc_t *refcp, long min_val);
+ERTS_GLB_INLINE void erts_refc_add(erts_refc_t *refcp, long diff, long min_val);
+ERTS_GLB_INLINE long erts_refc_read(erts_refc_t *refcp, long min_val);
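+
+/* Usage sketch: 'min_val' is the smallest refc that is still sane at the
+   call site; debug builds abort below it. The 'refc' field and the release
+   function are hypothetical:
+
+     erts_refc_init(&obj->refc, 1);               // creator holds one ref
+     erts_refc_inc(&obj->refc, 2);                // at least 2 after inc
+     if (erts_refc_dectest(&obj->refc, 0) == 0)
+         release_obj(obj);                        // last reference gone
+*/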
+
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+
+ERTS_GLB_INLINE void
+erts_refc_init(erts_refc_t *refcp, long val)
+{
+ erts_smp_atomic_init((erts_smp_atomic_t *) refcp, val);
+}
+
+ERTS_GLB_INLINE void
+erts_refc_inc(erts_refc_t *refcp, long min_val)
+{
+#ifdef ERTS_REFC_DEBUG
+ long val = erts_smp_atomic_inctest((erts_smp_atomic_t *) refcp);
+ if (val < min_val)
+ erl_exit(ERTS_ABORT_EXIT,
+ "erts_refc_inc(): Bad refc found (refc=%ld < %ld)!\n",
+ val, min_val);
+#else
+ erts_smp_atomic_inc((erts_smp_atomic_t *) refcp);
+#endif
+}
+
+ERTS_GLB_INLINE long
+erts_refc_inctest(erts_refc_t *refcp, long min_val)
+{
+ long val = erts_smp_atomic_inctest((erts_smp_atomic_t *) refcp);
+#ifdef ERTS_REFC_DEBUG
+ if (val < min_val)
+ erl_exit(ERTS_ABORT_EXIT,
+ "erts_refc_inctest(): Bad refc found (refc=%ld < %ld)!\n",
+ val, min_val);
+#endif
+ return val;
+}
+
+ERTS_GLB_INLINE void
+erts_refc_dec(erts_refc_t *refcp, long min_val)
+{
+#ifdef ERTS_REFC_DEBUG
+ long val = erts_smp_atomic_dectest((erts_smp_atomic_t *) refcp);
+ if (val < min_val)
+ erl_exit(ERTS_ABORT_EXIT,
+ "erts_refc_dec(): Bad refc found (refc=%ld < %ld)!\n",
+ val, min_val);
+#else
+ erts_smp_atomic_dec((erts_smp_atomic_t *) refcp);
+#endif
+}
+
+ERTS_GLB_INLINE long
+erts_refc_dectest(erts_refc_t *refcp, long min_val)
+{
+ long val = erts_smp_atomic_dectest((erts_smp_atomic_t *) refcp);
+#ifdef ERTS_REFC_DEBUG
+ if (val < min_val)
+ erl_exit(ERTS_ABORT_EXIT,
+ "erts_refc_dectest(): Bad refc found (refc=%ld < %ld)!\n",
+ val, min_val);
+#endif
+ return val;
+}
+
+ERTS_GLB_INLINE void
+erts_refc_add(erts_refc_t *refcp, long diff, long min_val)
+{
+#ifdef ERTS_REFC_DEBUG
+ long val = erts_smp_atomic_addtest((erts_smp_atomic_t *) refcp, diff);
+ if (val < min_val)
+ erl_exit(ERTS_ABORT_EXIT,
+ "erts_refc_add(%ld): Bad refc found (refc=%ld < %ld)!\n",
+ diff, val, min_val);
+#else
+ erts_smp_atomic_add((erts_smp_atomic_t *) refcp, diff);
+#endif
+}
+
+ERTS_GLB_INLINE long
+erts_refc_read(erts_refc_t *refcp, long min_val)
+{
+ long val = erts_smp_atomic_read((erts_smp_atomic_t *) refcp);
+#ifdef ERTS_REFC_DEBUG
+ if (val < min_val)
+ erl_exit(ERTS_ABORT_EXIT,
+ "erts_refc_read(): Bad refc found (refc=%ld < %ld)!\n",
+ val, min_val);
+#endif
+ return val;
+}
+
+#endif /* #if ERTS_GLB_INLINE_INCL_FUNC_DEF */
+
+#ifdef ERTS_ENABLE_KERNEL_POLL
+extern int erts_use_kernel_poll;
+#endif
+
+void elib_ensure_initialized(void);
+
+
+#if (defined(VXWORKS) || defined(_OSE_))
+/* NOTE! sys_calloc2 does not exist on other
+ platforms than VxWorks and OSE */
+EXTERN_FUNCTION(void*, sys_calloc2, (Uint, Uint));
+#endif /* VXWORKS || OSE */
+
+
+#define sys_memcpy(s1,s2,n) memcpy(s1,s2,n)
+#define sys_memmove(s1,s2,n) memmove(s1,s2,n)
+#define sys_memcmp(s1,s2,n) memcmp(s1,s2,n)
+#define sys_memset(s,c,n) memset(s,c,n)
+#define sys_memzero(s, n) memset(s,'\0',n)
+#define sys_strcmp(s1,s2) strcmp(s1,s2)
+#define sys_strncmp(s1,s2,n) strncmp(s1,s2,n)
+#define sys_strcpy(s1,s2) strcpy(s1,s2)
+#define sys_strncpy(s1,s2,n) strncpy(s1,s2,n)
+#define sys_strlen(s) strlen(s)
+
+/* define function symbols (needed in sys_drv_api) */
+#define sys_fp_alloc sys_alloc
+#define sys_fp_realloc sys_realloc
+#define sys_fp_free sys_free
+#define sys_fp_memcpy memcpy
+#define sys_fp_memmove memmove
+#define sys_fp_memcmp memcmp
+#define sys_fp_memset memset
+/* #define sys_fp_memzero elib_memzero */
+#define sys_fp_strcmp strcmp
+#define sys_fp_strncmp strncmp
+#define sys_fp_strcpy strcpy
+#define sys_fp_strncpy strncpy
+#define sys_fp_strlen strlen
+
+
+/* Return codes from the nb_read and nb_write functions */
+#define FD_READY 1
+#define FD_CONTINUE 2
+#define FD_ERROR 3
+
+
+
+/* Standard set of integer macros .. */
+
+/* Widen before shifting: shifting a plain int by 56 is undefined. */
+#define get_int64(s) (((Uint64)(((unsigned char*) (s))[0]) << 56) | \
+                      ((Uint64)(((unsigned char*) (s))[1]) << 48) | \
+                      ((Uint64)(((unsigned char*) (s))[2]) << 40) | \
+                      ((Uint64)(((unsigned char*) (s))[3]) << 32) | \
+                      ((Uint64)(((unsigned char*) (s))[4]) << 24) | \
+                      ((Uint64)(((unsigned char*) (s))[5]) << 16) | \
+                      ((Uint64)(((unsigned char*) (s))[6]) << 8)  | \
+                      ((Uint64)(((unsigned char*) (s))[7])))
+
+#define put_int64(i, s) do {((char*)(s))[0] = (char)((Sint64)(i) >> 56) & 0xff;\
+ ((char*)(s))[1] = (char)((Sint64)(i) >> 48) & 0xff;\
+ ((char*)(s))[2] = (char)((Sint64)(i) >> 40) & 0xff;\
+ ((char*)(s))[3] = (char)((Sint64)(i) >> 32) & 0xff;\
+ ((char*)(s))[4] = (char)((Sint64)(i) >> 24) & 0xff;\
+ ((char*)(s))[5] = (char)((Sint64)(i) >> 16) & 0xff;\
+ ((char*)(s))[6] = (char)((Sint64)(i) >> 8) & 0xff;\
+ ((char*)(s))[7] = (char)((Sint64)(i)) & 0xff;\
+ } while (0)
+
+#define get_int32(s) ((((unsigned char*) (s))[0] << 24) | \
+ (((unsigned char*) (s))[1] << 16) | \
+ (((unsigned char*) (s))[2] << 8) | \
+ (((unsigned char*) (s))[3]))
+
+#define put_int32(i, s) do {((char*)(s))[0] = (char)((i) >> 24) & 0xff; \
+ ((char*)(s))[1] = (char)((i) >> 16) & 0xff; \
+ ((char*)(s))[2] = (char)((i) >> 8) & 0xff; \
+ ((char*)(s))[3] = (char)(i) & 0xff;} \
+ while (0)
+
+#define get_int16(s) ((((unsigned char*) (s))[0] << 8) | \
+ (((unsigned char*) (s))[1]))
+
+
+#define put_int16(i, s) do {((char*)(s))[0] = (char)((i) >> 8) & 0xff; \
+ ((char*)(s))[1] = (char)(i) & 0xff;} \
+ while (0)
+
+#define get_int8(s) ((((unsigned char*) (s))[0] ))
+
+
+#define put_int8(i, s) do {((unsigned char*)(s))[0] = (i) & 0xff;} while (0)
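+
+/* Round-trip sketch for the integer macros above:
+
+     byte buf[4];
+     put_int32(0x12345678, buf);  // buf = 0x12,0x34,0x56,0x78 (big endian)
+     ASSERT(get_int32(buf) == 0x12345678);
+*/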
+
+/*
+ * Use DEBUGF as you would use printf, but use double parentheses:
+ *
+ * DEBUGF(("Error: %s\n", error));
+ *
+ * The output will appear in a special console.
+ */
+
+#ifdef DEBUG
+EXTERN_FUNCTION(void, erl_debug, (char* format, ...));
+EXTERN_FUNCTION(void, erl_bin_write, (unsigned char *, int, int));
+
+# define DEBUGF(x) erl_debug x
+#else
+# define DEBUGF(x)
+#endif
+
+
+#ifdef VXWORKS
+/* This includes redefines of malloc etc
+ this should be done after sys_alloc, etc, above */
+# include "reclaim.h"
+/*********************Malloc and friends************************
+ * There is a problem with the naming of malloc and friends,
+ * malloc is used throughout sys.c and the resolver to mean save_alloc,
+ * but it should actually mean either sys_alloc or sys_alloc2,
+ * so the definitions from reclaim_master.h are no
+ * good; I redefine the malloc family here, although it's quite
+ * ugly. Actually it would be preferable to use the
+ * names sys_alloc and so on throughout the offending code, but
+ * that will be saved as a later exercise...
+ * I also add a calloc of my own, to make the BSD resolver source happy.
+ ***************************************************************/
+/* Undefine malloc and friends */
+# ifdef malloc
+# undef malloc
+# endif
+# ifdef calloc
+# undef calloc
+# endif
+# ifdef realloc
+# undef realloc
+# endif
+# ifdef free
+# undef free
+# endif
+/* Redefine malloc and friends */
+# define malloc sys_alloc
+# define calloc sys_calloc
+# define realloc sys_realloc
+# define free sys_free
+
+#endif
+
+
+#ifdef __WIN32__
+
+void call_break_handler(void);
+char* last_error(void);
+char* win32_errorstr(int);
+
+
+#endif
+
+
+#endif
+
diff --git a/erts/emulator/beam/time.c b/erts/emulator/beam/time.c
new file mode 100644
index 0000000000..a07d6a5327
--- /dev/null
+++ b/erts/emulator/beam/time.c
@@ -0,0 +1,571 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 1996-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * TIMING WHEEL
+ *
+ * Timeouts kept in a wheel. A timeout is measured relative to the
+ * current slot (tiw_pos) in the wheel, and inserted at slot
+ * (tiw_pos + timeout) % TIW_SIZE. Each timeout also has a count
+ * equal to timeout/TIW_SIZE, which is needed since the time axis
+ * is wrapped around the wheel.
+ *
+ * Several slots may be processed in one operation. If the number of
+ * slots is greater than the wheel size, the wheel is only traversed
+ * once.
+ *
+ * The following example shows a time axis where there is one timeout
+ * at each "tick", and where 1, 2, 3 ... wheel slots are released in
+ * one operation. The notation "<x" means "release all items with
+ * counts less than x".
+ *
+ * Size of wheel: 4
+ *
+ * --|----|----|----|----|----|----|----|----|----|----|----|----|----
+ * 0.0 0.1 0.2 0.3 1.0 1.1 1.2 1.3 2.0 2.1 2.2 2.3 3.0
+ *
+ * 1 [ )
+ * <1 0.1 0.2 0.3 0.0 1.1 1.2 1.3 1.0 2.1 2.2 2.3 2.0
+ *
+ * 2 [ )
+ * <1 <1 0.2 0.3 0.0 0.1 1.2 1.3 1.0 1.1 2.2 2.3 2.0
+ *
+ * 3 [ )
+ * <1 <1 <1 0.3 0.0 0.1 0.2 1.3 1.0 1.1 1.2 2.3 2.0
+ *
+ * 4 [ )
+ * <1 <1 <1 <1 0.0 0.1 0.2 0.3 1.0 1.1 1.2 1.3 2.0
+ *
+ * 5 [ )
+ * <2 <1 <1 <1. 0.1 0.2 0.3 0.0 1.1 1.2 1.3 1.0
+ *
+ * 6 [ )
+ * <2 <2 <1 <1. 0.2 0.3 0.0 0.1 1.2 1.3 1.0
+ *
+ * 7 [ )
+ * <2 <2 <2 <1. 0.3 0.0 0.1 0.2 1.3 1.0
+ *
+ * 8 [ )
+ * <2 <2 <2 <2. 0.0 0.1 0.2 0.3 1.0
+ *
+ * 9 [ )
+ * <3 <2 <2 <2. 0.1 0.2 0.3 0.0
+ *
+ */
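+
+/*
+ * Worked insertion example (numbers are illustrative): with TIW_SIZE = 4,
+ * tiw_pos = 1 and a timeout of 6 ticks, the timer goes into slot
+ * (1 + 6) % 4 = 3 with count 6 / 4 = 1, i.e. it fires the second time
+ * slot 3 is swept.
+ */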
+
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+
+#include "sys.h"
+#include "erl_vm.h"
+#include "global.h"
+
+#ifdef ERTS_ENABLE_LOCK_CHECK
+#define ASSERT_NO_LOCKED_LOCKS erts_lc_check_exact(NULL, 0)
+#else
+#define ASSERT_NO_LOCKED_LOCKS
+#endif
+
+
+#if defined(ERTS_TIMER_THREAD) || 1
+/* I don't yet know why, but using a mutex instead of a spinlock
+ or spin-based rwlock avoids excessive delays at startup. */
+static erts_smp_rwmtx_t tiw_lock;
+#define tiw_read_lock() erts_smp_rwmtx_rlock(&tiw_lock)
+#define tiw_read_unlock() erts_smp_rwmtx_runlock(&tiw_lock)
+#define tiw_write_lock() erts_smp_rwmtx_rwlock(&tiw_lock)
+#define tiw_write_unlock() erts_smp_rwmtx_rwunlock(&tiw_lock)
+#define tiw_init_lock() erts_smp_rwmtx_init(&tiw_lock, "timer_wheel")
+#else
+static erts_smp_rwlock_t tiw_lock;
+#define tiw_read_lock() erts_smp_read_lock(&tiw_lock)
+#define tiw_read_unlock() erts_smp_read_unlock(&tiw_lock)
+#define tiw_write_lock() erts_smp_write_lock(&tiw_lock)
+#define tiw_write_unlock() erts_smp_write_unlock(&tiw_lock)
+#define tiw_init_lock() erts_smp_rwlock_init(&tiw_lock, "timer_wheel")
+#endif
+
+/* BEGIN tiw_lock protected variables
+**
+** The individual timer cells in tiw are also protected by the same mutex.
+*/
+
+#ifdef SMALL_MEMORY
+#define TIW_SIZE 8192
+#else
+#define TIW_SIZE 65536 /* timing wheel size (should be a power of 2) */
+#endif
+static ErlTimer** tiw; /* the timing wheel, allocated in init_time() */
+static Uint tiw_pos; /* current position in wheel */
+static Uint tiw_nto; /* number of timeouts in wheel */
+
+/* END tiw_lock protected variables */
+
+/* Actual interval time chosen by sys_init_time() */
+static int itime; /* Constant after init */
+
+#if defined(ERTS_TIMER_THREAD)
+static SysTimeval time_start; /* start of current time interval */
+static long ticks_end; /* time_start+ticks_end == time_wakeup */
+static long ticks_latest; /* delta from time_start at latest time update*/
+
+static ERTS_INLINE long time_gettimeofday(SysTimeval *now)
+{
+ long elapsed;
+
+ erts_get_timeval(now);
+ now->tv_usec = 1000 * (now->tv_usec / 1000); /* ms resolution */
+ elapsed = (1000 * (now->tv_sec - time_start.tv_sec) +
+ (now->tv_usec - time_start.tv_usec) / 1000);
+ // elapsed /= CLOCK_RESOLUTION;
+ return elapsed;
+}
+
+static long do_time_update(void)
+{
+ SysTimeval now;
+ long elapsed;
+
+ elapsed = time_gettimeofday(&now);
+ ticks_latest = elapsed;
+ return elapsed;
+}
+
+static ERTS_INLINE long do_time_read(void)
+{
+ return ticks_latest;
+}
+
+static long do_time_reset(void)
+{
+ SysTimeval now;
+ long elapsed;
+
+ elapsed = time_gettimeofday(&now);
+ time_start = now;
+ ticks_end = LONG_MAX;
+ ticks_latest = 0;
+ return elapsed;
+}
+
+static ERTS_INLINE void do_time_init(void)
+{
+ (void)do_time_reset();
+}
+
+#else
+erts_smp_atomic_t do_time; /* set at clock interrupt */
+static ERTS_INLINE long do_time_read(void) { return erts_smp_atomic_read(&do_time); }
+static ERTS_INLINE long do_time_update(void) { return do_time_read(); }
+static ERTS_INLINE void do_time_init(void) { erts_smp_atomic_init(&do_time, 0L); }
+#endif
+
+/* get the time (in units of itime) to the next timeout,
+ or -1 if there are no timeouts */
+
+static int next_time_internal(void) /* PRE: tiw_lock taken by caller */
+{
+ int i, tm, nto;
+ unsigned int min;
+ ErlTimer* p;
+ long dt;
+
+ if (tiw_nto == 0)
+ return -1; /* no timeouts in wheel */
+
+ /* start going through wheel to find next timeout */
+ tm = nto = 0;
+ min = (unsigned int) -1; /* max unsigned int */
+ i = tiw_pos;
+ do {
+ p = tiw[i];
+ while (p != NULL) {
+ nto++;
+ if (p->count == 0) {
+ /* found next timeout */
+ dt = do_time_read();
+ return ((tm >= dt) ? (tm - dt) : 0);
+ } else {
+ /* keep shortest time in 'min' */
+ if (tm + p->count*TIW_SIZE < min)
+ min = tm + p->count*TIW_SIZE;
+ }
+ p = p->next;
+ }
+ /* when we have found all timeouts the shortest time will be in min */
+ if (nto == tiw_nto) break;
+ tm++;
+ i = (i + 1) % TIW_SIZE;
+ } while (i != tiw_pos);
+ dt = do_time_read();
+ return ((min >= dt) ? (min - dt) : 0);
+}
+
+#if !defined(ERTS_TIMER_THREAD)
+/* Private export to erl_time_sup.c */
+int next_time(void)
+{
+ int ret;
+
+ tiw_write_lock();
+ (void)do_time_update();
+ ret = next_time_internal();
+ tiw_write_unlock();
+ return ret;
+}
+#endif
+
+static ERTS_INLINE void bump_timer_internal(long dt) /* PRE: tiw_lock is write-locked */
+{
+ Uint keep_pos;
+ Uint count;
+ ErlTimer *p, **prev, *timeout_head, **timeout_tail;
+ Uint dtime = (unsigned long)dt;
+
+ /* no need to bump the position if there aren't any timeouts */
+ if (tiw_nto == 0) {
+ tiw_write_unlock();
+ return;
+ }
+
+ /* if do_time > TIW_SIZE we want to go around just once */
+ count = (Uint)(dtime / TIW_SIZE) + 1;
+ keep_pos = (tiw_pos + dtime) % TIW_SIZE;
+ if (dtime > TIW_SIZE) dtime = TIW_SIZE;
+
+ timeout_head = NULL;
+ timeout_tail = &timeout_head;
+ while (dtime > 0) {
+	/* this is to decrease the counters by the right amount */
+	/* when dtime >= TIW_SIZE */
+ if (tiw_pos == keep_pos) count--;
+ prev = &tiw[tiw_pos];
+ while ((p = *prev) != NULL) {
+ if (p->count < count) { /* we have a timeout */
+ *prev = p->next; /* Remove from list */
+ tiw_nto--;
+ p->next = NULL;
+ p->active = 0; /* Make sure cancel callback
+ isn't called */
+ *timeout_tail = p; /* Insert in timeout queue */
+ timeout_tail = &p->next;
+ }
+ else {
+ /* no timeout, just decrease counter */
+ p->count -= count;
+ prev = &p->next;
+ }
+ }
+ tiw_pos = (tiw_pos + 1) % TIW_SIZE;
+ dtime--;
+ }
+ tiw_pos = keep_pos;
+
+ tiw_write_unlock();
+
+ /* Call timedout timers callbacks */
+ while (timeout_head) {
+ p = timeout_head;
+ timeout_head = p->next;
+	/* Here comes hairy use of the timer fields!
+	 * They are reset without holding the lock.
+	 * It is assumed that no code but this will
+	 * access any field until the ->timeout
+	 * callback is called.
+	 */
+ p->next = NULL;
+ p->slot = 0;
+ (*p->timeout)(p->arg);
+ }
+}
+
+#if defined(ERTS_TIMER_THREAD)
+static void timer_thread_bump_timer(void)
+{
+ tiw_write_lock();
+ bump_timer_internal(do_time_reset());
+}
+#else
+void bump_timer(long dt) /* dt is value from do_time */
+{
+ tiw_write_lock();
+ bump_timer_internal(dt);
+}
+#endif
+
+Uint
+erts_timer_wheel_memory_size(void)
+{
+ return (Uint) TIW_SIZE * sizeof(ErlTimer*);
+}
+
+#if defined(ERTS_TIMER_THREAD)
+static struct erts_iwait *timer_thread_iwait;
+
+static int timer_thread_setup_delay(SysTimeval *rem_time)
+{
+ long elapsed;
+ int ticks;
+
+ tiw_write_lock();
+ elapsed = do_time_update();
+ ticks = next_time_internal();
+ if (ticks == -1) /* timer queue empty */
+ ticks = 100*1000*1000;
+ if (elapsed > ticks)
+ elapsed = ticks;
+ ticks -= elapsed;
+ //ticks *= CLOCK_RESOLUTION;
+ rem_time->tv_sec = ticks / 1000;
+ rem_time->tv_usec = 1000 * (ticks % 1000);
+ ticks_end = ticks;
+ tiw_write_unlock();
+ return ticks;
+}
+
+static void *timer_thread_start(void *ignore)
+{
+ SysTimeval delay;
+
+#ifdef ERTS_ENABLE_LOCK_CHECK
+ erts_lc_set_thread_name("timer");
+#endif
+ erts_register_blockable_thread();
+
+ for(;;) {
+ if (timer_thread_setup_delay(&delay)) {
+ erts_smp_activity_begin(ERTS_ACTIVITY_WAIT, NULL, NULL, NULL);
+ ASSERT_NO_LOCKED_LOCKS;
+ erts_iwait_wait(timer_thread_iwait, &delay);
+ ASSERT_NO_LOCKED_LOCKS;
+ erts_smp_activity_end(ERTS_ACTIVITY_WAIT, NULL, NULL, NULL);
+ }
+ else
+ erts_smp_chk_system_block(NULL, NULL, NULL);
+ timer_thread_bump_timer();
+ ASSERT_NO_LOCKED_LOCKS;
+ }
+ /*NOTREACHED*/
+ return NULL;
+}
+
+static ERTS_INLINE void timer_thread_post_insert(Uint ticks)
+{
+ if ((Sint)ticks < ticks_end)
+ erts_iwait_interrupt(timer_thread_iwait);
+}
+
+static void timer_thread_init(void)
+{
+ erts_thr_opts_t opts = ERTS_THR_OPTS_DEFAULT_INITER;
+ erts_tid_t tid;
+
+    opts.detached = 1; /* 'opts' is a struct value, not a pointer */
+
+ timer_thread_iwait = erts_iwait_init();
+ erts_thr_create(&tid, timer_thread_start, NULL, &opts);
+}
+
+#else
+static ERTS_INLINE void timer_thread_post_insert(Uint ticks) { }
+static ERTS_INLINE void timer_thread_init(void) { }
+#endif
+
+/* this routine allocates the timing wheel, clears all slots,
+   and sets the time queue as empty */
+void
+init_time(void)
+{
+ int i;
+
+ /* system dependent init; must be done before do_time_init()
+ if timer thread is enabled */
+ itime = erts_init_time_sup();
+
+ tiw_init_lock();
+
+ tiw = (ErlTimer**) erts_alloc(ERTS_ALC_T_TIMER_WHEEL,
+ TIW_SIZE * sizeof(ErlTimer*));
+ for(i = 0; i < TIW_SIZE; i++)
+ tiw[i] = NULL;
+ do_time_init();
+ tiw_pos = tiw_nto = 0;
+
+ timer_thread_init();
+}
+
+/*
+** Insert a timer into the time queue, with a timeout 't'
+*/
+static void
+insert_timer(ErlTimer* p, Uint t)
+{
+ Uint tm;
+ Uint64 ticks;
+
+    /* The current slot (tiw_pos) in the timing wheel is the next slot
+     * to be processed. Hence no extra time tick is needed.
+ *
+ * (x + y - 1)/y is precisely the "number of bins" formula.
+ */
+ ticks = (t + itime - 1) / itime;
+
+ /*
+ * Ticks must be a Uint64, or the addition may overflow here,
+ * resulting in an incorrect value for p->count below.
+ */
+ ticks += do_time_update(); /* Add backlog of unprocessed time */
+
+ /* calculate slot */
+ tm = (ticks + tiw_pos) % TIW_SIZE;
+ p->slot = (Uint) tm;
+ p->count = (Uint) (ticks / TIW_SIZE);
+
+ /* insert at head of list at slot */
+ p->next = tiw[tm];
+ tiw[tm] = p;
+ tiw_nto++;
+
+ timer_thread_post_insert(ticks);
+}
+
+void
+erl_set_timer(ErlTimer* p, ErlTimeoutProc timeout, ErlCancelProc cancel,
+ void* arg, Uint t)
+{
+ erts_deliver_time();
+ tiw_write_lock();
+ if (p->active) { /* XXX assert ? */
+ tiw_write_unlock();
+ return;
+ }
+ p->timeout = timeout;
+ p->cancel = cancel;
+ p->arg = arg;
+ p->active = 1;
+ insert_timer(p, t);
+ tiw_write_unlock();
+#if defined(ERTS_SMP) && !defined(ERTS_TIMER_THREAD)
+ if (t <= (Uint) LONG_MAX)
+ erts_sys_schedule_interrupt_timed(1, (long) t);
+#endif
+}
+
+void
+erl_cancel_timer(ErlTimer* p)
+{
+ ErlTimer *tp;
+ ErlTimer **prev;
+
+ tiw_write_lock();
+ if (!p->active) { /* allow repeated cancel (drivers) */
+ tiw_write_unlock();
+ return;
+ }
+ /* find p in linked list at slot p->slot and remove it */
+ prev = &tiw[p->slot];
+ while ((tp = *prev) != NULL) {
+ if (tp == p) {
+ *prev = p->next; /* Remove from list */
+ tiw_nto--;
+ p->next = NULL;
+ p->slot = p->count = 0;
+ p->active = 0;
+ if (p->cancel != NULL) {
+ tiw_write_unlock();
+ (*p->cancel)(p->arg);
+ } else {
+ tiw_write_unlock();
+ }
+ return;
+ } else {
+ prev = &tp->next;
+ }
+ }
+ tiw_write_unlock();
+}
+
+/*
+ Returns the amount of time left in ms until the timer 'p' is triggered.
+ 0 is returned if 'p' isn't active.
+  0 is also returned if the timer is overdue (i.e., it should already
+  have triggered).
+*/
+Uint
+time_left(ErlTimer *p)
+{
+ Uint left;
+ long dt;
+
+ tiw_read_lock();
+
+ if (!p->active) {
+ tiw_read_unlock();
+ return 0;
+ }
+
+ if (p->slot < tiw_pos)
+ left = (p->count + 1) * TIW_SIZE + p->slot - tiw_pos;
+ else
+ left = p->count * TIW_SIZE + p->slot - tiw_pos;
+ dt = do_time_read();
+ if (left < dt)
+ left = 0;
+ else
+ left -= dt;
+
+ tiw_read_unlock();
+
+ return left * itime;
+}
+
+#ifdef DEBUG
+
+void p_slpq(void)
+{
+ int i;
+ ErlTimer* p;
+
+ tiw_read_lock();
+
+ /* print the whole wheel, starting at the current position */
+    erts_printf("\ntiw_pos = %d tiw_nto = %d\n", tiw_pos, tiw_nto);
+ i = tiw_pos;
+ if (tiw[i] != NULL) {
+ erts_printf("%d:\n", i);
+ for(p = tiw[i]; p != NULL; p = p->next) {
+ erts_printf(" (count %d, slot %d)\n",
+ p->count, p->slot);
+ }
+ }
+ for(i = (i+1)%TIW_SIZE; i != tiw_pos; i = (i+1)%TIW_SIZE) {
+ if (tiw[i] != NULL) {
+ erts_printf("%d:\n", i);
+ for(p = tiw[i]; p != NULL; p = p->next) {
+ erts_printf(" (count %d, slot %d)\n",
+ p->count, p->slot);
+ }
+ }
+ }
+
+ tiw_read_unlock();
+}
+
+#endif /* DEBUG */
diff --git a/erts/emulator/beam/utils.c b/erts/emulator/beam/utils.c
new file mode 100644
index 0000000000..be442fa480
--- /dev/null
+++ b/erts/emulator/beam/utils.c
@@ -0,0 +1,4053 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 1996-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+
+#define ERTS_DO_INCL_GLB_INLINE_FUNC_DEF
+
+#include "sys.h"
+#include "erl_vm.h"
+#include "global.h"
+#include "erl_process.h"
+#include "big.h"
+#include "bif.h"
+#include "erl_binary.h"
+#include "erl_bits.h"
+#include "packet_parser.h"
+#define ERTS_WANT_DB_INTERNAL__
+#include "erl_db.h"
+#include "erl_threads.h"
+#include "register.h"
+#include "dist.h"
+#include "erl_printf.h"
+#include "erl_threads.h"
+#include "erl_smp.h"
+#include "erl_time.h"
+
+#undef M_TRIM_THRESHOLD
+#undef M_TOP_PAD
+#undef M_MMAP_THRESHOLD
+#undef M_MMAP_MAX
+
+#if !defined(ELIB_ALLOC_IS_CLIB) && defined(__GLIBC__) && defined(HAVE_MALLOC_H)
+#include <malloc.h>
+#endif
+
+#if defined(ELIB_ALLOC_IS_CLIB) || !defined(HAVE_MALLOPT)
+#undef HAVE_MALLOPT
+#define HAVE_MALLOPT 0
+#endif
+
+/* profile_scheduler mini message queue */
+
+#ifdef ERTS_TIMER_THREAD
+/* A timer thread is not welcome with this lock violation workaround.
+ * - Björn-Egil
+ */
+#error Timer thread may not be enabled due to lock violation.
+#endif
+
+typedef struct {
+ Uint scheduler_id;
+ Uint no_schedulers;
+ Uint Ms;
+ Uint s;
+ Uint us;
+ Eterm state;
+} profile_sched_msg;
+
+typedef struct {
+ profile_sched_msg msg[2];
+ Uint n;
+} profile_sched_msg_q;
+
+#ifdef ERTS_SMP
+
+static void
+dispatch_profile_msg_q(profile_sched_msg_q *psmq)
+{
+ int i = 0;
+ profile_sched_msg *msg = NULL;
+ ASSERT(psmq != NULL);
+ for (i = 0; i < psmq->n; i++) {
+ msg = &(psmq->msg[i]);
+ profile_scheduler_q(make_small(msg->scheduler_id), msg->state, am_undefined, msg->Ms, msg->s, msg->us);
+ }
+}
+
+#endif
+
+Eterm*
+erts_heap_alloc(Process* p, Uint need)
+{
+ ErlHeapFragment* bp;
+ Eterm* htop;
+ Uint n;
+#if defined(DEBUG) || defined(CHECK_FOR_HOLES)
+ Uint i;
+#endif
+
+ n = need;
+#ifdef DEBUG
+ n++;
+#endif
+ bp = (ErlHeapFragment*)
+ ERTS_HEAP_ALLOC(ERTS_ALC_T_HEAP_FRAG,
+ sizeof(ErlHeapFragment) + ((n-1)*sizeof(Eterm)));
+
+#ifdef DEBUG
+ n--;
+#endif
+
+#if defined(DEBUG)
+ for (i = 0; i <= n; i++) {
+ bp->mem[i] = ERTS_HOLE_MARKER;
+ }
+#elif defined(CHECK_FOR_HOLES)
+ for (i = 0; i < n; i++) {
+ bp->mem[i] = ERTS_HOLE_MARKER;
+ }
+#endif
+
+    /*
+     * When we have created a heap fragment, we are no longer allowed
+     * to store anything more on the heap. Therefore, plug the unused
+     * words between the heap top and the heap limit with a dummy
+     * bignum header, so that anything walking the heap skips them.
+     */
+ htop = HEAP_TOP(p);
+ if (htop < HEAP_LIMIT(p)) {
+ *htop = make_pos_bignum_header(HEAP_LIMIT(p)-htop-1);
+ HEAP_TOP(p) = HEAP_LIMIT(p);
+ }
+
+ bp->next = MBUF(p);
+ MBUF(p) = bp;
+ bp->size = n;
+ MBUF_SIZE(p) += n;
+ bp->off_heap.mso = NULL;
+#ifndef HYBRID /* FIND ME! */
+ bp->off_heap.funs = NULL;
+#endif
+ bp->off_heap.externals = NULL;
+ bp->off_heap.overhead = 0;
+
+ return bp->mem;
+}
+
+void erts_arith_shrink(Process* p, Eterm* hp)
+{
+#if defined(CHECK_FOR_HOLES)
+ ErlHeapFragment* hf;
+
+ /*
+ * We must find the heap fragment that hp points into.
+ * If we are unlucky, we might have to search through
+ * a large part of the list. We'll hope that will not
+ * happen too often.
+ */
+ for (hf = MBUF(p); hf != 0; hf = hf->next) {
+ if (hp - hf->mem < (unsigned long)hf->size) {
+ /*
+	     * We are not allowed to change hf->size (because the
+ * size must be correct when deallocating). Therefore,
+ * clear out the uninitialized part of the heap fragment.
+ */
+ Eterm* to = hf->mem + hf->size;
+ while (hp < to) {
+ *hp++ = NIL;
+ }
+ break;
+ }
+ }
+#endif
+}
+
+#ifdef CHECK_FOR_HOLES
+Eterm*
+erts_set_hole_marker(Eterm* ptr, Uint sz)
+{
+ Eterm* p = ptr;
+ int i;
+
+ for (i = 0; i < sz; i++) {
+ *p++ = ERTS_HOLE_MARKER;
+ }
+ return ptr;
+}
+#endif
+
+/*
+ * Helper function for the ESTACK macros defined in global.h.
+ */
+void
+erl_grow_stack(Eterm** start, Eterm** sp, Eterm** end)
+{
+ Uint old_size = (*end - *start);
+ Uint new_size = old_size * 2;
+ Uint sp_offs = *sp - *start;
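+    /* The initial DEF_ESTACK_SIZE stack declared by DECLARE_ESTACK is
+     * assumed to live on the C stack, so it must not be realloc'ed;
+     * only a stack that has already been grown (and is therefore
+     * heap-allocated) may be. */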
+ if (new_size > 2 * DEF_ESTACK_SIZE) {
+ *start = erts_realloc(ERTS_ALC_T_ESTACK, (void *) *start, new_size*sizeof(Eterm));
+ } else {
+ Eterm* new_ptr = erts_alloc(ERTS_ALC_T_ESTACK, new_size*sizeof(Eterm));
+ sys_memcpy(new_ptr, *start, old_size*sizeof(Eterm));
+ *start = new_ptr;
+ }
+ *end = *start + new_size;
+ *sp = *start + sp_offs;
+}
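+
+/* A minimal usage sketch of the ESTACK interface (illustration only,
+ * not part of the original source; the guard macro below is ours and
+ * never defined). The stack starts out DEF_ESTACK_SIZE entries deep
+ * and erl_grow_stack() doubles it whenever a push overflows. */
+#ifdef ERTS_ESTACK_USAGE_SKETCH
+static Eterm estack_pop_all_sketch(Eterm first)
+{
+    Eterm last = first;
+    DECLARE_ESTACK(s);		/* small stack in automatic storage */
+
+    ESTACK_PUSH(s, first);
+    while (!ESTACK_ISEMPTY(s)) {
+	last = ESTACK_POP(s);	/* pops in LIFO order */
+    }
+    DESTROY_ESTACK(s);		/* frees the stack if it was grown */
+    return last;
+}
+#endif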
+
+/* CTYPE macros */
+
+#define LATIN1
+
+#define IS_DIGIT(c) ((c) >= '0' && (c) <= '9')
+#ifdef LATIN1
+#define IS_LOWER(c) (((c) >= 'a' && (c) <= 'z') \
+ || ((c) >= 128+95 && (c) <= 255 && (c) != 247))
+#define IS_UPPER(c) (((c) >= 'A' && (c) <= 'Z') \
+ || ((c) >= 128+64 && (c) <= 128+94 && (c) != 247-32))
+#else
+#define IS_LOWER(c) ((c) >= 'a' && (c) <= 'z')
+#define IS_UPPER(c) ((c) >= 'A' && (c) <= 'Z')
+#endif
+
+#define IS_ALNUM(c) (IS_DIGIT(c) || IS_LOWER(c) || IS_UPPER(c))
+
+/* We don't include 160 (non-breaking space). */
+#define IS_SPACE(c) ((c) == ' ' || (c) == '\n' || (c) == '\t' || (c) == '\r')
+
+#ifdef LATIN1
+#define IS_CNTRL(c) ((c) < ' ' || (c) == 127 \
+ || ((c) >= 128 && (c) < 128+32))
+#else
+/* Treat all non-ASCII as control characters */
+#define IS_CNTRL(c) ((c) < ' ' || (c) >= 127)
+#endif
+
+#define IS_PRINT(c) (!IS_CNTRL(c))
+
+/*
+ * Calculate length of a list.
+ * Returns -1 if not a proper list (i.e. not terminated with NIL)
+ */
+int
+list_length(Eterm list)
+{
+ int i = 0;
+
+ while(is_list(list)) {
+ i++;
+ list = CDR(list_val(list));
+ }
+ if (is_not_nil(list)) {
+ return -1;
+ }
+ return i;
+}
+
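+/* Returns the number of bits needed to represent n, i.e. the position
+   of n's most significant set bit; e.g. erts_fit_in_bits(5) == 3 and
+   erts_fit_in_bits(0) == 0. */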
+Uint erts_fit_in_bits(Uint n)
+{
+ Uint i;
+
+ i = 0;
+ while (n > 0) {
+ i++;
+ n >>= 1;
+ }
+ return i;
+}
+
+int
+erts_print(int to, void *arg, char *format, ...)
+{
+ int res;
+ va_list arg_list;
+ va_start(arg_list, format);
+
+ if (to < ERTS_PRINT_MIN)
+ res = -EINVAL;
+ else {
+ switch (to) {
+ case ERTS_PRINT_STDOUT:
+ res = erts_vprintf(format, arg_list);
+ break;
+ case ERTS_PRINT_STDERR:
+ res = erts_vfprintf(stderr, format, arg_list);
+ break;
+ case ERTS_PRINT_FILE:
+ res = erts_vfprintf((FILE *) arg, format, arg_list);
+ break;
+ case ERTS_PRINT_SBUF:
+ res = erts_vsprintf((char *) arg, format, arg_list);
+ break;
+ case ERTS_PRINT_SNBUF:
+ res = erts_vsnprintf(((erts_print_sn_buf *) arg)->buf,
+ ((erts_print_sn_buf *) arg)->size,
+ format,
+ arg_list);
+ break;
+ case ERTS_PRINT_DSBUF:
+ res = erts_vdsprintf((erts_dsprintf_buf_t *) arg, format, arg_list);
+ break;
+ case ERTS_PRINT_INVALID:
+ res = -EINVAL;
+ break;
+ default:
+ res = erts_vfdprintf((int) to, format, arg_list);
+ break;
+ }
+ }
+
+ va_end(arg_list);
+ return res;
+}
+
+int
+erts_putc(int to, void *arg, char c)
+{
+ return erts_print(to, arg, "%c", c);
+}
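+
+/* For example (illustration only), erts_print(ERTS_PRINT_STDERR, NULL,
+   "error: %d\n", 17) formats to stderr; the 'arg' parameter is unused
+   for the stdout and stderr cases. */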
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\
+ * Some Erlang term building utility functions (to be used when performance *
+ * isn't critical). *
+ * *
+ * Add more functions like these here (and function prototypes in global.h) *
+ * when needed. *
+ * *
+\* */
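+
+/* A sketch of the typical two-pass calling pattern for these builders
+ * (illustration only, not part of the original source; the guard macro
+ * is ours and never defined): first call with only 'szp' to accumulate
+ * the heap space needed, allocate, then call again with only 'hpp' to
+ * build the term on the allocated heap. */
+#ifdef ERTS_BLD_USAGE_SKETCH
+static Eterm bld_ok_pair_sketch(Process *p, Eterm value)
+{
+    Uint sz = 0;
+    Eterm *hp;
+
+    /* Pass 1: hpp == NULL, szp != NULL; only the size is computed. */
+    (void) erts_bld_tuple(NULL, &sz, 2, am_ok, value);
+    hp = HAlloc(p, sz);
+    /* Pass 2: hpp != NULL, szp == NULL; the term is actually built. */
+    return erts_bld_tuple(&hp, NULL, 2, am_ok, value);
+}
+#endif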
+
+Eterm
+erts_bld_atom(Uint **hpp, Uint *szp, char *str)
+{
+ if (hpp)
+ return am_atom_put(str, sys_strlen(str));
+ else
+ return THE_NON_VALUE;
+}
+
+Eterm
+erts_bld_uint(Uint **hpp, Uint *szp, Uint ui)
+{
+ Eterm res = THE_NON_VALUE;
+ if (IS_USMALL(0, ui)) {
+ if (hpp)
+ res = make_small(ui);
+ }
+ else {
+ if (szp)
+ *szp += BIG_UINT_HEAP_SIZE;
+ if (hpp) {
+ res = uint_to_big(ui, *hpp);
+ *hpp += BIG_UINT_HEAP_SIZE;
+ }
+ }
+ return res;
+}
+
+Eterm
+erts_bld_uint64(Uint **hpp, Uint *szp, Uint64 ui64)
+{
+ Eterm res = THE_NON_VALUE;
+ if (IS_USMALL(0, ui64)) {
+ if (hpp)
+ res = make_small((Uint) ui64);
+ }
+ else {
+ if (szp)
+	    *szp += ERTS_UINT64_HEAP_SIZE(ui64); /* accumulate, like the other builders */
+ if (hpp)
+ res = erts_uint64_to_big(ui64, hpp);
+ }
+ return res;
+}
+
+Eterm
+erts_bld_sint64(Uint **hpp, Uint *szp, Sint64 si64)
+{
+ Eterm res = THE_NON_VALUE;
+ if (IS_SSMALL(si64)) {
+ if (hpp)
+ res = make_small((Sint) si64);
+ }
+ else {
+ if (szp)
+	    *szp += ERTS_SINT64_HEAP_SIZE(si64); /* accumulate, like the other builders */
+ if (hpp)
+ res = erts_sint64_to_big(si64, hpp);
+ }
+ return res;
+}
+
+
+Eterm
+erts_bld_cons(Uint **hpp, Uint *szp, Eterm car, Eterm cdr)
+{
+ Eterm res = THE_NON_VALUE;
+ if (szp)
+ *szp += 2;
+ if (hpp) {
+ res = CONS(*hpp, car, cdr);
+ *hpp += 2;
+ }
+ return res;
+}
+
+Eterm
+erts_bld_tuple(Uint **hpp, Uint *szp, Uint arity, ...)
+{
+ Eterm res = THE_NON_VALUE;
+
+ ASSERT(arity < (((Uint)1) << (sizeof(Uint)*8 - _HEADER_ARITY_OFFS)));
+
+ if (szp)
+ *szp += arity + 1;
+ if (hpp) {
+ res = make_tuple(*hpp);
+ *((*hpp)++) = make_arityval(arity);
+
+ if (arity > 0) {
+ Uint i;
+ va_list argp;
+
+ va_start(argp, arity);
+ for (i = 0; i < arity; i++) {
+ *((*hpp)++) = va_arg(argp, Eterm);
+ }
+ va_end(argp);
+ }
+ }
+ return res;
+}
+
+
+Eterm erts_bld_tuplev(Uint **hpp, Uint *szp, Uint arity, Eterm terms[])
+{
+ Eterm res = THE_NON_VALUE;
+ /*
+ * Note callers expect that 'terms' is *not* accessed if hpp == NULL.
+ */
+
+ ASSERT(arity < (((Uint)1) << (sizeof(Uint)*8 - _HEADER_ARITY_OFFS)));
+
+ if (szp)
+ *szp += arity + 1;
+ if (hpp) {
+
+ res = make_tuple(*hpp);
+ *((*hpp)++) = make_arityval(arity);
+
+ if (arity > 0) {
+ Uint i;
+ for (i = 0; i < arity; i++)
+ *((*hpp)++) = terms[i];
+ }
+ }
+ return res;
+}
+
+Eterm
+erts_bld_string_n(Uint **hpp, Uint *szp, const char *str, Sint len)
+{
+ Eterm res = THE_NON_VALUE;
+ Sint i = len;
+ if (szp)
+ *szp += len*2;
+ if (hpp) {
+ res = NIL;
+ while (--i >= 0) {
+ res = CONS(*hpp, make_small(str[i]), res);
+ *hpp += 2;
+ }
+ }
+ return res;
+}
+
+Eterm
+erts_bld_list(Uint **hpp, Uint *szp, Sint length, Eterm terms[])
+{
+ Eterm list = THE_NON_VALUE;
+ if (szp)
+ *szp += 2*length;
+ if (hpp) {
+ Sint i = length;
+ list = NIL;
+
+ while (--i >= 0) {
+ list = CONS(*hpp, terms[i], list);
+ *hpp += 2;
+ }
+ }
+ return list;
+}
+
+Eterm
+erts_bld_2tup_list(Uint **hpp, Uint *szp,
+ Sint length, Eterm terms1[], Uint terms2[])
+{
+ Eterm res = THE_NON_VALUE;
+ if (szp)
+ *szp += 5*length;
+ if (hpp) {
+ Sint i = length;
+ res = NIL;
+
+ while (--i >= 0) {
+ res = CONS(*hpp+3, TUPLE2(*hpp, terms1[i], terms2[i]), res);
+ *hpp += 5;
+ }
+ }
+ return res;
+}
+
+Eterm
+erts_bld_atom_uint_2tup_list(Uint **hpp, Uint *szp,
+ Sint length, Eterm atoms[], Uint uints[])
+{
+ Sint i;
+ Eterm res = THE_NON_VALUE;
+ if (szp) {
+ *szp += 5*length;
+ i = length;
+ while (--i >= 0) {
+ if (!IS_USMALL(0, uints[i]))
+ *szp += BIG_UINT_HEAP_SIZE;
+ }
+ }
+ if (hpp) {
+ i = length;
+ res = NIL;
+
+ while (--i >= 0) {
+ Eterm ui;
+
+ if (IS_USMALL(0, uints[i]))
+ ui = make_small(uints[i]);
+ else {
+ ui = uint_to_big(uints[i], *hpp);
+ *hpp += BIG_UINT_HEAP_SIZE;
+ }
+
+ res = CONS(*hpp+3, TUPLE2(*hpp, atoms[i], ui), res);
+ *hpp += 5;
+ }
+ }
+ return res;
+}
+
+Eterm
+erts_bld_atom_2uint_3tup_list(Uint **hpp, Uint *szp, Sint length,
+ Eterm atoms[], Uint uints1[], Uint uints2[])
+{
+ Sint i;
+ Eterm res = THE_NON_VALUE;
+ if (szp) {
+ *szp += 6*length;
+ i = length;
+ while (--i >= 0) {
+ if (!IS_USMALL(0, uints1[i]))
+ *szp += BIG_UINT_HEAP_SIZE;
+ if (!IS_USMALL(0, uints2[i]))
+ *szp += BIG_UINT_HEAP_SIZE;
+ }
+ }
+ if (hpp) {
+ i = length;
+ res = NIL;
+
+ while (--i >= 0) {
+ Eterm ui1;
+ Eterm ui2;
+
+ if (IS_USMALL(0, uints1[i]))
+ ui1 = make_small(uints1[i]);
+ else {
+ ui1 = uint_to_big(uints1[i], *hpp);
+ *hpp += BIG_UINT_HEAP_SIZE;
+ }
+
+ if (IS_USMALL(0, uints2[i]))
+ ui2 = make_small(uints2[i]);
+ else {
+ ui2 = uint_to_big(uints2[i], *hpp);
+ *hpp += BIG_UINT_HEAP_SIZE;
+ }
+
+ res = CONS(*hpp+4, TUPLE3(*hpp, atoms[i], ui1, ui2), res);
+ *hpp += 6;
+ }
+ }
+ return res;
+}
+
+/* *\
+ * *
+\* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/* make a hash index from an erlang term */
+
+/*
+** There are three hash functions.
+** make_broken_hash: the one used for backward compatibility
+** is called from the bif erlang:hash/2. Should never be used
+** as it a) hashes only a part of binaries, b) hashes bignums really poorly,
+** c) hashes bignums differently on processors of different endianness and d) hashes
+** small integers with different weights on different bytes.
+**
+** make_hash: A hash function that will give the same values for the same
+** terms regardless of the internal representation. Small integers are
+** hashed using the same algorithm as bignums and bignums are hashed
+** independently of the CPU endianness.
+** make_hash() also hashes pids, ports and references like 32 bit numbers
+** (but with different constants).
+** make_hash() is called from the bif erlang:phash/2
+**
+** The idea behind the hash algorithm is to produce values suitable for
+** linear dynamic hashing. We cannot choose the range at all while hashing
+** (it's not even supplied to the hashing functions). The good old algorithm
+** [H = H*C+X mod M, where H is the hash value, C is a "random" constant (or M),
+** M is the range, preferably a prime, and X is each byte value] is therefore
+** modified to:
+** H = H*C+X mod 2^32, where C is a large prime. This gives acceptable
+** "spreading" of the hashes, so that later modulo calculations also will give
+** acceptable "spreading" in the range.
+** We really need to hash on bytes, otherwise the
+** upper bytes of a word will be less significant than the lower ones. That's
+** not acceptable at all. For internal use one could maybe optimize by using
+** another hash function, that is less strict but faster. That is, however, not
+** implemented.
+**
+** Short semi-formal description of make_hash:
+**
+** In make_hash, the number N is treated like this:
+** Abs(N) is hashed bytewise with the least significant byte, B(0), first.
+** The number of bytes (J) to calculate hash on in N is
+** (the number of _32_ bit words needed to store the unsigned
+** value of abs(N)) * 4.
+** X = FUNNY_NUMBER2
+** If N < 0, Y = FUNNY_NUMBER4 else Y = FUNNY_NUMBER3.
+** The hash value is Y*h(J) mod 2^32 where h(J) is calculated like
+** h(0) = <initial hash>
+** h(i) = h(i-1)*X + B(i-1)
+** The above should hold regardless of internal representation.
+** Pids are hashed like small numbers but with different constants, as are
+** ports.
+** References are hashed like ports but only on the least significant byte.
+** Binaries are hashed on all bytes (not on the 15 first as in
+** make_broken_hash()).
+** Bytes in lists (possibly text strings) use a simpler multiplication inlined
+** in the handling of lists, that is an optimization.
+** Everything else is like in the old hash (make_broken_hash()).
+**
+** make_hash2() is faster than make_hash, in particular for bignums
+** and binaries, and produces better hash values.
+*/
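+
+/* A minimal sketch (illustration only, not the implementation below;
+ * the guard macro is ours and never defined) of the basic recurrence
+ * described above, H = H*C + X mod 2^32, applied bytewise to a 32 bit
+ * value, least significant byte first. The mod 2^32 is implicit in
+ * the Uint32 arithmetic. */
+#ifdef ERTS_HASH_RECURRENCE_SKETCH
+static Uint32 hash_u32_bytewise_sketch(Uint32 h, Uint32 x, Uint32 prime)
+{
+    int i;
+
+    for (i = 0; i < 4; i++) {
+	h = h*prime + (x & 0xff);	/* h(i) = h(i-1)*C + B(i-1) */
+	x >>= 8;
+    }
+    return h;
+}
+#endif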
+
+/* some prime numbers just above 2 ^ 28 */
+
+#define FUNNY_NUMBER1 268440163
+#define FUNNY_NUMBER2 268439161
+#define FUNNY_NUMBER3 268435459
+#define FUNNY_NUMBER4 268436141
+#define FUNNY_NUMBER5 268438633
+#define FUNNY_NUMBER6 268437017
+#define FUNNY_NUMBER7 268438039
+#define FUNNY_NUMBER8 268437511
+#define FUNNY_NUMBER9 268439627
+#define FUNNY_NUMBER10 268440479
+#define FUNNY_NUMBER11 268440577
+#define FUNNY_NUMBER12 268440581
+
+static Uint32
+hash_binary_bytes(Eterm bin, Uint sz, Uint32 hash)
+{
+ byte* ptr;
+ Uint bitoffs;
+ Uint bitsize;
+
+ ERTS_GET_BINARY_BYTES(bin, ptr, bitoffs, bitsize);
+ if (bitoffs == 0) {
+ while (sz--) {
+ hash = hash*FUNNY_NUMBER1 + *ptr++;
+ }
+ if (bitsize > 0) {
+ byte b = *ptr;
+
+ b >>= 8 - bitsize;
+ hash = (hash*FUNNY_NUMBER1 + b) * FUNNY_NUMBER12 + bitsize;
+ }
+ } else {
+ Uint previous = *ptr++;
+ Uint b;
+ Uint lshift = bitoffs;
+ Uint rshift = 8 - lshift;
+
+ while (sz--) {
+ b = (previous << lshift) & 0xFF;
+ previous = *ptr++;
+ b |= previous >> rshift;
+ hash = hash*FUNNY_NUMBER1 + b;
+ }
+ if (bitsize > 0) {
+ b = (previous << lshift) & 0xFF;
+ previous = *ptr++;
+ b |= previous >> rshift;
+
+ b >>= 8 - bitsize;
+ hash = (hash*FUNNY_NUMBER1 + b) * FUNNY_NUMBER12 + bitsize;
+ }
+ }
+ return hash;
+}
+
+Uint32 make_hash(Eterm term_arg)
+{
+ DECLARE_ESTACK(stack);
+ Eterm term = term_arg;
+ Eterm hash = 0;
+ unsigned op;
+
+ /* Must not collide with the real tag_val_def's: */
+#define MAKE_HASH_TUPLE_OP 0x10
+#define MAKE_HASH_FUN_OP 0x11
+#define MAKE_HASH_CDR_PRE_OP 0x12
+#define MAKE_HASH_CDR_POST_OP 0x13
+
+ /*
+ ** Convenience macro for calculating a bytewise hash on an unsigned 32 bit
+ ** integer.
+    ** If the endianness is known, we could be smarter here,
+    ** but that gives no significant speedup (on a SPARC at least).
+ */
+#define UINT32_HASH_STEP(Expr, Prime1) \
+ do { \
+ Uint32 x = (Uint32) (Expr); \
+ hash = \
+ (((((hash)*(Prime1) + (x & 0xFF)) * (Prime1) + \
+ ((x >> 8) & 0xFF)) * (Prime1) + \
+ ((x >> 16) & 0xFF)) * (Prime1) + \
+ (x >> 24)); \
+ } while(0)
+
+#define UINT32_HASH_RET(Expr, Prime1, Prime2) \
+ UINT32_HASH_STEP(Expr, Prime1); \
+ hash = hash * (Prime2); \
+ break
+
+
+ /*
+     * Significant additions will be needed for a real 64-bit port with larger fixnums.
+ */
+
+ /*
+     * Note: for the simple 64-bit port, which does not utilize the
+     * larger word size, this function works without modification.
+ */
+tail_recur:
+ op = tag_val_def(term);
+
+ for (;;) {
+ switch (op) {
+ case NIL_DEF:
+ hash = hash*FUNNY_NUMBER3 + 1;
+ break;
+ case ATOM_DEF:
+ hash = hash*FUNNY_NUMBER1 +
+ (atom_tab(atom_val(term))->slot.bucket.hvalue);
+ break;
+ case SMALL_DEF:
+ {
+ Sint y1 = signed_val(term);
+ Uint y2 = y1 < 0 ? -(Uint)y1 : y1;
+
+ UINT32_HASH_STEP(y2, FUNNY_NUMBER2);
+#ifdef ARCH_64
+ if (y2 >> 32)
+ UINT32_HASH_STEP(y2 >> 32, FUNNY_NUMBER2);
+#endif
+ hash *= (y1 < 0 ? FUNNY_NUMBER4 : FUNNY_NUMBER3);
+ break;
+ }
+ case BINARY_DEF:
+ {
+ Uint sz = binary_size(term);
+
+ hash = hash_binary_bytes(term, sz, hash);
+ hash = hash*FUNNY_NUMBER4 + sz;
+ break;
+ }
+ case EXPORT_DEF:
+ {
+ Export* ep = (Export *) (export_val(term))[1];
+
+ hash = hash * FUNNY_NUMBER11 + ep->code[2];
+ hash = hash*FUNNY_NUMBER1 +
+ (atom_tab(atom_val(ep->code[0]))->slot.bucket.hvalue);
+ hash = hash*FUNNY_NUMBER1 +
+ (atom_tab(atom_val(ep->code[1]))->slot.bucket.hvalue);
+ break;
+ }
+
+ case FUN_DEF:
+ {
+ ErlFunThing* funp = (ErlFunThing *) fun_val(term);
+ Uint num_free = funp->num_free;
+
+ hash = hash * FUNNY_NUMBER10 + num_free;
+ hash = hash*FUNNY_NUMBER1 +
+ (atom_tab(atom_val(funp->fe->module))->slot.bucket.hvalue);
+ hash = hash*FUNNY_NUMBER2 + funp->fe->old_index;
+ hash = hash*FUNNY_NUMBER2 + funp->fe->old_uniq;
+ if (num_free > 0) {
+ if (num_free > 1) {
+ ESTACK_PUSH3(stack, (Eterm) &funp->env[1], (num_free-1), MAKE_HASH_FUN_OP);
+ }
+ term = funp->env[0];
+ goto tail_recur;
+ }
+ break;
+ }
+ case PID_DEF:
+ UINT32_HASH_RET(internal_pid_number(term),FUNNY_NUMBER5,FUNNY_NUMBER6);
+ case EXTERNAL_PID_DEF:
+ UINT32_HASH_RET(external_pid_number(term),FUNNY_NUMBER5,FUNNY_NUMBER6);
+ case PORT_DEF:
+ UINT32_HASH_RET(internal_port_number(term),FUNNY_NUMBER9,FUNNY_NUMBER10);
+ case EXTERNAL_PORT_DEF:
+ UINT32_HASH_RET(external_port_number(term),FUNNY_NUMBER9,FUNNY_NUMBER10);
+ case REF_DEF:
+ UINT32_HASH_RET(internal_ref_numbers(term)[0],FUNNY_NUMBER9,FUNNY_NUMBER10);
+ case EXTERNAL_REF_DEF:
+ UINT32_HASH_RET(external_ref_numbers(term)[0],FUNNY_NUMBER9,FUNNY_NUMBER10);
+ case FLOAT_DEF:
+ {
+ FloatDef ff;
+ GET_DOUBLE(term, ff);
+ hash = hash*FUNNY_NUMBER6 + (ff.fw[0] ^ ff.fw[1]);
+ break;
+ }
+
+ case MAKE_HASH_CDR_PRE_OP:
+ term = ESTACK_POP(stack);
+ if (is_not_list(term)) {
+ ESTACK_PUSH(stack, MAKE_HASH_CDR_POST_OP);
+ goto tail_recur;
+ }
+ /* fall through */
+ case LIST_DEF:
+ {
+ Eterm* list = list_val(term);
+ while(is_byte(*list)) {
+		/* Optimization for strings.
+		** Note that this hash is different from a 'small' hash,
+		** since multiplications on a SPARC are so slow.
+		*/
+ hash = hash*FUNNY_NUMBER2 + unsigned_val(*list);
+
+ if (is_not_list(CDR(list))) {
+ ESTACK_PUSH(stack, MAKE_HASH_CDR_POST_OP);
+ term = CDR(list);
+ goto tail_recur;
+ }
+ list = list_val(CDR(list));
+ }
+ ESTACK_PUSH2(stack, CDR(list), MAKE_HASH_CDR_PRE_OP);
+ term = CAR(list);
+ goto tail_recur;
+ }
+ case MAKE_HASH_CDR_POST_OP:
+ hash *= FUNNY_NUMBER8;
+ break;
+
+ case BIG_DEF:
+ /* Note that this is the exact same thing as the hashing of smalls.*/
+ {
+ Eterm* ptr = big_val(term);
+ Uint n = BIG_SIZE(ptr);
+ Uint k = n-1;
+ ErtsDigit d;
+ int is_neg = BIG_SIGN(ptr);
+ Uint i;
+ int j;
+
+ for (i = 0; i < k; i++) {
+ d = BIG_DIGIT(ptr, i);
+ for(j = 0; j < sizeof(ErtsDigit); ++j) {
+ hash = (hash*FUNNY_NUMBER2) + (d & 0xff);
+ d >>= 8;
+ }
+ }
+ d = BIG_DIGIT(ptr, k);
+ k = sizeof(ErtsDigit);
+#ifdef ARCH_64
+ if (!(d >> 32))
+ k /= 2;
+#endif
+ for(j = 0; j < (int)k; ++j) {
+ hash = (hash*FUNNY_NUMBER2) + (d & 0xff);
+ d >>= 8;
+ }
+ hash *= is_neg ? FUNNY_NUMBER4 : FUNNY_NUMBER3;
+ break;
+ }
+ case TUPLE_DEF:
+ {
+ Eterm* ptr = tuple_val(term);
+ Uint arity = arityval(*ptr);
+
+ ESTACK_PUSH3(stack, arity, (Eterm)(ptr+1), arity);
+ op = MAKE_HASH_TUPLE_OP;
+ }/*fall through*/
+ case MAKE_HASH_TUPLE_OP:
+ case MAKE_HASH_FUN_OP:
+ {
+ Uint i = ESTACK_POP(stack);
+ Eterm* ptr = (Eterm*) ESTACK_POP(stack);
+ if (i != 0) {
+ term = *ptr;
+ ESTACK_PUSH3(stack, (Eterm)(ptr+1), i-1, op);
+ goto tail_recur;
+ }
+ if (op == MAKE_HASH_TUPLE_OP) {
+ Uint32 arity = ESTACK_POP(stack);
+ hash = hash*FUNNY_NUMBER9 + arity;
+ }
+ break;
+ }
+
+ default:
+ erl_exit(1, "Invalid tag in make_hash(0x%X,0x%X)\n", term, op);
+ return 0;
+ }
+ if (ESTACK_ISEMPTY(stack)) break;
+ op = ESTACK_POP(stack);
+ }
+ DESTROY_ESTACK(stack);
+ return hash;
+
+#undef UINT32_HASH_STEP
+#undef UINT32_HASH_RET
+}
+
+
+
+/* Hash function suggested by Bob Jenkins. */
+
+#define MIX(a,b,c) \
+do { \
+ a -= b; a -= c; a ^= (c>>13); \
+ b -= c; b -= a; b ^= (a<<8); \
+ c -= a; c -= b; c ^= (b>>13); \
+ a -= b; a -= c; a ^= (c>>12); \
+ b -= c; b -= a; b ^= (a<<16); \
+ c -= a; c -= b; c ^= (b>>5); \
+ a -= b; a -= c; a ^= (c>>3); \
+ b -= c; b -= a; b ^= (a<<10); \
+ c -= a; c -= b; c ^= (b>>15); \
+} while(0)
+
+#define HCONST 0x9e3779b9UL /* the golden ratio; an arbitrary value */
+
+Uint32
+block_hash(byte *k, unsigned length, Uint32 initval)
+{
+ Uint32 a,b,c;
+ unsigned len;
+
+ /* Set up the internal state */
+ len = length;
+ a = b = HCONST;
+ c = initval; /* the previous hash value */
+
+ while (len >= 12)
+ {
+ a += (k[0] +((Uint32)k[1]<<8) +((Uint32)k[2]<<16) +((Uint32)k[3]<<24));
+ b += (k[4] +((Uint32)k[5]<<8) +((Uint32)k[6]<<16) +((Uint32)k[7]<<24));
+ c += (k[8] +((Uint32)k[9]<<8) +((Uint32)k[10]<<16)+((Uint32)k[11]<<24));
+ MIX(a,b,c);
+ k += 12; len -= 12;
+ }
+
+ c += length;
+ switch(len) /* all the case statements fall through */
+ {
+ case 11: c+=((Uint32)k[10]<<24);
+ case 10: c+=((Uint32)k[9]<<16);
+ case 9 : c+=((Uint32)k[8]<<8);
+ /* the first byte of c is reserved for the length */
+ case 8 : b+=((Uint32)k[7]<<24);
+ case 7 : b+=((Uint32)k[6]<<16);
+ case 6 : b+=((Uint32)k[5]<<8);
+ case 5 : b+=k[4];
+ case 4 : a+=((Uint32)k[3]<<24);
+ case 3 : a+=((Uint32)k[2]<<16);
+ case 2 : a+=((Uint32)k[1]<<8);
+ case 1 : a+=k[0];
+ /* case 0: nothing left to add */
+ }
+ MIX(a,b,c);
+ return c;
+}
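+
+/* Example use of block_hash() (illustration only, not part of the
+ * original source; the guard macro is ours and never defined): longer
+ * inputs can be hashed in pieces by feeding each result back as the
+ * next call's initval, which block_hash() treats as the previous hash
+ * value. */
+#ifdef ERTS_BLOCK_HASH_USAGE_SKETCH
+static Uint32 block_hash_two_parts_sketch(void)
+{
+    byte part1[] = "hello ";
+    byte part2[] = "world";
+    Uint32 h = block_hash(part1, 6, 0);
+
+    return block_hash(part2, 5, h);	/* chained: h is the previous hash */
+}
+#endif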
+
+Uint32
+make_hash2(Eterm term)
+{
+ Uint32 hash;
+ Eterm tmp_big[2];
+
+/* (HCONST * {2, ..., 15}) mod 2^32 */
+#define HCONST_2 0x3c6ef372UL
+#define HCONST_3 0xdaa66d2bUL
+#define HCONST_4 0x78dde6e4UL
+#define HCONST_5 0x1715609dUL
+#define HCONST_6 0xb54cda56UL
+#define HCONST_7 0x5384540fUL
+#define HCONST_8 0xf1bbcdc8UL
+#define HCONST_9 0x8ff34781UL
+#define HCONST_10 0x2e2ac13aUL
+#define HCONST_11 0xcc623af3UL
+#define HCONST_12 0x6a99b4acUL
+#define HCONST_13 0x08d12e65UL
+#define HCONST_14 0xa708a81eUL
+#define HCONST_15 0x454021d7UL
+
+#define UINT32_HASH_2(Expr1, Expr2, AConst) \
+ do { \
+ Uint32 a,b; \
+ a = AConst + (Uint32) (Expr1); \
+ b = AConst + (Uint32) (Expr2); \
+ MIX(a,b,hash); \
+ } while(0)
+
+#define UINT32_HASH(Expr, AConst) UINT32_HASH_2(Expr, 0, AConst)
+
+#define SINT32_HASH(Expr, AConst) \
+ do { \
+ Sint32 y = (Sint32) (Expr); \
+ if (y < 0) { \
+ UINT32_HASH(-y, AConst); \
+ /* Negative numbers are unnecessarily mixed twice. */ \
+ } \
+ UINT32_HASH(y, AConst); \
+ } while(0)
+
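+/* True iff x fits in a 28-bit signed integer: after the arithmetic
+   shift, ((x) >> 27) + 1 must be 0 or 1, i.e. (x) >> 27 is -1 or 0. */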
+#define IS_SSMALL28(x) (((Uint) (((x) >> (28-1)) + 1)) < 2)
+
+ /* Optimization. Simple cases before declaration of estack. */
+ if (primary_tag(term) == TAG_PRIMARY_IMMED1) {
+ switch (term & _TAG_IMMED1_MASK) {
+ case _TAG_IMMED1_IMMED2:
+ switch (term & _TAG_IMMED2_MASK) {
+ case _TAG_IMMED2_ATOM:
+ /* Fast, but the poor hash value should be mixed. */
+ return atom_tab(atom_val(term))->slot.bucket.hvalue;
+ }
+ break;
+ case _TAG_IMMED1_SMALL:
+ {
+ Sint x = signed_val(term);
+
+ if (SMALL_BITS > 28 && !IS_SSMALL28(x)) {
+ term = small_to_big(x, tmp_big);
+ break;
+ }
+ hash = 0;
+ SINT32_HASH(x, HCONST);
+ return hash;
+ }
+ }
+    }
+ {
+ Eterm tmp;
+ DECLARE_ESTACK(s);
+
+ hash = 0;
+ for (;;) {
+ switch (primary_tag(term)) {
+ case TAG_PRIMARY_LIST:
+ {
+ int c = 0;
+ Uint32 sh = 0;
+ Eterm* ptr = list_val(term);
+ while (is_byte(*ptr)) {
+ /* Optimization for strings. */
+ sh = (sh << 8) + unsigned_val(*ptr);
+ if (c == 3) {
+ UINT32_HASH(sh, HCONST_4);
+ c = sh = 0;
+ } else {
+ c++;
+ }
+ term = CDR(ptr);
+ if (is_not_list(term))
+ break;
+ ptr = list_val(term);
+ }
+ if (c > 0)
+ UINT32_HASH(sh, HCONST_4);
+ if (is_list(term)) {
+ term = *ptr;
+ tmp = *++ptr;
+ ESTACK_PUSH(s, tmp);
+ }
+ }
+ break;
+ case TAG_PRIMARY_BOXED:
+ {
+ Eterm hdr = *boxed_val(term);
+ ASSERT(is_header(hdr));
+ switch (hdr & _TAG_HEADER_MASK) {
+ case ARITYVAL_SUBTAG:
+ {
+ int i;
+ int arity = header_arity(hdr);
+ Eterm* elem = tuple_val(term);
+ UINT32_HASH(arity, HCONST_9);
+ if (arity == 0) /* Empty tuple */
+ goto hash2_common;
+ for (i = arity; i >= 2; i--) {
+ tmp = elem[i];
+ ESTACK_PUSH(s, tmp);
+ }
+ term = elem[1];
+ }
+ break;
+ case EXPORT_SUBTAG:
+ {
+ Export* ep = (Export *) (export_val(term))[1];
+
+ UINT32_HASH_2
+ (ep->code[2],
+ atom_tab(atom_val(ep->code[0]))->slot.bucket.hvalue,
+ HCONST);
+ UINT32_HASH
+ (atom_tab(atom_val(ep->code[1]))->slot.bucket.hvalue,
+ HCONST_14);
+ goto hash2_common;
+ }
+
+ case FUN_SUBTAG:
+ {
+ ErlFunThing* funp = (ErlFunThing *) fun_val(term);
+ Uint num_free = funp->num_free;
+
+ UINT32_HASH_2
+ (num_free,
+ atom_tab(atom_val(funp->fe->module))->slot.bucket.hvalue,
+ HCONST);
+ UINT32_HASH_2
+ (funp->fe->old_index, funp->fe->old_uniq, HCONST);
+ if (num_free == 0) {
+ goto hash2_common;
+ } else {
+ Eterm* bptr = funp->env + num_free - 1;
+ while (num_free-- > 1) {
+ term = *bptr--;
+ ESTACK_PUSH(s, term);
+ }
+ term = *bptr;
+ }
+ }
+ break;
+ case REFC_BINARY_SUBTAG:
+ case HEAP_BINARY_SUBTAG:
+ case SUB_BINARY_SUBTAG:
+ {
+ byte* bptr;
+ unsigned sz = binary_size(term);
+ Uint32 con = HCONST_13 + hash;
+ Uint bitoffs;
+ Uint bitsize;
+
+ ERTS_GET_BINARY_BYTES(term, bptr, bitoffs, bitsize);
+ if (sz == 0 && bitsize == 0) {
+ hash = con;
+ } else {
+ if (bitoffs == 0) {
+ hash = block_hash(bptr, sz, con);
+ if (bitsize > 0) {
+ UINT32_HASH_2(bitsize, (bptr[sz] >> (8 - bitsize)),
+ HCONST_15);
+ }
+ } else {
+ byte* buf = (byte *) erts_alloc(ERTS_ALC_T_TMP,
+ sz + (bitsize != 0));
+ erts_copy_bits(bptr, bitoffs, 1, buf, 0, 1, sz*8+bitsize);
+ hash = block_hash(buf, sz, con);
+ if (bitsize > 0) {
+ UINT32_HASH_2(bitsize, (buf[sz] >> (8 - bitsize)),
+ HCONST_15);
+ }
+ erts_free(ERTS_ALC_T_TMP, (void *) buf);
+ }
+ }
+ goto hash2_common;
+ }
+ break;
+ case POS_BIG_SUBTAG:
+ case NEG_BIG_SUBTAG:
+ {
+ Eterm* ptr = big_val(term);
+ Uint i = 0;
+ Uint n = BIG_SIZE(ptr);
+ Uint32 con = BIG_SIGN(ptr) ? HCONST_10 : HCONST_11;
+#if D_EXP == 16
+ do {
+ Uint32 x, y;
+ x = i < n ? BIG_DIGIT(ptr, i++) : 0;
+ x += (Uint32)(i < n ? BIG_DIGIT(ptr, i++) : 0) << 16;
+ y = i < n ? BIG_DIGIT(ptr, i++) : 0;
+ y += (Uint32)(i < n ? BIG_DIGIT(ptr, i++) : 0) << 16;
+ UINT32_HASH_2(x, y, con);
+ } while (i < n);
+#elif D_EXP == 32
+ do {
+ Uint32 x, y;
+ x = i < n ? BIG_DIGIT(ptr, i++) : 0;
+ y = i < n ? BIG_DIGIT(ptr, i++) : 0;
+ UINT32_HASH_2(x, y, con);
+ } while (i < n);
+#elif D_EXP == 64
+ do {
+ Uint t;
+ Uint32 x, y;
+ t = i < n ? BIG_DIGIT(ptr, i++) : 0;
+ x = t & 0xffffffff;
+ y = t >> 32;
+ UINT32_HASH_2(x, y, con);
+ } while (i < n);
+#else
+#error "unsupported D_EXP size"
+#endif
+ goto hash2_common;
+ }
+ break;
+ case REF_SUBTAG:
+ /* All parts of the ref should be hashed. */
+ UINT32_HASH(internal_ref_numbers(term)[0], HCONST_7);
+ goto hash2_common;
+ break;
+ case EXTERNAL_REF_SUBTAG:
+ /* All parts of the ref should be hashed. */
+ UINT32_HASH(external_ref_numbers(term)[0], HCONST_7);
+ goto hash2_common;
+ break;
+ case EXTERNAL_PID_SUBTAG:
+ /* Only 15 bits are hashed. */
+ UINT32_HASH(external_pid_number(term), HCONST_5);
+ goto hash2_common;
+ case EXTERNAL_PORT_SUBTAG:
+ /* Only 15 bits are hashed. */
+ UINT32_HASH(external_port_number(term), HCONST_6);
+ goto hash2_common;
+ case FLOAT_SUBTAG:
+ {
+ FloatDef ff;
+ GET_DOUBLE(term, ff);
+#if defined(WORDS_BIGENDIAN)
+ UINT32_HASH_2(ff.fw[0], ff.fw[1], HCONST_12);
+#else
+ UINT32_HASH_2(ff.fw[1], ff.fw[0], HCONST_12);
+#endif
+ goto hash2_common;
+ }
+ break;
+
+ default:
+ erl_exit(1, "Invalid tag in make_hash2(0x%X)\n", term);
+ }
+ }
+ break;
+ case TAG_PRIMARY_IMMED1:
+ switch (term & _TAG_IMMED1_MASK) {
+ case _TAG_IMMED1_PID:
+ /* Only 15 bits are hashed. */
+ UINT32_HASH(internal_pid_number(term), HCONST_5);
+ goto hash2_common;
+ case _TAG_IMMED1_PORT:
+ /* Only 15 bits are hashed. */
+ UINT32_HASH(internal_port_number(term), HCONST_6);
+ goto hash2_common;
+ case _TAG_IMMED1_IMMED2:
+ switch (term & _TAG_IMMED2_MASK) {
+ case _TAG_IMMED2_ATOM:
+ if (hash == 0)
+ /* Fast, but the poor hash value should be mixed. */
+ hash = atom_tab(atom_val(term))->slot.bucket.hvalue;
+ else
+ UINT32_HASH(atom_tab(atom_val(term))->slot.bucket.hvalue,
+ HCONST_3);
+ goto hash2_common;
+ case _TAG_IMMED2_NIL:
+ if (hash == 0)
+ hash = 3468870702UL;
+ else
+ UINT32_HASH(NIL_DEF, HCONST_2);
+ goto hash2_common;
+ default:
+ erl_exit(1, "Invalid tag in make_hash2(0x%X)\n", term);
+ }
+ case _TAG_IMMED1_SMALL:
+ {
+ Sint x = signed_val(term);
+
+ if (SMALL_BITS > 28 && !IS_SSMALL28(x)) {
+ term = small_to_big(x, tmp_big);
+ break;
+ }
+ SINT32_HASH(x, HCONST);
+ goto hash2_common;
+ }
+ }
+ break;
+ default:
+ erl_exit(1, "Invalid tag in make_hash2(0x%X)\n", term);
+ hash2_common:
+ if (ESTACK_ISEMPTY(s)) {
+ DESTROY_ESTACK(s);
+ return hash;
+ }
+ term = ESTACK_POP(s);
+ }
+ }
+ }
+#undef UINT32_HASH_2
+#undef UINT32_HASH
+#undef SINT32_HASH
+}
+
+#undef HCONST
+#undef MIX
+
+
+Uint32 make_broken_hash(Eterm term)
+{
+ Uint32 hash = 0;
+ DECLARE_ESTACK(stack);
+ unsigned op;
+tail_recur:
+ op = tag_val_def(term);
+ for (;;) {
+ switch (op) {
+ case NIL_DEF:
+ hash = hash*FUNNY_NUMBER3 + 1;
+ break;
+ case ATOM_DEF:
+ hash = hash*FUNNY_NUMBER1 +
+ (atom_tab(atom_val(term))->slot.bucket.hvalue);
+ break;
+ case SMALL_DEF:
+#ifdef ARCH_64
+ {
+ Sint y1 = signed_val(term);
+ Uint y2 = y1 < 0 ? -(Uint)y1 : y1;
+ Uint32 y3 = (Uint32) (y2 >> 32);
+ int arity = 1;
+
+#if defined(WORDS_BIGENDIAN)
+ if (!IS_SSMALL28(y1))
+ { /* like a bignum */
+ Uint32 y4 = (Uint32) y2;
+ hash = hash*FUNNY_NUMBER2 + ((y4 << 16) | (y4 >> 16));
+ if (y3)
+ {
+ hash = hash*FUNNY_NUMBER2 + ((y3 << 16) | (y3 >> 16));
+ arity++;
+ }
+ hash = hash * (y1 < 0 ? FUNNY_NUMBER3 : FUNNY_NUMBER2) + arity;
+ } else {
+ hash = hash*FUNNY_NUMBER2 + (((Uint) y1) & 0xfffffff);
+ }
+#else
+ if (!IS_SSMALL28(y1))
+ { /* like a bignum */
+ hash = hash*FUNNY_NUMBER2 + ((Uint32) y2);
+ if (y3)
+ {
+ hash = hash*FUNNY_NUMBER2 + y3;
+ arity++;
+ }
+ hash = hash * (y1 < 0 ? FUNNY_NUMBER3 : FUNNY_NUMBER2) + arity;
+ } else {
+ hash = hash*FUNNY_NUMBER2 + (((Uint) y1) & 0xfffffff);
+ }
+#endif
+ }
+#else
+ hash = hash*FUNNY_NUMBER2 + unsigned_val(term);
+#endif
+ break;
+
+ case BINARY_DEF:
+ {
+ size_t sz = binary_size(term);
+ size_t i = (sz < 15) ? sz : 15;
+
+ hash = hash_binary_bytes(term, i, hash);
+ hash = hash*FUNNY_NUMBER4 + sz;
+ break;
+ }
+
+ case EXPORT_DEF:
+ {
+ Export* ep = (Export *) (export_val(term))[1];
+
+ hash = hash * FUNNY_NUMBER11 + ep->code[2];
+ hash = hash*FUNNY_NUMBER1 +
+ (atom_tab(atom_val(ep->code[0]))->slot.bucket.hvalue);
+ hash = hash*FUNNY_NUMBER1 +
+ (atom_tab(atom_val(ep->code[1]))->slot.bucket.hvalue);
+ break;
+ }
+
+ case FUN_DEF:
+ {
+ ErlFunThing* funp = (ErlFunThing *) fun_val(term);
+ Uint num_free = funp->num_free;
+
+ hash = hash * FUNNY_NUMBER10 + num_free;
+ hash = hash*FUNNY_NUMBER1 +
+ (atom_tab(atom_val(funp->fe->module))->slot.bucket.hvalue);
+ hash = hash*FUNNY_NUMBER2 + funp->fe->old_index;
+ hash = hash*FUNNY_NUMBER2 + funp->fe->old_uniq;
+ if (num_free > 0) {
+ if (num_free > 1) {
+ ESTACK_PUSH3(stack, (Eterm) &funp->env[1], (num_free-1), MAKE_HASH_FUN_OP);
+ }
+ term = funp->env[0];
+ goto tail_recur;
+ }
+ break;
+ }
+
+ case PID_DEF:
+ hash = hash*FUNNY_NUMBER5 + internal_pid_number(term);
+ break;
+ case EXTERNAL_PID_DEF:
+ hash = hash*FUNNY_NUMBER5 + external_pid_number(term);
+ break;
+ case PORT_DEF:
+ hash = hash*FUNNY_NUMBER9 + internal_port_number(term);
+ break;
+ case EXTERNAL_PORT_DEF:
+ hash = hash*FUNNY_NUMBER9 + external_port_number(term);
+ break;
+ case REF_DEF:
+ hash = hash*FUNNY_NUMBER9 + internal_ref_numbers(term)[0];
+ break;
+ case EXTERNAL_REF_DEF:
+ hash = hash*FUNNY_NUMBER9 + external_ref_numbers(term)[0];
+ break;
+ case FLOAT_DEF:
+ {
+ FloatDef ff;
+ GET_DOUBLE(term, ff);
+ hash = hash*FUNNY_NUMBER6 + (ff.fw[0] ^ ff.fw[1]);
+ }
+ break;
+
+ case MAKE_HASH_CDR_PRE_OP:
+ term = ESTACK_POP(stack);
+ if (is_not_list(term)) {
+ ESTACK_PUSH(stack, MAKE_HASH_CDR_POST_OP);
+ goto tail_recur;
+ }
+ /*fall through*/
+ case LIST_DEF:
+ {
+ Eterm* list = list_val(term);
+ ESTACK_PUSH2(stack, CDR(list), MAKE_HASH_CDR_PRE_OP);
+ term = CAR(list);
+ goto tail_recur;
+ }
+
+ case MAKE_HASH_CDR_POST_OP:
+ hash *= FUNNY_NUMBER8;
+ break;
+
+ case BIG_DEF:
+ {
+ Eterm* ptr = big_val(term);
+ int is_neg = BIG_SIGN(ptr);
+ Uint arity = BIG_ARITY(ptr);
+ Uint i = arity;
+ ptr++;
+#if D_EXP == 16
+ /* hash over 32 bit LE */
+
+ while(i--) {
+ hash = hash*FUNNY_NUMBER2 + *ptr++;
+ }
+#elif D_EXP == 32
+
+#if defined(WORDS_BIGENDIAN)
+ while(i--) {
+ Uint d = *ptr++;
+ hash = hash*FUNNY_NUMBER2 + ((d << 16) | (d >> 16));
+ }
+#else
+ while(i--) {
+ hash = hash*FUNNY_NUMBER2 + *ptr++;
+ }
+#endif
+
+#elif D_EXP == 64
+ {
+ Uint32 h = 0, l;
+#if defined(WORDS_BIGENDIAN)
+ while(i--) {
+ Uint d = *ptr++;
+ l = d & 0xffffffff;
+ h = d >> 32;
+ hash = hash*FUNNY_NUMBER2 + ((l << 16) | (l >> 16));
+ if (h || i)
+ hash = hash*FUNNY_NUMBER2 + ((h << 16) | (h >> 16));
+ }
+#else
+ while(i--) {
+ Uint d = *ptr++;
+ l = d & 0xffffffff;
+ h = d >> 32;
+ hash = hash*FUNNY_NUMBER2 + l;
+ if (h || i)
+ hash = hash*FUNNY_NUMBER2 + h;
+ }
+#endif
+ /* adjust arity to match 32 bit mode */
+ arity = (arity << 1) - (h == 0);
+ }
+
+#else
+#error "unsupported D_EXP size"
+#endif
+ hash = hash * (is_neg ? FUNNY_NUMBER3 : FUNNY_NUMBER2) + arity;
+ }
+ break;
+
+ case TUPLE_DEF:
+ {
+ Eterm* ptr = tuple_val(term);
+ Uint arity = arityval(*ptr);
+
+ ESTACK_PUSH3(stack, arity, (Eterm)(ptr+1), arity);
+ op = MAKE_HASH_TUPLE_OP;
+ }/*fall through*/
+ case MAKE_HASH_TUPLE_OP:
+ case MAKE_HASH_FUN_OP:
+ {
+ Uint i = ESTACK_POP(stack);
+ Eterm* ptr = (Eterm*) ESTACK_POP(stack);
+ if (i != 0) {
+ term = *ptr;
+ ESTACK_PUSH3(stack, (Eterm)(ptr+1), i-1, op);
+ goto tail_recur;
+ }
+ if (op == MAKE_HASH_TUPLE_OP) {
+ Uint32 arity = ESTACK_POP(stack);
+ hash = hash*FUNNY_NUMBER9 + arity;
+ }
+ break;
+ }
+
+ default:
+ erl_exit(1, "Invalid tag in make_broken_hash\n");
+ return 0;
+ }
+ if (ESTACK_ISEMPTY(stack)) break;
+ op = ESTACK_POP(stack);
+ }
+
+ DESTROY_ESTACK(stack);
+ return hash;
+
+#undef MAKE_HASH_TUPLE_OP
+#undef MAKE_HASH_FUN_OP
+#undef MAKE_HASH_CDR_PRE_OP
+#undef MAKE_HASH_CDR_POST_OP
+}
+
+static int do_send_to_logger(Eterm tag, Eterm gleader, char *buf, int len)
+{
+ /* error_logger !
+ {notify,{info_msg,gleader,{emulator,"~s~n",[<message as list>]}}} |
+ {notify,{error,gleader,{emulator,"~s~n",[<message as list>]}}} |
+       {notify,{warning_msg,gleader,{emulator,"~s~n",[<message as list>]}}} */
+ Eterm* hp;
+ Uint sz;
+ Uint gl_sz;
+ Eterm gl;
+ Eterm list,plist,format,tuple1,tuple2,tuple3;
+ ErlOffHeap *ohp;
+ ErlHeapFragment *bp = NULL;
+#if !defined(ERTS_SMP)
+ Process *p;
+#endif
+
+ ASSERT(is_atom(tag));
+
+ if (len <= 0) {
+ return -1;
+ }
+
+#ifndef ERTS_SMP
+ if (
+#ifdef USE_THREADS
+ !erts_get_scheduler_data() || /* Must be scheduler thread */
+#endif
+ (p = erts_whereis_process(NULL, 0, am_error_logger, 0, 0)) == NULL
+ || p->status == P_RUNNING) {
+	/* buf *always* points to a null-terminated string */
+ erts_fprintf(stderr, "(no error logger present) %T: \"%s\"\n",
+ tag, buf);
+ return 0;
+ }
+    /* So we have an error logger, let's build the message */
+#endif
+ gl_sz = IS_CONST(gleader) ? 0 : size_object(gleader);
+    sz = len * 2 /* message list */ + 2 /* cons surrounding message list */
+ + gl_sz +
+ 3 /*outer 2-tuple*/ + 4 /* middle 3-tuple */ + 4 /*inner 3-tuple */ +
+ 8 /* "~s~n" */;
+
+#ifndef ERTS_SMP
+ if (sz <= HeapWordsLeft(p)) {
+ ohp = &MSO(p);
+ hp = HEAP_TOP(p);
+ HEAP_TOP(p) += sz;
+ } else {
+#endif
+ bp = new_message_buffer(sz);
+ ohp = &bp->off_heap;
+ hp = bp->mem;
+#ifndef ERTS_SMP
+ }
+#endif
+ gl = (is_nil(gleader)
+ ? am_noproc
+ : (IS_CONST(gleader)
+ ? gleader
+ : copy_struct(gleader,gl_sz,&hp,ohp)));
+ list = buf_to_intlist(&hp, buf, len, NIL);
+ plist = CONS(hp,list,NIL);
+ hp += 2;
+ format = buf_to_intlist(&hp, "~s~n", 4, NIL);
+ tuple1 = TUPLE3(hp, am_emulator, format, plist);
+ hp += 4;
+ tuple2 = TUPLE3(hp, tag, gl, tuple1);
+ hp += 4;
+ tuple3 = TUPLE2(hp, am_notify, tuple2);
+#ifdef HARDDEBUG
+ erts_fprintf(stderr, "%T\n", tuple3);
+#endif
+#ifdef ERTS_SMP
+ {
+ Eterm from = erts_get_current_pid();
+ if (is_not_internal_pid(from))
+ from = NIL;
+ erts_queue_error_logger_message(from, tuple3, bp);
+ }
+#else
+ erts_queue_message(p, NULL /* only used for smp build */, bp, tuple3, NIL);
+#endif
+ return 0;
+}
+
+static ERTS_INLINE int
+send_info_to_logger(Eterm gleader, char *buf, int len)
+{
+ return do_send_to_logger(am_info_msg, gleader, buf, len);
+}
+
+static ERTS_INLINE int
+send_warning_to_logger(Eterm gleader, char *buf, int len)
+{
+ Eterm tag;
+ switch (erts_error_logger_warnings) {
+ case am_info: tag = am_info_msg; break;
+ case am_warning: tag = am_warning_msg; break;
+ default: tag = am_error; break;
+ }
+ return do_send_to_logger(tag, gleader, buf, len);
+}
+
+static ERTS_INLINE int
+send_error_to_logger(Eterm gleader, char *buf, int len)
+{
+ return do_send_to_logger(am_error, gleader, buf, len);
+}
+
+#define LOGGER_DSBUF_INC_SZ 256
+
+static erts_dsprintf_buf_t *
+grow_logger_dsbuf(erts_dsprintf_buf_t *dsbufp, size_t need)
+{
+ size_t size;
+ size_t free_size = dsbufp->size - dsbufp->str_len;
+
+ ASSERT(dsbufp && dsbufp->str);
+
+ if (need <= free_size)
+ return dsbufp;
+
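+    /* Round the needed growth up to a whole multiple of
+       LOGGER_DSBUF_INC_SZ (at least one increment) and add it to the
+       current size. */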
+ size = need - free_size + LOGGER_DSBUF_INC_SZ;
+ size = (((size + LOGGER_DSBUF_INC_SZ - 1) / LOGGER_DSBUF_INC_SZ)
+ * LOGGER_DSBUF_INC_SZ);
+ size += dsbufp->size;
+ ASSERT(dsbufp->str_len + need <= size);
+ dsbufp->str = (char *) erts_realloc(ERTS_ALC_T_LOGGER_DSBUF,
+ (void *) dsbufp->str,
+ size);
+ dsbufp->size = size;
+ return dsbufp;
+}
+
+erts_dsprintf_buf_t *
+erts_create_logger_dsbuf(void)
+{
+ erts_dsprintf_buf_t init = ERTS_DSPRINTF_BUF_INITER(grow_logger_dsbuf);
+ erts_dsprintf_buf_t *dsbufp = erts_alloc(ERTS_ALC_T_LOGGER_DSBUF,
+ sizeof(erts_dsprintf_buf_t));
+ sys_memcpy((void *) dsbufp, (void *) &init, sizeof(erts_dsprintf_buf_t));
+ dsbufp->str = (char *) erts_alloc(ERTS_ALC_T_LOGGER_DSBUF,
+ LOGGER_DSBUF_INC_SZ);
+ dsbufp->str[0] = '\0';
+ dsbufp->size = LOGGER_DSBUF_INC_SZ;
+ return dsbufp;
+}
+
+static ERTS_INLINE void
+destroy_logger_dsbuf(erts_dsprintf_buf_t *dsbufp)
+{
+ ASSERT(dsbufp && dsbufp->str);
+ erts_free(ERTS_ALC_T_LOGGER_DSBUF, (void *) dsbufp->str);
+ erts_free(ERTS_ALC_T_LOGGER_DSBUF, (void *) dsbufp);
+}
+
+int
+erts_send_info_to_logger(Eterm gleader, erts_dsprintf_buf_t *dsbufp)
+{
+ int res;
+ res = send_info_to_logger(gleader, dsbufp->str, dsbufp->str_len);
+ destroy_logger_dsbuf(dsbufp);
+ return res;
+}
+
+int
+erts_send_warning_to_logger(Eterm gleader, erts_dsprintf_buf_t *dsbufp)
+{
+ int res;
+ res = send_warning_to_logger(gleader, dsbufp->str, dsbufp->str_len);
+ destroy_logger_dsbuf(dsbufp);
+ return res;
+}
+
+int
+erts_send_error_to_logger(Eterm gleader, erts_dsprintf_buf_t *dsbufp)
+{
+ int res;
+ res = send_error_to_logger(gleader, dsbufp->str, dsbufp->str_len);
+ destroy_logger_dsbuf(dsbufp);
+ return res;
+}
+
+int
+erts_send_info_to_logger_str(Eterm gleader, char *str)
+{
+ return send_info_to_logger(gleader, str, sys_strlen(str));
+}
+
+int
+erts_send_warning_to_logger_str(Eterm gleader, char *str)
+{
+ return send_warning_to_logger(gleader, str, sys_strlen(str));
+}
+
+int
+erts_send_error_to_logger_str(Eterm gleader, char *str)
+{
+ return send_error_to_logger(gleader, str, sys_strlen(str));
+}
+
+int
+erts_send_info_to_logger_nogl(erts_dsprintf_buf_t *dsbuf)
+{
+ return erts_send_info_to_logger(NIL, dsbuf);
+}
+
+int
+erts_send_warning_to_logger_nogl(erts_dsprintf_buf_t *dsbuf)
+{
+ return erts_send_warning_to_logger(NIL, dsbuf);
+}
+
+int
+erts_send_error_to_logger_nogl(erts_dsprintf_buf_t *dsbuf)
+{
+ return erts_send_error_to_logger(NIL, dsbuf);
+}
+
+int
+erts_send_info_to_logger_str_nogl(char *str)
+{
+ return erts_send_info_to_logger_str(NIL, str);
+}
+
+int
+erts_send_warning_to_logger_str_nogl(char *str)
+{
+ return erts_send_warning_to_logger_str(NIL, str);
+}
+
+int
+erts_send_error_to_logger_str_nogl(char *str)
+{
+ return erts_send_error_to_logger_str(NIL, str);
+}
+
+
+#define TMP_DSBUF_INC_SZ 256
+
+static erts_dsprintf_buf_t *
+grow_tmp_dsbuf(erts_dsprintf_buf_t *dsbufp, size_t need)
+{
+ size_t size;
+ size_t free_size = dsbufp->size - dsbufp->str_len;
+
+ ASSERT(dsbufp);
+
+ if (need <= free_size)
+ return dsbufp;
+ size = need - free_size + TMP_DSBUF_INC_SZ;
+ size = ((size + TMP_DSBUF_INC_SZ - 1)/TMP_DSBUF_INC_SZ)*TMP_DSBUF_INC_SZ;
+ size += dsbufp->size;
+ ASSERT(dsbufp->str_len + need <= size);
+ dsbufp->str = (char *) erts_realloc(ERTS_ALC_T_TMP_DSBUF,
+ (void *) dsbufp->str,
+ size);
+ dsbufp->size = size;
+ return dsbufp;
+}
+
+erts_dsprintf_buf_t *
+erts_create_tmp_dsbuf(Uint size)
+{
+ Uint init_size = size ? size : TMP_DSBUF_INC_SZ;
+ erts_dsprintf_buf_t init = ERTS_DSPRINTF_BUF_INITER(grow_tmp_dsbuf);
+ erts_dsprintf_buf_t *dsbufp = erts_alloc(ERTS_ALC_T_TMP_DSBUF,
+ sizeof(erts_dsprintf_buf_t));
+ sys_memcpy((void *) dsbufp, (void *) &init, sizeof(erts_dsprintf_buf_t));
+ dsbufp->str = (char *) erts_alloc(ERTS_ALC_T_TMP_DSBUF, init_size);
+ dsbufp->str[0] = '\0';
+ dsbufp->size = init_size;
+ return dsbufp;
+}
+
+void
+erts_destroy_tmp_dsbuf(erts_dsprintf_buf_t *dsbufp)
+{
+ if (dsbufp->str)
+ erts_free(ERTS_ALC_T_TMP_DSBUF, (void *) dsbufp->str);
+ erts_free(ERTS_ALC_T_TMP_DSBUF, (void *) dsbufp);
+}
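+
+/* A usage sketch for the temporary dsprintf buffer (illustration only,
+ * not part of the original source; the guard macro is ours and never
+ * defined, and erts_dsprintf() is the dynamic-string printf used with
+ * these buffers elsewhere in ERTS): create, print into it, read
+ * dsbufp->str, then destroy. */
+#ifdef ERTS_TMP_DSBUF_USAGE_SKETCH
+static void tmp_dsbuf_usage_sketch(void)
+{
+    erts_dsprintf_buf_t *dsbufp = erts_create_tmp_dsbuf(0);
+
+    erts_dsprintf(dsbufp, "%d timers", 42);
+    erts_fprintf(stderr, "%s\n", dsbufp->str);
+    erts_destroy_tmp_dsbuf(dsbufp);
+}
+#endif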
+
+
+/* eq and cmp are written as separate functions, as eq is a little faster */
+
+/*
+ * Test for equality of two terms.
+ * Returns 0 if the terms are not equal, and a non-zero value if they are.
+ */
+
+int eq(Eterm a, Eterm b)
+{
+ DECLARE_ESTACK(stack);
+ Sint sz;
+ Eterm* aa;
+ Eterm* bb;
+
+tailrecur:
+ if (a == b) goto pop_next;
+tailrecur_ne:
+
+ switch (primary_tag(a)) {
+ case TAG_PRIMARY_LIST:
+ if (is_list(b)) {
+ Eterm* aval = list_val(a);
+ Eterm* bval = list_val(b);
+ while (1) {
+ Eterm atmp = CAR(aval);
+ Eterm btmp = CAR(bval);
+ if (atmp != btmp) {
+ ESTACK_PUSH2(stack,CDR(bval),CDR(aval));
+ a = atmp;
+ b = btmp;
+ goto tailrecur_ne;
+ }
+ atmp = CDR(aval);
+ btmp = CDR(bval);
+ if (atmp == btmp) {
+ goto pop_next;
+ }
+ if (is_not_list(atmp) || is_not_list(btmp)) {
+ a = atmp;
+ b = btmp;
+ goto tailrecur_ne;
+ }
+ aval = list_val(atmp);
+ bval = list_val(btmp);
+ }
+ }
+ break; /* not equal */
+
+ case TAG_PRIMARY_BOXED:
+ {
+ Eterm hdr = *boxed_val(a);
+ switch (hdr & _TAG_HEADER_MASK) {
+ case ARITYVAL_SUBTAG:
+ {
+ aa = tuple_val(a);
+ if (!is_boxed(b) || *boxed_val(b) != *aa)
+ goto not_equal;
+ bb = tuple_val(b);
+ if ((sz = arityval(*aa)) == 0) goto pop_next;
+ ++aa;
+ ++bb;
+ goto term_array;
+ }
+ case REFC_BINARY_SUBTAG:
+ case HEAP_BINARY_SUBTAG:
+ case SUB_BINARY_SUBTAG:
+ {
+ byte* a_ptr;
+ byte* b_ptr;
+ size_t a_size;
+ size_t b_size;
+ Uint a_bitsize;
+ Uint b_bitsize;
+ Uint a_bitoffs;
+ Uint b_bitoffs;
+
+ if (is_not_binary(b)) {
+ goto not_equal;
+ }
+ a_size = binary_size(a);
+ b_size = binary_size(b);
+ if (a_size != b_size) {
+ goto not_equal;
+ }
+ ERTS_GET_BINARY_BYTES(a, a_ptr, a_bitoffs, a_bitsize);
+ ERTS_GET_BINARY_BYTES(b, b_ptr, b_bitoffs, b_bitsize);
+ if ((a_bitsize | b_bitsize | a_bitoffs | b_bitoffs) == 0) {
+ if (sys_memcmp(a_ptr, b_ptr, a_size) == 0) goto pop_next;
+ } else if (a_bitsize == b_bitsize) {
+ if (erts_cmp_bits(a_ptr, a_bitoffs, b_ptr, b_bitoffs,
+ (a_size << 3) + a_bitsize) == 0) goto pop_next;
+ }
+ break; /* not equal */
+ }
+ case EXPORT_SUBTAG:
+ {
+ if (is_export(b)) {
+ Export* a_exp = (Export *) (export_val(a))[1];
+ Export* b_exp = (Export *) (export_val(b))[1];
+ if (a_exp == b_exp) goto pop_next;
+ }
+ break; /* not equal */
+ }
+ case FUN_SUBTAG:
+ {
+ ErlFunThing* f1;
+ ErlFunThing* f2;
+
+ if (is_not_fun(b))
+ goto not_equal;
+ f1 = (ErlFunThing *) fun_val(a);
+ f2 = (ErlFunThing *) fun_val(b);
+ if (f1->fe->module != f2->fe->module ||
+ f1->fe->old_index != f2->fe->old_index ||
+ f1->fe->old_uniq != f2->fe->old_uniq ||
+ f1->num_free != f2->num_free) {
+ goto not_equal;
+ }
+ if ((sz = f1->num_free) == 0) goto pop_next;
+ aa = f1->env;
+ bb = f2->env;
+ goto term_array;
+ }
+
+ case EXTERNAL_PID_SUBTAG:
+ case EXTERNAL_PORT_SUBTAG: {
+ ExternalThing *ap;
+ ExternalThing *bp;
+
+ if(is_not_external(b))
+ goto not_equal;
+
+ ap = external_thing_ptr(a);
+ bp = external_thing_ptr(b);
+
+ if(ap->header == bp->header && ap->node == bp->node) {
+ ASSERT(1 == external_data_words(a));
+ ASSERT(1 == external_data_words(b));
+
+ if (ap->data.ui[0] == bp->data.ui[0]) goto pop_next;
+ }
+ break; /* not equal */
+ }
+ case EXTERNAL_REF_SUBTAG: {
+ /*
+ * Observe!
+ * When comparing refs we need to compare ref numbers
+ * (32-bit words) *not* ref data words.
+ */
+ Uint32 *anum;
+ Uint32 *bnum;
+ Uint common_len;
+ Uint alen;
+ Uint blen;
+ Uint i;
+
+ if(is_not_external_ref(b))
+ goto not_equal;
+
+ if(external_node(a) != external_node(b))
+ goto not_equal;
+
+ anum = external_ref_numbers(a);
+ bnum = external_ref_numbers(b);
+ alen = external_ref_no_of_numbers(a);
+ blen = external_ref_no_of_numbers(b);
+
+ goto ref_common;
+ case REF_SUBTAG:
+
+ if (is_not_internal_ref(b))
+ goto not_equal;
+ alen = internal_ref_no_of_numbers(a);
+ blen = internal_ref_no_of_numbers(b);
+ anum = internal_ref_numbers(a);
+ bnum = internal_ref_numbers(b);
+
+ ref_common:
+ ASSERT(alen > 0 && blen > 0);
+
+ if (anum[0] != bnum[0])
+ goto not_equal;
+
+ if (alen == 3 && blen == 3) {
+ /* Most refs are of length 3 */
+ if (anum[1] == bnum[1] && anum[2] == bnum[2]) {
+ goto pop_next;
+ } else {
+ goto not_equal;
+ }
+ }
+
+ common_len = alen;
+ if (blen < alen)
+ common_len = blen;
+
+ for (i = 1; i < common_len; i++)
+ if (anum[i] != bnum[i])
+ goto not_equal;
+
+ if(alen != blen) {
+
+ if (alen > blen) {
+ for (i = common_len; i < alen; i++)
+ if (anum[i] != 0)
+ goto not_equal;
+ }
+ else {
+ for (i = common_len; i < blen; i++)
+ if (bnum[i] != 0)
+ goto not_equal;
+ }
+ }
+ goto pop_next;
+ }
+ case POS_BIG_SUBTAG:
+ case NEG_BIG_SUBTAG:
+ {
+ int i;
+
+ if (is_not_big(b))
+ goto not_equal;
+ aa = big_val(a); /* get pointer to thing */
+ bb = big_val(b);
+ if (*aa != *bb)
+ goto not_equal;
+ i = BIG_ARITY(aa);
+ while(i--) {
+ if (*++aa != *++bb)
+ goto not_equal;
+ }
+ goto pop_next;
+ }
+ case FLOAT_SUBTAG:
+ {
+ FloatDef af;
+ FloatDef bf;
+
+ if (is_float(b)) {
+ GET_DOUBLE(a, af);
+ GET_DOUBLE(b, bf);
+ if (af.fd == bf.fd) goto pop_next;
+ }
+ break; /* not equal */
+ }
+ }
+ break;
+ }
+ }
+ goto not_equal;
+
+
+term_array: /* arrays in 'aa' and 'bb', length in 'sz' */
+ ASSERT(sz != 0);
+ {
+ Eterm* ap = aa;
+ Eterm* bp = bb;
+ Sint i = sz;
+ for (;;) {
+ if (*ap != *bp) break;
+ if (--i == 0) goto pop_next;
+ ++ap;
+ ++bp;
+ }
+ a = *ap;
+ b = *bp;
+ if (is_both_immed(a,b)) {
+ goto not_equal;
+ }
+ if (i > 1) { /* push the rest */
+ ESTACK_PUSH3(stack, i-1, (Eterm)(bp+1),
+ ((Eterm)(ap+1)) | TAG_PRIMARY_HEADER);
+ /* We (ab)use TAG_PRIMARY_HEADER to recognize a term_array */
+ }
+ goto tailrecur_ne;
+ }
+
+pop_next:
+ if (!ESTACK_ISEMPTY(stack)) {
+ Eterm something = ESTACK_POP(stack);
+ if (primary_tag(something) == TAG_PRIMARY_HEADER) { /* a term_array */
+ aa = (Eterm*) something;
+ bb = (Eterm*) ESTACK_POP(stack);
+ sz = ESTACK_POP(stack);
+ goto term_array;
+ }
+ a = something;
+ b = ESTACK_POP(stack);
+ goto tailrecur;
+ }
+
+ DESTROY_ESTACK(stack);
+ return 1;
+
+not_equal:
+ DESTROY_ESTACK(stack);
+ return 0;
+}
+
+
+/*
+ * Lexically compare two strings of bytes (string s1 length l1 and s2 l2).
+ *
+ * s1 < s2 return -1
+ * s1 = s2 return 0
+ * s1 > s2 return +1
+ */
+static int cmpbytes(byte *s1, int l1, byte *s2, int l2)
+{
+ int i;
+ i = 0;
+ while((i < l1) && (i < l2)) {
+ if (s1[i] < s2[i]) return(-1);
+ if (s1[i] > s2[i]) return(1);
+ i++;
+ }
+ if (l1 < l2) return(-1);
+ if (l1 > l2) return(1);
+ return(0);
+}
+
+
+/*
+ * Compare objects.
+ * Returns 0 if equal, a negative value if a < b, or a positive value if a > b.
+ *
+ * According to the Erlang Standard, types are ordered as follows:
+ * numbers < (characters) < atoms < refs < funs < ports < pids <
+ * tuples < [] < conses < binaries.
+ *
+ * Note that characters are currently not implemented.
+ *
+ */
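+/* For example (illustration only): cmp(make_small(1), am_ok) < 0,
+   since numbers order before atoms. */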
+
+
+#define float_comp(x,y) (((x)<(y)) ? -1 : (((x)==(y)) ? 0 : 1))
+
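+/* ord0 is assumed to cache an ordering key derived from the first
+   bytes of the atom name, so after comparing ord0 only the tail of
+   the name, from byte 3 on, needs a bytewise compare. */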
+static int cmp_atoms(Eterm a, Eterm b)
+{
+ Atom *aa = atom_tab(atom_val(a));
+ Atom *bb = atom_tab(atom_val(b));
+ int diff = aa->ord0 - bb->ord0;
+ if (diff)
+ return diff;
+ return cmpbytes(aa->name+3, aa->len-3,
+ bb->name+3, bb->len-3);
+}
+
+Sint cmp(Eterm a, Eterm b)
+{
+ DECLARE_ESTACK(stack);
+ Eterm* aa;
+ Eterm* bb;
+ int i;
+ Sint j;
+ int a_tag;
+ int b_tag;
+ ErlNode *anode;
+ ErlNode *bnode;
+ Uint adata;
+ Uint bdata;
+ Uint alen;
+ Uint blen;
+ Uint32 *anum;
+ Uint32 *bnum;
+
+#define RETURN_NEQ(cmp) { j=(cmp); ASSERT(j != 0); goto not_equal; }
+#define ON_CMP_GOTO(cmp) if ((j=(cmp)) == 0) goto pop_next; else goto not_equal
+
+#undef CMP_NODES
+#define CMP_NODES(AN, BN) \
+ do { \
+ if((AN) != (BN)) { \
+ if((AN)->sysname != (BN)->sysname) \
+ RETURN_NEQ(cmp_atoms((AN)->sysname, (BN)->sysname)); \
+ ASSERT((AN)->creation != (BN)->creation); \
+ RETURN_NEQ(((AN)->creation < (BN)->creation) ? -1 : 1); \
+ } \
+ } while (0)
+
+
+tailrecur:
+ if (a == b) { /* Equal values or pointers. */
+ goto pop_next;
+ }
+tailrecur_ne:
+
+ /* deal with majority (?) cases by brute-force */
+ if (is_atom(a)) {
+ if (is_atom(b)) {
+ ON_CMP_GOTO(cmp_atoms(a, b));
+ }
+ } else if (is_both_small(a, b)) {
+ ON_CMP_GOTO(signed_val(a) - signed_val(b));
+ }
+
+ /*
+ * Take care of cases where the types are the same.
+ */
+
+ a_tag = 42; /* Suppress warning */
+ switch (primary_tag(a)) {
+ case TAG_PRIMARY_IMMED1:
+ switch ((a & _TAG_IMMED1_MASK) >> _TAG_PRIMARY_SIZE) {
+ case (_TAG_IMMED1_PORT >> _TAG_PRIMARY_SIZE):
+ if (is_internal_port(b)) {
+ bnode = erts_this_node;
+ bdata = internal_port_data(b);
+ } else if (is_external_port(b)) {
+ bnode = external_port_node(b);
+ bdata = external_port_data(b);
+ } else {
+ a_tag = PORT_DEF;
+ goto mixed_types;
+ }
+ anode = erts_this_node;
+ adata = internal_port_data(a);
+
+ port_common:
+ CMP_NODES(anode, bnode);
+ ON_CMP_GOTO((Sint)(adata - bdata));
+
+ case (_TAG_IMMED1_PID >> _TAG_PRIMARY_SIZE):
+ if (is_internal_pid(b)) {
+ bnode = erts_this_node;
+ bdata = internal_pid_data(b);
+ } else if (is_external_pid(b)) {
+ bnode = external_pid_node(b);
+ bdata = external_pid_data(b);
+ } else {
+ a_tag = PID_DEF;
+ goto mixed_types;
+ }
+ anode = erts_this_node;
+ adata = internal_pid_data(a);
+
+ pid_common:
+ if (adata != bdata) {
+ RETURN_NEQ(adata < bdata ? -1 : 1);
+ }
+ CMP_NODES(anode, bnode);
+ goto pop_next;
+ case (_TAG_IMMED1_SMALL >> _TAG_PRIMARY_SIZE):
+ a_tag = SMALL_DEF;
+ goto mixed_types;
+ case (_TAG_IMMED1_IMMED2 >> _TAG_PRIMARY_SIZE): {
+ switch ((a & _TAG_IMMED2_MASK) >> _TAG_IMMED1_SIZE) {
+ case (_TAG_IMMED2_ATOM >> _TAG_IMMED1_SIZE):
+ a_tag = ATOM_DEF;
+ goto mixed_types;
+ case (_TAG_IMMED2_NIL >> _TAG_IMMED1_SIZE):
+ a_tag = NIL_DEF;
+ goto mixed_types;
+ }
+ }
+ }
+ case TAG_PRIMARY_LIST:
+ if (is_not_list(b)) {
+ a_tag = LIST_DEF;
+ goto mixed_types;
+ }
+ aa = list_val(a);
+ bb = list_val(b);
+ while (1) {
+ Eterm atmp = CAR(aa);
+ Eterm btmp = CAR(bb);
+ if (atmp != btmp) {
+ ESTACK_PUSH2(stack,CDR(bb),CDR(aa));
+ a = atmp;
+ b = btmp;
+ goto tailrecur_ne;
+ }
+ atmp = CDR(aa);
+ btmp = CDR(bb);
+ if (atmp == btmp) {
+ goto pop_next;
+ }
+ if (is_not_list(atmp) || is_not_list(btmp)) {
+ a = atmp;
+ b = btmp;
+ goto tailrecur_ne;
+ }
+ aa = list_val(atmp);
+ bb = list_val(btmp);
+ }
+ case TAG_PRIMARY_BOXED:
+ {
+ Eterm ahdr = *boxed_val(a);
+ switch ((ahdr & _TAG_HEADER_MASK) >> _TAG_PRIMARY_SIZE) {
+ case (_TAG_HEADER_ARITYVAL >> _TAG_PRIMARY_SIZE):
+ if (is_not_tuple(b)) {
+ a_tag = TUPLE_DEF;
+ goto mixed_types;
+ }
+ aa = tuple_val(a);
+ bb = tuple_val(b);
+ /* compare the arities */
+	    i = arityval(ahdr);	/* get the arity */
+ if (i != arityval(*bb)) {
+ RETURN_NEQ((int)(i - arityval(*bb)));
+ }
+ if (i == 0) {
+ goto pop_next;
+ }
+ ++aa;
+ ++bb;
+ goto term_array;
+
+ case (_TAG_HEADER_FLOAT >> _TAG_PRIMARY_SIZE):
+ if (is_not_float(b)) {
+ a_tag = FLOAT_DEF;
+ goto mixed_types;
+ } else {
+ FloatDef af;
+ FloatDef bf;
+
+ GET_DOUBLE(a, af);
+ GET_DOUBLE(b, bf);
+ ON_CMP_GOTO(float_comp(af.fd, bf.fd));
+ }
+ case (_TAG_HEADER_POS_BIG >> _TAG_PRIMARY_SIZE):
+ case (_TAG_HEADER_NEG_BIG >> _TAG_PRIMARY_SIZE):
+ if (is_not_big(b)) {
+ a_tag = BIG_DEF;
+ goto mixed_types;
+ }
+ ON_CMP_GOTO(big_comp(a, b));
+ case (_TAG_HEADER_EXPORT >> _TAG_PRIMARY_SIZE):
+ if (is_not_export(b)) {
+ a_tag = EXPORT_DEF;
+ goto mixed_types;
+ } else {
+ Export* a_exp = (Export *) (export_val(a))[1];
+ Export* b_exp = (Export *) (export_val(b))[1];
+
+ if ((j = cmp_atoms(a_exp->code[0], b_exp->code[0])) != 0) {
+ RETURN_NEQ(j);
+ }
+ if ((j = cmp_atoms(a_exp->code[1], b_exp->code[1])) != 0) {
+ RETURN_NEQ(j);
+ }
+ ON_CMP_GOTO((Sint) a_exp->code[2] - (Sint) b_exp->code[2]);
+ }
+ break;
+ case (_TAG_HEADER_FUN >> _TAG_PRIMARY_SIZE):
+ if (is_not_fun(b)) {
+ a_tag = FUN_DEF;
+ goto mixed_types;
+ } else {
+ ErlFunThing* f1 = (ErlFunThing *) fun_val(a);
+ ErlFunThing* f2 = (ErlFunThing *) fun_val(b);
+ Sint diff;
+
+ diff = cmpbytes(atom_tab(atom_val(f1->fe->module))->name,
+ atom_tab(atom_val(f1->fe->module))->len,
+ atom_tab(atom_val(f2->fe->module))->name,
+ atom_tab(atom_val(f2->fe->module))->len);
+ if (diff != 0) {
+ RETURN_NEQ(diff);
+ }
+ diff = f1->fe->old_index - f2->fe->old_index;
+ if (diff != 0) {
+ RETURN_NEQ(diff);
+ }
+ diff = f1->fe->old_uniq - f2->fe->old_uniq;
+ if (diff != 0) {
+ RETURN_NEQ(diff);
+ }
+ diff = f1->num_free - f2->num_free;
+ if (diff != 0) {
+ RETURN_NEQ(diff);
+ }
+ i = f1->num_free;
+ if (i == 0) goto pop_next;
+ aa = f1->env;
+ bb = f2->env;
+ goto term_array;
+ }
+ case (_TAG_HEADER_EXTERNAL_PID >> _TAG_PRIMARY_SIZE):
+ if (is_internal_pid(b)) {
+ bnode = erts_this_node;
+ bdata = internal_pid_data(b);
+ } else if (is_external_pid(b)) {
+ bnode = external_pid_node(b);
+ bdata = external_pid_data(b);
+ } else {
+ a_tag = EXTERNAL_PID_DEF;
+ goto mixed_types;
+ }
+ anode = external_pid_node(a);
+ adata = external_pid_data(a);
+ goto pid_common;
+ case (_TAG_HEADER_EXTERNAL_PORT >> _TAG_PRIMARY_SIZE):
+ if (is_internal_port(b)) {
+ bnode = erts_this_node;
+ bdata = internal_port_data(b);
+ } else if (is_external_port(b)) {
+ bnode = external_port_node(b);
+ bdata = external_port_data(b);
+ } else {
+ a_tag = EXTERNAL_PORT_DEF;
+ goto mixed_types;
+ }
+ anode = external_port_node(a);
+ adata = external_port_data(a);
+ goto port_common;
+ case (_TAG_HEADER_REF >> _TAG_PRIMARY_SIZE):
+ /*
+ * Note! When comparing refs we need to compare ref numbers
+ * (32-bit words), *not* ref data words.
+ */
+
+ if (is_internal_ref(b)) {
+ bnode = erts_this_node;
+ bnum = internal_ref_numbers(b);
+ blen = internal_ref_no_of_numbers(b);
+ } else if(is_external_ref(b)) {
+ bnode = external_ref_node(b);
+ bnum = external_ref_numbers(b);
+ blen = external_ref_no_of_numbers(b);
+ } else {
+ a_tag = REF_DEF;
+ goto mixed_types;
+ }
+ anode = erts_this_node;
+ anum = internal_ref_numbers(a);
+ alen = internal_ref_no_of_numbers(a);
+
+ ref_common:
+ CMP_NODES(anode, bnode);
+
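+	    /*
+	     * Refs may have different numbers of 32-bit words; the
+	     * longer ref is greater only if one of its extra
+	     * high-order words is non-zero.
+	     */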
+ ASSERT(alen > 0 && blen > 0);
+ if (alen != blen) {
+ if (alen > blen) {
+ do {
+ if (anum[alen - 1] != 0)
+ RETURN_NEQ(1);
+ alen--;
+ } while (alen > blen);
+ }
+ else {
+ do {
+ if (bnum[blen - 1] != 0)
+ RETURN_NEQ(-1);
+ blen--;
+ } while (alen < blen);
+ }
+ }
+
+ ASSERT(alen == blen);
+ for (i = (Sint) alen - 1; i >= 0; i--)
+ if (anum[i] != bnum[i])
+ RETURN_NEQ((Sint32) (anum[i] - bnum[i]));
+ goto pop_next;
+ case (_TAG_HEADER_EXTERNAL_REF >> _TAG_PRIMARY_SIZE):
+ if (is_internal_ref(b)) {
+ bnode = erts_this_node;
+ bnum = internal_ref_numbers(b);
+ blen = internal_ref_no_of_numbers(b);
+ } else if (is_external_ref(b)) {
+ bnode = external_ref_node(b);
+ bnum = external_ref_numbers(b);
+ blen = external_ref_no_of_numbers(b);
+ } else {
+ a_tag = EXTERNAL_REF_DEF;
+ goto mixed_types;
+ }
+ anode = external_ref_node(a);
+ anum = external_ref_numbers(a);
+ alen = external_ref_no_of_numbers(a);
+ goto ref_common;
+ default:
+ /* Must be a binary */
+ ASSERT(is_binary(a));
+ if (is_not_binary(b)) {
+ a_tag = BINARY_DEF;
+ goto mixed_types;
+ } else {
+ Uint a_size = binary_size(a);
+ Uint b_size = binary_size(b);
+ Uint a_bitsize;
+ Uint b_bitsize;
+ Uint a_bitoffs;
+ Uint b_bitoffs;
+ Uint min_size;
+ int cmp;
+ byte* a_ptr;
+ byte* b_ptr;
+ ERTS_GET_BINARY_BYTES(a, a_ptr, a_bitoffs, a_bitsize);
+ ERTS_GET_BINARY_BYTES(b, b_ptr, b_bitoffs, b_bitsize);
+ if ((a_bitsize | b_bitsize | a_bitoffs | b_bitoffs) == 0) {
+ min_size = (a_size < b_size) ? a_size : b_size;
+ if ((cmp = sys_memcmp(a_ptr, b_ptr, min_size)) != 0) {
+ RETURN_NEQ(cmp);
+ }
+ }
+ else {
+ a_size = (a_size << 3) + a_bitsize;
+ b_size = (b_size << 3) + b_bitsize;
+ min_size = (a_size < b_size) ? a_size : b_size;
+ if ((cmp = erts_cmp_bits(a_ptr,a_bitoffs,
+ b_ptr,b_bitoffs,min_size)) != 0) {
+ RETURN_NEQ(cmp);
+ }
+ }
+ ON_CMP_GOTO((Sint)(a_size - b_size));
+ }
+ }
+ }
+ }
+
+ /*
+ * Take care of the case that the tags are different.
+ */
+
+ mixed_types:
+ b_tag = tag_val_def(b);
+
+ {
+ FloatDef f1, f2;
+ Eterm big;
+ Eterm big_buf[2];
+
+ switch(_NUMBER_CODE(a_tag, b_tag)) {
+ case SMALL_BIG:
+ big = small_to_big(signed_val(a), big_buf);
+ j = big_comp(big, b);
+ break;
+ case SMALL_FLOAT:
+ f1.fd = signed_val(a);
+ GET_DOUBLE(b, f2);
+ j = float_comp(f1.fd, f2.fd);
+ break;
+ case BIG_SMALL:
+ big = small_to_big(signed_val(b), big_buf);
+ j = big_comp(a, big);
+ break;
+ case BIG_FLOAT:
+ if (big_to_double(a, &f1.fd) < 0) {
+ j = big_sign(a) ? -1 : 1;
+ } else {
+ GET_DOUBLE(b, f2);
+ j = float_comp(f1.fd, f2.fd);
+ }
+ break;
+ case FLOAT_SMALL:
+ GET_DOUBLE(a, f1);
+ f2.fd = signed_val(b);
+ j = float_comp(f1.fd, f2.fd);
+ break;
+ case FLOAT_BIG:
+ if (big_to_double(b, &f2.fd) < 0) {
+ j = big_sign(b) ? 1 : -1;
+ } else {
+ GET_DOUBLE(a, f1);
+ j = float_comp(f1.fd, f2.fd);
+ }
+ break;
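+	/*
+	 * Non-numeric mixed types: the *_DEF type codes are assigned in
+	 * reverse term order, which is what makes b_tag - a_tag below
+	 * come out with the correct sign for the Erlang term order
+	 * (number < atom < ref < fun < port < pid < tuple < list < binary).
+	 */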
+ default:
+ j = b_tag - a_tag;
+ }
+ }
+ if (j == 0) {
+ goto pop_next;
+ } else {
+ goto not_equal;
+ }
+
+term_array: /* arrays in 'aa' and 'bb', length in 'i' */
+ ASSERT(i>0);
+ while (--i) {
+ a = *aa++;
+ b = *bb++;
+ if (a != b) {
+ if (is_atom(a) && is_atom(b)) {
+ if ((j = cmp_atoms(a, b)) != 0) {
+ goto not_equal;
+ }
+ } else if (is_both_small(a, b)) {
+ if ((j = signed_val(a)-signed_val(b)) != 0) {
+ goto not_equal;
+ }
+ } else {
+ /* (ab)Use TAG_PRIMARY_HEADER to recognize a term_array */
+ ESTACK_PUSH3(stack, i, (Eterm)bb, (Eterm)aa | TAG_PRIMARY_HEADER);
+ goto tailrecur_ne;
+ }
+ }
+ }
+ a = *aa;
+ b = *bb;
+ goto tailrecur;
+
+pop_next:
+ if (!ESTACK_ISEMPTY(stack)) {
+ Eterm something = ESTACK_POP(stack);
+ if (primary_tag(something) == TAG_PRIMARY_HEADER) { /* a term_array */
+ aa = (Eterm*) something;
+ bb = (Eterm*) ESTACK_POP(stack);
+ i = ESTACK_POP(stack);
+ goto term_array;
+ }
+ a = something;
+ b = ESTACK_POP(stack);
+ goto tailrecur;
+ }
+
+ DESTROY_ESTACK(stack);
+ return 0;
+
+not_equal:
+ DESTROY_ESTACK(stack);
+ return j;
+
+#undef CMP_NODES
+}
+
+
+void
+erts_cleanup_externals(ExternalThing *etp)
+{
+ ExternalThing *tetp;
+
+ tetp = etp;
+
+ while(tetp) {
+ erts_deref_node_entry(tetp->node);
+ tetp = tetp->next;
+ }
+}
+
+Eterm
+store_external_or_ref_(Uint **hpp, ExternalThing **etpp, Eterm ns)
+{
+ Uint i;
+ Uint size;
+ Uint *from_hp;
+ Uint *to_hp = *hpp;
+
+ ASSERT(is_external(ns) || is_internal_ref(ns));
+
+ if(is_external(ns)) {
+ from_hp = external_val(ns);
+ size = thing_arityval(*from_hp) + 1;
+ *hpp += size;
+
+ for(i = 0; i < size; i++)
+ to_hp[i] = from_hp[i];
+
+ erts_refc_inc(&((ExternalThing *) to_hp)->node->refc, 2);
+
+ ((ExternalThing *) to_hp)->next = *etpp;
+ *etpp = (ExternalThing *) to_hp;
+
+ return make_external(to_hp);
+ }
+
+ /* Internal ref */
+ from_hp = internal_ref_val(ns);
+
+ size = thing_arityval(*from_hp) + 1;
+
+ *hpp += size;
+
+ for(i = 0; i < size; i++)
+ to_hp[i] = from_hp[i];
+
+ return make_internal_ref(to_hp);
+}
+
+Eterm
+store_external_or_ref_in_proc_(Process *proc, Eterm ns)
+{
+ Uint sz;
+ Uint *hp;
+
+ ASSERT(is_external(ns) || is_internal_ref(ns));
+
+ sz = NC_HEAP_SIZE(ns);
+ ASSERT(sz > 0);
+ hp = HAlloc(proc, sz);
+ return store_external_or_ref_(&hp, &MSO(proc).externals, ns);
+}
+
+void bin_write(int to, void *to_arg, byte* buf, int sz)
+{
+ int i;
+
+ for (i=0;i<sz;i++) {
+ if (IS_DIGIT(buf[i]))
+ erts_print(to, to_arg, "%d,", buf[i]);
+ else if (IS_PRINT(buf[i])) {
+ erts_print(to, to_arg, "%c,", buf[i]);
+ }
+ else
+ erts_print(to, to_arg, "%d,", buf[i]);
+ }
+ erts_putc(to, to_arg, '\n');
+}
+
+/* Fill buf with the contents of the byte list 'list'.
+   Return the number of bytes in the list, or -1 on error
+   (improper list, non-byte element, or not enough space in buf). */
+
+int
+intlist_to_buf(Eterm list, char *buf, int len)
+{
+ Eterm* listptr;
+ int sz = 0;
+
+ if (is_nil(list))
+ return 0;
+ if (is_not_list(list))
+ return -1;
+ listptr = list_val(list);
+
+ while (sz < len) {
+ if (!is_byte(*listptr))
+ return -1;
+ buf[sz++] = unsigned_val(*listptr);
+ if (is_nil(*(listptr + 1)))
+ return(sz);
+ if (is_not_list(*(listptr + 1)))
+ return -1;
+ listptr = list_val(*(listptr + 1));
+ }
+ return -1; /* not enough space */
+}
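+
+/*
+ * Usage sketch (hypothetical caller): copy an Erlang byte list into a
+ * fixed C buffer and NUL-terminate it:
+ *
+ *	char tmp[256];
+ *	int n = intlist_to_buf(list, tmp, sizeof(tmp) - 1);
+ *	if (n < 0)
+ *	    return -1;		(bad list, or does not fit)
+ *	tmp[n] = '\0';
+ */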
+
+/*
+** Convert an integer to a decimal character string.
+** Return a pointer to the converted string (which need not be at the
+** start of buf!).
+*/
+char* Sint_to_buf(Sint n, struct Sint_buf *buf)
+{
+ char* p = &buf->s[sizeof(buf->s)-1];
+ int sign = 0;
+
+ *p-- = '\0'; /* null terminate */
+ if (n == 0)
+ *p-- = '0';
+ else if (n < 0) {
+ sign = 1;
+ n = -n;
+ }
+
+ while (n != 0) {
+ *p-- = (n % 10) + '0';
+ n /= 10;
+ }
+ if (sign)
+ *p-- = '-';
+ return p+1;
+}
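+
+/*
+ * Usage sketch: the digits are written from the end of the buffer, so
+ * use the returned pointer, not &buf->s[0]:
+ *
+ *	struct Sint_buf b;
+ *	char *p = Sint_to_buf(-4711, &b);	(p points at "-4711")
+ */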
+
+/* Build a list of small integers in some safe memory area.
+** The memory must be preallocated before the call, 2*len words in size.
+** hpp is a pointer to the "heap" pointer; on return it has been
+** updated to point past the list.
+*/
+
+Eterm
+buf_to_intlist(Eterm** hpp, char *buf, int len, Eterm tail)
+{
+ Eterm* hp = *hpp;
+
+ buf += (len-1);
+ while(len > 0) {
+ tail = CONS(hp, make_small((byte)*buf), tail);
+ hp += 2;
+ buf--;
+ len--;
+ }
+ *hpp = hp;
+ return tail;
+}
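+
+/*
+ * Usage sketch (assuming a process 'p' to allocate on): each cons cell
+ * takes two heap words, so reserve 2*len words before the call:
+ *
+ *	Eterm *hp = HAlloc(p, 2*len);
+ *	Eterm list = buf_to_intlist(&hp, buf, len, NIL);
+ */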
+
+/*
+** Write an I/O list into a buffer.
+**
+** An iolist is defined as:
+**
+** iohead ::= Binary
+**	| Byte (i.e. an integer in the range [0..255])
+** | iolist
+** ;
+**
+** iotail ::= []
+** | Binary (added by tony)
+** | iolist
+** ;
+**
+** iolist ::= []
+** | Binary
+** | [ iohead | iotail]
+** ;
+**
+** Return the number of bytes remaining in the buffer on success,
+**        -1 on overflow,
+**        -2 on type error (including when the result would not be a
+**           whole number of bytes)
+*/
+
+int io_list_to_buf(Eterm obj, char* buf, int len)
+{
+ Eterm* objp;
+ DECLARE_ESTACK(s);
+ goto L_again;
+
+ while (!ESTACK_ISEMPTY(s)) {
+ obj = ESTACK_POP(s);
+ L_again:
+ if (is_list(obj)) {
+ L_iter_list:
+ objp = list_val(obj);
+ obj = CAR(objp);
+ if (is_byte(obj)) {
+ if (len == 0) {
+ goto L_overflow;
+ }
+ *buf++ = unsigned_val(obj);
+ len--;
+ } else if (is_binary(obj)) {
+ byte* bptr;
+ size_t size = binary_size(obj);
+ Uint bitsize;
+ Uint bitoffs;
+ Uint num_bits;
+
+ if (len < size) {
+ goto L_overflow;
+ }
+ ERTS_GET_BINARY_BYTES(obj, bptr, bitoffs, bitsize);
+ if (bitsize != 0) {
+ goto L_type_error;
+ }
+ num_bits = 8*size;
+ copy_binary_to_buffer(buf, 0, bptr, bitoffs, num_bits);
+ buf += size;
+ len -= size;
+ } else if (is_list(obj)) {
+ ESTACK_PUSH(s, CDR(objp));
+ goto L_iter_list; /* on head */
+ } else if (is_not_nil(obj)) {
+ goto L_type_error;
+ }
+
+ obj = CDR(objp);
+ if (is_list(obj)) {
+ goto L_iter_list; /* on tail */
+ } else if (is_binary(obj)) {
+ byte* bptr;
+ size_t size = binary_size(obj);
+ Uint bitsize;
+ Uint bitoffs;
+ Uint num_bits;
+ if (len < size) {
+ goto L_overflow;
+ }
+ ERTS_GET_BINARY_BYTES(obj, bptr, bitoffs, bitsize);
+ if (bitsize != 0) {
+ goto L_type_error;
+ }
+ num_bits = 8*size;
+ copy_binary_to_buffer(buf, 0, bptr, bitoffs, num_bits);
+ buf += size;
+ len -= size;
+ } else if (is_not_nil(obj)) {
+ goto L_type_error;
+ }
+ } else if (is_binary(obj)) {
+ byte* bptr;
+ size_t size = binary_size(obj);
+ Uint bitsize;
+ Uint bitoffs;
+ Uint num_bits;
+ if (len < size) {
+ goto L_overflow;
+ }
+ ERTS_GET_BINARY_BYTES(obj, bptr, bitoffs, bitsize);
+ if (bitsize != 0) {
+ goto L_type_error;
+ }
+ num_bits = 8*size;
+ copy_binary_to_buffer(buf, 0, bptr, bitoffs, num_bits);
+ buf += size;
+ len -= size;
+ } else if (is_not_nil(obj)) {
+ goto L_type_error;
+ }
+ }
+
+ DESTROY_ESTACK(s);
+ return len;
+
+ L_type_error:
+ DESTROY_ESTACK(s);
+ return -2;
+
+ L_overflow:
+ DESTROY_ESTACK(s);
+ return -1;
+}
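+
+/*
+ * Usage sketch: io_list_len() (below) and io_list_to_buf() are
+ * typically paired; size the buffer first, then flatten into it:
+ *
+ *	int sz = io_list_len(obj);
+ *	if (sz < 0)
+ *	    ...type error, not an I/O list...
+ *	buf = erts_alloc(ERTS_ALC_T_TMP, sz);
+ *	left = io_list_to_buf(obj, buf, sz);	(0 left on success)
+ */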
+
+int io_list_len(Eterm obj)
+{
+ Eterm* objp;
+ Sint len = 0;
+ DECLARE_ESTACK(s);
+ goto L_again;
+
+ while (!ESTACK_ISEMPTY(s)) {
+ obj = ESTACK_POP(s);
+ L_again:
+ if (is_list(obj)) {
+ L_iter_list:
+ objp = list_val(obj);
+ /* Head */
+ obj = CAR(objp);
+ if (is_byte(obj)) {
+ len++;
+ } else if (is_binary(obj) && binary_bitsize(obj) == 0) {
+ len += binary_size(obj);
+ } else if (is_list(obj)) {
+ ESTACK_PUSH(s, CDR(objp));
+ goto L_iter_list; /* on head */
+ } else if (is_not_nil(obj)) {
+ goto L_type_error;
+ }
+ /* Tail */
+ obj = CDR(objp);
+ if (is_list(obj))
+ goto L_iter_list; /* on tail */
+ else if (is_binary(obj) && binary_bitsize(obj) == 0) {
+ len += binary_size(obj);
+ } else if (is_not_nil(obj)) {
+ goto L_type_error;
+ }
+ } else if (is_binary(obj) && binary_bitsize(obj) == 0) { /* Tail was binary */
+ len += binary_size(obj);
+ } else if (is_not_nil(obj)) {
+ goto L_type_error;
+ }
+ }
+
+ DESTROY_ESTACK(s);
+ return len;
+
+ L_type_error:
+ DESTROY_ESTACK(s);
+ return -1;
+}
+
+/* Return the length of 'list' if it is a flat list of bytes,
+   otherwise 0 (note that the empty list also yields 0) */
+int
+is_string(Eterm list)
+{
+ int len = 0;
+
+ while(is_list(list)) {
+ Eterm* consp = list_val(list);
+ Eterm hd = CAR(consp);
+
+ if (!is_byte(hd))
+ return 0;
+ len++;
+ list = CDR(consp);
+ }
+ if (is_nil(list))
+ return len;
+ return 0;
+}
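+
+/*
+ * Usage sketch: 0 is returned both for the empty list and for
+ * non-strings, so a caller that accepts "" must test NIL separately:
+ *
+ *	if (is_string(t) || is_nil(t))
+ *	    ...t is a (possibly empty) flat byte list...
+ */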
+
+#ifdef ERTS_SMP
+
+/*
+ * Process and Port timers in smp case
+ */
+
+ERTS_SCHED_PREF_PRE_ALLOC_IMPL(ptimer_pre, ErtsSmpPTimer, 1000)
+
+#define ERTS_PTMR_FLGS_ALLCD_SIZE \
+ 2
+#define ERTS_PTMR_FLGS_ALLCD_MASK \
+ ((((Uint32) 1) << ERTS_PTMR_FLGS_ALLCD_SIZE) - 1)
+
+#define ERTS_PTMR_FLGS_PREALLCD ((Uint32) 1)
+#define ERTS_PTMR_FLGS_SLALLCD ((Uint32) 2)
+#define ERTS_PTMR_FLGS_LLALLCD ((Uint32) 3)
+#define ERTS_PTMR_FLG_CANCELLED (((Uint32) 1) << (ERTS_PTMR_FLGS_ALLCD_SIZE+0))
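+
+/*
+ * The two lowest flag bits hold the allocation class of the timer
+ * (pre-allocated, short-lived or long-lived); the next bit marks a
+ * cancelled timer:
+ *
+ *	switch (flags & ERTS_PTMR_FLGS_ALLCD_MASK) { ... }
+ *	if (flags & ERTS_PTMR_FLG_CANCELLED) { ... }
+ */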
+
+static void
+init_ptimers(void)
+{
+ init_ptimer_pre_alloc();
+}
+
+static ERTS_INLINE void
+free_ptimer(ErtsSmpPTimer *ptimer)
+{
+ switch (ptimer->timer.flags & ERTS_PTMR_FLGS_ALLCD_MASK) {
+ case ERTS_PTMR_FLGS_PREALLCD:
+ (void) ptimer_pre_free(ptimer);
+ break;
+ case ERTS_PTMR_FLGS_SLALLCD:
+ erts_free(ERTS_ALC_T_SL_PTIMER, (void *) ptimer);
+ break;
+ case ERTS_PTMR_FLGS_LLALLCD:
+ erts_free(ERTS_ALC_T_LL_PTIMER, (void *) ptimer);
+ break;
+ default:
+ erl_exit(ERTS_ABORT_EXIT,
+ "Internal error: Bad ptimer alloc type\n");
+ break;
+ }
+}
+
+/* Callback for process timeout cancelled */
+static void
+ptimer_cancelled(ErtsSmpPTimer *ptimer)
+{
+ free_ptimer(ptimer);
+}
+
+/* Callback for process timeout */
+static void
+ptimer_timeout(ErtsSmpPTimer *ptimer)
+{
+ if (is_internal_pid(ptimer->timer.id)) {
+ Process *p;
+ p = erts_pid2proc_opt(NULL,
+ 0,
+ ptimer->timer.id,
+ ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_STATUS,
+ ERTS_P2P_FLG_ALLOW_OTHER_X);
+ if (p) {
+ if (!p->is_exiting
+ && !(ptimer->timer.flags & ERTS_PTMR_FLG_CANCELLED)) {
+ ASSERT(*ptimer->timer.timer_ref == ptimer);
+ *ptimer->timer.timer_ref = NULL;
+ (*ptimer->timer.timeout_func)(p);
+ }
+ erts_smp_proc_unlock(p, ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_STATUS);
+ }
+ }
+ else {
+ Port *p;
+ ASSERT(is_internal_port(ptimer->timer.id));
+ p = erts_id2port_sflgs(ptimer->timer.id,
+ NULL,
+ 0,
+ ERTS_PORT_SFLGS_DEAD);
+ if (p) {
+ if (!(ptimer->timer.flags & ERTS_PTMR_FLG_CANCELLED)) {
+ ASSERT(*ptimer->timer.timer_ref == ptimer);
+ *ptimer->timer.timer_ref = NULL;
+ (*ptimer->timer.timeout_func)(p);
+ }
+ erts_port_release(p);
+ }
+ }
+ free_ptimer(ptimer);
+}
+
+void
+erts_create_smp_ptimer(ErtsSmpPTimer **timer_ref,
+ Eterm id,
+ ErlTimeoutProc timeout_func,
+ Uint timeout)
+{
+ ErtsSmpPTimer *res = ptimer_pre_alloc();
+ if (res)
+ res->timer.flags = ERTS_PTMR_FLGS_PREALLCD;
+ else {
+ if (timeout < ERTS_ALC_MIN_LONG_LIVED_TIME) {
+ res = erts_alloc(ERTS_ALC_T_SL_PTIMER, sizeof(ErtsSmpPTimer));
+ res->timer.flags = ERTS_PTMR_FLGS_SLALLCD;
+ }
+ else {
+ res = erts_alloc(ERTS_ALC_T_LL_PTIMER, sizeof(ErtsSmpPTimer));
+ res->timer.flags = ERTS_PTMR_FLGS_LLALLCD;
+ }
+ }
+ res->timer.timeout_func = timeout_func;
+ res->timer.timer_ref = timer_ref;
+ res->timer.id = id;
+    res->timer.tm.active = 0; /* MUST be initialized */
+
+ ASSERT(!*timer_ref);
+
+ *timer_ref = res;
+
+ erl_set_timer(&res->timer.tm,
+ (ErlTimeoutProc) ptimer_timeout,
+ (ErlCancelProc) ptimer_cancelled,
+ (void*) res,
+ timeout);
+}
+
+void
+erts_cancel_smp_ptimer(ErtsSmpPTimer *ptimer)
+{
+ if (ptimer) {
+ ASSERT(*ptimer->timer.timer_ref == ptimer);
+ *ptimer->timer.timer_ref = NULL;
+ ptimer->timer.flags |= ERTS_PTMR_FLG_CANCELLED;
+ erl_cancel_timer(&ptimer->timer.tm);
+ }
+}
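+
+/*
+ * Usage sketch (hypothetical caller; 'my_timeout' and the ptimer
+ * field stand in for the real ones): arm a 5 second process timer
+ * and later cancel it:
+ *
+ *	erts_create_smp_ptimer(&p->ptimer, p->id,
+ *			       (ErlTimeoutProc) my_timeout, 5000);
+ *	...
+ *	erts_cancel_smp_ptimer(p->ptimer);
+ */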
+
+#endif
+
+static Sint trim_threshold;
+static Sint top_pad;
+static Sint mmap_threshold;
+static Sint mmap_max;
+
+Uint tot_bin_allocated;
+
+void erts_init_utils(void)
+{
+#ifdef ERTS_SMP
+ init_ptimers();
+#endif
+}
+
+void erts_init_utils_mem(void)
+{
+ trim_threshold = -1;
+ top_pad = -1;
+ mmap_threshold = -1;
+ mmap_max = -1;
+}
+
+int
+sys_alloc_opt(int opt, int value)
+{
+#if HAVE_MALLOPT
+ Sint m_opt;
+ Sint *curr_val;
+
+ switch(opt) {
+ case SYS_ALLOC_OPT_TRIM_THRESHOLD:
+#ifdef M_TRIM_THRESHOLD
+ m_opt = M_TRIM_THRESHOLD;
+ curr_val = &trim_threshold;
+ break;
+#else
+ return 0;
+#endif
+ case SYS_ALLOC_OPT_TOP_PAD:
+#ifdef M_TOP_PAD
+ m_opt = M_TOP_PAD;
+ curr_val = &top_pad;
+ break;
+#else
+ return 0;
+#endif
+ case SYS_ALLOC_OPT_MMAP_THRESHOLD:
+#ifdef M_MMAP_THRESHOLD
+ m_opt = M_MMAP_THRESHOLD;
+ curr_val = &mmap_threshold;
+ break;
+#else
+ return 0;
+#endif
+ case SYS_ALLOC_OPT_MMAP_MAX:
+#ifdef M_MMAP_MAX
+ m_opt = M_MMAP_MAX;
+ curr_val = &mmap_max;
+ break;
+#else
+ return 0;
+#endif
+ default:
+ return 0;
+ }
+
+ if(mallopt(m_opt, value)) {
+ *curr_val = (Sint) value;
+ return 1;
+ }
+
+#endif /* #if HAVE_MALLOPT */
+
+ return 0;
+}
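+
+/*
+ * Usage sketch: forward an option to mallopt(), e.g. a 128 kB trim
+ * threshold; the return value tells whether mallopt() accepted it:
+ *
+ *	if (!sys_alloc_opt(SYS_ALLOC_OPT_TRIM_THRESHOLD, 128*1024))
+ *	    ...not supported or rejected...
+ */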
+
+void
+sys_alloc_stat(SysAllocStat *sasp)
+{
+ sasp->trim_threshold = trim_threshold;
+ sasp->top_pad = top_pad;
+ sasp->mmap_threshold = mmap_threshold;
+ sasp->mmap_max = mmap_max;
+}
+
+#ifdef ERTS_SMP
+
+/* Local system block state */
+
+struct {
+ int emergency;
+ long emergency_timeout;
+ erts_smp_cnd_t watchdog_cnd;
+ erts_smp_tid_t watchdog_tid;
+ int threads_to_block;
+ int have_blocker;
+ erts_smp_tid_t blocker_tid;
+ int recursive_block;
+ Uint32 allowed_activities;
+ erts_smp_tsd_key_t blockable_key;
+ erts_smp_mtx_t mtx;
+ erts_smp_cnd_t cnd;
+#ifdef ERTS_ENABLE_LOCK_CHECK
+ int activity_changing;
+ int checking;
+#endif
+} system_block_state;
+
+/* Global system block state */
+erts_system_block_state_t erts_system_block_state;
+
+
+static ERTS_INLINE int
+is_blockable_thread(void)
+{
+ return erts_smp_tsd_get(system_block_state.blockable_key) != NULL;
+}
+
+static ERTS_INLINE int
+is_blocker(void)
+{
+ return (system_block_state.have_blocker
+ && erts_smp_equal_tids(system_block_state.blocker_tid,
+ erts_smp_thr_self()));
+}
+
+#ifdef ERTS_ENABLE_LOCK_CHECK
+int
+erts_lc_is_blocking(void)
+{
+ int res;
+ erts_smp_mtx_lock(&system_block_state.mtx);
+ res = erts_smp_pending_system_block() && is_blocker();
+ erts_smp_mtx_unlock(&system_block_state.mtx);
+ return res;
+}
+#endif
+
+static ERTS_INLINE void
+block_me(void (*prepare)(void *),
+ void (*resume)(void *),
+ void *arg,
+ int mtx_locked,
+ int want_to_block,
+ int update_act_changing,
+ profile_sched_msg_q *psmq)
+{
+ if (prepare)
+ (*prepare)(arg);
+
+ /* Locks might be held... */
+
+ if (!mtx_locked)
+ erts_smp_mtx_lock(&system_block_state.mtx);
+
+ if (erts_smp_pending_system_block() && !is_blocker()) {
+ int is_blockable = is_blockable_thread();
+ ASSERT(is_blockable);
+
+ if (is_blockable)
+ system_block_state.threads_to_block--;
+
+ if (erts_system_profile_flags.scheduler && psmq) {
+ ErtsSchedulerData *esdp = erts_get_scheduler_data();
+ if (esdp) {
+ profile_sched_msg *msg = NULL;
+
+ ASSERT(psmq->n < 2);
+ msg = &((psmq->msg)[psmq->n]);
+ msg->scheduler_id = esdp->no;
+ get_now(&(msg->Ms), &(msg->s), &(msg->us));
+ msg->no_schedulers = 0;
+ msg->state = am_inactive;
+ psmq->n++;
+ }
+ }
+
+#ifdef ERTS_ENABLE_LOCK_CHECK
+ if (update_act_changing)
+ system_block_state.activity_changing--;
+#endif
+
+ erts_smp_cnd_broadcast(&system_block_state.cnd);
+
+ do {
+ erts_smp_cnd_wait(&system_block_state.cnd, &system_block_state.mtx);
+ } while (erts_smp_pending_system_block()
+ && !(want_to_block && !system_block_state.have_blocker));
+
+#ifdef ERTS_ENABLE_LOCK_CHECK
+ if (update_act_changing)
+ system_block_state.activity_changing++;
+#endif
+ if (erts_system_profile_flags.scheduler && psmq) {
+ ErtsSchedulerData *esdp = erts_get_scheduler_data();
+ if (esdp) {
+ profile_sched_msg *msg = NULL;
+
+ ASSERT(psmq->n < 2);
+ msg = &((psmq->msg)[psmq->n]);
+ msg->scheduler_id = esdp->no;
+ get_now(&(msg->Ms), &(msg->s), &(msg->us));
+ msg->no_schedulers = 0;
+ msg->state = am_active;
+ psmq->n++;
+ }
+ }
+
+ if (is_blockable)
+ system_block_state.threads_to_block++;
+ }
+
+ if (!mtx_locked)
+ erts_smp_mtx_unlock(&system_block_state.mtx);
+
+ if (resume)
+ (*resume)(arg);
+}
+
+void
+erts_block_me(void (*prepare)(void *),
+ void (*resume)(void *),
+ void *arg)
+{
+ profile_sched_msg_q psmq;
+ psmq.n = 0;
+ if (prepare)
+ (*prepare)(arg);
+
+#ifdef ERTS_ENABLE_LOCK_CHECK
+ erts_lc_check_exact(NULL, 0); /* No locks should be locked */
+#endif
+
+ block_me(NULL, NULL, NULL, 0, 0, 0, &psmq);
+
+ if (erts_system_profile_flags.scheduler && psmq.n > 0)
+ dispatch_profile_msg_q(&psmq);
+
+ if (resume)
+ (*resume)(arg);
+}
+
+void
+erts_register_blockable_thread(void)
+{
+ profile_sched_msg_q psmq;
+ psmq.n = 0;
+ if (!is_blockable_thread()) {
+ erts_smp_mtx_lock(&system_block_state.mtx);
+ system_block_state.threads_to_block++;
+ erts_smp_tsd_set(system_block_state.blockable_key,
+ (void *) &erts_system_block_state);
+
+ /* Someone might be waiting for us to block... */
+ if (erts_smp_pending_system_block())
+ block_me(NULL, NULL, NULL, 1, 0, 0, &psmq);
+ erts_smp_mtx_unlock(&system_block_state.mtx);
+
+ if (erts_system_profile_flags.scheduler && psmq.n > 0)
+ dispatch_profile_msg_q(&psmq);
+ }
+}
+
+void
+erts_unregister_blockable_thread(void)
+{
+ if (is_blockable_thread()) {
+ erts_smp_mtx_lock(&system_block_state.mtx);
+ system_block_state.threads_to_block--;
+ ASSERT(system_block_state.threads_to_block >= 0);
+ erts_smp_tsd_set(system_block_state.blockable_key, NULL);
+
+ /* Someone might be waiting for us to block... */
+ if (erts_smp_pending_system_block())
+ erts_smp_cnd_broadcast(&system_block_state.cnd);
+ erts_smp_mtx_unlock(&system_block_state.mtx);
+ }
+}
+
+void
+erts_note_activity_begin(erts_activity_t activity)
+{
+ erts_smp_mtx_lock(&system_block_state.mtx);
+ if (erts_smp_pending_system_block()) {
+ Uint32 broadcast = 0;
+ switch (activity) {
+ case ERTS_ACTIVITY_GC:
+ broadcast = (system_block_state.allowed_activities
+ & ERTS_BS_FLG_ALLOW_GC);
+ break;
+ case ERTS_ACTIVITY_IO:
+ broadcast = (system_block_state.allowed_activities
+ & ERTS_BS_FLG_ALLOW_IO);
+ break;
+ case ERTS_ACTIVITY_WAIT:
+ broadcast = 1;
+ break;
+ default:
+ abort();
+ break;
+ }
+ if (broadcast)
+ erts_smp_cnd_broadcast(&system_block_state.cnd);
+ }
+ erts_smp_mtx_unlock(&system_block_state.mtx);
+}
+
+void
+erts_check_block(erts_activity_t old_activity,
+ erts_activity_t new_activity,
+ int locked,
+ void (*prepare)(void *),
+ void (*resume)(void *),
+ void *arg)
+{
+ int do_block;
+ profile_sched_msg_q psmq;
+
+ psmq.n = 0;
+ if (!locked && prepare)
+ (*prepare)(arg);
+
+ erts_smp_mtx_lock(&system_block_state.mtx);
+
+ /* First check if it is ok to block... */
+ if (!locked)
+ do_block = 1;
+ else {
+ switch (old_activity) {
+ case ERTS_ACTIVITY_UNDEFINED:
+ do_block = 0;
+ break;
+ case ERTS_ACTIVITY_GC:
+ do_block = (system_block_state.allowed_activities
+ & ERTS_BS_FLG_ALLOW_GC);
+ break;
+ case ERTS_ACTIVITY_IO:
+ do_block = (system_block_state.allowed_activities
+ & ERTS_BS_FLG_ALLOW_IO);
+ break;
+ case ERTS_ACTIVITY_WAIT:
+ /* You are not allowed to leave activity waiting
+ * without supplying the possibility to block
+ * unlocked.
+ */
+ erts_set_activity_error(ERTS_ACT_ERR_LEAVE_WAIT_UNLOCKED,
+ __FILE__, __LINE__);
+ do_block = 0;
+ break;
+ default:
+ erts_set_activity_error(ERTS_ACT_ERR_LEAVE_UNKNOWN_ACTIVITY,
+ __FILE__, __LINE__);
+ do_block = 0;
+ break;
+ }
+ }
+
+ if (do_block) {
+ /* ... then check if it is necessary to block... */
+
+ switch (new_activity) {
+ case ERTS_ACTIVITY_UNDEFINED:
+ do_block = 1;
+ break;
+ case ERTS_ACTIVITY_GC:
+ do_block = !(system_block_state.allowed_activities
+ & ERTS_BS_FLG_ALLOW_GC);
+ break;
+ case ERTS_ACTIVITY_IO:
+ do_block = !(system_block_state.allowed_activities
+ & ERTS_BS_FLG_ALLOW_IO);
+ break;
+ case ERTS_ACTIVITY_WAIT:
+ /* No need to block if we are going to wait */
+ do_block = 0;
+ break;
+ default:
+ erts_set_activity_error(ERTS_ACT_ERR_ENTER_UNKNOWN_ACTIVITY,
+ __FILE__, __LINE__);
+ break;
+ }
+ }
+
+ if (do_block) {
+
+#ifdef ERTS_ENABLE_LOCK_CHECK
+ if (!locked) {
+ /* Only system_block_state.mtx should be held */
+ erts_lc_check_exact(&system_block_state.mtx.lc, 1);
+ }
+#endif
+
+ block_me(NULL, NULL, NULL, 1, 0, 1, &psmq);
+
+ }
+
+ erts_smp_mtx_unlock(&system_block_state.mtx);
+
+ if (erts_system_profile_flags.scheduler && psmq.n > 0)
+ dispatch_profile_msg_q(&psmq);
+
+ if (!locked && resume)
+ (*resume)(arg);
+}
+
+
+
+void
+erts_set_activity_error(erts_activity_error_t error, char *file, int line)
+{
+ switch (error) {
+ case ERTS_ACT_ERR_LEAVE_WAIT_UNLOCKED:
+ erl_exit(1, "%s:%d: Fatal error: Leaving activity waiting without "
+ "supplying the possibility to block unlocked.",
+ file, line);
+ break;
+ case ERTS_ACT_ERR_LEAVE_UNKNOWN_ACTIVITY:
+ erl_exit(1, "%s:%d: Fatal error: Leaving unknown activity.",
+ file, line);
+ break;
+    case ERTS_ACT_ERR_ENTER_UNKNOWN_ACTIVITY:
+	erl_exit(1, "%s:%d: Fatal error: Entering unknown activity.",
+		 file, line);
+ break;
+ default:
+ erl_exit(1, "%s:%d: Internal error in erts_smp_set_activity()",
+ file, line);
+ break;
+ }
+
+}
+
+
+static ERTS_INLINE int
+threads_not_under_control(void)
+{
+ int res = system_block_state.threads_to_block;
+
+ /* Waiting is always an allowed activity... */
+ res -= erts_smp_atomic_read(&erts_system_block_state.in_activity.wait);
+
+ if (system_block_state.allowed_activities & ERTS_BS_FLG_ALLOW_GC)
+ res -= erts_smp_atomic_read(&erts_system_block_state.in_activity.gc);
+
+ if (system_block_state.allowed_activities & ERTS_BS_FLG_ALLOW_IO)
+ res -= erts_smp_atomic_read(&erts_system_block_state.in_activity.io);
+
+ if (res < 0) {
+ ASSERT(0);
+ return 0;
+ }
+ return res;
+}
+
+/*
+ * erts_block_system() blocks all threads registered as blockable.
+ * It does not return until all threads have blocked (for a variant
+ * with a timeout, see erts_emergency_block_system() below).
+ *
+ * If allowed_activities == 0, blocked threads will release all locks
+ * before blocking.
+ *
+ * If allowed_activities is != 0, erts_block_system() will allow blockable
+ * threads to continue executing as long as they are doing an allowed
+ * activity. When they are done with the allowed activity they will block,
+ * *but* they will block holding locks. Therefore, the thread calling
+ * erts_block_system() must *not* try to acquire any locks that might be
+ * held by blocked threads holding locks from allowed activities.
+ *
+ * Currently allowed_activities are:
+ * * ERTS_BS_FLG_ALLOW_GC Thread continues with garbage
+ * collection and blocks with
+ * main process lock on current
+ * process locked.
+ * * ERTS_BS_FLG_ALLOW_IO Thread continues with I/O
+ */
+
+void
+erts_block_system(Uint32 allowed_activities)
+{
+ int do_block;
+ profile_sched_msg_q psmq;
+
+ psmq.n = 0;
+#ifdef ERTS_ENABLE_LOCK_CHECK
+ erts_lc_check_exact(NULL, 0); /* No locks should be locked */
+#endif
+
+ erts_smp_mtx_lock(&system_block_state.mtx);
+
+ do_block = erts_smp_pending_system_block();
+ if (do_block
+ && system_block_state.have_blocker
+ && erts_smp_equal_tids(system_block_state.blocker_tid,
+ erts_smp_thr_self())) {
+ ASSERT(system_block_state.recursive_block >= 0);
+ system_block_state.recursive_block++;
+
+	/* You are not allowed to restrict the allowed activities
+	   in a recursive block! */
+ ERTS_SMP_LC_ASSERT((system_block_state.allowed_activities
+ & ~allowed_activities) == 0);
+ }
+ else {
+
+ erts_smp_atomic_inc(&erts_system_block_state.do_block);
+
+ /* Someone else might be waiting for us to block... */
+ if (do_block) {
+ do_block_me:
+ block_me(NULL, NULL, NULL, 1, 1, 0, &psmq);
+ }
+
+ ASSERT(!system_block_state.have_blocker);
+ system_block_state.have_blocker = 1;
+ system_block_state.blocker_tid = erts_smp_thr_self();
+ system_block_state.allowed_activities = allowed_activities;
+
+ if (is_blockable_thread())
+ system_block_state.threads_to_block--;
+
+ while (threads_not_under_control() && !system_block_state.emergency)
+ erts_smp_cnd_wait(&system_block_state.cnd, &system_block_state.mtx);
+
+ if (system_block_state.emergency) {
+ system_block_state.have_blocker = 0;
+ goto do_block_me;
+ }
+ }
+
+ erts_smp_mtx_unlock(&system_block_state.mtx);
+
+    if (erts_system_profile_flags.scheduler && psmq.n > 0)
+ dispatch_profile_msg_q(&psmq);
+}
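+
+/*
+ * Usage sketch: a blocker brackets its single-threaded work with
+ * erts_block_system()/erts_release_system():
+ *
+ *	erts_block_system(0);		(no activities allowed)
+ *	...work while all blockable threads are blocked...
+ *	erts_release_system();
+ */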
+
+/*
+ * erts_emergency_block_system() should only be called when we are
+ * about to write a crash dump...
+ */
+
+int
+erts_emergency_block_system(long timeout, Uint32 allowed_activities)
+{
+ int res = 0;
+ long another_blocker;
+
+ erts_smp_mtx_lock(&system_block_state.mtx);
+
+ if (system_block_state.emergency) {
+ /* Argh... */
+ res = EINVAL;
+ goto done;
+ }
+
+ another_blocker = erts_smp_pending_system_block();
+ system_block_state.emergency = 1;
+ erts_smp_atomic_inc(&erts_system_block_state.do_block);
+
+ if (another_blocker) {
+ if (is_blocker()) {
+ erts_smp_atomic_dec(&erts_system_block_state.do_block);
+ res = 0;
+ goto done;
+ }
+ /* kick the other blocker */
+ erts_smp_cnd_broadcast(&system_block_state.cnd);
+ while (system_block_state.have_blocker)
+ erts_smp_cnd_wait(&system_block_state.cnd, &system_block_state.mtx);
+ }
+
+ ASSERT(!system_block_state.have_blocker);
+ system_block_state.have_blocker = 1;
+ system_block_state.blocker_tid = erts_smp_thr_self();
+ system_block_state.allowed_activities = allowed_activities;
+
+ if (is_blockable_thread())
+ system_block_state.threads_to_block--;
+
+ if (timeout < 0) {
+ while (threads_not_under_control())
+ erts_smp_cnd_wait(&system_block_state.cnd, &system_block_state.mtx);
+ }
+ else {
+ system_block_state.emergency_timeout = timeout;
+ erts_smp_cnd_signal(&system_block_state.watchdog_cnd);
+
+ while (system_block_state.emergency_timeout >= 0
+ && threads_not_under_control()) {
+ erts_smp_cnd_wait(&system_block_state.cnd,
+ &system_block_state.mtx);
+ }
+ }
+ done:
+ erts_smp_mtx_unlock(&system_block_state.mtx);
+ return res;
+}
+
+void
+erts_release_system(void)
+{
+ long do_block;
+ profile_sched_msg_q psmq;
+
+ psmq.n = 0;
+
+#ifdef ERTS_ENABLE_LOCK_CHECK
+ erts_lc_check_exact(NULL, 0); /* No locks should be locked */
+#endif
+
+ erts_smp_mtx_lock(&system_block_state.mtx);
+ ASSERT(is_blocker());
+
+ ASSERT(system_block_state.recursive_block >= 0);
+
+ if (system_block_state.recursive_block)
+ system_block_state.recursive_block--;
+ else {
+ do_block = erts_smp_atomic_dectest(&erts_system_block_state.do_block);
+ system_block_state.have_blocker = 0;
+ if (is_blockable_thread())
+ system_block_state.threads_to_block++;
+ else
+ do_block = 0;
+
+ /* Someone else might be waiting for us to block... */
+ if (do_block)
+ block_me(NULL, NULL, NULL, 1, 0, 0, &psmq);
+ else
+ erts_smp_cnd_broadcast(&system_block_state.cnd);
+ }
+
+ erts_smp_mtx_unlock(&system_block_state.mtx);
+
+ if (erts_system_profile_flags.scheduler && psmq.n > 0)
+ dispatch_profile_msg_q(&psmq);
+}
+
+#ifdef ERTS_ENABLE_LOCK_CHECK
+
+void
+erts_lc_activity_change_begin(void)
+{
+ erts_smp_mtx_lock(&system_block_state.mtx);
+ system_block_state.activity_changing++;
+ erts_smp_mtx_unlock(&system_block_state.mtx);
+}
+
+void
+erts_lc_activity_change_end(void)
+{
+ erts_smp_mtx_lock(&system_block_state.mtx);
+ system_block_state.activity_changing--;
+ if (system_block_state.checking && !system_block_state.activity_changing)
+ erts_smp_cnd_broadcast(&system_block_state.cnd);
+ erts_smp_mtx_unlock(&system_block_state.mtx);
+}
+
+#endif
+
+int
+erts_is_system_blocked(erts_activity_t allowed_activities)
+{
+ int blkd;
+
+ erts_smp_mtx_lock(&system_block_state.mtx);
+ blkd = (erts_smp_pending_system_block()
+ && system_block_state.have_blocker
+ && erts_smp_equal_tids(system_block_state.blocker_tid,
+ erts_smp_thr_self())
+ && !(system_block_state.allowed_activities & ~allowed_activities));
+#ifdef ERTS_ENABLE_LOCK_CHECK
+ if (blkd) {
+ system_block_state.checking = 1;
+ while (system_block_state.activity_changing)
+ erts_smp_cnd_wait(&system_block_state.cnd, &system_block_state.mtx);
+ system_block_state.checking = 0;
+ blkd = !threads_not_under_control();
+ }
+#endif
+ erts_smp_mtx_unlock(&system_block_state.mtx);
+ return blkd;
+}
+
+static void *
+emergency_watchdog(void *unused)
+{
+ erts_smp_mtx_lock(&system_block_state.mtx);
+ while (1) {
+ long timeout;
+ while (system_block_state.emergency_timeout < 0)
+ erts_smp_cnd_wait(&system_block_state.watchdog_cnd, &system_block_state.mtx);
+ timeout = system_block_state.emergency_timeout;
+ erts_smp_mtx_unlock(&system_block_state.mtx);
+
+ if (erts_disable_tolerant_timeofday)
+ erts_milli_sleep(timeout);
+ else {
+ SysTimeval to;
+ erts_get_timeval(&to);
+ to.tv_sec += timeout / 1000;
+ to.tv_usec += timeout % 1000;
+
+ while (1) {
+ SysTimeval curr;
+ erts_milli_sleep(timeout);
+ erts_get_timeval(&curr);
+ if (curr.tv_sec > to.tv_sec
+ || (curr.tv_sec == to.tv_sec && curr.tv_usec >= to.tv_usec)) {
+ break;
+ }
+ timeout = (to.tv_sec - curr.tv_sec)*1000;
+ timeout += (to.tv_usec - curr.tv_usec)/1000;
+ }
+ }
+
+ erts_smp_mtx_lock(&system_block_state.mtx);
+ system_block_state.emergency_timeout = -1;
+ erts_smp_cnd_broadcast(&system_block_state.cnd);
+ }
+ erts_smp_mtx_unlock(&system_block_state.mtx);
+ return NULL;
+}
+
+void
+erts_system_block_init(void)
+{
+ erts_smp_thr_opts_t thr_opts = ERTS_SMP_THR_OPTS_DEFAULT_INITER;
+ /* Local state... */
+ system_block_state.emergency = 0;
+ system_block_state.emergency_timeout = -1;
+ erts_smp_cnd_init(&system_block_state.watchdog_cnd);
+ system_block_state.threads_to_block = 0;
+ system_block_state.have_blocker = 0;
+    /* system_block_state.blocker_tid is set when blocking */
+ system_block_state.recursive_block = 0;
+ system_block_state.allowed_activities = 0;
+ erts_smp_tsd_key_create(&system_block_state.blockable_key);
+ erts_smp_mtx_init(&system_block_state.mtx, "system_block");
+ erts_smp_cnd_init(&system_block_state.cnd);
+#ifdef ERTS_ENABLE_LOCK_CHECK
+ system_block_state.activity_changing = 0;
+ system_block_state.checking = 0;
+#endif
+
+ thr_opts.suggested_stack_size = 8;
+ erts_smp_thr_create(&system_block_state.watchdog_tid,
+ emergency_watchdog,
+ NULL,
+ &thr_opts);
+
+ /* Global state... */
+
+ erts_smp_atomic_init(&erts_system_block_state.do_block, 0L);
+ erts_smp_atomic_init(&erts_system_block_state.in_activity.wait, 0L);
+ erts_smp_atomic_init(&erts_system_block_state.in_activity.gc, 0L);
+ erts_smp_atomic_init(&erts_system_block_state.in_activity.io, 0L);
+
+ /* Make sure blockable threads unregister when exiting... */
+ erts_smp_install_exit_handler(erts_unregister_blockable_thread);
+}
+
+
+#endif /* #ifdef ERTS_SMP */
+
+char *
+erts_read_env(char *key)
+{
+ size_t value_len = 256;
+ char *value = erts_alloc(ERTS_ALC_T_TMP, value_len);
+ int res;
+ while (1) {
+ res = erts_sys_getenv(key, value, &value_len);
+ if (res <= 0)
+ break;
+ value = erts_realloc(ERTS_ALC_T_TMP, value, value_len);
+ }
+ if (res != 0) {
+ erts_free(ERTS_ALC_T_TMP, value);
+ return NULL;
+ }
+ return value;
+}
+
+void
+erts_free_read_env(void *value)
+{
+ if (value)
+ erts_free(ERTS_ALC_T_TMP, value);
+}
+
+int
+erts_write_env(char *key, char *value)
+{
+ int ix, res;
+ size_t key_len = sys_strlen(key), value_len = sys_strlen(value);
+ char *key_value = erts_alloc_fnf(ERTS_ALC_T_TMP,
+ key_len + 1 + value_len + 1);
+ if (!key_value) {
+ errno = ENOMEM;
+ return -1;
+ }
+ sys_memcpy((void *) key_value, (void *) key, key_len);
+ ix = key_len;
+ key_value[ix++] = '=';
+    sys_memcpy((void *) (key_value + ix), (void *) value, value_len);
+ ix += value_len;
+ key_value[ix] = '\0';
+ res = erts_sys_putenv(key_value, key_len);
+ erts_free(ERTS_ALC_T_TMP, key_value);
+ return res;
+}
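+
+/*
+ * Usage sketch: erts_read_env() returns an allocated copy of the value
+ * (or NULL); release it with erts_free_read_env():
+ *
+ *	char *val = erts_read_env("ERL_CRASH_DUMP");
+ *	if (val) {
+ *	    ...use val...
+ *	    erts_free_read_env(val);
+ *	}
+ */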
+
+#ifdef DEBUG
+/*
+ * Handy functions when using a debugger - don't use in the code!
+ */
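+
+/*
+ * For example, from a debugger attached to the emulator:
+ *
+ *	(gdb) call pp(p)	(print a process)
+ *	(gdb) call td(term)	(print any Eterm)
+ */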
+
+void upp(byte* buf, int sz)
+{
+ bin_write(ERTS_PRINT_STDERR,NULL,buf,sz);
+}
+
+void pat(Eterm atom)
+{
+ upp(atom_tab(atom_val(atom))->name,
+ atom_tab(atom_val(atom))->len);
+}
+
+
+void pinfo(void)
+{
+ process_info(ERTS_PRINT_STDOUT, NULL);
+}
+
+
+void pp(Process *p)
+{
+ if(p)
+ print_process_info(ERTS_PRINT_STDERR, NULL, p);
+}
+
+void ppi(Eterm pid)
+{
+ pp(erts_pid2proc_unlocked(pid));
+}
+
+void td(Eterm x)
+{
+ erts_fprintf(stderr, "%T\n", x);
+}
+
+void
+ps(Process* p, Eterm* stop)
+{
+ Eterm* sp = STACK_START(p) - 1;
+
+ if (stop <= STACK_END(p)) {
+ stop = STACK_END(p) + 1;
+ }
+
+ while(sp >= stop) {
+ erts_printf("%p: %.75T\n", sp, *sp);
+ sp--;
+ }
+}
+#endif
+
+
diff --git a/erts/emulator/beam/version.h b/erts/emulator/beam/version.h
new file mode 100644
index 0000000000..3952c751b7
--- /dev/null
+++ b/erts/emulator/beam/version.h
@@ -0,0 +1,19 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 1996-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+extern char erlversion[], erlhost[], erldate[];